@gravito/stream 1.0.0-alpha.6 → 1.0.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +94 -5
- package/dist/index.cjs +2265 -31639
- package/dist/index.d.cts +1814 -0
- package/dist/index.d.ts +1805 -36
- package/dist/index.js +2700 -0
- package/package.json +11 -7
- package/dist/Consumer.d.ts +0 -67
- package/dist/Consumer.d.ts.map +0 -1
- package/dist/Job.d.ts +0 -76
- package/dist/Job.d.ts.map +0 -1
- package/dist/OrbitQueue.d.ts +0 -74
- package/dist/OrbitQueue.d.ts.map +0 -1
- package/dist/QueueManager.d.ts +0 -86
- package/dist/QueueManager.d.ts.map +0 -1
- package/dist/Queueable.d.ts +0 -63
- package/dist/Queueable.d.ts.map +0 -1
- package/dist/Worker.d.ts +0 -48
- package/dist/Worker.d.ts.map +0 -1
- package/dist/core/src/ConfigManager.d.ts +0 -26
- package/dist/core/src/ConfigManager.d.ts.map +0 -1
- package/dist/core/src/Container.d.ts +0 -39
- package/dist/core/src/Container.d.ts.map +0 -1
- package/dist/core/src/Event.d.ts +0 -6
- package/dist/core/src/Event.d.ts.map +0 -1
- package/dist/core/src/EventManager.d.ts +0 -124
- package/dist/core/src/EventManager.d.ts.map +0 -1
- package/dist/core/src/GlobalErrorHandlers.d.ts +0 -32
- package/dist/core/src/GlobalErrorHandlers.d.ts.map +0 -1
- package/dist/core/src/HookManager.d.ts +0 -29
- package/dist/core/src/HookManager.d.ts.map +0 -1
- package/dist/core/src/Listener.d.ts +0 -5
- package/dist/core/src/Listener.d.ts.map +0 -1
- package/dist/core/src/Logger.d.ts +0 -21
- package/dist/core/src/Logger.d.ts.map +0 -1
- package/dist/core/src/PlanetCore.d.ts +0 -116
- package/dist/core/src/PlanetCore.d.ts.map +0 -1
- package/dist/core/src/Route.d.ts +0 -13
- package/dist/core/src/Route.d.ts.map +0 -1
- package/dist/core/src/Router.d.ts +0 -168
- package/dist/core/src/Router.d.ts.map +0 -1
- package/dist/core/src/ServiceProvider.d.ts +0 -17
- package/dist/core/src/ServiceProvider.d.ts.map +0 -1
- package/dist/core/src/exceptions/AuthenticationException.d.ts +0 -5
- package/dist/core/src/exceptions/AuthenticationException.d.ts.map +0 -1
- package/dist/core/src/exceptions/AuthorizationException.d.ts +0 -5
- package/dist/core/src/exceptions/AuthorizationException.d.ts.map +0 -1
- package/dist/core/src/exceptions/GravitoException.d.ts +0 -15
- package/dist/core/src/exceptions/GravitoException.d.ts.map +0 -1
- package/dist/core/src/exceptions/ModelNotFoundException.d.ts +0 -7
- package/dist/core/src/exceptions/ModelNotFoundException.d.ts.map +0 -1
- package/dist/core/src/exceptions/ValidationException.d.ts +0 -15
- package/dist/core/src/exceptions/ValidationException.d.ts.map +0 -1
- package/dist/core/src/exceptions/index.d.ts +0 -6
- package/dist/core/src/exceptions/index.d.ts.map +0 -1
- package/dist/core/src/helpers/Arr.d.ts +0 -15
- package/dist/core/src/helpers/Arr.d.ts.map +0 -1
- package/dist/core/src/helpers/Str.d.ts +0 -19
- package/dist/core/src/helpers/Str.d.ts.map +0 -1
- package/dist/core/src/helpers/data.d.ts +0 -6
- package/dist/core/src/helpers/data.d.ts.map +0 -1
- package/dist/core/src/helpers/errors.d.ts +0 -13
- package/dist/core/src/helpers/errors.d.ts.map +0 -1
- package/dist/core/src/helpers/response.d.ts +0 -19
- package/dist/core/src/helpers/response.d.ts.map +0 -1
- package/dist/core/src/helpers.d.ts +0 -39
- package/dist/core/src/helpers.d.ts.map +0 -1
- package/dist/core/src/http/CookieJar.d.ts +0 -34
- package/dist/core/src/http/CookieJar.d.ts.map +0 -1
- package/dist/core/src/http/middleware/ThrottleRequests.d.ts +0 -13
- package/dist/core/src/http/middleware/ThrottleRequests.d.ts.map +0 -1
- package/dist/core/src/index.d.ts +0 -32
- package/dist/core/src/index.d.ts.map +0 -1
- package/dist/core/src/security/Encrypter.d.ts +0 -25
- package/dist/core/src/security/Encrypter.d.ts.map +0 -1
- package/dist/core/src/security/Hasher.d.ts +0 -30
- package/dist/core/src/security/Hasher.d.ts.map +0 -1
- package/dist/core/src/types/events.d.ts +0 -95
- package/dist/core/src/types/events.d.ts.map +0 -1
- package/dist/drivers/DatabaseDriver.d.ts +0 -60
- package/dist/drivers/DatabaseDriver.d.ts.map +0 -1
- package/dist/drivers/KafkaDriver.d.ts +0 -134
- package/dist/drivers/KafkaDriver.d.ts.map +0 -1
- package/dist/drivers/MemoryDriver.d.ts +0 -45
- package/dist/drivers/MemoryDriver.d.ts.map +0 -1
- package/dist/drivers/QueueDriver.d.ts +0 -89
- package/dist/drivers/QueueDriver.d.ts.map +0 -1
- package/dist/drivers/RedisDriver.d.ts +0 -79
- package/dist/drivers/RedisDriver.d.ts.map +0 -1
- package/dist/drivers/SQSDriver.d.ts +0 -100
- package/dist/drivers/SQSDriver.d.ts.map +0 -1
- package/dist/index.cjs.map +0 -422
- package/dist/index.d.ts.map +0 -1
- package/dist/index.mjs +0 -32096
- package/dist/index.mjs.map +0 -422
- package/dist/orbit-db/src/DBService.d.ts +0 -270
- package/dist/orbit-db/src/DBService.d.ts.map +0 -1
- package/dist/orbit-db/src/EventBus.d.ts +0 -53
- package/dist/orbit-db/src/EventBus.d.ts.map +0 -1
- package/dist/orbit-db/src/MigrationDriver.d.ts +0 -55
- package/dist/orbit-db/src/MigrationDriver.d.ts.map +0 -1
- package/dist/orbit-db/src/Model.d.ts +0 -564
- package/dist/orbit-db/src/Model.d.ts.map +0 -1
- package/dist/orbit-db/src/ModelCollection.d.ts +0 -35
- package/dist/orbit-db/src/ModelCollection.d.ts.map +0 -1
- package/dist/orbit-db/src/index.d.ts +0 -34
- package/dist/orbit-db/src/index.d.ts.map +0 -1
- package/dist/orbit-db/src/types.d.ts +0 -146
- package/dist/orbit-db/src/types.d.ts.map +0 -1
- package/dist/orbit-queue/src/Consumer.d.ts +0 -67
- package/dist/orbit-queue/src/Consumer.d.ts.map +0 -1
- package/dist/orbit-queue/src/Job.d.ts +0 -76
- package/dist/orbit-queue/src/Job.d.ts.map +0 -1
- package/dist/orbit-queue/src/OrbitQueue.d.ts +0 -74
- package/dist/orbit-queue/src/OrbitQueue.d.ts.map +0 -1
- package/dist/orbit-queue/src/QueueManager.d.ts +0 -86
- package/dist/orbit-queue/src/QueueManager.d.ts.map +0 -1
- package/dist/orbit-queue/src/Queueable.d.ts +0 -63
- package/dist/orbit-queue/src/Queueable.d.ts.map +0 -1
- package/dist/orbit-queue/src/Worker.d.ts +0 -48
- package/dist/orbit-queue/src/Worker.d.ts.map +0 -1
- package/dist/orbit-queue/src/drivers/DatabaseDriver.d.ts +0 -60
- package/dist/orbit-queue/src/drivers/DatabaseDriver.d.ts.map +0 -1
- package/dist/orbit-queue/src/drivers/KafkaDriver.d.ts +0 -134
- package/dist/orbit-queue/src/drivers/KafkaDriver.d.ts.map +0 -1
- package/dist/orbit-queue/src/drivers/MemoryDriver.d.ts +0 -45
- package/dist/orbit-queue/src/drivers/MemoryDriver.d.ts.map +0 -1
- package/dist/orbit-queue/src/drivers/QueueDriver.d.ts +0 -89
- package/dist/orbit-queue/src/drivers/QueueDriver.d.ts.map +0 -1
- package/dist/orbit-queue/src/drivers/RedisDriver.d.ts +0 -79
- package/dist/orbit-queue/src/drivers/RedisDriver.d.ts.map +0 -1
- package/dist/orbit-queue/src/drivers/SQSDriver.d.ts +0 -100
- package/dist/orbit-queue/src/drivers/SQSDriver.d.ts.map +0 -1
- package/dist/orbit-queue/src/index.d.ts +0 -45
- package/dist/orbit-queue/src/index.d.ts.map +0 -1
- package/dist/orbit-queue/src/serializers/ClassNameSerializer.d.ts +0 -46
- package/dist/orbit-queue/src/serializers/ClassNameSerializer.d.ts.map +0 -1
- package/dist/orbit-queue/src/serializers/JobSerializer.d.ts +0 -36
- package/dist/orbit-queue/src/serializers/JobSerializer.d.ts.map +0 -1
- package/dist/orbit-queue/src/serializers/JsonSerializer.d.ts +0 -32
- package/dist/orbit-queue/src/serializers/JsonSerializer.d.ts.map +0 -1
- package/dist/orbit-queue/src/types.d.ts +0 -85
- package/dist/orbit-queue/src/types.d.ts.map +0 -1
- package/dist/serializers/ClassNameSerializer.d.ts +0 -46
- package/dist/serializers/ClassNameSerializer.d.ts.map +0 -1
- package/dist/serializers/JobSerializer.d.ts +0 -36
- package/dist/serializers/JobSerializer.d.ts.map +0 -1
- package/dist/serializers/JsonSerializer.d.ts +0 -32
- package/dist/serializers/JsonSerializer.d.ts.map +0 -1
- package/dist/types.d.ts +0 -85
- package/dist/types.d.ts.map +0 -1
package/dist/index.js
ADDED
@@ -0,0 +1,2700 @@
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
+  get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
+}) : x)(function(x) {
+  if (typeof require !== "undefined") return require.apply(this, arguments);
+  throw Error('Dynamic require of "' + x + '" is not supported');
+});
+var __esm = (fn, res) => function __init() {
+  return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
+};
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+// src/drivers/DatabaseDriver.ts
+var DatabaseDriver_exports = {};
+__export(DatabaseDriver_exports, {
+  DatabaseDriver: () => DatabaseDriver
+});
+var DatabaseDriver;
+var init_DatabaseDriver = __esm({
+  "src/drivers/DatabaseDriver.ts"() {
+    "use strict";
+    DatabaseDriver = class {
+      tableName;
+      dbService;
+      constructor(config) {
+        this.tableName = config.table ?? "jobs";
+        this.dbService = config.dbService;
+        if (!this.dbService) {
+          throw new Error(
+            "[DatabaseDriver] dbService is required. Please provide a database service that implements DatabaseService interface."
+          );
+        }
+      }
+      /**
+       * Push a job to a queue.
+       */
+      async push(queue, job) {
+        const availableAt = job.delaySeconds ? new Date(Date.now() + job.delaySeconds * 1e3) : /* @__PURE__ */ new Date();
+        const payload = JSON.stringify(job);
+        await this.dbService.execute(
+          `INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
+           VALUES ($1, $2, $3, $4, $5)`,
+          [queue, payload, job.attempts ?? 0, availableAt.toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
+        );
+      }
+      /**
+       * Pop a job from the queue (FIFO, with delay support).
+       */
+      async pop(queue) {
+        const result = await this.dbService.execute(
+          `SELECT id, payload, attempts, created_at, available_at
+           FROM ${this.tableName}
+           WHERE queue = $1
+             AND available_at <= NOW()
+             AND (reserved_at IS NULL OR reserved_at < NOW() - INTERVAL '5 minutes')
+           ORDER BY created_at ASC
+           LIMIT 1
+           FOR UPDATE SKIP LOCKED`,
+          [queue]
+        ).catch(() => {
+          return this.dbService.execute(
+            `SELECT id, payload, attempts, created_at, available_at
+             FROM ${this.tableName}
+             WHERE queue = $1
+               AND available_at <= NOW()
+               AND (reserved_at IS NULL OR reserved_at < NOW() - INTERVAL '5 minutes')
+             ORDER BY created_at ASC
+             LIMIT 1
+             FOR UPDATE`,
+            [queue]
+          );
+        });
+        const rows = result;
+        if (!rows || rows.length === 0) {
+          return null;
+        }
+        const row = rows[0];
+        await this.dbService.execute(
+          `UPDATE ${this.tableName}
+           SET reserved_at = NOW()
+           WHERE id = $1`,
+          [row.id]
+        );
+        const createdAt = new Date(row.created_at).getTime();
+        const delaySeconds = row.available_at ? Math.max(0, Math.floor((new Date(row.available_at).getTime() - createdAt) / 1e3)) : void 0;
+        let job;
+        try {
+          const parsed = JSON.parse(row.payload);
+          if (parsed && typeof parsed === "object" && parsed.type && parsed.data) {
+            job = {
+              ...parsed,
+              id: row.id,
+              // DB ID is the source of truth for deletion
+              attempts: row.attempts
+            };
+          } else {
+            throw new Error("Fallback");
+          }
+        } catch (_e) {
+          job = {
+            id: row.id,
+            type: "class",
+            data: row.payload,
+            createdAt,
+            attempts: row.attempts
+          };
+        }
+        if (delaySeconds !== void 0) {
+          job.delaySeconds = delaySeconds;
+        }
+        return job;
+      }
+      /**
+       * Get queue size.
+       */
+      async size(queue) {
+        const result = await this.dbService.execute(
+          `SELECT COUNT(*) as count
+           FROM ${this.tableName}
+           WHERE queue = $1
+             AND available_at <= NOW()
+             AND (reserved_at IS NULL OR reserved_at < NOW() - INTERVAL '5 minutes')`,
+          [queue]
+        );
+        return result?.[0]?.count ?? 0;
+      }
+      /**
+       * Clear a queue.
+       */
+      async clear(queue) {
+        await this.dbService.execute(`DELETE FROM ${this.tableName} WHERE queue = $1`, [queue]);
+      }
+      /**
+       * Push multiple jobs.
+       */
+      async pushMany(queue, jobs) {
+        if (jobs.length === 0) {
+          return;
+        }
+        await this.dbService.transaction(async (tx) => {
+          for (const job of jobs) {
+            const availableAt = job.delaySeconds ? new Date(Date.now() + job.delaySeconds * 1e3) : /* @__PURE__ */ new Date();
+            await tx.execute(
+              `INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
+               VALUES ($1, $2, $3, $4, $5)`,
+              [queue, job.data, job.attempts ?? 0, availableAt.toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
+            );
+          }
+        });
+      }
+      /**
+       * Mark a job as failed (DLQ).
+       */
+      async fail(queue, job) {
+        const failedQueue = `failed:${queue}`;
+        const payload = JSON.stringify(job);
+        await this.dbService.execute(
+          `INSERT INTO ${this.tableName} (queue, payload, attempts, available_at, created_at)
+           VALUES ($1, $2, $3, $4, $5)`,
+          [failedQueue, payload, job.attempts, (/* @__PURE__ */ new Date()).toISOString(), (/* @__PURE__ */ new Date()).toISOString()]
+        );
+      }
+      /**
+       * Acknowledge/Complete a job.
+       */
+      async complete(_queue, job) {
+        if (!job.id) {
+          return;
+        }
+        await this.dbService.execute(`DELETE FROM ${this.tableName} WHERE id = $1`, [job.id]);
+      }
+    };
+  }
+});
+
+// src/drivers/KafkaDriver.ts
+var KafkaDriver_exports = {};
+__export(KafkaDriver_exports, {
+  KafkaDriver: () => KafkaDriver
+});
+var KafkaDriver;
+var init_KafkaDriver = __esm({
+  "src/drivers/KafkaDriver.ts"() {
+    "use strict";
+    KafkaDriver = class {
+      client;
+      consumerGroupId;
+      producer;
+      admin;
+      constructor(config) {
+        this.client = config.client;
+        this.consumerGroupId = config.consumerGroupId ?? "gravito-workers";
+        if (!this.client) {
+          throw new Error("[KafkaDriver] Kafka client is required. Please install kafkajs package.");
+        }
+      }
+      /**
+       * Ensure the producer is connected.
+       */
+      async ensureProducer() {
+        if (!this.producer) {
+          this.producer = this.client.producer();
+          await this.producer.connect();
+        }
+        return this.producer;
+      }
+      /**
+       * Ensure the admin client is connected.
+       */
+      async ensureAdmin() {
+        if (!this.admin) {
+          this.admin = this.client.admin();
+          await this.admin.connect();
+        }
+        return this.admin;
+      }
+      /**
+       * Push a job to a topic.
+       */
+      async push(queue, job) {
+        const producer = await this.ensureProducer();
+        const payload = JSON.stringify({
+          id: job.id,
+          type: job.type,
+          data: job.data,
+          className: job.className,
+          createdAt: job.createdAt,
+          delaySeconds: job.delaySeconds,
+          attempts: job.attempts,
+          maxAttempts: job.maxAttempts
+        });
+        await producer.send({
+          topic: queue,
+          messages: [
+            {
+              key: job.id,
+              value: payload
+            }
+          ]
+        });
+      }
+      /**
+       * Pop is not supported for Kafka.
+       *
+       * Note: Kafka uses a push-based model, so you should use `subscribe()`.
+       */
+      async pop(_queue) {
+        throw new Error("[KafkaDriver] Kafka uses push-based model. Use subscribe() instead of pop().");
+      }
+      /**
+       * Kafka does not provide a direct queue size.
+       *
+       * Returns 0; use Kafka tooling/metrics for lag/size insights.
+       */
+      async size(_queue) {
+        return 0;
+      }
+      /**
+       * Clear a queue by deleting the topic.
+       */
+      async clear(queue) {
+        const admin = await this.ensureAdmin();
+        await admin.deleteTopics({ topics: [queue] });
+      }
+      /**
+       * Push multiple jobs.
+       */
+      async pushMany(queue, jobs) {
+        if (jobs.length === 0) {
+          return;
+        }
+        const producer = await this.ensureProducer();
+        const messages = jobs.map((job) => {
+          const payload = JSON.stringify({
+            id: job.id,
+            type: job.type,
+            data: job.data,
+            className: job.className,
+            createdAt: job.createdAt,
+            delaySeconds: job.delaySeconds,
+            attempts: job.attempts,
+            maxAttempts: job.maxAttempts
+          });
+          return {
+            key: job.id,
+            value: payload
+          };
+        });
+        await producer.send({
+          topic: queue,
+          messages
+        });
+      }
+      /**
+       * Create a topic.
+       */
+      async createTopic(topic, options) {
+        const admin = await this.ensureAdmin();
+        await admin.createTopics({
+          topics: [
+            {
+              topic,
+              numPartitions: options?.partitions ?? 1,
+              replicationFactor: options?.replicationFactor ?? 1
+            }
+          ]
+        });
+      }
+      /**
+       * Delete a topic.
+       */
+      async deleteTopic(topic) {
+        await this.clear(topic);
+      }
+      /**
+       * Subscribe to a topic (push-based model).
+       */
+      async subscribe(queue, callback) {
+        const consumer = this.client.consumer({ groupId: this.consumerGroupId });
+        await consumer.connect();
+        await consumer.subscribe({ topics: [queue] });
+        await consumer.run({
+          eachMessage: async ({ message }) => {
+            if (!message.value) {
+              return;
+            }
+            const payload = JSON.parse(message.value.toString());
+            const job = {
+              id: payload.id,
+              type: payload.type,
+              data: payload.data,
+              className: payload.className,
+              createdAt: payload.createdAt,
+              delaySeconds: payload.delaySeconds,
+              attempts: payload.attempts,
+              maxAttempts: payload.maxAttempts
+            };
+            try {
+              await callback(job);
+            } catch (error) {
+              console.error("[KafkaDriver] Error processing message:", error);
+            }
+          }
+        });
+      }
+    };
+  }
+});
+
+// src/drivers/RabbitMQDriver.ts
+var RabbitMQDriver_exports = {};
+__export(RabbitMQDriver_exports, {
+  RabbitMQDriver: () => RabbitMQDriver
+});
+var RabbitMQDriver;
+var init_RabbitMQDriver = __esm({
+  "src/drivers/RabbitMQDriver.ts"() {
+    "use strict";
+    RabbitMQDriver = class {
+      connection;
+      channel;
+      exchange;
+      exchangeType;
+      constructor(config) {
+        this.connection = config.client;
+        this.exchange = config.exchange;
+        this.exchangeType = config.exchangeType ?? "fanout";
+        if (!this.connection) {
+          throw new Error(
+            "[RabbitMQDriver] RabbitMQ connection is required. Please provide a connection from amqplib."
+          );
+        }
+      }
+      /**
+       * Ensure channel is created.
+       */
+      async ensureChannel() {
+        if (this.channel) {
+          return this.channel;
+        }
+        if (typeof this.connection.createChannel === "function") {
+          this.channel = await this.connection.createChannel();
+        } else {
+          this.channel = this.connection;
+        }
+        if (this.exchange) {
+          await this.channel.assertExchange(this.exchange, this.exchangeType, { durable: true });
+        }
+        return this.channel;
+      }
+      /**
+       * Get the underlying connection.
+       */
+      getRawConnection() {
+        return this.connection;
+      }
+      /**
+       * Push a job (sendToQueue / publish).
+       */
+      async push(queue, job) {
+        const channel = await this.ensureChannel();
+        const payload = Buffer.from(JSON.stringify(job));
+        if (this.exchange) {
+          await channel.assertQueue(queue, { durable: true });
+          await channel.bindQueue(queue, this.exchange, "");
+          channel.publish(this.exchange, "", payload, { persistent: true });
+        } else {
+          await channel.assertQueue(queue, { durable: true });
+          channel.sendToQueue(queue, payload, { persistent: true });
+        }
+      }
+      /**
+       * Pop a job (get).
+       */
+      async pop(queue) {
+        const channel = await this.ensureChannel();
+        await channel.assertQueue(queue, { durable: true });
+        const msg = await channel.get(queue, { noAck: false });
+        if (!msg) {
+          return null;
+        }
+        const job = JSON.parse(msg.content.toString());
+        job._raw = msg;
+        return job;
+      }
+      /**
+       * Acknowledge a message.
+       */
+      async acknowledge(messageId) {
+        const channel = await this.ensureChannel();
+        if (typeof messageId === "object") {
+          channel.ack(messageId);
+        }
+      }
+      /**
+       * Negative acknowledge a message.
+       */
+      async nack(message, requeue = true) {
+        const channel = await this.ensureChannel();
+        channel.nack(message, false, requeue);
+      }
+      /**
+       * Reject a message.
+       */
+      async reject(message, requeue = true) {
+        const channel = await this.ensureChannel();
+        channel.reject(message, requeue);
+      }
+      /**
+       * Subscribe to a queue.
+       */
+      async subscribe(queue, callback, options = {}) {
+        const channel = await this.ensureChannel();
+        await channel.assertQueue(queue, { durable: true });
+        if (this.exchange) {
+          await channel.bindQueue(queue, this.exchange, "");
+        }
+        const { autoAck = true } = options;
+        await channel.consume(
+          queue,
+          async (msg) => {
+            if (!msg) {
+              return;
+            }
+            const job = JSON.parse(msg.content.toString());
+            job._raw = msg;
+            await callback(job);
+            if (autoAck) {
+              channel.ack(msg);
+            }
+          },
+          { noAck: false }
+        );
+      }
+      /**
+       * Get queue size.
+       */
+      async size(queue) {
+        const channel = await this.ensureChannel();
+        const ok = await channel.checkQueue(queue);
+        return ok.messageCount;
+      }
+      /**
+       * Clear a queue.
+       */
+      async clear(queue) {
+        const channel = await this.ensureChannel();
+        await channel.purgeQueue(queue);
+      }
+    };
+  }
+});
+
+// src/drivers/RedisDriver.ts
+var RedisDriver_exports = {};
+__export(RedisDriver_exports, {
+  RedisDriver: () => RedisDriver
+});
+var RedisDriver;
+var init_RedisDriver = __esm({
+  "src/drivers/RedisDriver.ts"() {
+    "use strict";
+    RedisDriver = class _RedisDriver {
+      prefix;
+      client;
+      // Lua Logic:
+      // IF (IS_MEMBER(activeSet, groupId)) -> PUSH(pendingList, job)
+      // ELSE -> SADD(activeSet, groupId) & LPUSH(waitList, job)
+      static PUSH_SCRIPT = `
+        local waitList = KEYS[1]
+        local activeSet = KEYS[2]
+        local pendingList = KEYS[3]
+        local groupId = ARGV[1]
+        local payload = ARGV[2]
+
+        if redis.call('SISMEMBER', activeSet, groupId) == 1 then
+          return redis.call('RPUSH', pendingList, payload)
+        else
+          redis.call('SADD', activeSet, groupId)
+          return redis.call('LPUSH', waitList, payload)
+        end
+      `;
+      // Lua Logic:
+      // local next = LPOP(pendingList)
+      // IF (next) -> LPUSH(waitList, next)
+      // ELSE -> SREM(activeSet, groupId)
+      static COMPLETE_SCRIPT = `
+        local waitList = KEYS[1]
+        local activeSet = KEYS[2]
+        local pendingList = KEYS[3]
+        local groupId = ARGV[1]
+
+        local nextJob = redis.call('LPOP', pendingList)
+        if nextJob then
+          return redis.call('LPUSH', waitList, nextJob)
+        else
+          return redis.call('SREM', activeSet, groupId)
+        end
+      `;
+      constructor(config) {
+        this.client = config.client;
+        this.prefix = config.prefix ?? "queue:";
+        if (!this.client) {
+          throw new Error(
+            "[RedisDriver] Redis client is required. Please install ioredis or redis package."
+          );
+        }
+        if (typeof this.client.defineCommand === "function") {
+          ;
+          this.client.defineCommand("pushGroupJob", {
+            numberOfKeys: 3,
+            lua: _RedisDriver.PUSH_SCRIPT
+          });
+          this.client.defineCommand("completeGroupJob", {
+            numberOfKeys: 3,
+            lua: _RedisDriver.COMPLETE_SCRIPT
+          });
+        }
+      }
+      /**
+       * Get full Redis key for a queue.
+       */
+      getKey(queue, priority) {
+        if (priority) {
+          return `${this.prefix}${queue}:${priority}`;
+        }
+        return `${this.prefix}${queue}`;
+      }
+      /**
+       * Push a job (LPUSH).
+       */
+      async push(queue, job, options) {
+        const key = this.getKey(queue, options?.priority);
+        const groupId = options?.groupId;
+        if (groupId && options?.priority) {
+        }
+        const payloadObj = {
+          id: job.id,
+          type: job.type,
+          data: job.data,
+          className: job.className,
+          createdAt: job.createdAt,
+          delaySeconds: job.delaySeconds,
+          attempts: job.attempts,
+          maxAttempts: job.maxAttempts,
+          groupId,
+          error: job.error,
+          failedAt: job.failedAt
+        };
+        const payload = JSON.stringify(payloadObj);
+        if (groupId && typeof this.client.pushGroupJob === "function") {
+          const activeSetKey = `${this.prefix}active`;
+          const pendingListKey = `${this.prefix}pending:${groupId}`;
+          await this.client.pushGroupJob(key, activeSetKey, pendingListKey, groupId, payload);
+          return;
+        }
+        if (job.delaySeconds && job.delaySeconds > 0) {
+          const delayKey = `${key}:delayed`;
+          const score = Date.now() + job.delaySeconds * 1e3;
+          if (typeof this.client.zadd === "function") {
+            await this.client.zadd(delayKey, score, payload);
+          } else {
+            await this.client.lpush(key, payload);
+          }
+        } else {
+          await this.client.lpush(key, payload);
+        }
+      }
+      /**
+       * Complete a job (handle Group FIFO).
+       */
+      async complete(queue, job) {
+        if (!job.groupId) {
+          return;
+        }
+        const key = this.getKey(queue);
+        const activeSetKey = `${this.prefix}active`;
+        const pendingListKey = `${this.prefix}pending:${job.groupId}`;
+        if (typeof this.client.completeGroupJob === "function") {
+          await this.client.completeGroupJob(key, activeSetKey, pendingListKey, job.groupId);
+        }
+      }
+      /**
+       * Pop a job (RPOP, FIFO).
+       * Supports implicit priority polling (critical -> high -> default -> low).
+       */
+      async pop(queue) {
+        const priorities = ["critical", "high", void 0, "low"];
+        for (const priority of priorities) {
+          const key = this.getKey(queue, priority);
+          const delayKey = `${key}:delayed`;
+          if (typeof this.client.zrange === "function") {
+            const now = Date.now();
+            const delayedJobs = await this.client.zrange(delayKey, 0, 0, "WITHSCORES");
+            if (delayedJobs && delayedJobs.length >= 2) {
+              const score = parseFloat(delayedJobs[1]);
+              if (score <= now) {
+                const payload2 = delayedJobs[0];
+                await this.client.zrem(delayKey, payload2);
+                return this.parsePayload(payload2);
+              }
+            }
+          }
+          if (typeof this.client.get === "function") {
+            const isPaused = await this.client.get(`${key}:paused`);
+            if (isPaused === "1") {
+              continue;
+            }
+          }
+          const payload = await this.client.rpop(key);
+          if (payload) {
+            return this.parsePayload(payload);
+          }
+        }
+        return null;
+      }
+      /**
+       * Parse Redis payload.
+       */
+      parsePayload(payload) {
+        const parsed = JSON.parse(payload);
+        return {
+          id: parsed.id,
+          type: parsed.type,
+          data: parsed.data,
+          className: parsed.className,
+          createdAt: parsed.createdAt,
+          delaySeconds: parsed.delaySeconds,
+          attempts: parsed.attempts,
+          maxAttempts: parsed.maxAttempts,
+          groupId: parsed.groupId,
+          error: parsed.error,
+          failedAt: parsed.failedAt,
+          priority: parsed.priority
+        };
+      }
+      /**
+       * Get queue size.
+       */
+      async size(queue) {
+        const key = this.getKey(queue);
+        return this.client.llen(key);
+      }
+      /**
+       * Mark a job as permanently failed (DLQ).
+       */
+      async fail(queue, job) {
+        const key = `${this.getKey(queue)}:failed`;
+        const payload = JSON.stringify({
+          ...job,
+          failedAt: Date.now()
+        });
+        await this.client.lpush(key, payload);
+        if (typeof this.client.ltrim === "function") {
+          await this.client.ltrim(key, 0, 999);
+        }
+      }
+      /**
+       * Clear a queue.
+       */
+      async clear(queue) {
+        const key = this.getKey(queue);
+        const delayKey = `${key}:delayed`;
+        const activeSetKey = `${this.prefix}active`;
+        await this.client.del(key);
+        if (typeof this.client.del === "function") {
+          await this.client.del(delayKey);
+          await this.client.del(activeSetKey);
+        }
+      }
+      /**
+       * Push multiple jobs.
+       */
+      async pushMany(queue, jobs) {
+        if (jobs.length === 0) {
+          return;
+        }
+        const hasGroup = jobs.some((j) => j.groupId);
+        const hasPriority = jobs.some((j) => j.priority);
+        if (hasGroup || hasPriority) {
+          for (const job of jobs) {
+            await this.push(queue, job, {
+              groupId: job.groupId,
+              priority: job.priority
+            });
+          }
+          return;
+        }
+        const key = this.getKey(queue);
+        const payloads = jobs.map(
+          (job) => JSON.stringify({
+            id: job.id,
+            type: job.type,
+            data: job.data,
+            className: job.className,
+            createdAt: job.createdAt,
+            delaySeconds: job.delaySeconds,
+            attempts: job.attempts,
+            maxAttempts: job.maxAttempts,
+            groupId: job.groupId,
+            priority: job.priority
+          })
+        );
+        await this.client.lpush(key, ...payloads);
+      }
+      /**
+       * Pop multiple jobs.
+       */
+      async popMany(queue, count) {
+        const key = this.getKey(queue);
+        const results = [];
+        for (let i = 0; i < count; i++) {
+          const payload = await this.client.rpop(key);
+          if (payload) {
+            results.push(this.parsePayload(payload));
+          } else {
+            break;
+          }
+        }
+        return results;
+      }
+      /**
+       * Report worker heartbeat for monitoring.
+       */
+      async reportHeartbeat(workerInfo, prefix) {
+        const key = `${prefix ?? this.prefix}worker:${workerInfo.id}`;
+        if (typeof this.client.set === "function") {
+          await this.client.set(key, JSON.stringify(workerInfo), "EX", 10);
+        }
+      }
+      /**
+       * Publish a log message for monitoring.
+       */
+      async publishLog(logPayload, prefix) {
+        const payload = JSON.stringify(logPayload);
+        const monitorPrefix = prefix ?? this.prefix;
+        if (typeof this.client.publish === "function") {
+          await this.client.publish(`${monitorPrefix}logs`, payload);
+        }
+        const historyKey = `${monitorPrefix}logs:history`;
+        if (typeof this.client.pipeline === "function") {
+          const pipe = this.client.pipeline();
+          pipe.lpush(historyKey, payload);
+          pipe.ltrim(historyKey, 0, 99);
+          await pipe.exec();
+        } else {
+          await this.client.lpush(historyKey, payload);
+        }
+      }
+      /**
+       * Check if a queue is rate limited.
+       * Uses a fixed window counter.
+       */
+      async checkRateLimit(queue, config) {
+        const key = `${this.prefix}${queue}:ratelimit`;
+        const now = Date.now();
+        const windowStart = Math.floor(now / config.duration);
+        const windowKey = `${key}:${windowStart}`;
+        const client = this.client;
+        if (typeof client.incr === "function") {
+          const current = await client.incr(windowKey);
+          if (current === 1) {
+            await client.expire(windowKey, Math.ceil(config.duration / 1e3) + 1);
+          }
+          return current <= config.max;
+        }
+        return true;
+      }
+      /**
+       * Get failed jobs from DLQ.
+       */
+      async getFailed(queue, start = 0, end = -1) {
+        const key = `${this.getKey(queue)}:failed`;
+        const payloads = await this.client.lrange(key, start, end);
+        return payloads.map((p) => this.parsePayload(p));
+      }
+      /**
+       * Retry failed jobs from DLQ.
+       * Moves jobs from failed list back to the main queue.
+       */
+      async retryFailed(queue, count = 1) {
+        const failedKey = `${this.getKey(queue)}:failed`;
+        const queueKey = this.getKey(queue);
+        let retried = 0;
+        for (let i = 0; i < count; i++) {
+          const payload = await this.client.rpop(failedKey);
+          if (!payload) {
+            break;
+          }
+          const job = this.parsePayload(payload);
+          job.attempts = 0;
+          delete job.error;
+          delete job.failedAt;
+          await this.push(queue, job, { priority: job.priority, groupId: job.groupId });
+          retried++;
+        }
+        return retried;
+      }
+      /**
+       * Clear failed jobs from DLQ.
+       */
+      async clearFailed(queue) {
+        const key = `${this.getKey(queue)}:failed`;
+        await this.client.del(key);
+      }
+    };
+  }
+});
+
+// src/drivers/SQSDriver.ts
+var SQSDriver_exports = {};
+__export(SQSDriver_exports, {
+  SQSDriver: () => SQSDriver
+});
+var SQSDriver;
+var init_SQSDriver = __esm({
+  "src/drivers/SQSDriver.ts"() {
+    "use strict";
+    SQSDriver = class {
+      client;
+      queueUrlPrefix;
+      visibilityTimeout;
+      waitTimeSeconds;
+      queueUrls = /* @__PURE__ */ new Map();
+      constructor(config) {
+        this.client = config.client;
+        this.queueUrlPrefix = config.queueUrlPrefix ?? "";
+        this.visibilityTimeout = config.visibilityTimeout ?? 30;
+        this.waitTimeSeconds = config.waitTimeSeconds ?? 20;
+        if (!this.client) {
+          throw new Error(
+            "[SQSDriver] SQS client is required. Please install @aws-sdk/client-sqs package."
+          );
+        }
+      }
+      /**
+       * Resolve the full queue URL.
+       */
+      async getQueueUrl(queue) {
+        if (this.queueUrls.has(queue)) {
+          return this.queueUrls.get(queue);
+        }
+        if (this.queueUrlPrefix) {
+          const url = `${this.queueUrlPrefix}/${queue}`;
+          this.queueUrls.set(queue, url);
+          return url;
+        }
+        this.queueUrls.set(queue, queue);
+        return queue;
+      }
+      /**
+       * Push a job to SQS.
+       */
+      async push(queue, job) {
+        const { SendMessageCommand } = await import("@aws-sdk/client-sqs");
+        const queueUrl = await this.getQueueUrl(queue);
+        const payload = JSON.stringify({
+          id: job.id,
+          type: job.type,
+          data: job.data,
+          className: job.className,
+          createdAt: job.createdAt,
+          delaySeconds: job.delaySeconds,
+          attempts: job.attempts,
+          maxAttempts: job.maxAttempts
+        });
+        const delaySeconds = job.delaySeconds ? Math.min(job.delaySeconds, 900) : 0;
+        await this.client.send(
+          new SendMessageCommand({
+            QueueUrl: queueUrl,
+            MessageBody: payload,
+            DelaySeconds: delaySeconds
+          })
+        );
+      }
+      /**
+       * Pop a job (long polling).
+       */
+      async pop(queue) {
+        const { ReceiveMessageCommand } = await import("@aws-sdk/client-sqs");
+        const queueUrl = await this.getQueueUrl(queue);
+        const response = await this.client.send(
+          new ReceiveMessageCommand({
+            QueueUrl: queueUrl,
+            MaxNumberOfMessages: 1,
+            WaitTimeSeconds: this.waitTimeSeconds,
+            VisibilityTimeout: this.visibilityTimeout
+          })
+        );
+        if (!response.Messages || response.Messages.length === 0) {
+          return null;
+        }
+        const message = response.Messages[0];
+        const payload = JSON.parse(message.Body ?? "{}");
+        return {
+          id: payload.id ?? message.MessageId,
+          type: payload.type,
+          data: payload.data,
+          className: payload.className,
+          createdAt: payload.createdAt,
+          delaySeconds: payload.delaySeconds,
+          attempts: payload.attempts,
+          maxAttempts: payload.maxAttempts,
+          // Store ReceiptHandle for acknowledgement
+          ...message.ReceiptHandle && { receiptHandle: message.ReceiptHandle }
+        };
+      }
+      /**
+       * Get queue size (approximate).
+       */
+      async size(queue) {
+        const { GetQueueAttributesCommand } = await import("@aws-sdk/client-sqs");
+        const queueUrl = await this.getQueueUrl(queue);
+        try {
+          const response = await this.client.send(
+            new GetQueueAttributesCommand({
+              QueueUrl: queueUrl,
+              AttributeNames: ["ApproximateNumberOfMessages"]
+            })
+          );
+          return parseInt(response.Attributes?.ApproximateNumberOfMessages ?? "0", 10);
+        } catch (error) {
+          console.error("[SQSDriver] Error getting queue size:", error);
+          return 0;
+        }
+      }
+      /**
+       * Clear a queue by receiving and deleting messages.
+       *
+       * Note: SQS does not provide a direct "purge" API via this wrapper. This method will
+       * keep receiving and deleting messages until the queue is empty.
+       */
+      async clear(queue) {
+        const { DeleteMessageCommand } = await import("@aws-sdk/client-sqs");
+        const queueUrl = await this.getQueueUrl(queue);
+        while (true) {
+          const job = await this.pop(queue);
+          if (!job) {
+            break;
+          }
+          if (job.receiptHandle) {
+            await this.client.send(
+              new DeleteMessageCommand({
+                QueueUrl: queueUrl,
+                ReceiptHandle: job.receiptHandle
+              })
+            );
+          }
+        }
+      }
+      /**
+       * Push multiple jobs.
+       */
+      async pushMany(queue, jobs) {
+        if (jobs.length === 0) {
+          return;
+        }
+        const { SendMessageBatchCommand } = await import("@aws-sdk/client-sqs");
+        const queueUrl = await this.getQueueUrl(queue);
+        const batchSize = 10;
+        for (let i = 0; i < jobs.length; i += batchSize) {
+          const batch = jobs.slice(i, i + batchSize);
+          const entries = batch.map((job, index) => {
+            const payload = JSON.stringify({
+              id: job.id,
+              type: job.type,
+              data: job.data,
+              className: job.className,
+              createdAt: job.createdAt,
+              delaySeconds: job.delaySeconds,
+              attempts: job.attempts,
+              maxAttempts: job.maxAttempts
+            });
+            return {
+              Id: `${job.id}-${index}`,
+              MessageBody: payload,
+              DelaySeconds: job.delaySeconds ? Math.min(job.delaySeconds, 900) : 0
+            };
+          });
+          await this.client.send(
+            new SendMessageBatchCommand({
+              QueueUrl: queueUrl,
+              Entries: entries
+            })
+          );
+        }
+      }
+      /**
+       * Acknowledge is not supported via messageId.
+       */
+      async acknowledge(_messageId) {
+        throw new Error("[SQSDriver] Use deleteMessage() with ReceiptHandle instead of acknowledge().");
+      }
+      /**
+       * Delete a message (acknowledge processing completion).
+       */
+      async deleteMessage(queue, receiptHandle) {
+        const { DeleteMessageCommand } = await import("@aws-sdk/client-sqs");
+        const queueUrl = await this.getQueueUrl(queue);
+        await this.client.send(
+          new DeleteMessageCommand({
+            QueueUrl: queueUrl,
+            ReceiptHandle: receiptHandle
+          })
+        );
+      }
+    };
+  }
+});
+
+// src/Scheduler.ts
+var Scheduler_exports = {};
+__export(Scheduler_exports, {
+  Scheduler: () => Scheduler
+});
+import parser from "cron-parser";
+var Scheduler;
+var init_Scheduler = __esm({
+  "src/Scheduler.ts"() {
+    "use strict";
+    Scheduler = class {
+      constructor(manager, options = {}) {
+        this.manager = manager;
+        this.prefix = options.prefix ?? "queue:";
+      }
+      prefix;
+      get client() {
+        const driver = this.manager.getDriver(this.manager.getDefaultConnection());
+        return driver.client;
+      }
+      /**
+       * Register a scheduled job.
+       */
+      async register(config) {
+        const nextRun = parser.parse(config.cron).next().getTime();
+        const fullConfig = {
+          ...config,
+          nextRun,
+          enabled: true
+        };
+        const pipe = this.client.pipeline();
+        pipe.hset(`${this.prefix}schedule:${config.id}`, {
+          ...fullConfig,
+          job: JSON.stringify(fullConfig.job)
+        });
+        pipe.zadd(`${this.prefix}schedules`, nextRun, config.id);
+        await pipe.exec();
+      }
+      /**
+       * Remove a scheduled job.
+       */
+      async remove(id) {
+        const pipe = this.client.pipeline();
+        pipe.del(`${this.prefix}schedule:${id}`);
+        pipe.zrem(`${this.prefix}schedules`, id);
+        await pipe.exec();
+      }
+      /**
+       * List all scheduled jobs.
+       */
+      async list() {
+        const ids = await this.client.zrange(`${this.prefix}schedules`, 0, -1);
+        const configs = [];
+        for (const id of ids) {
+          const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+          if (data?.id) {
+            configs.push({
+              ...data,
+              lastRun: data.lastRun ? parseInt(data.lastRun, 10) : void 0,
+              nextRun: data.nextRun ? parseInt(data.nextRun, 10) : void 0,
+              enabled: data.enabled === "true",
+              job: JSON.parse(data.job)
+            });
+          }
+        }
+        return configs;
+      }
+      /**
+       * Run a scheduled job immediately (out of schedule).
+       */
+      async runNow(id) {
+        const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+        if (data?.id) {
+          const serialized = JSON.parse(data.job);
+          const serializer = this.manager.getSerializer();
+          const job = serializer.deserialize(serialized);
+          await this.manager.push(job);
+        }
+      }
+      /**
+       * Process due tasks (TICK).
+       * This should be called periodically (e.g. every minute).
+       */
+      async tick() {
+        const now = Date.now();
+        const dueIds = await this.client.zrangebyscore(`${this.prefix}schedules`, 0, now);
+        let fired = 0;
+        const serializer = this.manager.getSerializer();
+        for (const id of dueIds) {
+          const lockKey = `${this.prefix}lock:schedule:${id}:${Math.floor(now / 1e3)}`;
+          const lock = await this.client.set(lockKey, "1", "EX", 10, "NX");
+          if (lock === "OK") {
+            const data = await this.client.hgetall(`${this.prefix}schedule:${id}`);
+            if (data?.id && data.enabled === "true") {
+              try {
+                const serializedJob = JSON.parse(data.job);
+                const connection = data.connection || this.manager.getDefaultConnection();
+                const driver = this.manager.getDriver(connection);
+                await driver.push(data.queue, serializedJob);
+                const nextRun = parser.parse(data.cron).next().getTime();
+                const pipe = this.client.pipeline();
+                pipe.hset(`${this.prefix}schedule:${id}`, {
+                  lastRun: now,
+                  nextRun
+                });
+                pipe.zadd(`${this.prefix}schedules`, nextRun, id);
+                await pipe.exec();
+                fired++;
+              } catch (err) {
+                console.error(`[Scheduler] Failed to process schedule ${id}:`, err);
+              }
+            }
+          }
+        }
+        return fired;
+      }
+    };
+  }
+});
+
+// src/Worker.ts
+var Worker = class {
+  constructor(options = {}) {
+    this.options = options;
+  }
+  /**
+   * Process a Job.
+   * @param job - Job instance
+   */
+  async process(job) {
+    const maxAttempts = job.maxAttempts ?? this.options.maxAttempts ?? 3;
+    const timeout = this.options.timeout;
+    if (!job.attempts) {
+      job.attempts = 1;
+    }
+    try {
+      if (timeout) {
+        await Promise.race([
+          job.handle(),
+          new Promise(
+            (_, reject) => setTimeout(
+              () => reject(new Error(`Job timeout after ${timeout} seconds`)),
+              timeout * 1e3
+            )
+          )
+        ]);
+      } else {
+        await job.handle();
+      }
+    } catch (error) {
+      const err = error instanceof Error ? error : new Error(String(error));
+      if (job.attempts >= maxAttempts) {
+        await this.handleFailure(job, err);
+      }
+      throw err;
+    }
+  }
+  /**
+   * Handle failure.
+   */
+  async handleFailure(job, error) {
+    try {
+      await job.failed(error);
+    } catch (failedError) {
+      console.error("[Worker] Error in job.failed():", failedError);
+    }
+    if (this.options.onFailed) {
+      try {
+        await this.options.onFailed(job, error);
+      } catch (callbackError) {
+        console.error("[Worker] Error in onFailed callback:", callbackError);
+      }
+    }
+  }
+};
+
+// src/Consumer.ts
+var Consumer = class {
+  constructor(queueManager, options) {
+    this.queueManager = queueManager;
+    this.options = options;
+  }
+  running = false;
+  stopRequested = false;
+  workerId = `worker-${Math.random().toString(36).substring(2, 8)}`;
+  heartbeatTimer = null;
+  get connectionName() {
+    return this.options.connection ?? this.queueManager.getDefaultConnection();
+  }
+  /**
+   * Start the consumer loop.
+   */
+  async start() {
+    if (this.running) {
+      throw new Error("Consumer is already running");
+    }
+    this.running = true;
+    this.stopRequested = false;
+    const worker = new Worker(this.options.workerOptions);
+    const pollInterval = this.options.pollInterval ?? 1e3;
+    const keepAlive = this.options.keepAlive ?? true;
+    console.log("[Consumer] Started", {
+      queues: this.options.queues,
+      connection: this.options.connection,
+      workerId: this.workerId
+    });
+    if (this.options.monitor) {
+      this.startHeartbeat();
+      await this.publishLog("info", `Consumer started on [${this.options.queues.join(", ")}]`);
+    }
+    while (this.running && !this.stopRequested) {
+      let processed = false;
+      for (const queue of this.options.queues) {
+        if (this.options.rateLimits?.[queue]) {
+          const limit = this.options.rateLimits[queue];
+          try {
+            const driver = this.queueManager.getDriver(this.connectionName);
+            if (driver.checkRateLimit) {
+              const allowed = await driver.checkRateLimit(queue, limit);
+              if (!allowed) {
+                continue;
+              }
+            }
+          } catch (err) {
+            console.error(`[Consumer] Error checking rate limit for "${queue}":`, err);
+          }
+        }
+        try {
+          const job = await this.queueManager.pop(queue, this.options.connection);
+          if (job) {
+            processed = true;
+            if (this.options.monitor) {
+              await this.publishLog("info", `Processing job: ${job.id}`, job.id);
+            }
+            try {
+              await worker.process(job);
+              if (this.options.monitor) {
+                await this.publishLog("success", `Completed job: ${job.id}`, job.id);
+              }
+            } catch (err) {
+              console.error(`[Consumer] Error processing job in queue "${queue}":`, err);
+              if (this.options.monitor) {
+                await this.publishLog("error", `Job failed: ${job.id} - ${err.message}`, job.id);
+              }
+              const attempts = job.attempts ?? 1;
+              const maxAttempts = job.maxAttempts ?? this.options.workerOptions?.maxAttempts ?? 3;
+              if (attempts < maxAttempts) {
+                job.attempts = attempts + 1;
+                const delayMs = job.getRetryDelay(job.attempts);
+                const delaySec = Math.ceil(delayMs / 1e3);
+                job.delay(delaySec);
+                await this.queueManager.push(job);
+                if (this.options.monitor) {
+                  await this.publishLog(
+                    "warning",
+                    `Job retrying in ${delaySec}s (Attempt ${job.attempts}/${maxAttempts})`,
+                    job.id
+                  );
+                }
+              } else {
+                await this.queueManager.fail(job, err).catch((dlqErr) => {
+                  console.error(`[Consumer] Error moving job to DLQ:`, dlqErr);
+                });
+              }
+            } finally {
+              await this.queueManager.complete(job).catch((err) => {
+                console.error(`[Consumer] Error completing job in queue "${queue}":`, err);
+              });
+            }
+          }
+        } catch (error) {
+          console.error(`[Consumer] Error polling queue "${queue}":`, error);
+        }
+      }
+      if (!processed && !keepAlive) {
+        break;
+      }
+      if (!this.stopRequested && !processed) {
+        await new Promise((resolve) => setTimeout(resolve, pollInterval));
+      } else if (!this.stopRequested && processed) {
+        await new Promise((resolve) => setTimeout(resolve, 0));
+      }
+    }
+    this.running = false;
+    this.stopHeartbeat();
+    if (this.options.monitor) {
+      await this.publishLog("info", "Consumer stopped");
+    }
+    console.log("[Consumer] Stopped");
+  }
+  startHeartbeat() {
+    const interval = typeof this.options.monitor === "object" ? this.options.monitor.interval ?? 5e3 : 5e3;
+    const monitorOptions = typeof this.options.monitor === "object" ? this.options.monitor : {};
+    this.heartbeatTimer = setInterval(async () => {
+      try {
+        const driver = this.queueManager.getDriver(this.connectionName);
+        if (driver.reportHeartbeat) {
+          const monitorPrefix = typeof this.options.monitor === "object" ? this.options.monitor.prefix : void 0;
+          const os = __require("os");
+          const mem = process.memoryUsage();
+          const metrics = {
+            cpu: os.loadavg()[0],
+            // 1m load avg
+            cores: os.cpus().length,
+            ram: {
+              rss: Math.floor(mem.rss / 1024 / 1024),
+              heapUsed: Math.floor(mem.heapUsed / 1024 / 1024),
+              total: Math.floor(os.totalmem() / 1024 / 1024)
+            }
+          };
+          await driver.reportHeartbeat(
+            {
+              id: this.workerId,
+              status: "online",
+              hostname: os.hostname(),
+              pid: process.pid,
+              uptime: Math.floor(process.uptime()),
+              last_ping: (/* @__PURE__ */ new Date()).toISOString(),
+              queues: this.options.queues,
+              metrics,
+              ...monitorOptions.extraInfo || {}
+            },
+            monitorPrefix
+          );
+        }
+      } catch (_e) {
+      }
+    }, interval);
+  }
+  stopHeartbeat() {
+    if (this.heartbeatTimer) {
+      clearInterval(this.heartbeatTimer);
+      this.heartbeatTimer = null;
+    }
+  }
+  async publishLog(level, message, jobId) {
+    try {
+      const driver = this.queueManager.getDriver(this.connectionName);
+      if (driver.publishLog) {
+        const monitorPrefix = typeof this.options.monitor === "object" ? this.options.monitor.prefix : void 0;
+        await driver.publishLog(
+          {
+            level,
+            message,
+            workerId: this.workerId,
+            jobId,
+            timestamp: (/* @__PURE__ */ new Date()).toISOString()
+          },
+          monitorPrefix
+        );
+      }
+    } catch (_e) {
+    }
+  }
+  /**
+   * Stop the consumer loop (graceful shutdown).
+   */
+  async stop() {
+    console.log("[Consumer] Stopping...");
+    this.stopRequested = true;
+    while (this.running) {
+      await new Promise((resolve) => setTimeout(resolve, 100));
+    }
+  }
+  /**
+   * Check whether the consumer is running.
+   */
+  isRunning() {
+    return this.running
|
|
1435
|
+
}
|
|
1436
|
+
};
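The start() loop above is the heart of the new consumer: it round-robins the configured queues, honors optional per-queue rate limits, retries failures with backoff until maxAttempts is exhausted, and then routes the job to the DLQ via queueManager.fail(). A minimal sketch of driving it directly, assuming the implicit in-memory "default" connection; workerOptions, monitor, and rateLimits are omitted here, and the Consumer constructor plus the Worker class appear earlier in this bundle:

```typescript
// A minimal sketch, assuming the in-memory "default" connection that
// QueueManager registers on construction.
import { Consumer, QueueManager } from "@gravito/stream";

const manager = new QueueManager();

const consumer = new Consumer(manager, {
  queues: ["emails", "reports"], // polled in order on every pass
  connection: "default",
  pollInterval: 500,             // ms slept after a pass in which nothing was popped
  keepAlive: true                // false exits the loop after the first idle pass
});

consumer.start().catch((err) => console.error("[example] consumer crashed:", err));

// stop() flips stopRequested and resolves once the loop has drained.
process.on("SIGINT", async () => {
  await consumer.stop();
});
```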
+
+// src/index.ts
+init_DatabaseDriver();
+init_KafkaDriver();
+
+// src/drivers/MemoryDriver.ts
+var MemoryDriver = class {
+  queues = /* @__PURE__ */ new Map();
+  /**
+   * Push a job to a queue.
+   */
+  async push(queue, job) {
+    if (!this.queues.has(queue)) {
+      this.queues.set(queue, []);
+    }
+    this.queues.get(queue)?.push(job);
+  }
+  /**
+   * Pop a job from a queue (FIFO).
+   */
+  async pop(queue) {
+    const queueJobs = this.queues.get(queue);
+    if (!queueJobs || queueJobs.length === 0) {
+      return null;
+    }
+    const now = Date.now();
+    const availableIndex = queueJobs.findIndex(
+      (job) => !job.delaySeconds || now >= job.createdAt + job.delaySeconds * 1e3
+    );
+    if (availableIndex === -1) {
+      return null;
+    }
+    return queueJobs.splice(availableIndex, 1)[0];
+  }
+  /**
+   * Get queue size.
+   */
+  async size(queue) {
+    return this.queues.get(queue)?.length ?? 0;
+  }
+  /**
+   * Clear a queue.
+   */
+  async clear(queue) {
+    this.queues.delete(queue);
+  }
+  /**
+   * Push multiple jobs.
+   */
+  async pushMany(queue, jobs) {
+    if (!this.queues.has(queue)) {
+      this.queues.set(queue, []);
+    }
+    this.queues.get(queue)?.push(...jobs);
+  }
+  /**
+   * Pop multiple jobs.
+   */
+  async popMany(queue, count) {
+    const results = [];
+    for (let i = 0; i < count; i++) {
+      const job = await this.pop(queue);
+      if (job) {
+        results.push(job);
+      } else {
+        break;
+      }
+    }
+    return results;
+  }
+};
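As the pop() implementation shows, MemoryDriver is only FIFO among jobs that are currently visible: it returns the first job whose delay window has elapsed and skips ones still delayed. A short sketch of that visibility rule, with job objects following the serialized shape (id, createdAt, delaySeconds) used elsewhere in this bundle:

```typescript
// Sketch of the delay-visibility rule in MemoryDriver.pop().
const driver = new MemoryDriver();

await driver.push("reports", { id: "a", createdAt: Date.now(), delaySeconds: 60 });
await driver.push("reports", { id: "b", createdAt: Date.now() });

await driver.pop("reports");  // -> job "b"; "a" is still inside its 60s delay window
await driver.size("reports"); // -> 1 ("a" remains queued until its delay elapses)
```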
+
+// src/index.ts
+init_RabbitMQDriver();
+init_RedisDriver();
+init_SQSDriver();
+
+// src/Job.ts
+var Job = class {
+  /**
+   * Unique job identifier.
+   */
+  id;
+  /**
+   * Queue name.
+   */
+  queueName;
+  /**
+   * Connection name.
+   */
+  connectionName;
+  /**
+   * Delay before execution (seconds).
+   */
+  delaySeconds;
+  /**
+   * Current attempt number.
+   */
+  attempts;
+  /**
+   * Maximum attempts.
+   */
+  maxAttempts;
+  /**
+   * Group ID for FIFO.
+   */
+  groupId;
+  /**
+   * Job priority.
+   */
+  priority;
+  /**
+   * Initial retry delay (seconds).
+   */
+  retryAfterSeconds;
+  /**
+   * Retry delay multiplier.
+   */
+  retryMultiplier;
+  /**
+   * Set target queue.
+   */
+  onQueue(queue) {
+    this.queueName = queue;
+    return this;
+  }
+  /**
+   * Set target connection.
+   */
+  onConnection(connection) {
+    this.connectionName = connection;
+    return this;
+  }
+  /**
+   * Set job priority.
+   * @param priority - 'high', 'low', or number
+   */
+  withPriority(priority) {
+    this.priority = priority;
+    return this;
+  }
+  /**
+   * Set delay (seconds).
+   */
+  delay(delay) {
+    this.delaySeconds = delay;
+    return this;
+  }
+  /**
+   * Set retry backoff strategy.
+   * @param seconds - Initial delay in seconds
+   * @param multiplier - Multiplier for each subsequent attempt (default: 2)
+   */
+  backoff(seconds, multiplier = 2) {
+    this.retryAfterSeconds = seconds;
+    this.retryMultiplier = multiplier;
+    return this;
+  }
+  /**
+   * Calculate retry delay for the next attempt.
+   * @param attempt - Current attempt number (1-based)
+   * @returns Delay in milliseconds
+   */
+  getRetryDelay(attempt) {
+    const initialDelay = (this.retryAfterSeconds ?? 1) * 1e3;
+    const multiplier = this.retryMultiplier ?? 2;
+    return Math.min(initialDelay * multiplier ** (attempt - 1), 36e5);
+  }
+  /**
+   * Failure handler (optional).
+   *
+   * Called when the job fails and reaches the maximum number of attempts.
+   * Subclasses can override to implement custom failure handling.
+   *
+   * @param error - Error instance
+   */
+  async failed(_error) {
+  }
+};
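getRetryDelay() is what the consumer loop calls when re-queueing a failed job: exponential backoff from retryAfterSeconds with the given multiplier, capped at one hour (36e5 ms). A worked example of the arithmetic, using a hypothetical SendEmailJob subclass:

```typescript
// Worked example of the backoff arithmetic; SendEmailJob is hypothetical.
class SendEmailJob extends Job {}

const job = new SendEmailJob().onQueue("emails").backoff(30, 2);

job.getRetryDelay(1); // 30000 ms   (30s * 2^0)
job.getRetryDelay(2); // 60000 ms   (30s * 2^1)
job.getRetryDelay(3); // 120000 ms  (30s * 2^2)
job.getRetryDelay(9); // 3600000 ms (30s * 2^8 = 7680s, clamped to the 1h ceiling)
```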
+
+// src/serializers/ClassNameSerializer.ts
+var ClassNameSerializer = class {
+  /**
+   * Job class registry (for resolving classes by name).
+   */
+  jobClasses = /* @__PURE__ */ new Map();
+  /**
+   * Register a Job class.
+   * @param jobClass - Job class
+   */
+  register(jobClass) {
+    this.jobClasses.set(jobClass.name, jobClass);
+  }
+  /**
+   * Register multiple Job classes.
+   * @param jobClasses - Job class array
+   */
+  registerMany(jobClasses) {
+    for (const jobClass of jobClasses) {
+      this.register(jobClass);
+    }
+  }
+  /**
+   * Serialize a Job.
+   */
+  serialize(job) {
+    const id = job.id || `${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
+    const className = job.constructor.name;
+    const properties = {};
+    for (const key in job) {
+      if (Object.hasOwn(job, key) && typeof job[key] !== "function") {
+        properties[key] = job[key];
+      }
+    }
+    return {
+      id,
+      type: "class",
+      className,
+      data: JSON.stringify({
+        class: className,
+        properties
+      }),
+      createdAt: Date.now(),
+      ...job.delaySeconds !== void 0 ? { delaySeconds: job.delaySeconds } : {},
+      attempts: job.attempts ?? 0,
+      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {},
+      ...job.groupId ? { groupId: job.groupId } : {},
+      ...job.retryAfterSeconds !== void 0 ? { retryAfterSeconds: job.retryAfterSeconds } : {},
+      ...job.retryMultiplier !== void 0 ? { retryMultiplier: job.retryMultiplier } : {},
+      ...job.priority !== void 0 ? { priority: job.priority } : {}
+    };
+  }
+  /**
+   * Deserialize a Job.
+   */
+  deserialize(serialized) {
+    if (serialized.type !== "class") {
+      throw new Error('Invalid serialization type: expected "class"');
+    }
+    if (!serialized.className) {
+      throw new Error("Missing className in serialized job");
+    }
+    const JobClass = this.jobClasses.get(serialized.className);
+    if (!JobClass) {
+      throw new Error(
+        `Job class "${serialized.className}" is not registered. Please register it using serializer.register().`
+      );
+    }
+    const parsed = JSON.parse(serialized.data);
+    const job = new JobClass();
+    if (parsed.properties) {
+      Object.assign(job, parsed.properties);
+    }
+    job.id = serialized.id;
+    if (serialized.delaySeconds !== void 0) {
+      job.delaySeconds = serialized.delaySeconds;
+    }
+    if (serialized.attempts !== void 0) {
+      job.attempts = serialized.attempts;
+    }
+    if (serialized.maxAttempts !== void 0) {
+      job.maxAttempts = serialized.maxAttempts;
+    }
+    if (serialized.groupId !== void 0) {
+      job.groupId = serialized.groupId;
+    }
+    if (serialized.retryAfterSeconds !== void 0) {
+      job.retryAfterSeconds = serialized.retryAfterSeconds;
+    }
+    if (serialized.retryMultiplier !== void 0) {
+      job.retryMultiplier = serialized.retryMultiplier;
+    }
+    if (serialized.priority !== void 0) {
+      job.priority = serialized.priority;
+    }
+    return job;
+  }
+};
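ClassNameSerializer keys its registry on constructor.name, so a job class must be registered before its serialized form can be revived. A round-trip sketch with a hypothetical ResizeImageJob:

```typescript
// Round-trip sketch; ResizeImageJob is hypothetical. Registration must happen
// before deserialize(), otherwise the "is not registered" error above is thrown.
class ResizeImageJob extends Job {
  constructor(public path = "") {
    super();
  }
}

const serializer = new ClassNameSerializer();
serializer.register(ResizeImageJob);

const wire = serializer.serialize(new ResizeImageJob("/tmp/a.png"));
// wire.type === "class", wire.className === "ResizeImageJob",
// wire.data is JSON carrying { class, properties: { path: "/tmp/a.png", ... } }

const restored = serializer.deserialize(wire);
console.log(restored instanceof ResizeImageJob); // true: class looked up by name
```

Because the lookup happens by constructor.name, builds that minify or rename job classes would need stable names for deserialization to keep working across producer and consumer processes.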
+
+// src/serializers/JsonSerializer.ts
+var JsonSerializer = class {
+  /**
+   * Serialize a job.
+   */
+  serialize(job) {
+    const id = `${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
+    return {
+      id,
+      type: "json",
+      data: JSON.stringify({
+        job: job.constructor.name,
+        properties: { ...job }
+      }),
+      createdAt: Date.now(),
+      ...job.delaySeconds !== void 0 ? { delaySeconds: job.delaySeconds } : {},
+      attempts: job.attempts ?? 0,
+      ...job.maxAttempts !== void 0 ? { maxAttempts: job.maxAttempts } : {},
+      ...job.groupId ? { groupId: job.groupId } : {},
+      ...job.priority ? { priority: job.priority } : {}
+    };
+  }
+  /**
+   * Deserialize a job.
+   *
+   * Note: this implementation only restores properties and does not recreate class instances.
+   * For class instances, use `ClassNameSerializer`.
+   */
+  deserialize(serialized) {
+    if (serialized.type !== "json") {
+      throw new Error('Invalid serialization type: expected "json"');
+    }
+    const parsed = JSON.parse(serialized.data);
+    const job = /* @__PURE__ */ Object.create({});
+    Object.assign(job, parsed.properties);
+    if (serialized.groupId) {
+      job.groupId = serialized.groupId;
+    }
+    if (serialized.priority) {
+      job.priority = serialized.priority;
+    }
+    return job;
+  }
+};
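The contrast with ClassNameSerializer matters in practice: JsonSerializer round-trips data but not behavior. Continuing the hypothetical ResizeImageJob from the previous sketch:

```typescript
// JsonSerializer restores a plain object, not a class instance.
const json = new JsonSerializer();
const wire = json.serialize(new ResizeImageJob("/tmp/a.png"));

const restored = json.deserialize(wire);
console.log(restored.path);                      // "/tmp/a.png": the data survives
console.log(restored instanceof ResizeImageJob); // false: prototype is a bare object
```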
+
+// src/QueueManager.ts
+var QueueManager = class {
+  drivers = /* @__PURE__ */ new Map();
+  serializers = /* @__PURE__ */ new Map();
+  defaultConnection;
+  defaultSerializer;
+  persistence;
+  scheduler;
+  // Using any to avoid circular dependency or import issues for now
+  constructor(config = {}) {
+    this.persistence = config.persistence;
+    this.defaultConnection = config.default ?? "default";
+    const serializerType = config.defaultSerializer ?? "class";
+    if (serializerType === "class") {
+      this.defaultSerializer = new ClassNameSerializer();
+    } else {
+      this.defaultSerializer = new JsonSerializer();
+    }
+    if (!this.drivers.has("default")) {
+      this.drivers.set("default", new MemoryDriver());
+    }
+    if (config.connections) {
+      for (const [name, connectionConfig] of Object.entries(config.connections)) {
+        this.registerConnection(name, connectionConfig);
+      }
+    }
+  }
+  /**
+   * Register a connection.
+   * @param name - Connection name
+   * @param config - Connection config
+   */
+  registerConnection(name, config) {
+    const driverType = config.driver;
+    switch (driverType) {
+      case "memory":
+        this.drivers.set(name, new MemoryDriver());
+        break;
+      case "database": {
+        const { DatabaseDriver: DatabaseDriver2 } = (init_DatabaseDriver(), __toCommonJS(DatabaseDriver_exports));
+        const dbService = config.dbService;
+        if (!dbService) {
+          throw new Error(
+            "[QueueManager] DatabaseDriver requires dbService. Please provide a database service that implements DatabaseService interface."
+          );
+        }
+        this.drivers.set(
+          name,
+          new DatabaseDriver2({
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver loading requires type assertion
+            dbService,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            table: config.table
+          })
+        );
+        break;
+      }
+      case "redis": {
+        const { RedisDriver: RedisDriver2 } = (init_RedisDriver(), __toCommonJS(RedisDriver_exports));
+        const client = config.client;
+        if (!client) {
+          throw new Error(
+            "[QueueManager] RedisDriver requires client. Please provide Redis client in connection config."
+          );
+        }
+        this.drivers.set(
+          name,
+          new RedisDriver2({
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver loading requires type assertion
+            client,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            prefix: config.prefix
+          })
+        );
+        break;
+      }
+      case "kafka": {
+        const { KafkaDriver: KafkaDriver2 } = (init_KafkaDriver(), __toCommonJS(KafkaDriver_exports));
+        const client = config.client;
+        if (!client) {
+          throw new Error(
+            "[QueueManager] KafkaDriver requires client. Please provide Kafka client in connection config."
+          );
+        }
+        this.drivers.set(
+          name,
+          new KafkaDriver2({
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver loading requires type assertion
+            client,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            consumerGroupId: config.consumerGroupId
+          })
+        );
+        break;
+      }
+      case "sqs": {
+        const { SQSDriver: SQSDriver2 } = (init_SQSDriver(), __toCommonJS(SQSDriver_exports));
+        const client = config.client;
+        if (!client) {
+          throw new Error(
+            "[QueueManager] SQSDriver requires client. Please provide SQS client in connection config."
+          );
+        }
+        this.drivers.set(
+          name,
+          new SQSDriver2({
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver loading requires type assertion
+            client,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            queueUrlPrefix: config.queueUrlPrefix,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            visibilityTimeout: config.visibilityTimeout,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            waitTimeSeconds: config.waitTimeSeconds
+          })
+        );
+        break;
+      }
+      case "rabbitmq": {
+        const { RabbitMQDriver: RabbitMQDriver2 } = (init_RabbitMQDriver(), __toCommonJS(RabbitMQDriver_exports));
+        const client = config.client;
+        if (!client) {
+          throw new Error(
+            "[QueueManager] RabbitMQDriver requires client. Please provide RabbitMQ connection/channel in connection config."
+          );
+        }
+        this.drivers.set(
+          name,
+          new RabbitMQDriver2({
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver loading requires type assertion
+            client,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            exchange: config.exchange,
+            // biome-ignore lint/suspicious/noExplicitAny: Dynamic driver config type
+            exchangeType: config.exchangeType
+          })
+        );
+        break;
+      }
+      default:
+        throw new Error(
+          `Driver "${driverType}" is not supported. Supported drivers: memory, database, redis, kafka, sqs, rabbitmq`
+        );
+    }
+  }
+  /**
+   * Get a driver for a connection.
+   * @param connection - Connection name
+   * @returns Driver instance
+   */
+  getDriver(connection) {
+    const driver = this.drivers.get(connection);
+    if (!driver) {
+      throw new Error(`Connection "${connection}" not found`);
+    }
+    return driver;
+  }
+  /**
+   * Get the default connection name.
+   * @returns Default connection name
+   */
+  getDefaultConnection() {
+    return this.defaultConnection;
+  }
+  /**
+   * Get a serializer.
+   * @param type - Serializer type
+   * @returns Serializer instance
+   */
+  getSerializer(type) {
+    if (type) {
+      const serializer = this.serializers.get(type);
+      if (!serializer) {
+        throw new Error(`Serializer "${type}" not found`);
+      }
+      return serializer;
+    }
+    return this.defaultSerializer;
+  }
+  /**
+   * Register Job classes (used by ClassNameSerializer).
+   * @param jobClasses - Job class array
+   */
+  registerJobClasses(jobClasses) {
+    if (this.defaultSerializer instanceof ClassNameSerializer) {
+      this.defaultSerializer.registerMany(jobClasses);
+    }
+  }
+  /**
+   * Push a Job to the queue.
+   *
+   * @template T - The type of the job.
+   * @param job - Job instance to push.
+   * @param options - Push options.
+   * @returns The same job instance (for fluent chaining).
+   *
+   * @example
+   * ```typescript
+   * await manager.push(new SendEmailJob('user@example.com'));
+   * ```
+   */
+  async push(job, options) {
+    const connection = job.connectionName ?? this.defaultConnection;
+    const queue = job.queueName ?? "default";
+    const driver = this.getDriver(connection);
+    const serializer = this.getSerializer();
+    const serialized = serializer.serialize(job);
+    const pushOptions = { ...options };
+    if (job.priority) {
+      pushOptions.priority = job.priority;
+    }
+    await driver.push(queue, serialized, pushOptions);
+    if (this.persistence?.archiveEnqueued) {
+      this.persistence.adapter.archive(queue, serialized, "waiting").catch((err) => {
+        console.error("[QueueManager] Persistence archive failed (waiting):", err);
+      });
+    }
+    return job;
+  }
+  /**
+   * Push multiple jobs to the queue.
+   *
+   * @template T - The type of the jobs.
+   * @param jobs - Array of job instances.
+   *
+   * @example
+   * ```typescript
+   * await manager.pushMany([new JobA(), new JobB()]);
+   * ```
+   */
+  async pushMany(jobs) {
+    if (jobs.length === 0) {
+      return;
+    }
+    const groups = /* @__PURE__ */ new Map();
+    const serializer = this.getSerializer();
+    for (const job of jobs) {
+      const connection = job.connectionName ?? this.defaultConnection;
+      const queue = job.queueName ?? "default";
+      const key = `${connection}:${queue}`;
+      const serialized = serializer.serialize(job);
+      if (!groups.has(key)) {
+        groups.set(key, []);
+      }
+      groups.get(key)?.push(serialized);
+    }
+    for (const [key, serializedJobs] of groups.entries()) {
+      const [connection, queue] = key.split(":");
+      if (!connection || !queue) {
+        continue;
+      }
+      const driver = this.getDriver(connection);
+      if (driver.pushMany) {
+        await driver.pushMany(queue, serializedJobs);
+      } else {
+        for (const job of serializedJobs) {
+          await driver.push(queue, job);
+        }
+      }
+    }
+  }
+  /**
+   * Pop a job from the queue.
+   *
+   * @param queue - Queue name (default: 'default').
+   * @param connection - Connection name (optional).
+   * @returns Job instance or null if queue is empty.
+   *
+   * @example
+   * ```typescript
+   * const job = await manager.pop('emails');
+   * if (job) await job.handle();
+   * ```
+   */
+  async pop(queue = "default", connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    const serializer = this.getSerializer();
+    const serialized = await driver.pop(queue);
+    if (!serialized) {
+      return null;
+    }
+    try {
+      return serializer.deserialize(serialized);
+    } catch (error) {
+      console.error("[QueueManager] Failed to deserialize job:", error);
+      return null;
+    }
+  }
+  /**
+   * Get queue size.
+   *
+   * @param queue - Queue name (default: 'default').
+   * @param connection - Connection name (optional).
+   * @returns Number of jobs in the queue.
+   */
+  async size(queue = "default", connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    return driver.size(queue);
+  }
+  /**
+   * Clear all jobs from a queue.
+   *
+   * @param queue - Queue name (default: 'default').
+   * @param connection - Connection name (optional).
+   */
+  async clear(queue = "default", connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    await driver.clear(queue);
+  }
+  /**
+   * Mark a job as completed.
+   * @param job - Job instance
+   */
+  async complete(job) {
+    const connection = job.connectionName ?? this.defaultConnection;
+    const queue = job.queueName ?? "default";
+    const driver = this.getDriver(connection);
+    const serializer = this.getSerializer();
+    if (driver.complete) {
+      const serialized = serializer.serialize(job);
+      await driver.complete(queue, serialized);
+      if (this.persistence?.archiveCompleted) {
+        await this.persistence.adapter.archive(queue, serialized, "completed").catch((err) => {
+          console.error("[QueueManager] Persistence archive failed (completed):", err);
+        });
+      }
+    }
+  }
+  /**
+   * Mark a job as permanently failed.
+   * @param job - Job instance
+   * @param error - Error object
+   */
+  async fail(job, error) {
+    const connection = job.connectionName ?? this.defaultConnection;
+    const queue = job.queueName ?? "default";
+    const driver = this.getDriver(connection);
+    const serializer = this.getSerializer();
+    if (driver.fail) {
+      const serialized = serializer.serialize(job);
+      serialized.error = error.message;
+      serialized.failedAt = Date.now();
+      await driver.fail(queue, serialized);
+      if (this.persistence?.archiveFailed) {
+        await this.persistence.adapter.archive(queue, serialized, "failed").catch((err) => {
+          console.error("[QueueManager] Persistence archive failed (failed):", err);
+        });
+      }
+    }
+  }
+  /**
+   * Get the persistence adapter if configured.
+   */
+  getPersistence() {
+    return this.persistence?.adapter;
+  }
+  /**
+   * Get the scheduler if configured.
+   */
+  getScheduler() {
+    if (!this.scheduler) {
+      const { Scheduler: Scheduler2 } = (init_Scheduler(), __toCommonJS(Scheduler_exports));
+      this.scheduler = new Scheduler2(this);
+    }
+    return this.scheduler;
+  }
+  /**
+   * Get failed jobs from DLQ (if driver supports it).
+   */
+  async getFailed(queue, start = 0, end = -1, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.getFailed) {
+      return driver.getFailed(queue, start, end);
+    }
+    return [];
+  }
+  /**
+   * Retry failed jobs from DLQ (if driver supports it).
+   */
+  async retryFailed(queue, count = 1, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.retryFailed) {
+      return driver.retryFailed(queue, count);
+    }
+    return 0;
+  }
+  /**
+   * Clear failed jobs from DLQ (if driver supports it).
+   */
+  async clearFailed(queue, connection = this.defaultConnection) {
+    const driver = this.getDriver(connection);
+    if (driver.clearFailed) {
+      await driver.clearFailed(queue);
+    }
+  }
+};
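QueueManager ties the pieces together: connections map to drivers, the default serializer is chosen once, and the optional persistence block is consulted on push(), complete(), and fail(). A configuration sketch, reusing the hypothetical SendEmailJob from earlier and assuming `db` is an Atlas DB handle (the exact TypeScript config types live in the rewritten index.d.ts):

```typescript
// Field names mirror what the constructor and push()/complete()/fail() read above.
const manager = new QueueManager({
  default: "main",
  defaultSerializer: "class",
  connections: {
    main: { driver: "memory" }
    // redis: { driver: "redis", client: redisClient, prefix: "flux" }
  },
  persistence: {
    adapter: new SQLitePersistence(db),
    archiveEnqueued: true,  // archived as "waiting" on push()
    archiveCompleted: true, // archived as "completed" in complete()
    archiveFailed: true     // archived as "failed" in fail()
  }
});

manager.registerJobClasses([SendEmailJob]); // required by ClassNameSerializer

await manager.push(new SendEmailJob().onQueue("emails"));
const job = await manager.pop("emails"); // revived as a SendEmailJob instance
```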
+
+// src/OrbitStream.ts
+var OrbitStream = class _OrbitStream {
+  constructor(options = {}) {
+    this.options = options;
+  }
+  queueManager;
+  consumer;
+  /**
+   * Static configuration helper.
+   */
+  static configure(options) {
+    return new _OrbitStream(options);
+  }
+  /**
+   * Install into PlanetCore.
+   */
+  install(core) {
+    this.queueManager = new QueueManager(this.options);
+    core.adapter.use("*", async (c, next) => {
+      if (this.queueManager && this.options.connections) {
+        for (const [name, config] of Object.entries(this.options.connections)) {
+          if (config.driver === "database" && !config.dbService) {
+            try {
+              const dbService = c.get("db");
+              if (dbService) {
+                try {
+                  this.queueManager.getDriver(name);
+                } catch {
+                  this.queueManager.registerConnection(name, {
+                    ...config,
+                    dbService
+                  });
+                }
+              }
+            } catch {
+            }
+          }
+        }
+      }
+      c.set("queue", this.queueManager);
+      await next();
+      return void 0;
+    });
+    core.logger.info("[OrbitStream] Installed");
+    if (this.options.autoStartWorker && process.env.NODE_ENV === "development" && this.options.workerOptions) {
+      this.startWorker(this.options.workerOptions);
+    }
+  }
+  /**
+   * Start embedded worker.
+   */
+  startWorker(options) {
+    if (!this.queueManager) {
+      throw new Error("QueueManager not initialized. Call install() first.");
+    }
+    if (this.consumer?.isRunning()) {
+      throw new Error("Worker is already running");
+    }
+    this.consumer = new Consumer(this.queueManager, options);
+    this.consumer.start().catch((error) => {
+      console.error("[OrbitStream] Worker error:", error);
+    });
+  }
+  /**
+   * Stop embedded worker.
+   */
+  async stopWorker() {
+    if (this.consumer) {
+      await this.consumer.stop();
+    }
+  }
+  /**
+   * Get QueueManager instance.
+   */
+  getQueueManager() {
+    return this.queueManager;
+  }
+};
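OrbitStream is the PlanetCore integration layer: install() registers a catch-all middleware that lazily completes database connections from the request container and exposes the manager as c.get("queue"). A usage sketch, assuming a host `core` object exposing the adapter.use() and logger.info() surface that install() calls:

```typescript
// Usage sketch against an assumed PlanetCore-style host named `core`.
const orbit = OrbitStream.configure({
  connections: { main: { driver: "memory" } },
  autoStartWorker: true, // only honored when NODE_ENV === "development"
  workerOptions: { queues: ["emails"], connection: "main" }
});

orbit.install(core);

// Inside a request handler, the shared manager is available on the context:
// const queue = c.get("queue");
// await queue.push(new SendEmailJob().onQueue("emails"));
```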
+
+// src/persistence/MySQLPersistence.ts
+import { DB, Schema } from "@gravito/atlas";
+var MySQLPersistence = class {
+  /**
+   * @param db - An Atlas DB instance or compatible QueryBuilder.
+   * @param table - The name of the table to store archived jobs.
+   */
+  constructor(db, table = "flux_job_archive", logsTable = "flux_system_logs") {
+    this.db = db;
+    this.table = table;
+    this.logsTable = logsTable;
+  }
+  /**
+   * Archive a job.
+   */
+  async archive(queue, job, status) {
+    try {
+      await this.db.table(this.table).insert({
+        job_id: job.id,
+        queue,
+        status,
+        payload: JSON.stringify(job),
+        error: job.error || null,
+        created_at: new Date(job.createdAt),
+        archived_at: /* @__PURE__ */ new Date()
+      });
+    } catch (err) {
+      console.error(`[MySQLPersistence] Failed to archive job ${job.id}:`, err);
+    }
+  }
+  /**
+   * Find a specific job in the archive.
+   */
+  async find(queue, id) {
+    const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
+    if (!row) {
+      return null;
+    }
+    try {
+      const job = typeof row.payload === "string" ? JSON.parse(row.payload) : row.payload;
+      return job;
+    } catch (_e) {
+      return null;
+    }
+  }
+  /**
+   * List jobs from the archive.
+   */
+  async list(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const rows = await query.orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+    return rows.map((r) => {
+      try {
+        const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
+        return { ...job, _status: r.status, _archivedAt: r.archived_at };
+      } catch (_e) {
+        return null;
+      }
+    }).filter(Boolean);
+  }
+  /**
+   * Search jobs from the archive.
+   */
+  async search(query, options = {}) {
+    let q = this.db.table(this.table);
+    if (options.queue) {
+      q = q.where("queue", options.queue);
+    }
+    const rows = await q.where((sub) => {
+      sub.where("job_id", "like", `%${query}%`).orWhere("payload", "like", `%${query}%`).orWhere("error", "like", `%${query}%`);
+    }).orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+    return rows.map((r) => {
+      try {
+        const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
+        return { ...job, _status: r.status, _archivedAt: r.archived_at };
+      } catch (_e) {
+        return null;
+      }
+    }).filter(Boolean);
+  }
+  /**
+   * Archive a system log message.
+   */
+  async archiveLog(log) {
+    try {
+      await this.db.table(this.logsTable).insert({
+        level: log.level,
+        message: log.message,
+        worker_id: log.workerId,
+        queue: log.queue || null,
+        timestamp: log.timestamp
+      });
+    } catch (err) {
+      console.error(`[MySQLPersistence] Failed to archive log:`, err.message);
+    }
+  }
+  /**
+   * List system logs from the archive.
+   */
+  async listLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    return await query.orderBy("timestamp", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+  }
+  /**
+   * Count system logs in the archive.
+   */
+  async countLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Remove old records from the archive.
+   */
+  async cleanup(days) {
+    const threshold = /* @__PURE__ */ new Date();
+    threshold.setDate(threshold.getDate() - days);
+    const [jobsDeleted, logsDeleted] = await Promise.all([
+      this.db.table(this.table).where("archived_at", "<", threshold).delete(),
+      this.db.table(this.logsTable).where("timestamp", "<", threshold).delete()
+    ]);
+    return (jobsDeleted || 0) + (logsDeleted || 0);
+  }
+  /**
+   * Count jobs in the archive.
+   */
+  async count(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Helper script to create the necessary tables.
+   */
+  async setupTable() {
+    await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
+  }
+  async setupJobsTable() {
+    const exists = await Schema.hasTable(this.table);
+    if (exists) return;
+    await Schema.create(this.table, (table) => {
+      table.id();
+      table.string("job_id", 64);
+      table.string("queue", 128);
+      table.string("status", 20);
+      table.json("payload");
+      table.text("error").nullable();
+      table.timestamp("created_at").nullable();
+      table.timestamp("archived_at").default(DB.raw("CURRENT_TIMESTAMP"));
+      table.index(["queue", "archived_at"]);
+      table.index(["queue", "job_id"]);
+      table.index(["status", "archived_at"]);
+      table.index(["archived_at"]);
+    });
+    console.log(`[MySQLPersistence] Created jobs archive table: ${this.table}`);
+  }
+  async setupLogsTable() {
+    const exists = await Schema.hasTable(this.logsTable);
+    if (exists) return;
+    await Schema.create(this.logsTable, (table) => {
+      table.id();
+      table.string("level", 20);
+      table.text("message");
+      table.string("worker_id", 128);
+      table.string("queue", 128).nullable();
+      table.timestamp("timestamp").default(DB.raw("CURRENT_TIMESTAMP"));
+      table.index(["worker_id"]);
+      table.index(["queue"]);
+      table.index(["level"]);
+      table.index(["timestamp"]);
+    });
+    console.log(`[MySQLPersistence] Created logs archive table: ${this.logsTable}`);
+  }
+};
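MySQLPersistence is the durable archive behind the persistence flags consulted in QueueManager above. A setup-and-query sketch, assuming `db` is an Atlas DB instance backed by MySQL:

```typescript
// Setup-and-query sketch; `db` (an Atlas DB handle) is assumed.
const persistence = new MySQLPersistence(db);
await persistence.setupTable(); // creates both tables if they do not exist

const failed = await persistence.list("emails", { status: "failed", limit: 20 });
const hits = await persistence.search("SendEmailJob", { queue: "emails" });
const purged = await persistence.cleanup(30); // rows older than 30 days, jobs + logs
```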
+
+// src/persistence/SQLitePersistence.ts
+import { Schema as Schema2 } from "@gravito/atlas";
+var SQLitePersistence = class {
+  /**
+   * @param db - An Atlas DB instance (SQLite driver).
+   * @param table - The name of the table to store archived jobs.
+   */
+  constructor(db, table = "flux_job_archive", logsTable = "flux_system_logs") {
+    this.db = db;
+    this.table = table;
+    this.logsTable = logsTable;
+  }
+  /**
+   * Archive a job.
+   */
+  async archive(queue, job, status) {
+    try {
+      await this.db.table(this.table).insert({
+        job_id: job.id,
+        queue,
+        status,
+        payload: JSON.stringify(job),
+        error: job.error || null,
+        created_at: new Date(job.createdAt),
+        archived_at: /* @__PURE__ */ new Date()
+      });
+    } catch (err) {
+      console.error(`[SQLitePersistence] Failed to archive job ${job.id}:`, err.message);
+    }
+  }
+  /**
+   * Find a specific job in the archive.
+   */
+  async find(queue, id) {
+    const row = await this.db.table(this.table).where("queue", queue).where("job_id", id).first();
+    if (!row) {
+      return null;
+    }
+    try {
+      const job = typeof row.payload === "string" ? JSON.parse(row.payload) : row.payload;
+      return job;
+    } catch (_e) {
+      return null;
+    }
+  }
+  /**
+   * List jobs from the archive.
+   */
+  async list(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const rows = await query.orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+    return rows.map((r) => {
+      try {
+        const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
+        return { ...job, _status: r.status, _archivedAt: r.archived_at };
+      } catch (_e) {
+        return null;
+      }
+    }).filter(Boolean);
+  }
+  /**
+   * Search jobs from the archive.
+   */
+  async search(query, options = {}) {
+    let q = this.db.table(this.table);
+    if (options.queue) {
+      q = q.where("queue", options.queue);
+    }
+    const rows = await q.where((sub) => {
+      sub.where("job_id", "like", `%${query}%`).orWhere("payload", "like", `%${query}%`).orWhere("error", "like", `%${query}%`);
+    }).orderBy("archived_at", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+    return rows.map((r) => {
+      try {
+        const job = typeof r.payload === "string" ? JSON.parse(r.payload) : r.payload;
+        return { ...job, _status: r.status, _archivedAt: r.archived_at };
+      } catch (_e) {
+        return null;
+      }
+    }).filter(Boolean);
+  }
+  /**
+   * Archive a system log message.
+   */
+  async archiveLog(log) {
+    try {
+      await this.db.table(this.logsTable).insert({
+        level: log.level,
+        message: log.message,
+        worker_id: log.workerId,
+        queue: log.queue || null,
+        timestamp: log.timestamp
+      });
+    } catch (err) {
+      console.error(`[SQLitePersistence] Failed to archive log:`, err.message);
+    }
+  }
+  /**
+   * List system logs from the archive.
+   */
+  async listLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    return await query.orderBy("timestamp", "desc").limit(options.limit ?? 50).offset(options.offset ?? 0).get();
+  }
+  /**
+   * Count system logs in the archive.
+   */
+  async countLogs(options = {}) {
+    let query = this.db.table(this.logsTable);
+    if (options.level) query = query.where("level", options.level);
+    if (options.workerId) query = query.where("worker_id", options.workerId);
+    if (options.queue) query = query.where("queue", options.queue);
+    if (options.search) {
+      query = query.where("message", "like", `%${options.search}%`);
+    }
+    if (options.startTime) {
+      query = query.where("timestamp", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("timestamp", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Remove old records from the archive.
+   */
+  async cleanup(days) {
+    const threshold = /* @__PURE__ */ new Date();
+    threshold.setDate(threshold.getDate() - days);
+    const [jobsDeleted, logsDeleted] = await Promise.all([
+      this.db.table(this.table).where("archived_at", "<", threshold).delete(),
+      this.db.table(this.logsTable).where("timestamp", "<", threshold).delete()
+    ]);
+    return (jobsDeleted || 0) + (logsDeleted || 0);
+  }
+  /**
+   * Count jobs in the archive.
+   */
+  async count(queue, options = {}) {
+    let query = this.db.table(this.table).where("queue", queue);
+    if (options.status) {
+      query = query.where("status", options.status);
+    }
+    if (options.jobId) {
+      query = query.where("job_id", options.jobId);
+    }
+    if (options.startTime) {
+      query = query.where("archived_at", ">=", options.startTime);
+    }
+    if (options.endTime) {
+      query = query.where("archived_at", "<=", options.endTime);
+    }
+    const result = await query.count("id as total").first();
+    return result?.total || 0;
+  }
+  /**
+   * Setup table for SQLite.
+   */
+  async setupTable() {
+    await Promise.all([this.setupJobsTable(), this.setupLogsTable()]);
+  }
+  async setupJobsTable() {
+    const exists = await Schema2.hasTable(this.table);
+    if (exists) return;
+    await Schema2.create(this.table, (table) => {
+      table.id();
+      table.string("job_id", 64);
+      table.string("queue", 128);
+      table.string("status", 20);
+      table.text("payload");
+      table.text("error").nullable();
+      table.timestamp("created_at").nullable();
+      table.timestamp("archived_at").nullable();
+      table.index(["queue", "archived_at"]);
+      table.index(["archived_at"]);
+    });
+    console.log(`[SQLitePersistence] Created jobs archive table: ${this.table}`);
+  }
+  async setupLogsTable() {
+    const exists = await Schema2.hasTable(this.logsTable);
+    if (exists) return;
+    await Schema2.create(this.logsTable, (table) => {
+      table.id();
+      table.string("level", 20);
+      table.text("message");
+      table.string("worker_id", 128);
+      table.string("queue", 128).nullable();
+      table.timestamp("timestamp");
+      table.index(["worker_id"]);
+      table.index(["queue"]);
+      table.index(["level"]);
+      table.index(["timestamp"]);
+    });
+    console.log(`[SQLitePersistence] Created logs archive table: ${this.logsTable}`);
+  }
+};
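SQLitePersistence mirrors the MySQL adapter method-for-method; only the DDL differs (a text payload column and nullable timestamps instead of CURRENT_TIMESTAMP defaults), which makes it a reasonable drop-in for local development. A brief sketch, assuming `db` is an Atlas DB instance on SQLite:

```typescript
// Brief sketch; `db` (an Atlas DB handle on SQLite) is assumed.
const persistence = new SQLitePersistence(db);
await persistence.setupTable();

await persistence.archiveLog({
  level: "info",
  message: "Consumer started",
  workerId: "worker-1",
  timestamp: new Date().toISOString()
});
console.log(await persistence.countLogs({ level: "info" }));
```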
+
+// src/index.ts
+init_Scheduler();
+export {
+  ClassNameSerializer,
+  Consumer,
+  DatabaseDriver,
+  Job,
+  JsonSerializer,
+  KafkaDriver,
+  MemoryDriver,
+  MySQLPersistence,
+  OrbitStream,
+  QueueManager,
+  RabbitMQDriver,
+  RedisDriver,
+  SQLitePersistence,
+  SQSDriver,
+  Scheduler,
+  Worker
+};