@nicnocquee/dataqueue 1.22.0 → 1.25.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +44 -0
- package/dist/index.cjs +2822 -583
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +589 -12
- package/dist/index.d.ts +589 -12
- package/dist/index.js +2818 -584
- package/dist/index.js.map +1 -1
- package/migrations/1751131910825_add_timeout_seconds_to_job_queue.sql +2 -2
- package/migrations/1751186053000_add_job_events_table.sql +12 -8
- package/migrations/1751984773000_add_tags_to_job_queue.sql +1 -1
- package/migrations/1765809419000_add_force_kill_on_timeout_to_job_queue.sql +6 -0
- package/migrations/1771100000000_add_idempotency_key_to_job_queue.sql +7 -0
- package/migrations/1781200000000_add_wait_support.sql +12 -0
- package/migrations/1781200000001_create_waitpoints_table.sql +18 -0
- package/migrations/1781200000002_add_performance_indexes.sql +34 -0
- package/migrations/1781200000003_add_progress_to_job_queue.sql +7 -0
- package/package.json +20 -6
- package/src/backend.ts +163 -0
- package/src/backends/postgres.ts +1111 -0
- package/src/backends/redis-scripts.ts +533 -0
- package/src/backends/redis.test.ts +543 -0
- package/src/backends/redis.ts +834 -0
- package/src/db-util.ts +4 -2
- package/src/handler-validation.test.ts +414 -0
- package/src/handler-validation.ts +168 -0
- package/src/index.test.ts +230 -1
- package/src/index.ts +128 -32
- package/src/processor.test.ts +612 -16
- package/src/processor.ts +759 -47
- package/src/queue.test.ts +736 -3
- package/src/queue.ts +346 -660
- package/src/test-util.ts +32 -0
- package/src/types.ts +451 -16
- package/src/wait.test.ts +698 -0
package/dist/index.cjs
CHANGED
|
@@ -1,10 +1,14 @@
|
|
|
1
1
|
'use strict';
|
|
2
2
|
|
|
3
3
|
var async_hooks = require('async_hooks');
|
|
4
|
+
var crypto = require('crypto');
|
|
5
|
+
var worker_threads = require('worker_threads');
|
|
4
6
|
var pg = require('pg');
|
|
5
7
|
var pgConnectionString = require('pg-connection-string');
|
|
6
8
|
var fs = require('fs');
|
|
9
|
+
var module$1 = require('module');
|
|
7
10
|
|
|
11
|
+
var _documentCurrentScript = typeof document !== 'undefined' ? document.currentScript : null;
|
|
8
12
|
function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
|
|
9
13
|
|
|
10
14
|
var fs__default = /*#__PURE__*/_interopDefault(fs);
|
|
@@ -17,14 +21,28 @@ var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
|
17
21
|
JobEventType2["Failed"] = "failed";
|
|
18
22
|
JobEventType2["Cancelled"] = "cancelled";
|
|
19
23
|
JobEventType2["Retried"] = "retried";
|
|
24
|
+
JobEventType2["Edited"] = "edited";
|
|
25
|
+
JobEventType2["Prolonged"] = "prolonged";
|
|
26
|
+
JobEventType2["Waiting"] = "waiting";
|
|
20
27
|
return JobEventType2;
|
|
21
28
|
})(JobEventType || {});
|
|
22
|
-
var FailureReason = /* @__PURE__ */ ((
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
return
|
|
29
|
+
var FailureReason = /* @__PURE__ */ ((FailureReason5) => {
|
|
30
|
+
FailureReason5["Timeout"] = "timeout";
|
|
31
|
+
FailureReason5["HandlerError"] = "handler_error";
|
|
32
|
+
FailureReason5["NoHandler"] = "no_handler";
|
|
33
|
+
return FailureReason5;
|
|
27
34
|
})(FailureReason || {});
|
|
35
|
+
var WaitSignal = class extends Error {
|
|
36
|
+
constructor(type, waitUntil, tokenId, stepData) {
|
|
37
|
+
super("WaitSignal");
|
|
38
|
+
this.type = type;
|
|
39
|
+
this.waitUntil = waitUntil;
|
|
40
|
+
this.tokenId = tokenId;
|
|
41
|
+
this.stepData = stepData;
|
|
42
|
+
this.isWaitSignal = true;
|
|
43
|
+
this.name = "WaitSignal";
|
|
44
|
+
}
|
|
45
|
+
};
|
|
28
46
|
var logStorage = new async_hooks.AsyncLocalStorage();
|
|
29
47
|
var setLogContext = (verbose) => {
|
|
30
48
|
logStorage.enterWith({ verbose });
|
|
@@ -39,675 +57,1655 @@ var log = (message) => {
|
|
|
39
57
|
}
|
|
40
58
|
};
|
|
41
59
|
|
|
42
|
-
// src/
|
|
43
|
-
var
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
await client.query(
|
|
47
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
48
|
-
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
49
|
-
);
|
|
50
|
-
} catch (error) {
|
|
51
|
-
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
52
|
-
} finally {
|
|
53
|
-
client.release();
|
|
60
|
+
// src/backends/postgres.ts
|
|
61
|
+
var PostgresBackend = class {
|
|
62
|
+
constructor(pool) {
|
|
63
|
+
this.pool = pool;
|
|
54
64
|
}
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
try {
|
|
67
|
-
let result;
|
|
68
|
-
if (runAt) {
|
|
69
|
-
result = await client.query(
|
|
70
|
-
`INSERT INTO job_queue
|
|
71
|
-
(job_type, payload, max_attempts, priority, run_at, timeout_ms, tags)
|
|
72
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
|
73
|
-
RETURNING id`,
|
|
74
|
-
[
|
|
75
|
-
jobType,
|
|
76
|
-
payload,
|
|
77
|
-
maxAttempts,
|
|
78
|
-
priority,
|
|
79
|
-
runAt,
|
|
80
|
-
timeoutMs ?? null,
|
|
81
|
-
tags ?? null
|
|
82
|
-
]
|
|
65
|
+
/** Expose the raw pool for advanced usage. */
|
|
66
|
+
getPool() {
|
|
67
|
+
return this.pool;
|
|
68
|
+
}
|
|
69
|
+
// ── Events ──────────────────────────────────────────────────────────
|
|
70
|
+
async recordJobEvent(jobId, eventType, metadata) {
|
|
71
|
+
const client = await this.pool.connect();
|
|
72
|
+
try {
|
|
73
|
+
await client.query(
|
|
74
|
+
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
75
|
+
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
83
76
|
);
|
|
77
|
+
} catch (error) {
|
|
78
|
+
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
79
|
+
} finally {
|
|
80
|
+
client.release();
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
async getJobEvents(jobId) {
|
|
84
|
+
const client = await this.pool.connect();
|
|
85
|
+
try {
|
|
86
|
+
const res = await client.query(
|
|
87
|
+
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
88
|
+
[jobId]
|
|
89
|
+
);
|
|
90
|
+
return res.rows;
|
|
91
|
+
} finally {
|
|
92
|
+
client.release();
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
96
|
+
async addJob({
|
|
97
|
+
jobType,
|
|
98
|
+
payload,
|
|
99
|
+
maxAttempts = 3,
|
|
100
|
+
priority = 0,
|
|
101
|
+
runAt = null,
|
|
102
|
+
timeoutMs = void 0,
|
|
103
|
+
forceKillOnTimeout = false,
|
|
104
|
+
tags = void 0,
|
|
105
|
+
idempotencyKey = void 0
|
|
106
|
+
}) {
|
|
107
|
+
const client = await this.pool.connect();
|
|
108
|
+
try {
|
|
109
|
+
let result;
|
|
110
|
+
const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
|
|
111
|
+
if (runAt) {
|
|
112
|
+
result = await client.query(
|
|
113
|
+
`INSERT INTO job_queue
|
|
114
|
+
(job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
115
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
116
|
+
${onConflict}
|
|
117
|
+
RETURNING id`,
|
|
118
|
+
[
|
|
119
|
+
jobType,
|
|
120
|
+
payload,
|
|
121
|
+
maxAttempts,
|
|
122
|
+
priority,
|
|
123
|
+
runAt,
|
|
124
|
+
timeoutMs ?? null,
|
|
125
|
+
forceKillOnTimeout ?? false,
|
|
126
|
+
tags ?? null,
|
|
127
|
+
idempotencyKey ?? null
|
|
128
|
+
]
|
|
129
|
+
);
|
|
130
|
+
} else {
|
|
131
|
+
result = await client.query(
|
|
132
|
+
`INSERT INTO job_queue
|
|
133
|
+
(job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
134
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
135
|
+
${onConflict}
|
|
136
|
+
RETURNING id`,
|
|
137
|
+
[
|
|
138
|
+
jobType,
|
|
139
|
+
payload,
|
|
140
|
+
maxAttempts,
|
|
141
|
+
priority,
|
|
142
|
+
timeoutMs ?? null,
|
|
143
|
+
forceKillOnTimeout ?? false,
|
|
144
|
+
tags ?? null,
|
|
145
|
+
idempotencyKey ?? null
|
|
146
|
+
]
|
|
147
|
+
);
|
|
148
|
+
}
|
|
149
|
+
if (result.rows.length === 0 && idempotencyKey) {
|
|
150
|
+
const existing = await client.query(
|
|
151
|
+
`SELECT id FROM job_queue WHERE idempotency_key = $1`,
|
|
152
|
+
[idempotencyKey]
|
|
153
|
+
);
|
|
154
|
+
if (existing.rows.length > 0) {
|
|
155
|
+
log(
|
|
156
|
+
`Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
|
|
157
|
+
);
|
|
158
|
+
return existing.rows[0].id;
|
|
159
|
+
}
|
|
160
|
+
throw new Error(
|
|
161
|
+
`Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
|
|
162
|
+
);
|
|
163
|
+
}
|
|
164
|
+
const jobId = result.rows[0].id;
|
|
84
165
|
log(
|
|
85
|
-
`Added job ${
|
|
166
|
+
`Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
|
|
86
167
|
);
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
168
|
+
await this.recordJobEvent(jobId, "added" /* Added */, {
|
|
169
|
+
jobType,
|
|
170
|
+
payload,
|
|
171
|
+
tags,
|
|
172
|
+
idempotencyKey
|
|
173
|
+
});
|
|
174
|
+
return jobId;
|
|
175
|
+
} catch (error) {
|
|
176
|
+
log(`Error adding job: ${error}`);
|
|
177
|
+
throw error;
|
|
178
|
+
} finally {
|
|
179
|
+
client.release();
|
|
180
|
+
}
|
|
181
|
+
}
|
|
182
|
+
async getJob(id) {
|
|
183
|
+
const client = await this.pool.connect();
|
|
184
|
+
try {
|
|
185
|
+
const result = await client.query(
|
|
186
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
|
|
187
|
+
[id]
|
|
188
|
+
);
|
|
189
|
+
if (result.rows.length === 0) {
|
|
190
|
+
log(`Job ${id} not found`);
|
|
191
|
+
return null;
|
|
192
|
+
}
|
|
193
|
+
log(`Found job ${id}`);
|
|
194
|
+
const job = result.rows[0];
|
|
195
|
+
return {
|
|
196
|
+
...job,
|
|
197
|
+
payload: job.payload,
|
|
198
|
+
timeoutMs: job.timeoutMs,
|
|
199
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
200
|
+
failureReason: job.failureReason
|
|
201
|
+
};
|
|
202
|
+
} catch (error) {
|
|
203
|
+
log(`Error getting job ${id}: ${error}`);
|
|
204
|
+
throw error;
|
|
205
|
+
} finally {
|
|
206
|
+
client.release();
|
|
207
|
+
}
|
|
208
|
+
}
|
|
209
|
+
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
210
|
+
const client = await this.pool.connect();
|
|
211
|
+
try {
|
|
212
|
+
const result = await client.query(
|
|
213
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
|
|
214
|
+
[status, limit, offset]
|
|
215
|
+
);
|
|
216
|
+
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
217
|
+
return result.rows.map((job) => ({
|
|
218
|
+
...job,
|
|
219
|
+
payload: job.payload,
|
|
220
|
+
timeoutMs: job.timeoutMs,
|
|
221
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
222
|
+
failureReason: job.failureReason
|
|
223
|
+
}));
|
|
224
|
+
} catch (error) {
|
|
225
|
+
log(`Error getting jobs by status ${status}: ${error}`);
|
|
226
|
+
throw error;
|
|
227
|
+
} finally {
|
|
228
|
+
client.release();
|
|
229
|
+
}
|
|
230
|
+
}
|
|
231
|
+
async getAllJobs(limit = 100, offset = 0) {
|
|
232
|
+
const client = await this.pool.connect();
|
|
233
|
+
try {
|
|
234
|
+
const result = await client.query(
|
|
235
|
+
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
236
|
+
[limit, offset]
|
|
237
|
+
);
|
|
238
|
+
log(`Found ${result.rows.length} jobs (all)`);
|
|
239
|
+
return result.rows.map((job) => ({
|
|
240
|
+
...job,
|
|
241
|
+
payload: job.payload,
|
|
242
|
+
timeoutMs: job.timeoutMs,
|
|
243
|
+
forceKillOnTimeout: job.forceKillOnTimeout
|
|
244
|
+
}));
|
|
245
|
+
} catch (error) {
|
|
246
|
+
log(`Error getting all jobs: ${error}`);
|
|
247
|
+
throw error;
|
|
248
|
+
} finally {
|
|
249
|
+
client.release();
|
|
250
|
+
}
|
|
251
|
+
}
|
|
252
|
+
async getJobs(filters, limit = 100, offset = 0) {
|
|
253
|
+
const client = await this.pool.connect();
|
|
254
|
+
try {
|
|
255
|
+
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue`;
|
|
256
|
+
const params = [];
|
|
257
|
+
const where = [];
|
|
258
|
+
let paramIdx = 1;
|
|
259
|
+
if (filters) {
|
|
260
|
+
if (filters.jobType) {
|
|
261
|
+
where.push(`job_type = $${paramIdx++}`);
|
|
262
|
+
params.push(filters.jobType);
|
|
263
|
+
}
|
|
264
|
+
if (filters.priority !== void 0) {
|
|
265
|
+
where.push(`priority = $${paramIdx++}`);
|
|
266
|
+
params.push(filters.priority);
|
|
267
|
+
}
|
|
268
|
+
if (filters.runAt) {
|
|
269
|
+
if (filters.runAt instanceof Date) {
|
|
270
|
+
where.push(`run_at = $${paramIdx++}`);
|
|
271
|
+
params.push(filters.runAt);
|
|
272
|
+
} else if (typeof filters.runAt === "object" && (filters.runAt.gt !== void 0 || filters.runAt.gte !== void 0 || filters.runAt.lt !== void 0 || filters.runAt.lte !== void 0 || filters.runAt.eq !== void 0)) {
|
|
273
|
+
const ops = filters.runAt;
|
|
274
|
+
if (ops.gt) {
|
|
275
|
+
where.push(`run_at > $${paramIdx++}`);
|
|
276
|
+
params.push(ops.gt);
|
|
277
|
+
}
|
|
278
|
+
if (ops.gte) {
|
|
279
|
+
where.push(`run_at >= $${paramIdx++}`);
|
|
280
|
+
params.push(ops.gte);
|
|
281
|
+
}
|
|
282
|
+
if (ops.lt) {
|
|
283
|
+
where.push(`run_at < $${paramIdx++}`);
|
|
284
|
+
params.push(ops.lt);
|
|
285
|
+
}
|
|
286
|
+
if (ops.lte) {
|
|
287
|
+
where.push(`run_at <= $${paramIdx++}`);
|
|
288
|
+
params.push(ops.lte);
|
|
289
|
+
}
|
|
290
|
+
if (ops.eq) {
|
|
291
|
+
where.push(`run_at = $${paramIdx++}`);
|
|
292
|
+
params.push(ops.eq);
|
|
293
|
+
}
|
|
294
|
+
}
|
|
295
|
+
}
|
|
296
|
+
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
297
|
+
const mode = filters.tags.mode || "all";
|
|
298
|
+
const tagValues = filters.tags.values;
|
|
299
|
+
switch (mode) {
|
|
300
|
+
case "exact":
|
|
301
|
+
where.push(`tags = $${paramIdx++}`);
|
|
302
|
+
params.push(tagValues);
|
|
303
|
+
break;
|
|
304
|
+
case "all":
|
|
305
|
+
where.push(`tags @> $${paramIdx++}`);
|
|
306
|
+
params.push(tagValues);
|
|
307
|
+
break;
|
|
308
|
+
case "any":
|
|
309
|
+
where.push(`tags && $${paramIdx++}`);
|
|
310
|
+
params.push(tagValues);
|
|
311
|
+
break;
|
|
312
|
+
case "none":
|
|
313
|
+
where.push(`NOT (tags && $${paramIdx++})`);
|
|
314
|
+
params.push(tagValues);
|
|
315
|
+
break;
|
|
316
|
+
default:
|
|
317
|
+
where.push(`tags @> $${paramIdx++}`);
|
|
318
|
+
params.push(tagValues);
|
|
319
|
+
}
|
|
320
|
+
}
|
|
321
|
+
if (filters.cursor !== void 0) {
|
|
322
|
+
where.push(`id < $${paramIdx++}`);
|
|
323
|
+
params.push(filters.cursor);
|
|
324
|
+
}
|
|
325
|
+
}
|
|
326
|
+
if (where.length > 0) {
|
|
327
|
+
query += ` WHERE ${where.join(" AND ")}`;
|
|
328
|
+
}
|
|
329
|
+
paramIdx = params.length + 1;
|
|
330
|
+
query += ` ORDER BY id DESC LIMIT $${paramIdx++}`;
|
|
331
|
+
if (!filters?.cursor) {
|
|
332
|
+
query += ` OFFSET $${paramIdx}`;
|
|
333
|
+
params.push(limit, offset);
|
|
334
|
+
} else {
|
|
335
|
+
params.push(limit);
|
|
336
|
+
}
|
|
337
|
+
const result = await client.query(query, params);
|
|
338
|
+
log(`Found ${result.rows.length} jobs`);
|
|
339
|
+
return result.rows.map((job) => ({
|
|
340
|
+
...job,
|
|
341
|
+
payload: job.payload,
|
|
342
|
+
timeoutMs: job.timeoutMs,
|
|
343
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
344
|
+
failureReason: job.failureReason
|
|
345
|
+
}));
|
|
346
|
+
} catch (error) {
|
|
347
|
+
log(`Error getting jobs: ${error}`);
|
|
348
|
+
throw error;
|
|
349
|
+
} finally {
|
|
350
|
+
client.release();
|
|
351
|
+
}
|
|
352
|
+
}
|
|
353
|
+
async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
|
|
354
|
+
const client = await this.pool.connect();
|
|
355
|
+
try {
|
|
356
|
+
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
|
|
357
|
+
FROM job_queue`;
|
|
358
|
+
let params = [];
|
|
359
|
+
switch (mode) {
|
|
360
|
+
case "exact":
|
|
361
|
+
query += " WHERE tags = $1";
|
|
362
|
+
params = [tags];
|
|
363
|
+
break;
|
|
364
|
+
case "all":
|
|
365
|
+
query += " WHERE tags @> $1";
|
|
366
|
+
params = [tags];
|
|
367
|
+
break;
|
|
368
|
+
case "any":
|
|
369
|
+
query += " WHERE tags && $1";
|
|
370
|
+
params = [tags];
|
|
371
|
+
break;
|
|
372
|
+
case "none":
|
|
373
|
+
query += " WHERE NOT (tags && $1)";
|
|
374
|
+
params = [tags];
|
|
375
|
+
break;
|
|
376
|
+
default:
|
|
377
|
+
query += " WHERE tags @> $1";
|
|
378
|
+
params = [tags];
|
|
379
|
+
}
|
|
380
|
+
query += " ORDER BY created_at DESC LIMIT $2 OFFSET $3";
|
|
381
|
+
params.push(limit, offset);
|
|
382
|
+
const result = await client.query(query, params);
|
|
383
|
+
log(
|
|
384
|
+
`Found ${result.rows.length} jobs by tags ${JSON.stringify(tags)} (mode: ${mode})`
|
|
385
|
+
);
|
|
386
|
+
return result.rows.map((job) => ({
|
|
387
|
+
...job,
|
|
388
|
+
payload: job.payload,
|
|
389
|
+
timeoutMs: job.timeoutMs,
|
|
390
|
+
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
391
|
+
failureReason: job.failureReason
|
|
392
|
+
}));
|
|
393
|
+
} catch (error) {
|
|
394
|
+
log(
|
|
395
|
+
`Error getting jobs by tags ${JSON.stringify(tags)} (mode: ${mode}): ${error}`
|
|
396
|
+
);
|
|
397
|
+
throw error;
|
|
398
|
+
} finally {
|
|
399
|
+
client.release();
|
|
400
|
+
}
|
|
401
|
+
}
|
|
402
|
+
// ── Processing lifecycle ──────────────────────────────────────────────
|
|
403
|
+
async getNextBatch(workerId, batchSize = 10, jobType) {
|
|
404
|
+
const client = await this.pool.connect();
|
|
405
|
+
try {
|
|
406
|
+
await client.query("BEGIN");
|
|
407
|
+
let jobTypeFilter = "";
|
|
408
|
+
const params = [workerId, batchSize];
|
|
409
|
+
if (jobType) {
|
|
410
|
+
if (Array.isArray(jobType)) {
|
|
411
|
+
jobTypeFilter = ` AND job_type = ANY($3)`;
|
|
412
|
+
params.push(jobType);
|
|
413
|
+
} else {
|
|
414
|
+
jobTypeFilter = ` AND job_type = $3`;
|
|
415
|
+
params.push(jobType);
|
|
416
|
+
}
|
|
417
|
+
}
|
|
418
|
+
const result = await client.query(
|
|
419
|
+
`
|
|
420
|
+
UPDATE job_queue
|
|
421
|
+
SET status = 'processing',
|
|
422
|
+
locked_at = NOW(),
|
|
423
|
+
locked_by = $1,
|
|
424
|
+
attempts = CASE WHEN status = 'waiting' THEN attempts ELSE attempts + 1 END,
|
|
425
|
+
updated_at = NOW(),
|
|
426
|
+
pending_reason = NULL,
|
|
427
|
+
started_at = COALESCE(started_at, NOW()),
|
|
428
|
+
last_retried_at = CASE WHEN status != 'waiting' AND attempts > 0 THEN NOW() ELSE last_retried_at END,
|
|
429
|
+
wait_until = NULL
|
|
430
|
+
WHERE id IN (
|
|
431
|
+
SELECT id FROM job_queue
|
|
432
|
+
WHERE (
|
|
433
|
+
(
|
|
434
|
+
(status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
|
|
435
|
+
AND (attempts < max_attempts)
|
|
436
|
+
AND run_at <= NOW()
|
|
437
|
+
)
|
|
438
|
+
OR (
|
|
439
|
+
status = 'waiting'
|
|
440
|
+
AND wait_until IS NOT NULL
|
|
441
|
+
AND wait_until <= NOW()
|
|
442
|
+
AND wait_token_id IS NULL
|
|
443
|
+
)
|
|
444
|
+
)
|
|
445
|
+
${jobTypeFilter}
|
|
446
|
+
ORDER BY priority DESC, created_at ASC
|
|
447
|
+
LIMIT $2
|
|
448
|
+
FOR UPDATE SKIP LOCKED
|
|
449
|
+
)
|
|
450
|
+
RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
|
|
451
|
+
`,
|
|
452
|
+
params
|
|
453
|
+
);
|
|
454
|
+
log(`Found ${result.rows.length} jobs to process`);
|
|
455
|
+
await client.query("COMMIT");
|
|
456
|
+
if (result.rows.length > 0) {
|
|
457
|
+
await this.recordJobEventsBatch(
|
|
458
|
+
result.rows.map((row) => ({
|
|
459
|
+
jobId: row.id,
|
|
460
|
+
eventType: "processing" /* Processing */
|
|
461
|
+
}))
|
|
462
|
+
);
|
|
463
|
+
}
|
|
464
|
+
return result.rows.map((job) => ({
|
|
465
|
+
...job,
|
|
466
|
+
payload: job.payload,
|
|
467
|
+
timeoutMs: job.timeoutMs,
|
|
468
|
+
forceKillOnTimeout: job.forceKillOnTimeout
|
|
469
|
+
}));
|
|
470
|
+
} catch (error) {
|
|
471
|
+
log(`Error getting next batch: ${error}`);
|
|
472
|
+
await client.query("ROLLBACK");
|
|
473
|
+
throw error;
|
|
474
|
+
} finally {
|
|
475
|
+
client.release();
|
|
476
|
+
}
|
|
477
|
+
}
|
|
478
|
+
async completeJob(jobId) {
|
|
479
|
+
const client = await this.pool.connect();
|
|
480
|
+
try {
|
|
481
|
+
const result = await client.query(
|
|
482
|
+
`
|
|
483
|
+
UPDATE job_queue
|
|
484
|
+
SET status = 'completed', updated_at = NOW(), completed_at = NOW(),
|
|
485
|
+
step_data = NULL, wait_until = NULL, wait_token_id = NULL
|
|
486
|
+
WHERE id = $1 AND status = 'processing'
|
|
487
|
+
`,
|
|
488
|
+
[jobId]
|
|
489
|
+
);
|
|
490
|
+
if (result.rowCount === 0) {
|
|
491
|
+
log(
|
|
492
|
+
`Job ${jobId} could not be completed (not in processing state or does not exist)`
|
|
493
|
+
);
|
|
494
|
+
}
|
|
495
|
+
await this.recordJobEvent(jobId, "completed" /* Completed */);
|
|
496
|
+
log(`Completed job ${jobId}`);
|
|
497
|
+
} catch (error) {
|
|
498
|
+
log(`Error completing job ${jobId}: ${error}`);
|
|
499
|
+
throw error;
|
|
500
|
+
} finally {
|
|
501
|
+
client.release();
|
|
502
|
+
}
|
|
503
|
+
}
|
|
504
|
+
async failJob(jobId, error, failureReason) {
|
|
505
|
+
const client = await this.pool.connect();
|
|
506
|
+
try {
|
|
507
|
+
const result = await client.query(
|
|
508
|
+
`
|
|
509
|
+
UPDATE job_queue
|
|
510
|
+
SET status = 'failed',
|
|
511
|
+
updated_at = NOW(),
|
|
512
|
+
next_attempt_at = CASE
|
|
513
|
+
WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
|
|
514
|
+
ELSE NULL
|
|
515
|
+
END,
|
|
516
|
+
error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
|
|
517
|
+
failure_reason = $3,
|
|
518
|
+
last_failed_at = NOW()
|
|
519
|
+
WHERE id = $1 AND status IN ('processing', 'pending')
|
|
520
|
+
`,
|
|
93
521
|
[
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
522
|
+
jobId,
|
|
523
|
+
JSON.stringify([
|
|
524
|
+
{
|
|
525
|
+
message: error.message || String(error),
|
|
526
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
527
|
+
}
|
|
528
|
+
]),
|
|
529
|
+
failureReason ?? null
|
|
100
530
|
]
|
|
101
531
|
);
|
|
102
|
-
|
|
103
|
-
|
|
532
|
+
if (result.rowCount === 0) {
|
|
533
|
+
log(
|
|
534
|
+
`Job ${jobId} could not be failed (not in processing/pending state or does not exist)`
|
|
535
|
+
);
|
|
536
|
+
}
|
|
537
|
+
await this.recordJobEvent(jobId, "failed" /* Failed */, {
|
|
538
|
+
message: error.message || String(error),
|
|
539
|
+
failureReason
|
|
540
|
+
});
|
|
541
|
+
log(`Failed job ${jobId}`);
|
|
542
|
+
} catch (err) {
|
|
543
|
+
log(`Error failing job ${jobId}: ${err}`);
|
|
544
|
+
throw err;
|
|
545
|
+
} finally {
|
|
546
|
+
client.release();
|
|
547
|
+
}
|
|
548
|
+
}
|
|
549
|
+
async prolongJob(jobId) {
|
|
550
|
+
const client = await this.pool.connect();
|
|
551
|
+
try {
|
|
552
|
+
await client.query(
|
|
553
|
+
`
|
|
554
|
+
UPDATE job_queue
|
|
555
|
+
SET locked_at = NOW(), updated_at = NOW()
|
|
556
|
+
WHERE id = $1 AND status = 'processing'
|
|
557
|
+
`,
|
|
558
|
+
[jobId]
|
|
104
559
|
);
|
|
560
|
+
await this.recordJobEvent(jobId, "prolonged" /* Prolonged */);
|
|
561
|
+
log(`Prolonged job ${jobId}`);
|
|
562
|
+
} catch (error) {
|
|
563
|
+
log(`Error prolonging job ${jobId}: ${error}`);
|
|
564
|
+
} finally {
|
|
565
|
+
client.release();
|
|
566
|
+
}
|
|
567
|
+
}
|
|
568
|
+
// ── Progress ──────────────────────────────────────────────────────────
|
|
569
|
+
async updateProgress(jobId, progress) {
|
|
570
|
+
const client = await this.pool.connect();
|
|
571
|
+
try {
|
|
572
|
+
await client.query(
|
|
573
|
+
`UPDATE job_queue SET progress = $2, updated_at = NOW() WHERE id = $1`,
|
|
574
|
+
[jobId, progress]
|
|
575
|
+
);
|
|
576
|
+
log(`Updated progress for job ${jobId}: ${progress}%`);
|
|
577
|
+
} catch (error) {
|
|
578
|
+
log(`Error updating progress for job ${jobId}: ${error}`);
|
|
579
|
+
} finally {
|
|
580
|
+
client.release();
|
|
581
|
+
}
|
|
582
|
+
}
|
|
583
|
+
// ── Job management ────────────────────────────────────────────────────
|
|
584
|
+
async retryJob(jobId) {
|
|
585
|
+
const client = await this.pool.connect();
|
|
586
|
+
try {
|
|
587
|
+
const result = await client.query(
|
|
588
|
+
`
|
|
589
|
+
UPDATE job_queue
|
|
590
|
+
SET status = 'pending',
|
|
591
|
+
updated_at = NOW(),
|
|
592
|
+
locked_at = NULL,
|
|
593
|
+
locked_by = NULL,
|
|
594
|
+
next_attempt_at = NOW(),
|
|
595
|
+
last_retried_at = NOW()
|
|
596
|
+
WHERE id = $1 AND status IN ('failed', 'processing')
|
|
597
|
+
`,
|
|
598
|
+
[jobId]
|
|
599
|
+
);
|
|
600
|
+
if (result.rowCount === 0) {
|
|
601
|
+
log(
|
|
602
|
+
`Job ${jobId} could not be retried (not in failed/processing state or does not exist)`
|
|
603
|
+
);
|
|
604
|
+
}
|
|
605
|
+
await this.recordJobEvent(jobId, "retried" /* Retried */);
|
|
606
|
+
log(`Retried job ${jobId}`);
|
|
607
|
+
} catch (error) {
|
|
608
|
+
log(`Error retrying job ${jobId}: ${error}`);
|
|
609
|
+
throw error;
|
|
610
|
+
} finally {
|
|
611
|
+
client.release();
|
|
612
|
+
}
|
|
613
|
+
}
|
|
614
|
+
async cancelJob(jobId) {
|
|
615
|
+
const client = await this.pool.connect();
|
|
616
|
+
try {
|
|
617
|
+
await client.query(
|
|
618
|
+
`
|
|
619
|
+
UPDATE job_queue
|
|
620
|
+
SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW(),
|
|
621
|
+
wait_until = NULL, wait_token_id = NULL
|
|
622
|
+
WHERE id = $1 AND status IN ('pending', 'waiting')
|
|
623
|
+
`,
|
|
624
|
+
[jobId]
|
|
625
|
+
);
|
|
626
|
+
await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
|
|
627
|
+
log(`Cancelled job ${jobId}`);
|
|
628
|
+
} catch (error) {
|
|
629
|
+
log(`Error cancelling job ${jobId}: ${error}`);
|
|
630
|
+
throw error;
|
|
631
|
+
} finally {
|
|
632
|
+
client.release();
|
|
633
|
+
}
|
|
634
|
+
}
|
|
635
|
+
async cancelAllUpcomingJobs(filters) {
|
|
636
|
+
const client = await this.pool.connect();
|
|
637
|
+
try {
|
|
638
|
+
let query = `
|
|
639
|
+
UPDATE job_queue
|
|
640
|
+
SET status = 'cancelled', updated_at = NOW()
|
|
641
|
+
WHERE status = 'pending'`;
|
|
642
|
+
const params = [];
|
|
643
|
+
let paramIdx = 1;
|
|
644
|
+
if (filters) {
|
|
645
|
+
if (filters.jobType) {
|
|
646
|
+
query += ` AND job_type = $${paramIdx++}`;
|
|
647
|
+
params.push(filters.jobType);
|
|
648
|
+
}
|
|
649
|
+
if (filters.priority !== void 0) {
|
|
650
|
+
query += ` AND priority = $${paramIdx++}`;
|
|
651
|
+
params.push(filters.priority);
|
|
652
|
+
}
|
|
653
|
+
if (filters.runAt) {
|
|
654
|
+
if (filters.runAt instanceof Date) {
|
|
655
|
+
query += ` AND run_at = $${paramIdx++}`;
|
|
656
|
+
params.push(filters.runAt);
|
|
657
|
+
} else if (typeof filters.runAt === "object") {
|
|
658
|
+
const ops = filters.runAt;
|
|
659
|
+
if (ops.gt) {
|
|
660
|
+
query += ` AND run_at > $${paramIdx++}`;
|
|
661
|
+
params.push(ops.gt);
|
|
662
|
+
}
|
|
663
|
+
if (ops.gte) {
|
|
664
|
+
query += ` AND run_at >= $${paramIdx++}`;
|
|
665
|
+
params.push(ops.gte);
|
|
666
|
+
}
|
|
667
|
+
if (ops.lt) {
|
|
668
|
+
query += ` AND run_at < $${paramIdx++}`;
|
|
669
|
+
params.push(ops.lt);
|
|
670
|
+
}
|
|
671
|
+
if (ops.lte) {
|
|
672
|
+
query += ` AND run_at <= $${paramIdx++}`;
|
|
673
|
+
params.push(ops.lte);
|
|
674
|
+
}
|
|
675
|
+
if (ops.eq) {
|
|
676
|
+
query += ` AND run_at = $${paramIdx++}`;
|
|
677
|
+
params.push(ops.eq);
|
|
678
|
+
}
|
|
679
|
+
}
|
|
680
|
+
}
|
|
681
|
+
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
682
|
+
const mode = filters.tags.mode || "all";
|
|
683
|
+
const tagValues = filters.tags.values;
|
|
684
|
+
switch (mode) {
|
|
685
|
+
case "exact":
|
|
686
|
+
query += ` AND tags = $${paramIdx++}`;
|
|
687
|
+
params.push(tagValues);
|
|
688
|
+
break;
|
|
689
|
+
case "all":
|
|
690
|
+
query += ` AND tags @> $${paramIdx++}`;
|
|
691
|
+
params.push(tagValues);
|
|
692
|
+
break;
|
|
693
|
+
case "any":
|
|
694
|
+
query += ` AND tags && $${paramIdx++}`;
|
|
695
|
+
params.push(tagValues);
|
|
696
|
+
break;
|
|
697
|
+
case "none":
|
|
698
|
+
query += ` AND NOT (tags && $${paramIdx++})`;
|
|
699
|
+
params.push(tagValues);
|
|
700
|
+
break;
|
|
701
|
+
default:
|
|
702
|
+
query += ` AND tags @> $${paramIdx++}`;
|
|
703
|
+
params.push(tagValues);
|
|
704
|
+
}
|
|
705
|
+
}
|
|
706
|
+
}
|
|
707
|
+
query += "\nRETURNING id";
|
|
708
|
+
const result = await client.query(query, params);
|
|
709
|
+
log(`Cancelled ${result.rowCount} jobs`);
|
|
710
|
+
return result.rowCount || 0;
|
|
711
|
+
} catch (error) {
|
|
712
|
+
log(`Error cancelling upcoming jobs: ${error}`);
|
|
713
|
+
throw error;
|
|
714
|
+
} finally {
|
|
715
|
+
client.release();
|
|
716
|
+
}
|
|
717
|
+
}
|
|
718
|
+
async editJob(jobId, updates) {
|
|
719
|
+
const client = await this.pool.connect();
|
|
720
|
+
try {
|
|
721
|
+
const updateFields = [];
|
|
722
|
+
const params = [];
|
|
723
|
+
let paramIdx = 1;
|
|
724
|
+
if (updates.payload !== void 0) {
|
|
725
|
+
updateFields.push(`payload = $${paramIdx++}`);
|
|
726
|
+
params.push(updates.payload);
|
|
727
|
+
}
|
|
728
|
+
if (updates.maxAttempts !== void 0) {
|
|
729
|
+
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
730
|
+
params.push(updates.maxAttempts);
|
|
731
|
+
}
|
|
732
|
+
if (updates.priority !== void 0) {
|
|
733
|
+
updateFields.push(`priority = $${paramIdx++}`);
|
|
734
|
+
params.push(updates.priority);
|
|
735
|
+
}
|
|
736
|
+
if (updates.runAt !== void 0) {
|
|
737
|
+
if (updates.runAt === null) {
|
|
738
|
+
updateFields.push(`run_at = NOW()`);
|
|
739
|
+
} else {
|
|
740
|
+
updateFields.push(`run_at = $${paramIdx++}`);
|
|
741
|
+
params.push(updates.runAt);
|
|
742
|
+
}
|
|
743
|
+
}
|
|
744
|
+
if (updates.timeoutMs !== void 0) {
|
|
745
|
+
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
746
|
+
params.push(updates.timeoutMs ?? null);
|
|
747
|
+
}
|
|
748
|
+
if (updates.tags !== void 0) {
|
|
749
|
+
updateFields.push(`tags = $${paramIdx++}`);
|
|
750
|
+
params.push(updates.tags ?? null);
|
|
751
|
+
}
|
|
752
|
+
if (updateFields.length === 0) {
|
|
753
|
+
log(`No fields to update for job ${jobId}`);
|
|
754
|
+
return;
|
|
755
|
+
}
|
|
756
|
+
updateFields.push(`updated_at = NOW()`);
|
|
757
|
+
params.push(jobId);
|
|
758
|
+
const query = `
|
|
759
|
+
UPDATE job_queue
|
|
760
|
+
SET ${updateFields.join(", ")}
|
|
761
|
+
WHERE id = $${paramIdx} AND status = 'pending'
|
|
762
|
+
`;
|
|
763
|
+
await client.query(query, params);
|
|
764
|
+
const metadata = {};
|
|
765
|
+
if (updates.payload !== void 0) metadata.payload = updates.payload;
|
|
766
|
+
if (updates.maxAttempts !== void 0)
|
|
767
|
+
metadata.maxAttempts = updates.maxAttempts;
|
|
768
|
+
if (updates.priority !== void 0) metadata.priority = updates.priority;
|
|
769
|
+
if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
|
|
770
|
+
if (updates.timeoutMs !== void 0)
|
|
771
|
+
metadata.timeoutMs = updates.timeoutMs;
|
|
772
|
+
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
773
|
+
await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
|
|
774
|
+
log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
|
|
775
|
+
} catch (error) {
|
|
776
|
+
log(`Error editing job ${jobId}: ${error}`);
|
|
777
|
+
throw error;
|
|
778
|
+
} finally {
|
|
779
|
+
client.release();
|
|
780
|
+
}
|
|
781
|
+
}
|
|
782
|
+
async editAllPendingJobs(filters = void 0, updates) {
|
|
783
|
+
const client = await this.pool.connect();
|
|
784
|
+
try {
|
|
785
|
+
const updateFields = [];
|
|
786
|
+
const params = [];
|
|
787
|
+
let paramIdx = 1;
|
|
788
|
+
if (updates.payload !== void 0) {
|
|
789
|
+
updateFields.push(`payload = $${paramIdx++}`);
|
|
790
|
+
params.push(updates.payload);
|
|
791
|
+
}
|
|
792
|
+
if (updates.maxAttempts !== void 0) {
|
|
793
|
+
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
794
|
+
params.push(updates.maxAttempts);
|
|
795
|
+
}
|
|
796
|
+
if (updates.priority !== void 0) {
|
|
797
|
+
updateFields.push(`priority = $${paramIdx++}`);
|
|
798
|
+
params.push(updates.priority);
|
|
799
|
+
}
|
|
800
|
+
if (updates.runAt !== void 0) {
|
|
801
|
+
if (updates.runAt === null) {
|
|
802
|
+
updateFields.push(`run_at = NOW()`);
|
|
803
|
+
} else {
|
|
804
|
+
updateFields.push(`run_at = $${paramIdx++}`);
|
|
805
|
+
params.push(updates.runAt);
|
|
806
|
+
}
|
|
807
|
+
}
|
|
808
|
+
if (updates.timeoutMs !== void 0) {
|
|
809
|
+
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
810
|
+
params.push(updates.timeoutMs ?? null);
|
|
811
|
+
}
|
|
812
|
+
if (updates.tags !== void 0) {
|
|
813
|
+
updateFields.push(`tags = $${paramIdx++}`);
|
|
814
|
+
params.push(updates.tags ?? null);
|
|
815
|
+
}
|
|
816
|
+
if (updateFields.length === 0) {
|
|
817
|
+
log(`No fields to update for batch edit`);
|
|
818
|
+
return 0;
|
|
819
|
+
}
|
|
820
|
+
updateFields.push(`updated_at = NOW()`);
|
|
821
|
+
let query = `
|
|
822
|
+
UPDATE job_queue
|
|
823
|
+
SET ${updateFields.join(", ")}
|
|
824
|
+
WHERE status = 'pending'`;
|
|
825
|
+
if (filters) {
|
|
826
|
+
if (filters.jobType) {
|
|
827
|
+
query += ` AND job_type = $${paramIdx++}`;
|
|
828
|
+
params.push(filters.jobType);
|
|
829
|
+
}
|
|
830
|
+
if (filters.priority !== void 0) {
|
|
831
|
+
query += ` AND priority = $${paramIdx++}`;
|
|
832
|
+
params.push(filters.priority);
|
|
833
|
+
}
|
|
834
|
+
if (filters.runAt) {
|
|
835
|
+
if (filters.runAt instanceof Date) {
|
|
836
|
+
query += ` AND run_at = $${paramIdx++}`;
|
|
837
|
+
params.push(filters.runAt);
|
|
838
|
+
} else if (typeof filters.runAt === "object") {
|
|
839
|
+
const ops = filters.runAt;
|
|
840
|
+
if (ops.gt) {
|
|
841
|
+
query += ` AND run_at > $${paramIdx++}`;
|
|
842
|
+
params.push(ops.gt);
|
|
843
|
+
}
|
|
844
|
+
if (ops.gte) {
|
|
845
|
+
query += ` AND run_at >= $${paramIdx++}`;
|
|
846
|
+
params.push(ops.gte);
|
|
847
|
+
}
|
|
848
|
+
if (ops.lt) {
|
|
849
|
+
query += ` AND run_at < $${paramIdx++}`;
|
|
850
|
+
params.push(ops.lt);
|
|
851
|
+
}
|
|
852
|
+
if (ops.lte) {
|
|
853
|
+
query += ` AND run_at <= $${paramIdx++}`;
|
|
854
|
+
params.push(ops.lte);
|
|
855
|
+
}
|
|
856
|
+
if (ops.eq) {
|
|
857
|
+
query += ` AND run_at = $${paramIdx++}`;
|
|
858
|
+
params.push(ops.eq);
|
|
859
|
+
}
|
|
860
|
+
}
|
|
861
|
+
}
|
|
862
|
+
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
863
|
+
const mode = filters.tags.mode || "all";
|
|
864
|
+
const tagValues = filters.tags.values;
|
|
865
|
+
switch (mode) {
|
|
866
|
+
case "exact":
|
|
867
|
+
query += ` AND tags = $${paramIdx++}`;
|
|
868
|
+
params.push(tagValues);
|
|
869
|
+
break;
|
|
870
|
+
case "all":
|
|
871
|
+
query += ` AND tags @> $${paramIdx++}`;
|
|
872
|
+
params.push(tagValues);
|
|
873
|
+
break;
|
|
874
|
+
case "any":
|
|
875
|
+
query += ` AND tags && $${paramIdx++}`;
|
|
876
|
+
params.push(tagValues);
|
|
877
|
+
break;
|
|
878
|
+
case "none":
|
|
879
|
+
query += ` AND NOT (tags && $${paramIdx++})`;
|
|
880
|
+
params.push(tagValues);
|
|
881
|
+
break;
|
|
882
|
+
default:
|
|
883
|
+
query += ` AND tags @> $${paramIdx++}`;
|
|
884
|
+
params.push(tagValues);
|
|
885
|
+
}
|
|
886
|
+
}
|
|
887
|
+
}
|
|
888
|
+
query += "\nRETURNING id";
|
|
889
|
+
const result = await client.query(query, params);
|
|
890
|
+
const editedCount = result.rowCount || 0;
|
|
891
|
+
const metadata = {};
|
|
892
|
+
if (updates.payload !== void 0) metadata.payload = updates.payload;
|
|
893
|
+
if (updates.maxAttempts !== void 0)
|
|
894
|
+
metadata.maxAttempts = updates.maxAttempts;
|
|
895
|
+
if (updates.priority !== void 0) metadata.priority = updates.priority;
|
|
896
|
+
if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
|
|
897
|
+
if (updates.timeoutMs !== void 0)
|
|
898
|
+
metadata.timeoutMs = updates.timeoutMs;
|
|
899
|
+
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
900
|
+
for (const row of result.rows) {
|
|
901
|
+
await this.recordJobEvent(row.id, "edited" /* Edited */, metadata);
|
|
902
|
+
}
|
|
903
|
+
log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
|
|
904
|
+
return editedCount;
|
|
905
|
+
} catch (error) {
|
|
906
|
+
log(`Error editing pending jobs: ${error}`);
|
|
907
|
+
throw error;
|
|
908
|
+
} finally {
|
|
909
|
+
client.release();
|
|
910
|
+
}
|
|
911
|
+
}
|
|
912
|
+
async cleanupOldJobs(daysToKeep = 30) {
|
|
913
|
+
const client = await this.pool.connect();
|
|
914
|
+
try {
|
|
915
|
+
const result = await client.query(
|
|
916
|
+
`
|
|
917
|
+
DELETE FROM job_queue
|
|
918
|
+
WHERE status = 'completed'
|
|
919
|
+
AND updated_at < NOW() - INTERVAL '1 day' * $1::int
|
|
920
|
+
RETURNING id
|
|
921
|
+
`,
|
|
922
|
+
[daysToKeep]
|
|
923
|
+
);
|
|
924
|
+
log(`Deleted ${result.rowCount} old jobs`);
|
|
925
|
+
return result.rowCount || 0;
|
|
926
|
+
} catch (error) {
|
|
927
|
+
log(`Error cleaning up old jobs: ${error}`);
|
|
928
|
+
throw error;
|
|
929
|
+
} finally {
|
|
930
|
+
client.release();
|
|
931
|
+
}
|
|
932
|
+
}
|
|
933
|
+
async cleanupOldJobEvents(daysToKeep = 30) {
|
|
934
|
+
const client = await this.pool.connect();
|
|
935
|
+
try {
|
|
936
|
+
const result = await client.query(
|
|
937
|
+
`
|
|
938
|
+
DELETE FROM job_events
|
|
939
|
+
WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
|
|
940
|
+
RETURNING id
|
|
941
|
+
`,
|
|
942
|
+
[daysToKeep]
|
|
943
|
+
);
|
|
944
|
+
log(`Deleted ${result.rowCount} old job events`);
|
|
945
|
+
return result.rowCount || 0;
|
|
946
|
+
} catch (error) {
|
|
947
|
+
log(`Error cleaning up old job events: ${error}`);
|
|
948
|
+
throw error;
|
|
949
|
+
} finally {
|
|
950
|
+
client.release();
|
|
951
|
+
}
|
|
952
|
+
}
|
|
953
|
+
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
954
|
+
const client = await this.pool.connect();
|
|
955
|
+
try {
|
|
956
|
+
const result = await client.query(
|
|
957
|
+
`
|
|
958
|
+
UPDATE job_queue
|
|
959
|
+
SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
|
|
960
|
+
WHERE status = 'processing'
|
|
961
|
+
AND locked_at < NOW() - GREATEST(
|
|
962
|
+
INTERVAL '1 minute' * $1::int,
|
|
963
|
+
INTERVAL '1 millisecond' * COALESCE(timeout_ms, 0)
|
|
964
|
+
)
|
|
965
|
+
RETURNING id
|
|
966
|
+
`,
|
|
967
|
+
[maxProcessingTimeMinutes]
|
|
968
|
+
);
|
|
969
|
+
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
970
|
+
return result.rowCount || 0;
|
|
971
|
+
} catch (error) {
|
|
972
|
+
log(`Error reclaiming stuck jobs: ${error}`);
|
|
973
|
+
throw error;
|
|
974
|
+
} finally {
|
|
975
|
+
client.release();
|
|
976
|
+
}
|
|
977
|
+
}
|
|
978
|
+
// ── Internal helpers ──────────────────────────────────────────────────
|
|
979
|
+
/**
|
|
980
|
+
* Batch-insert multiple job events in a single query.
|
|
981
|
+
* More efficient than individual recordJobEvent calls.
|
|
982
|
+
*/
|
|
983
|
+
async recordJobEventsBatch(events) {
|
|
984
|
+
if (events.length === 0) return;
|
|
985
|
+
const client = await this.pool.connect();
|
|
986
|
+
try {
|
|
987
|
+
const values = [];
|
|
988
|
+
const params = [];
|
|
989
|
+
let paramIdx = 1;
|
|
990
|
+
for (const event of events) {
|
|
991
|
+
values.push(`($${paramIdx++}, $${paramIdx++}, $${paramIdx++})`);
|
|
992
|
+
params.push(
|
|
993
|
+
event.jobId,
|
|
994
|
+
event.eventType,
|
|
995
|
+
event.metadata ? JSON.stringify(event.metadata) : null
|
|
996
|
+
);
|
|
997
|
+
}
|
|
998
|
+
await client.query(
|
|
999
|
+
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ${values.join(", ")}`,
|
|
1000
|
+
params
|
|
1001
|
+
);
|
|
1002
|
+
} catch (error) {
|
|
1003
|
+
log(`Error recording batch job events: ${error}`);
|
|
1004
|
+
} finally {
|
|
1005
|
+
client.release();
|
|
1006
|
+
}
|
|
1007
|
+
}
|
|
1008
|
+
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
1009
|
+
const client = await this.pool.connect();
|
|
1010
|
+
try {
|
|
1011
|
+
let jobTypeFilter = "";
|
|
1012
|
+
const params = [reason];
|
|
1013
|
+
if (jobType) {
|
|
1014
|
+
if (Array.isArray(jobType)) {
|
|
1015
|
+
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
1016
|
+
params.push(jobType);
|
|
1017
|
+
} else {
|
|
1018
|
+
jobTypeFilter = ` AND job_type = $2`;
|
|
1019
|
+
params.push(jobType);
|
|
1020
|
+
}
|
|
1021
|
+
}
|
|
1022
|
+
await client.query(
|
|
1023
|
+
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
1024
|
+
params
|
|
1025
|
+
);
|
|
1026
|
+
} finally {
|
|
1027
|
+
client.release();
|
|
105
1028
|
}
|
|
106
|
-
await recordJobEvent(pool, result.rows[0].id, "added" /* Added */, {
|
|
107
|
-
jobType,
|
|
108
|
-
payload,
|
|
109
|
-
tags
|
|
110
|
-
});
|
|
111
|
-
return result.rows[0].id;
|
|
112
|
-
} catch (error) {
|
|
113
|
-
log(`Error adding job: ${error}`);
|
|
114
|
-
throw error;
|
|
115
|
-
} finally {
|
|
116
|
-
client.release();
|
|
117
1029
|
}
|
|
118
1030
|
};
|
|
119
|
-
var
|
|
1031
|
+
var recordJobEvent = async (pool, jobId, eventType, metadata) => new PostgresBackend(pool).recordJobEvent(jobId, eventType, metadata);
|
|
1032
|
+
var waitJob = async (pool, jobId, options) => {
|
|
120
1033
|
const client = await pool.connect();
|
|
121
1034
|
try {
|
|
122
1035
|
const result = await client.query(
|
|
123
|
-
`
|
|
124
|
-
|
|
1036
|
+
`
|
|
1037
|
+
UPDATE job_queue
|
|
1038
|
+
SET status = 'waiting',
|
|
1039
|
+
wait_until = $2,
|
|
1040
|
+
wait_token_id = $3,
|
|
1041
|
+
step_data = $4,
|
|
1042
|
+
locked_at = NULL,
|
|
1043
|
+
locked_by = NULL,
|
|
1044
|
+
updated_at = NOW()
|
|
1045
|
+
WHERE id = $1 AND status = 'processing'
|
|
1046
|
+
`,
|
|
1047
|
+
[
|
|
1048
|
+
jobId,
|
|
1049
|
+
options.waitUntil ?? null,
|
|
1050
|
+
options.waitTokenId ?? null,
|
|
1051
|
+
JSON.stringify(options.stepData)
|
|
1052
|
+
]
|
|
125
1053
|
);
|
|
126
|
-
if (result.
|
|
127
|
-
log(
|
|
128
|
-
|
|
1054
|
+
if (result.rowCount === 0) {
|
|
1055
|
+
log(
|
|
1056
|
+
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
1057
|
+
);
|
|
1058
|
+
return;
|
|
129
1059
|
}
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
timeoutMs: job.timeoutMs,
|
|
136
|
-
failureReason: job.failureReason
|
|
137
|
-
};
|
|
1060
|
+
await recordJobEvent(pool, jobId, "waiting" /* Waiting */, {
|
|
1061
|
+
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
1062
|
+
waitTokenId: options.waitTokenId ?? null
|
|
1063
|
+
});
|
|
1064
|
+
log(`Job ${jobId} set to waiting`);
|
|
138
1065
|
} catch (error) {
|
|
139
|
-
log(`Error
|
|
1066
|
+
log(`Error setting job ${jobId} to waiting: ${error}`);
|
|
140
1067
|
throw error;
|
|
141
1068
|
} finally {
|
|
142
1069
|
client.release();
|
|
143
1070
|
}
|
|
144
1071
|
};
|
|
145
|
-
var
|
|
1072
|
+
var updateStepData = async (pool, jobId, stepData) => {
|
|
146
1073
|
const client = await pool.connect();
|
|
147
1074
|
try {
|
|
148
|
-
|
|
149
|
-
`
|
|
150
|
-
[
|
|
1075
|
+
await client.query(
|
|
1076
|
+
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
1077
|
+
[jobId, JSON.stringify(stepData)]
|
|
151
1078
|
);
|
|
152
|
-
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
153
|
-
return result.rows.map((job) => ({
|
|
154
|
-
...job,
|
|
155
|
-
payload: job.payload,
|
|
156
|
-
timeoutMs: job.timeoutMs,
|
|
157
|
-
failureReason: job.failureReason
|
|
158
|
-
}));
|
|
159
1079
|
} catch (error) {
|
|
160
|
-
log(`Error
|
|
161
|
-
throw error;
|
|
1080
|
+
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
162
1081
|
} finally {
|
|
163
1082
|
client.release();
|
|
164
1083
|
}
|
|
165
1084
|
};
|
|
166
|
-
var
|
|
1085
|
+
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
|
|
1086
|
+
function parseTimeoutString(timeout) {
|
|
1087
|
+
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
1088
|
+
if (!match) {
|
|
1089
|
+
throw new Error(
|
|
1090
|
+
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
1091
|
+
);
|
|
1092
|
+
}
|
|
1093
|
+
const value = parseInt(match[1], 10);
|
|
1094
|
+
const unit = match[2];
|
|
1095
|
+
let ms;
|
|
1096
|
+
switch (unit) {
|
|
1097
|
+
case "s":
|
|
1098
|
+
ms = value * 1e3;
|
|
1099
|
+
break;
|
|
1100
|
+
case "m":
|
|
1101
|
+
ms = value * 60 * 1e3;
|
|
1102
|
+
break;
|
|
1103
|
+
case "h":
|
|
1104
|
+
ms = value * 60 * 60 * 1e3;
|
|
1105
|
+
break;
|
|
1106
|
+
case "d":
|
|
1107
|
+
ms = value * 24 * 60 * 60 * 1e3;
|
|
1108
|
+
break;
|
|
1109
|
+
default:
|
|
1110
|
+
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
1111
|
+
}
|
|
1112
|
+
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
|
|
1113
|
+
throw new Error(
|
|
1114
|
+
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
1115
|
+
);
|
|
1116
|
+
}
|
|
1117
|
+
return ms;
|
|
1118
|
+
}
|
|
1119
|
+
var createWaitpoint = async (pool, jobId, options) => {
|
|
167
1120
|
const client = await pool.connect();
|
|
168
1121
|
try {
|
|
169
|
-
|
|
170
|
-
let
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
jobTypeFilter = ` AND job_type = ANY($3)`;
|
|
175
|
-
params.push(jobType);
|
|
176
|
-
} else {
|
|
177
|
-
jobTypeFilter = ` AND job_type = $3`;
|
|
178
|
-
params.push(jobType);
|
|
179
|
-
}
|
|
1122
|
+
const id = `wp_${crypto.randomUUID()}`;
|
|
1123
|
+
let timeoutAt = null;
|
|
1124
|
+
if (options?.timeout) {
|
|
1125
|
+
const ms = parseTimeoutString(options.timeout);
|
|
1126
|
+
timeoutAt = new Date(Date.now() + ms);
|
|
180
1127
|
}
|
|
181
|
-
|
|
182
|
-
`
|
|
183
|
-
|
|
184
|
-
SET status = 'processing',
|
|
185
|
-
locked_at = NOW(),
|
|
186
|
-
locked_by = $1,
|
|
187
|
-
attempts = attempts + 1,
|
|
188
|
-
updated_at = NOW(),
|
|
189
|
-
pending_reason = NULL,
|
|
190
|
-
started_at = COALESCE(started_at, NOW()),
|
|
191
|
-
last_retried_at = CASE WHEN attempts > 0 THEN NOW() ELSE last_retried_at END
|
|
192
|
-
WHERE id IN (
|
|
193
|
-
SELECT id FROM job_queue
|
|
194
|
-
WHERE (status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
|
|
195
|
-
AND (attempts < max_attempts)
|
|
196
|
-
AND run_at <= NOW()
|
|
197
|
-
${jobTypeFilter}
|
|
198
|
-
ORDER BY priority DESC, created_at ASC
|
|
199
|
-
LIMIT $2
|
|
200
|
-
FOR UPDATE SKIP LOCKED
|
|
201
|
-
)
|
|
202
|
-
RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason"
|
|
203
|
-
`,
|
|
204
|
-
params
|
|
1128
|
+
await client.query(
|
|
1129
|
+
`INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
|
|
1130
|
+
[id, jobId, timeoutAt, options?.tags ?? null]
|
|
205
1131
|
);
|
|
206
|
-
log(`
|
|
207
|
-
|
|
208
|
-
for (const row of result.rows) {
|
|
209
|
-
await recordJobEvent(pool, row.id, "processing" /* Processing */);
|
|
210
|
-
}
|
|
211
|
-
return result.rows.map((job) => ({
|
|
212
|
-
...job,
|
|
213
|
-
payload: job.payload,
|
|
214
|
-
timeoutMs: job.timeoutMs
|
|
215
|
-
}));
|
|
1132
|
+
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
1133
|
+
return { id };
|
|
216
1134
|
} catch (error) {
|
|
217
|
-
log(`Error
|
|
218
|
-
await client.query("ROLLBACK");
|
|
1135
|
+
log(`Error creating waitpoint: ${error}`);
|
|
219
1136
|
throw error;
|
|
220
1137
|
} finally {
|
|
221
1138
|
client.release();
|
|
222
1139
|
}
|
|
223
1140
|
};
|
|
224
|
-
var
|
|
1141
|
+
var completeWaitpoint = async (pool, tokenId, data) => {
|
|
225
1142
|
const client = await pool.connect();
|
|
226
1143
|
try {
|
|
227
|
-
await client.query(
|
|
228
|
-
|
|
229
|
-
UPDATE
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
[jobId]
|
|
1144
|
+
await client.query("BEGIN");
|
|
1145
|
+
const wpResult = await client.query(
|
|
1146
|
+
`UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
|
|
1147
|
+
WHERE id = $1 AND status = 'waiting'
|
|
1148
|
+
RETURNING job_id`,
|
|
1149
|
+
[tokenId, data != null ? JSON.stringify(data) : null]
|
|
234
1150
|
);
|
|
235
|
-
|
|
1151
|
+
if (wpResult.rows.length === 0) {
|
|
1152
|
+
await client.query("ROLLBACK");
|
|
1153
|
+
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
1154
|
+
return;
|
|
1155
|
+
}
|
|
1156
|
+
const jobId = wpResult.rows[0].job_id;
|
|
1157
|
+
if (jobId != null) {
|
|
1158
|
+
await client.query(
|
|
1159
|
+
`UPDATE job_queue
|
|
1160
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1161
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
1162
|
+
[jobId]
|
|
1163
|
+
);
|
|
1164
|
+
}
|
|
1165
|
+
await client.query("COMMIT");
|
|
1166
|
+
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
236
1167
|
} catch (error) {
|
|
237
|
-
|
|
1168
|
+
await client.query("ROLLBACK");
|
|
1169
|
+
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
238
1170
|
throw error;
|
|
239
1171
|
} finally {
|
|
240
|
-
log(`Completed job ${jobId}`);
|
|
241
1172
|
client.release();
|
|
242
1173
|
}
|
|
243
1174
|
};
|
|
244
|
-
var
|
|
1175
|
+
var getWaitpoint = async (pool, tokenId) => {
|
|
245
1176
|
const client = await pool.connect();
|
|
246
1177
|
try {
|
|
247
|
-
await client.query(
|
|
248
|
-
`
|
|
249
|
-
|
|
250
|
-
SET status = 'failed',
|
|
251
|
-
updated_at = NOW(),
|
|
252
|
-
next_attempt_at = CASE
|
|
253
|
-
WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
|
|
254
|
-
ELSE NULL
|
|
255
|
-
END,
|
|
256
|
-
error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
|
|
257
|
-
failure_reason = $3,
|
|
258
|
-
last_failed_at = NOW()
|
|
259
|
-
WHERE id = $1
|
|
260
|
-
`,
|
|
261
|
-
[
|
|
262
|
-
jobId,
|
|
263
|
-
JSON.stringify([
|
|
264
|
-
{
|
|
265
|
-
message: error.message || String(error),
|
|
266
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
267
|
-
}
|
|
268
|
-
]),
|
|
269
|
-
failureReason ?? null
|
|
270
|
-
]
|
|
1178
|
+
const result = await client.query(
|
|
1179
|
+
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
1180
|
+
[tokenId]
|
|
271
1181
|
);
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
});
|
|
276
|
-
|
|
277
|
-
log(`Error failing job ${jobId}: ${error2}`);
|
|
278
|
-
throw error2;
|
|
1182
|
+
if (result.rows.length === 0) return null;
|
|
1183
|
+
return result.rows[0];
|
|
1184
|
+
} catch (error) {
|
|
1185
|
+
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
1186
|
+
throw error;
|
|
279
1187
|
} finally {
|
|
280
|
-
log(`Failed job ${jobId}`);
|
|
281
1188
|
client.release();
|
|
282
1189
|
}
|
|
283
1190
|
};
|
|
284
|
-
var
|
|
1191
|
+
var expireTimedOutWaitpoints = async (pool) => {
|
|
285
1192
|
const client = await pool.connect();
|
|
286
1193
|
try {
|
|
287
|
-
await client.query(
|
|
288
|
-
|
|
289
|
-
UPDATE
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
locked_by = NULL,
|
|
294
|
-
next_attempt_at = NOW(),
|
|
295
|
-
last_retried_at = NOW()
|
|
296
|
-
WHERE id = $1
|
|
297
|
-
`,
|
|
298
|
-
[jobId]
|
|
1194
|
+
await client.query("BEGIN");
|
|
1195
|
+
const result = await client.query(
|
|
1196
|
+
`UPDATE waitpoints
|
|
1197
|
+
SET status = 'timed_out'
|
|
1198
|
+
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
1199
|
+
RETURNING id, job_id`
|
|
299
1200
|
);
|
|
300
|
-
|
|
1201
|
+
for (const row of result.rows) {
|
|
1202
|
+
if (row.job_id != null) {
|
|
1203
|
+
await client.query(
|
|
1204
|
+
`UPDATE job_queue
|
|
1205
|
+
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1206
|
+
WHERE id = $1 AND status = 'waiting'`,
|
|
1207
|
+
[row.job_id]
|
|
1208
|
+
);
|
|
1209
|
+
}
|
|
1210
|
+
}
|
|
1211
|
+
await client.query("COMMIT");
|
|
1212
|
+
const count = result.rowCount || 0;
|
|
1213
|
+
if (count > 0) {
|
|
1214
|
+
log(`Expired ${count} timed-out waitpoints`);
|
|
1215
|
+
}
|
|
1216
|
+
return count;
|
|
301
1217
|
} catch (error) {
|
|
302
|
-
|
|
1218
|
+
await client.query("ROLLBACK");
|
|
1219
|
+
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
303
1220
|
throw error;
|
|
304
1221
|
} finally {
|
|
305
|
-
log(`Retried job ${jobId}`);
|
|
306
1222
|
client.release();
|
|
307
1223
|
}
|
|
308
1224
|
};
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
const result = await client.query(`
|
|
313
|
-
DELETE FROM job_queue
|
|
314
|
-
WHERE status = 'completed'
|
|
315
|
-
AND updated_at < NOW() - INTERVAL '${daysToKeep} days'
|
|
316
|
-
RETURNING id
|
|
317
|
-
`);
|
|
318
|
-
log(`Deleted ${result.rowCount} old jobs`);
|
|
319
|
-
return result.rowCount || 0;
|
|
320
|
-
} catch (error) {
|
|
321
|
-
log(`Error cleaning up old jobs: ${error}`);
|
|
322
|
-
throw error;
|
|
323
|
-
} finally {
|
|
324
|
-
client.release();
|
|
1225
|
+
function tryExtractPool(backend) {
|
|
1226
|
+
if (backend instanceof PostgresBackend) {
|
|
1227
|
+
return backend.getPool();
|
|
325
1228
|
}
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
1229
|
+
return null;
|
|
1230
|
+
}
|
|
1231
|
+
function buildBasicContext(backend, jobId, baseCtx) {
|
|
1232
|
+
const waitError = () => new Error(
|
|
1233
|
+
"Wait features (waitFor, waitUntil, createToken, waitForToken, ctx.run) are currently only supported with the PostgreSQL backend."
|
|
1234
|
+
);
|
|
1235
|
+
return {
|
|
1236
|
+
prolong: baseCtx.prolong,
|
|
1237
|
+
onTimeout: baseCtx.onTimeout,
|
|
1238
|
+
run: async (_stepName, fn) => {
|
|
1239
|
+
return fn();
|
|
1240
|
+
},
|
|
1241
|
+
waitFor: async () => {
|
|
1242
|
+
throw waitError();
|
|
1243
|
+
},
|
|
1244
|
+
waitUntil: async () => {
|
|
1245
|
+
throw waitError();
|
|
1246
|
+
},
|
|
1247
|
+
createToken: async () => {
|
|
1248
|
+
throw waitError();
|
|
1249
|
+
},
|
|
1250
|
+
waitForToken: async () => {
|
|
1251
|
+
throw waitError();
|
|
1252
|
+
},
|
|
1253
|
+
setProgress: async (percent) => {
|
|
1254
|
+
if (percent < 0 || percent > 100)
|
|
1255
|
+
throw new Error("Progress must be between 0 and 100");
|
|
1256
|
+
await backend.updateProgress(jobId, Math.round(percent));
|
|
1257
|
+
}
|
|
1258
|
+
};
|
|
1259
|
+
}
|
|
1260
|
+
function validateHandlerSerializable(handler, jobType) {
|
|
329
1261
|
try {
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
1262
|
+
const handlerString = handler.toString();
|
|
1263
|
+
if (handlerString.includes("this.") && !handlerString.match(/\([^)]*this[^)]*\)/)) {
|
|
1264
|
+
throw new Error(
|
|
1265
|
+
`Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
|
|
1266
|
+
);
|
|
1267
|
+
}
|
|
1268
|
+
if (handlerString.includes("[native code]")) {
|
|
1269
|
+
throw new Error(
|
|
1270
|
+
`Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
|
|
1271
|
+
);
|
|
1272
|
+
}
|
|
1273
|
+
try {
|
|
1274
|
+
new Function("return " + handlerString);
|
|
1275
|
+
} catch (parseError) {
|
|
1276
|
+
throw new Error(
|
|
1277
|
+
`Handler for job type "${jobType}" cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
|
|
1278
|
+
);
|
|
1279
|
+
}
|
|
339
1280
|
} catch (error) {
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
1281
|
+
if (error instanceof Error) {
|
|
1282
|
+
throw error;
|
|
1283
|
+
}
|
|
1284
|
+
throw new Error(
|
|
1285
|
+
`Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
|
|
1286
|
+
);
|
|
345
1287
|
}
|
|
346
|
-
}
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
1288
|
+
}
|
|
1289
|
+
async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
|
|
1290
|
+
validateHandlerSerializable(handler, jobType);
|
|
1291
|
+
return new Promise((resolve, reject) => {
|
|
1292
|
+
const workerCode = `
|
|
1293
|
+
(function() {
|
|
1294
|
+
const { parentPort, workerData } = require('worker_threads');
|
|
1295
|
+
const { handlerCode, payload, timeoutMs } = workerData;
|
|
1296
|
+
|
|
1297
|
+
// Create an AbortController for the handler
|
|
1298
|
+
const controller = new AbortController();
|
|
1299
|
+
const signal = controller.signal;
|
|
1300
|
+
|
|
1301
|
+
// Set up timeout
|
|
1302
|
+
const timeoutId = setTimeout(() => {
|
|
1303
|
+
controller.abort();
|
|
1304
|
+
parentPort.postMessage({ type: 'timeout' });
|
|
1305
|
+
}, timeoutMs);
|
|
1306
|
+
|
|
1307
|
+
try {
|
|
1308
|
+
// Execute the handler
|
|
1309
|
+
// Note: This uses Function constructor which requires the handler to be serializable.
|
|
1310
|
+
// The handler should be validated before reaching this point.
|
|
1311
|
+
let handlerFn;
|
|
1312
|
+
try {
|
|
1313
|
+
// Wrap handlerCode in parentheses to ensure it's treated as an expression
|
|
1314
|
+
// This handles both arrow functions and regular functions
|
|
1315
|
+
const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
|
|
1316
|
+
? handlerCode
|
|
1317
|
+
: '(' + handlerCode + ')';
|
|
1318
|
+
handlerFn = new Function('return ' + wrappedCode)();
|
|
1319
|
+
} catch (parseError) {
|
|
1320
|
+
clearTimeout(timeoutId);
|
|
1321
|
+
parentPort.postMessage({
|
|
1322
|
+
type: 'error',
|
|
1323
|
+
error: {
|
|
1324
|
+
message: 'Handler cannot be deserialized in worker thread. ' +
|
|
1325
|
+
'Ensure your handler is a standalone function without closures over external variables. ' +
|
|
1326
|
+
'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
|
|
1327
|
+
stack: parseError instanceof Error ? parseError.stack : undefined,
|
|
1328
|
+
name: 'SerializationError',
|
|
1329
|
+
},
|
|
1330
|
+
});
|
|
1331
|
+
return;
|
|
386
1332
|
}
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
1333
|
+
|
|
1334
|
+
// Ensure handlerFn is actually a function
|
|
1335
|
+
if (typeof handlerFn !== 'function') {
|
|
1336
|
+
clearTimeout(timeoutId);
|
|
1337
|
+
parentPort.postMessage({
|
|
1338
|
+
type: 'error',
|
|
1339
|
+
error: {
|
|
1340
|
+
message: 'Handler deserialization did not produce a function. ' +
|
|
1341
|
+
'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
|
|
1342
|
+
name: 'SerializationError',
|
|
1343
|
+
},
|
|
1344
|
+
});
|
|
1345
|
+
return;
|
|
390
1346
|
}
|
|
1347
|
+
|
|
1348
|
+
handlerFn(payload, signal)
|
|
1349
|
+
.then(() => {
|
|
1350
|
+
clearTimeout(timeoutId);
|
|
1351
|
+
parentPort.postMessage({ type: 'success' });
|
|
1352
|
+
})
|
|
1353
|
+
.catch((error) => {
|
|
1354
|
+
clearTimeout(timeoutId);
|
|
1355
|
+
parentPort.postMessage({
|
|
1356
|
+
type: 'error',
|
|
1357
|
+
error: {
|
|
1358
|
+
message: error.message,
|
|
1359
|
+
stack: error.stack,
|
|
1360
|
+
name: error.name,
|
|
1361
|
+
},
|
|
1362
|
+
});
|
|
1363
|
+
});
|
|
1364
|
+
} catch (error) {
|
|
1365
|
+
clearTimeout(timeoutId);
|
|
1366
|
+
parentPort.postMessage({
|
|
1367
|
+
type: 'error',
|
|
1368
|
+
error: {
|
|
1369
|
+
message: error.message,
|
|
1370
|
+
stack: error.stack,
|
|
1371
|
+
name: error.name,
|
|
1372
|
+
},
|
|
1373
|
+
});
|
|
391
1374
|
}
|
|
1375
|
+
})();
|
|
1376
|
+
`;
|
|
1377
|
+
const worker = new worker_threads.Worker(workerCode, {
|
|
1378
|
+
eval: true,
|
|
1379
|
+
workerData: {
|
|
1380
|
+
handlerCode: handler.toString(),
|
|
1381
|
+
payload,
|
|
1382
|
+
timeoutMs
|
|
392
1383
|
}
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
params.push(tagValues);
|
|
412
|
-
break;
|
|
413
|
-
default:
|
|
414
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
415
|
-
params.push(tagValues);
|
|
416
|
-
}
|
|
1384
|
+
});
|
|
1385
|
+
let resolved = false;
|
|
1386
|
+
worker.on("message", (message) => {
|
|
1387
|
+
if (resolved) return;
|
|
1388
|
+
resolved = true;
|
|
1389
|
+
if (message.type === "success") {
|
|
1390
|
+
resolve();
|
|
1391
|
+
} else if (message.type === "timeout") {
|
|
1392
|
+
const timeoutError = new Error(
|
|
1393
|
+
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
1394
|
+
);
|
|
1395
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
1396
|
+
reject(timeoutError);
|
|
1397
|
+
} else if (message.type === "error") {
|
|
1398
|
+
const error = new Error(message.error.message);
|
|
1399
|
+
error.stack = message.error.stack;
|
|
1400
|
+
error.name = message.error.name;
|
|
1401
|
+
reject(error);
|
|
417
1402
|
}
|
|
418
|
-
}
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
}
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
1403
|
+
});
|
|
1404
|
+
worker.on("error", (error) => {
|
|
1405
|
+
if (resolved) return;
|
|
1406
|
+
resolved = true;
|
|
1407
|
+
reject(error);
|
|
1408
|
+
});
|
|
1409
|
+
worker.on("exit", (code) => {
|
|
1410
|
+
if (resolved) return;
|
|
1411
|
+
if (code !== 0) {
|
|
1412
|
+
resolved = true;
|
|
1413
|
+
reject(new Error(`Worker stopped with exit code ${code}`));
|
|
1414
|
+
}
|
|
1415
|
+
});
|
|
1416
|
+
setTimeout(() => {
|
|
1417
|
+
if (!resolved) {
|
|
1418
|
+
resolved = true;
|
|
1419
|
+
worker.terminate().then(() => {
|
|
1420
|
+
const timeoutError = new Error(
|
|
1421
|
+
`Job timed out after ${timeoutMs} ms and was forcefully terminated`
|
|
1422
|
+
);
|
|
1423
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
1424
|
+
reject(timeoutError);
|
|
1425
|
+
}).catch((err) => {
|
|
1426
|
+
reject(err);
|
|
1427
|
+
});
|
|
1428
|
+
}
|
|
1429
|
+
}, timeoutMs + 100);
|
|
1430
|
+
});
|
|
1431
|
+
}
|
|
1432
|
+
function calculateWaitUntil(duration) {
|
|
1433
|
+
const now = Date.now();
|
|
1434
|
+
let ms = 0;
|
|
1435
|
+
if (duration.seconds) ms += duration.seconds * 1e3;
|
|
1436
|
+
if (duration.minutes) ms += duration.minutes * 60 * 1e3;
|
|
1437
|
+
if (duration.hours) ms += duration.hours * 60 * 60 * 1e3;
|
|
1438
|
+
if (duration.days) ms += duration.days * 24 * 60 * 60 * 1e3;
|
|
1439
|
+
if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1e3;
|
|
1440
|
+
if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1e3;
|
|
1441
|
+
if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1e3;
|
|
1442
|
+
if (ms <= 0) {
|
|
1443
|
+
throw new Error(
|
|
1444
|
+
"waitFor duration must be positive. Provide at least one positive duration field."
|
|
436
1445
|
);
|
|
437
|
-
log(`Found ${result.rows.length} jobs (all)`);
|
|
438
|
-
return result.rows.map((job) => ({
|
|
439
|
-
...job,
|
|
440
|
-
payload: job.payload,
|
|
441
|
-
timeoutMs: job.timeoutMs
|
|
442
|
-
}));
|
|
443
|
-
} catch (error) {
|
|
444
|
-
log(`Error getting all jobs: ${error}`);
|
|
445
|
-
throw error;
|
|
446
|
-
} finally {
|
|
447
|
-
client.release();
|
|
448
1446
|
}
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
if (
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
1447
|
+
return new Date(now + ms);
|
|
1448
|
+
}
|
|
1449
|
+
async function resolveCompletedWaits(pool, stepData) {
|
|
1450
|
+
for (const key of Object.keys(stepData)) {
|
|
1451
|
+
if (!key.startsWith("__wait_")) continue;
|
|
1452
|
+
const entry = stepData[key];
|
|
1453
|
+
if (!entry || typeof entry !== "object" || entry.completed) continue;
|
|
1454
|
+
if (entry.type === "duration" || entry.type === "date") {
|
|
1455
|
+
stepData[key] = { ...entry, completed: true };
|
|
1456
|
+
} else if (entry.type === "token" && entry.tokenId) {
|
|
1457
|
+
const wp = await getWaitpoint(pool, entry.tokenId);
|
|
1458
|
+
if (wp && wp.status === "completed") {
|
|
1459
|
+
stepData[key] = {
|
|
1460
|
+
...entry,
|
|
1461
|
+
completed: true,
|
|
1462
|
+
result: { ok: true, output: wp.output }
|
|
1463
|
+
};
|
|
1464
|
+
} else if (wp && wp.status === "timed_out") {
|
|
1465
|
+
stepData[key] = {
|
|
1466
|
+
...entry,
|
|
1467
|
+
completed: true,
|
|
1468
|
+
result: { ok: false, error: "Token timed out" }
|
|
1469
|
+
};
|
|
462
1470
|
}
|
|
463
1471
|
}
|
|
464
|
-
await client.query(
|
|
465
|
-
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
466
|
-
params
|
|
467
|
-
);
|
|
468
|
-
} finally {
|
|
469
|
-
client.release();
|
|
470
1472
|
}
|
|
471
|
-
}
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
`
|
|
483
|
-
);
|
|
484
|
-
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
485
|
-
return result.rowCount || 0;
|
|
486
|
-
} catch (error) {
|
|
487
|
-
log(`Error reclaiming stuck jobs: ${error}`);
|
|
488
|
-
throw error;
|
|
489
|
-
} finally {
|
|
490
|
-
client.release();
|
|
491
|
-
}
|
|
492
|
-
};
|
|
493
|
-
var getJobEvents = async (pool, jobId) => {
|
|
494
|
-
const client = await pool.connect();
|
|
495
|
-
try {
|
|
496
|
-
const res = await client.query(
|
|
497
|
-
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
498
|
-
[jobId]
|
|
499
|
-
);
|
|
500
|
-
return res.rows;
|
|
501
|
-
} finally {
|
|
502
|
-
client.release();
|
|
503
|
-
}
|
|
504
|
-
};
|
|
505
|
-
var getJobsByTags = async (pool, tags, mode = "all", limit = 100, offset = 0) => {
|
|
506
|
-
const client = await pool.connect();
|
|
507
|
-
try {
|
|
508
|
-
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags
|
|
509
|
-
FROM job_queue`;
|
|
510
|
-
let params = [];
|
|
511
|
-
switch (mode) {
|
|
512
|
-
case "exact":
|
|
513
|
-
query += " WHERE tags = $1";
|
|
514
|
-
params = [tags];
|
|
515
|
-
break;
|
|
516
|
-
case "all":
|
|
517
|
-
query += " WHERE tags @> $1";
|
|
518
|
-
params = [tags];
|
|
519
|
-
break;
|
|
520
|
-
case "any":
|
|
521
|
-
query += " WHERE tags && $1";
|
|
522
|
-
params = [tags];
|
|
523
|
-
break;
|
|
524
|
-
case "none":
|
|
525
|
-
query += " WHERE NOT (tags && $1)";
|
|
526
|
-
params = [tags];
|
|
527
|
-
break;
|
|
528
|
-
default:
|
|
529
|
-
query += " WHERE tags @> $1";
|
|
530
|
-
params = [tags];
|
|
531
|
-
}
|
|
532
|
-
query += " ORDER BY created_at DESC LIMIT $2 OFFSET $3";
|
|
533
|
-
params.push(limit, offset);
|
|
534
|
-
const result = await client.query(query, params);
|
|
535
|
-
log(
|
|
536
|
-
`Found ${result.rows.length} jobs by tags ${JSON.stringify(tags)} (mode: ${mode})`
|
|
537
|
-
);
|
|
538
|
-
return result.rows.map((job) => ({
|
|
539
|
-
...job,
|
|
540
|
-
payload: job.payload,
|
|
541
|
-
timeoutMs: job.timeoutMs,
|
|
542
|
-
failureReason: job.failureReason
|
|
543
|
-
}));
|
|
544
|
-
} catch (error) {
|
|
545
|
-
log(
|
|
546
|
-
`Error getting jobs by tags ${JSON.stringify(tags)} (mode: ${mode}): ${error}`
|
|
547
|
-
);
|
|
548
|
-
throw error;
|
|
549
|
-
} finally {
|
|
550
|
-
client.release();
|
|
551
|
-
}
|
|
552
|
-
};
|
|
553
|
-
var getJobs = async (pool, filters, limit = 100, offset = 0) => {
|
|
554
|
-
const client = await pool.connect();
|
|
555
|
-
try {
|
|
556
|
-
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags FROM job_queue`;
|
|
557
|
-
const params = [];
|
|
558
|
-
let where = [];
|
|
559
|
-
let paramIdx = 1;
|
|
560
|
-
if (filters) {
|
|
561
|
-
if (filters.jobType) {
|
|
562
|
-
where.push(`job_type = $${paramIdx++}`);
|
|
563
|
-
params.push(filters.jobType);
|
|
1473
|
+
}
|
|
1474
|
+
function buildWaitContext(backend, pool, jobId, stepData, baseCtx) {
|
|
1475
|
+
let waitCounter = 0;
|
|
1476
|
+
const ctx = {
|
|
1477
|
+
prolong: baseCtx.prolong,
|
|
1478
|
+
onTimeout: baseCtx.onTimeout,
|
|
1479
|
+
run: async (stepName, fn) => {
|
|
1480
|
+
const cached = stepData[stepName];
|
|
1481
|
+
if (cached && typeof cached === "object" && cached.__completed) {
|
|
1482
|
+
log(`Step "${stepName}" replayed from cache for job ${jobId}`);
|
|
1483
|
+
return cached.result;
|
|
564
1484
|
}
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
1485
|
+
const result = await fn();
|
|
1486
|
+
stepData[stepName] = { __completed: true, result };
|
|
1487
|
+
await updateStepData(pool, jobId, stepData);
|
|
1488
|
+
return result;
|
|
1489
|
+
},
|
|
1490
|
+
waitFor: async (duration) => {
|
|
1491
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
1492
|
+
const cached = stepData[waitKey];
|
|
1493
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
1494
|
+
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
1495
|
+
return;
|
|
568
1496
|
}
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
if (ops.gte) {
|
|
580
|
-
where.push(`run_at >= $${paramIdx++}`);
|
|
581
|
-
params.push(ops.gte);
|
|
582
|
-
}
|
|
583
|
-
if (ops.lt) {
|
|
584
|
-
where.push(`run_at < $${paramIdx++}`);
|
|
585
|
-
params.push(ops.lt);
|
|
586
|
-
}
|
|
587
|
-
if (ops.lte) {
|
|
588
|
-
where.push(`run_at <= $${paramIdx++}`);
|
|
589
|
-
params.push(ops.lte);
|
|
590
|
-
}
|
|
591
|
-
if (ops.eq) {
|
|
592
|
-
where.push(`run_at = $${paramIdx++}`);
|
|
593
|
-
params.push(ops.eq);
|
|
594
|
-
}
|
|
595
|
-
}
|
|
1497
|
+
const waitUntilDate = calculateWaitUntil(duration);
|
|
1498
|
+
stepData[waitKey] = { type: "duration", completed: false };
|
|
1499
|
+
throw new WaitSignal("duration", waitUntilDate, void 0, stepData);
|
|
1500
|
+
},
|
|
1501
|
+
waitUntil: async (date) => {
|
|
1502
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
1503
|
+
const cached = stepData[waitKey];
|
|
1504
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
1505
|
+
log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
|
|
1506
|
+
return;
|
|
596
1507
|
}
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
1508
|
+
stepData[waitKey] = { type: "date", completed: false };
|
|
1509
|
+
throw new WaitSignal("date", date, void 0, stepData);
|
|
1510
|
+
},
|
|
1511
|
+
createToken: async (options) => {
|
|
1512
|
+
const token = await createWaitpoint(pool, jobId, options);
|
|
1513
|
+
return token;
|
|
1514
|
+
},
|
|
1515
|
+
waitForToken: async (tokenId) => {
|
|
1516
|
+
const waitKey = `__wait_${waitCounter++}`;
|
|
1517
|
+
const cached = stepData[waitKey];
|
|
1518
|
+
if (cached && typeof cached === "object" && cached.completed) {
|
|
1519
|
+
log(
|
|
1520
|
+
`Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
|
|
1521
|
+
);
|
|
1522
|
+
return cached.result;
|
|
1523
|
+
}
|
|
1524
|
+
const wp = await getWaitpoint(pool, tokenId);
|
|
1525
|
+
if (wp && wp.status === "completed") {
|
|
1526
|
+
const result = {
|
|
1527
|
+
ok: true,
|
|
1528
|
+
output: wp.output
|
|
1529
|
+
};
|
|
1530
|
+
stepData[waitKey] = {
|
|
1531
|
+
type: "token",
|
|
1532
|
+
tokenId,
|
|
1533
|
+
completed: true,
|
|
1534
|
+
result
|
|
1535
|
+
};
|
|
1536
|
+
await updateStepData(pool, jobId, stepData);
|
|
1537
|
+
return result;
|
|
621
1538
|
}
|
|
1539
|
+
if (wp && wp.status === "timed_out") {
|
|
1540
|
+
const result = {
|
|
1541
|
+
ok: false,
|
|
1542
|
+
error: "Token timed out"
|
|
1543
|
+
};
|
|
1544
|
+
stepData[waitKey] = {
|
|
1545
|
+
type: "token",
|
|
1546
|
+
tokenId,
|
|
1547
|
+
completed: true,
|
|
1548
|
+
result
|
|
1549
|
+
};
|
|
1550
|
+
await updateStepData(pool, jobId, stepData);
|
|
1551
|
+
return result;
|
|
1552
|
+
}
|
|
1553
|
+
stepData[waitKey] = { type: "token", tokenId, completed: false };
|
|
1554
|
+
throw new WaitSignal("token", void 0, tokenId, stepData);
|
|
1555
|
+
},
|
|
1556
|
+
setProgress: async (percent) => {
|
|
1557
|
+
if (percent < 0 || percent > 100)
|
|
1558
|
+
throw new Error("Progress must be between 0 and 100");
|
|
1559
|
+
await backend.updateProgress(jobId, Math.round(percent));
|
|
622
1560
|
}
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
query += ` ORDER BY created_at DESC LIMIT $${paramIdx++} OFFSET $${paramIdx}`;
|
|
628
|
-
params.push(limit, offset);
|
|
629
|
-
const result = await client.query(query, params);
|
|
630
|
-
log(`Found ${result.rows.length} jobs`);
|
|
631
|
-
return result.rows.map((job) => ({
|
|
632
|
-
...job,
|
|
633
|
-
payload: job.payload,
|
|
634
|
-
timeoutMs: job.timeoutMs,
|
|
635
|
-
failureReason: job.failureReason
|
|
636
|
-
}));
|
|
637
|
-
} catch (error) {
|
|
638
|
-
log(`Error getting jobs: ${error}`);
|
|
639
|
-
throw error;
|
|
640
|
-
} finally {
|
|
641
|
-
client.release();
|
|
642
|
-
}
|
|
643
|
-
};
|
|
644
|
-
|
|
645
|
-
// src/processor.ts
|
|
646
|
-
async function processJobWithHandlers(pool, job, jobHandlers) {
|
|
1561
|
+
};
|
|
1562
|
+
return ctx;
|
|
1563
|
+
}
|
|
1564
|
+
async function processJobWithHandlers(backend, job, jobHandlers) {
|
|
647
1565
|
const handler = jobHandlers[job.jobType];
|
|
648
1566
|
if (!handler) {
|
|
649
|
-
await setPendingReasonForUnpickedJobs(
|
|
650
|
-
pool,
|
|
1567
|
+
await backend.setPendingReasonForUnpickedJobs(
|
|
651
1568
|
`No handler registered for job type: ${job.jobType}`,
|
|
652
1569
|
job.jobType
|
|
653
1570
|
);
|
|
654
|
-
await failJob(
|
|
655
|
-
pool,
|
|
1571
|
+
await backend.failJob(
|
|
656
1572
|
job.id,
|
|
657
1573
|
new Error(`No handler registered for job type: ${job.jobType}`),
|
|
658
1574
|
"no_handler" /* NoHandler */
|
|
659
1575
|
);
|
|
660
1576
|
return;
|
|
661
1577
|
}
|
|
1578
|
+
const stepData = { ...job.stepData || {} };
|
|
1579
|
+
const pool = tryExtractPool(backend);
|
|
1580
|
+
const hasStepHistory = Object.keys(stepData).some(
|
|
1581
|
+
(k) => k.startsWith("__wait_")
|
|
1582
|
+
);
|
|
1583
|
+
if (hasStepHistory && pool) {
|
|
1584
|
+
await resolveCompletedWaits(pool, stepData);
|
|
1585
|
+
await updateStepData(pool, job.id, stepData);
|
|
1586
|
+
}
|
|
662
1587
|
const timeoutMs = job.timeoutMs ?? void 0;
|
|
1588
|
+
const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
|
|
663
1589
|
let timeoutId;
|
|
664
1590
|
const controller = new AbortController();
|
|
665
1591
|
try {
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
await Promise.race([
|
|
669
|
-
jobPromise,
|
|
670
|
-
new Promise((_, reject) => {
|
|
671
|
-
timeoutId = setTimeout(() => {
|
|
672
|
-
controller.abort();
|
|
673
|
-
const timeoutError = new Error(
|
|
674
|
-
`Job timed out after ${timeoutMs} ms`
|
|
675
|
-
);
|
|
676
|
-
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
677
|
-
reject(timeoutError);
|
|
678
|
-
}, timeoutMs);
|
|
679
|
-
})
|
|
680
|
-
]);
|
|
1592
|
+
if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
|
|
1593
|
+
await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
|
|
681
1594
|
} else {
|
|
682
|
-
|
|
1595
|
+
let onTimeoutCallback;
|
|
1596
|
+
let timeoutReject;
|
|
1597
|
+
const armTimeout = (ms) => {
|
|
1598
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
1599
|
+
timeoutId = setTimeout(() => {
|
|
1600
|
+
if (onTimeoutCallback) {
|
|
1601
|
+
try {
|
|
1602
|
+
const extension = onTimeoutCallback();
|
|
1603
|
+
if (typeof extension === "number" && extension > 0) {
|
|
1604
|
+
backend.prolongJob(job.id).catch(() => {
|
|
1605
|
+
});
|
|
1606
|
+
armTimeout(extension);
|
|
1607
|
+
return;
|
|
1608
|
+
}
|
|
1609
|
+
} catch (callbackError) {
|
|
1610
|
+
log(
|
|
1611
|
+
`onTimeout callback threw for job ${job.id}: ${callbackError}`
|
|
1612
|
+
);
|
|
1613
|
+
}
|
|
1614
|
+
}
|
|
1615
|
+
controller.abort();
|
|
1616
|
+
const timeoutError = new Error(`Job timed out after ${ms} ms`);
|
|
1617
|
+
timeoutError.failureReason = "timeout" /* Timeout */;
|
|
1618
|
+
if (timeoutReject) {
|
|
1619
|
+
timeoutReject(timeoutError);
|
|
1620
|
+
}
|
|
1621
|
+
}, ms);
|
|
1622
|
+
};
|
|
1623
|
+
const hasTimeout = timeoutMs != null && timeoutMs > 0;
|
|
1624
|
+
const baseCtx = hasTimeout ? {
|
|
1625
|
+
prolong: (ms) => {
|
|
1626
|
+
const duration = ms ?? timeoutMs;
|
|
1627
|
+
if (duration != null && duration > 0) {
|
|
1628
|
+
armTimeout(duration);
|
|
1629
|
+
backend.prolongJob(job.id).catch(() => {
|
|
1630
|
+
});
|
|
1631
|
+
}
|
|
1632
|
+
},
|
|
1633
|
+
onTimeout: (callback) => {
|
|
1634
|
+
onTimeoutCallback = callback;
|
|
1635
|
+
}
|
|
1636
|
+
} : {
|
|
1637
|
+
prolong: () => {
|
|
1638
|
+
log("prolong() called but ignored: job has no timeout set");
|
|
1639
|
+
},
|
|
1640
|
+
onTimeout: () => {
|
|
1641
|
+
log("onTimeout() called but ignored: job has no timeout set");
|
|
1642
|
+
}
|
|
1643
|
+
};
|
|
1644
|
+
const ctx = pool ? buildWaitContext(backend, pool, job.id, stepData, baseCtx) : buildBasicContext(backend, job.id, baseCtx);
|
|
1645
|
+
if (forceKillOnTimeout && !hasTimeout) {
|
|
1646
|
+
log(
|
|
1647
|
+
`forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
|
|
1648
|
+
);
|
|
1649
|
+
}
|
|
1650
|
+
const jobPromise = handler(job.payload, controller.signal, ctx);
|
|
1651
|
+
if (hasTimeout) {
|
|
1652
|
+
await Promise.race([
|
|
1653
|
+
jobPromise,
|
|
1654
|
+
new Promise((_, reject) => {
|
|
1655
|
+
timeoutReject = reject;
|
|
1656
|
+
armTimeout(timeoutMs);
|
|
1657
|
+
})
|
|
1658
|
+
]);
|
|
1659
|
+
} else {
|
|
1660
|
+
await jobPromise;
|
|
1661
|
+
}
|
|
683
1662
|
}
|
|
684
1663
|
if (timeoutId) clearTimeout(timeoutId);
|
|
685
|
-
await completeJob(
|
|
1664
|
+
await backend.completeJob(job.id);
|
|
686
1665
|
} catch (error) {
|
|
687
1666
|
if (timeoutId) clearTimeout(timeoutId);
|
|
1667
|
+
if (error instanceof WaitSignal) {
|
|
1668
|
+
if (!pool) {
|
|
1669
|
+
await backend.failJob(
|
|
1670
|
+
job.id,
|
|
1671
|
+
new Error(
|
|
1672
|
+
"WaitSignal received but wait features require the PostgreSQL backend."
|
|
1673
|
+
),
|
|
1674
|
+
"handler_error" /* HandlerError */
|
|
1675
|
+
);
|
|
1676
|
+
return;
|
|
1677
|
+
}
|
|
1678
|
+
log(
|
|
1679
|
+
`Job ${job.id} entering wait: type=${error.type}, waitUntil=${error.waitUntil?.toISOString() ?? "none"}, tokenId=${error.tokenId ?? "none"}`
|
|
1680
|
+
);
|
|
1681
|
+
await waitJob(pool, job.id, {
|
|
1682
|
+
waitUntil: error.waitUntil,
|
|
1683
|
+
waitTokenId: error.tokenId,
|
|
1684
|
+
stepData: error.stepData
|
|
1685
|
+
});
|
|
1686
|
+
return;
|
|
1687
|
+
}
|
|
688
1688
|
console.error(`Error processing job ${job.id}:`, error);
|
|
689
1689
|
let failureReason = "handler_error" /* HandlerError */;
|
|
690
1690
|
if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
|
|
691
1691
|
failureReason = "timeout" /* Timeout */;
|
|
692
1692
|
}
|
|
693
|
-
await failJob(
|
|
694
|
-
pool,
|
|
1693
|
+
await backend.failJob(
|
|
695
1694
|
job.id,
|
|
696
1695
|
error instanceof Error ? error : new Error(String(error)),
|
|
697
1696
|
failureReason
|
|
698
1697
|
);
|
|
699
1698
|
}
|
|
700
1699
|
}
|
|
701
|
-
async function processBatchWithHandlers(
|
|
702
|
-
const jobs = await getNextBatch(
|
|
703
|
-
pool,
|
|
1700
|
+
async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError) {
|
|
1701
|
+
const jobs = await backend.getNextBatch(
|
|
704
1702
|
workerId,
|
|
705
1703
|
batchSize,
|
|
706
1704
|
jobType
|
|
707
1705
|
);
|
|
708
1706
|
if (!concurrency || concurrency >= jobs.length) {
|
|
709
1707
|
await Promise.all(
|
|
710
|
-
jobs.map((job) => processJobWithHandlers(
|
|
1708
|
+
jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers))
|
|
711
1709
|
);
|
|
712
1710
|
return jobs.length;
|
|
713
1711
|
}
|
|
@@ -720,13 +1718,16 @@ async function processBatchWithHandlers(pool, workerId, batchSize, jobType, jobH
|
|
|
720
1718
|
while (running < concurrency && idx < jobs.length) {
|
|
721
1719
|
const job = jobs[idx++];
|
|
722
1720
|
running++;
|
|
723
|
-
processJobWithHandlers(
|
|
1721
|
+
processJobWithHandlers(backend, job, jobHandlers).then(() => {
|
|
724
1722
|
running--;
|
|
725
1723
|
finished++;
|
|
726
1724
|
next();
|
|
727
1725
|
}).catch((err) => {
|
|
728
1726
|
running--;
|
|
729
1727
|
finished++;
|
|
1728
|
+
if (onError) {
|
|
1729
|
+
onError(err instanceof Error ? err : new Error(String(err)));
|
|
1730
|
+
}
|
|
730
1731
|
next();
|
|
731
1732
|
});
|
|
732
1733
|
}
|
|
@@ -734,7 +1735,7 @@ async function processBatchWithHandlers(pool, workerId, batchSize, jobType, jobH
|
|
|
734
1735
|
next();
|
|
735
1736
|
});
|
|
736
1737
|
}
|
|
737
|
-
var createProcessor = (
|
|
1738
|
+
var createProcessor = (backend, handlers, options = {}) => {
|
|
738
1739
|
const {
|
|
739
1740
|
workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
|
|
740
1741
|
batchSize = 10,
|
|
@@ -745,6 +1746,7 @@ var createProcessor = (pool, handlers, options = {}) => {
|
|
|
745
1746
|
} = options;
|
|
746
1747
|
let running = false;
|
|
747
1748
|
let intervalId = null;
|
|
1749
|
+
let currentBatchPromise = null;
|
|
748
1750
|
setLogContext(options.verbose ?? false);
|
|
749
1751
|
const processJobs = async () => {
|
|
750
1752
|
if (!running) return 0;
|
|
@@ -753,12 +1755,13 @@ var createProcessor = (pool, handlers, options = {}) => {
|
|
|
753
1755
|
);
|
|
754
1756
|
try {
|
|
755
1757
|
const processed = await processBatchWithHandlers(
|
|
756
|
-
|
|
1758
|
+
backend,
|
|
757
1759
|
workerId,
|
|
758
1760
|
batchSize,
|
|
759
1761
|
jobType,
|
|
760
1762
|
handlers,
|
|
761
|
-
concurrency
|
|
1763
|
+
concurrency,
|
|
1764
|
+
onError
|
|
762
1765
|
);
|
|
763
1766
|
return processed;
|
|
764
1767
|
} catch (error) {
|
|
@@ -776,27 +1779,56 @@ var createProcessor = (pool, handlers, options = {}) => {
|
|
|
776
1779
|
if (running) return;
|
|
777
1780
|
log(`Starting job processor with workerId: ${workerId}`);
|
|
778
1781
|
running = true;
|
|
779
|
-
const
|
|
1782
|
+
const scheduleNext = (immediate) => {
|
|
780
1783
|
if (!running) return;
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
1784
|
+
if (immediate) {
|
|
1785
|
+
intervalId = setTimeout(loop, 0);
|
|
1786
|
+
} else {
|
|
1787
|
+
intervalId = setTimeout(loop, pollInterval);
|
|
784
1788
|
}
|
|
785
1789
|
};
|
|
786
|
-
|
|
787
|
-
|
|
1790
|
+
const loop = async () => {
|
|
1791
|
+
if (!running) return;
|
|
1792
|
+
currentBatchPromise = processJobs();
|
|
1793
|
+
const processed = await currentBatchPromise;
|
|
1794
|
+
currentBatchPromise = null;
|
|
1795
|
+
scheduleNext(processed === batchSize);
|
|
1796
|
+
};
|
|
1797
|
+
loop();
|
|
788
1798
|
},
|
|
789
1799
|
/**
|
|
790
|
-
* Stop the job processor that runs in the background
|
|
1800
|
+
* Stop the job processor that runs in the background.
|
|
1801
|
+
* Does not wait for in-flight jobs.
|
|
791
1802
|
*/
|
|
792
1803
|
stop: () => {
|
|
793
1804
|
log(`Stopping job processor with workerId: ${workerId}`);
|
|
794
1805
|
running = false;
|
|
795
1806
|
if (intervalId) {
|
|
796
|
-
|
|
1807
|
+
clearTimeout(intervalId);
|
|
797
1808
|
intervalId = null;
|
|
798
1809
|
}
|
|
799
1810
|
},
|
|
1811
|
+
/**
|
|
1812
|
+
* Stop the job processor and wait for all in-flight jobs to complete.
|
|
1813
|
+
* Useful for graceful shutdown (e.g., SIGTERM handling).
|
|
1814
|
+
*/
|
|
1815
|
+
stopAndDrain: async (drainTimeoutMs = 3e4) => {
|
|
1816
|
+
log(`Stopping and draining job processor with workerId: ${workerId}`);
|
|
1817
|
+
running = false;
|
|
1818
|
+
if (intervalId) {
|
|
1819
|
+
clearTimeout(intervalId);
|
|
1820
|
+
intervalId = null;
|
|
1821
|
+
}
|
|
1822
|
+
if (currentBatchPromise) {
|
|
1823
|
+
await Promise.race([
|
|
1824
|
+
currentBatchPromise.catch(() => {
|
|
1825
|
+
}),
|
|
1826
|
+
new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
|
|
1827
|
+
]);
|
|
1828
|
+
currentBatchPromise = null;
|
|
1829
|
+
}
|
|
1830
|
+
log(`Job processor ${workerId} drained`);
|
|
1831
|
+
},
|
|
800
1832
|
/**
|
|
801
1833
|
* Start the job processor synchronously.
|
|
802
1834
|
* - This will process all jobs immediately and then stop.
|
|
@@ -895,60 +1927,1263 @@ Recommended: Remove sslmode from the connection string when using a custom CA.
|
|
|
895
1927
|
return pool;
|
|
896
1928
|
};
|
|
897
1929
|
|
|
1930
|
+
// src/backends/redis-scripts.ts
|
|
1931
|
+
var SCORE_RANGE = "1000000000000000";
|
|
1932
|
+
var ADD_JOB_SCRIPT = `
|
|
1933
|
+
local prefix = KEYS[1]
|
|
1934
|
+
local jobType = ARGV[1]
|
|
1935
|
+
local payloadJson = ARGV[2]
|
|
1936
|
+
local maxAttempts = tonumber(ARGV[3])
|
|
1937
|
+
local priority = tonumber(ARGV[4])
|
|
1938
|
+
local runAtMs = ARGV[5] -- "0" means now
|
|
1939
|
+
local timeoutMs = ARGV[6] -- "null" string if not set
|
|
1940
|
+
local forceKillOnTimeout = ARGV[7]
|
|
1941
|
+
local tagsJson = ARGV[8] -- "null" or JSON array string
|
|
1942
|
+
local idempotencyKey = ARGV[9] -- "null" string if not set
|
|
1943
|
+
local nowMs = tonumber(ARGV[10])
|
|
1944
|
+
|
|
1945
|
+
-- Idempotency check
|
|
1946
|
+
if idempotencyKey ~= "null" then
|
|
1947
|
+
local existing = redis.call('GET', prefix .. 'idempotency:' .. idempotencyKey)
|
|
1948
|
+
if existing then
|
|
1949
|
+
return existing
|
|
1950
|
+
end
|
|
1951
|
+
end
|
|
1952
|
+
|
|
1953
|
+
-- Generate ID
|
|
1954
|
+
local id = redis.call('INCR', prefix .. 'id_seq')
|
|
1955
|
+
local jobKey = prefix .. 'job:' .. id
|
|
1956
|
+
local runAt = runAtMs ~= "0" and tonumber(runAtMs) or nowMs
|
|
1957
|
+
|
|
1958
|
+
-- Store the job hash
|
|
1959
|
+
redis.call('HMSET', jobKey,
|
|
1960
|
+
'id', id,
|
|
1961
|
+
'jobType', jobType,
|
|
1962
|
+
'payload', payloadJson,
|
|
1963
|
+
'status', 'pending',
|
|
1964
|
+
'maxAttempts', maxAttempts,
|
|
1965
|
+
'attempts', 0,
|
|
1966
|
+
'priority', priority,
|
|
1967
|
+
'runAt', runAt,
|
|
1968
|
+
'timeoutMs', timeoutMs,
|
|
1969
|
+
'forceKillOnTimeout', forceKillOnTimeout,
|
|
1970
|
+
'createdAt', nowMs,
|
|
1971
|
+
'updatedAt', nowMs,
|
|
1972
|
+
'lockedAt', 'null',
|
|
1973
|
+
'lockedBy', 'null',
|
|
1974
|
+
'nextAttemptAt', 'null',
|
|
1975
|
+
'pendingReason', 'null',
|
|
1976
|
+
'errorHistory', '[]',
|
|
1977
|
+
'failureReason', 'null',
|
|
1978
|
+
'completedAt', 'null',
|
|
1979
|
+
'startedAt', 'null',
|
|
1980
|
+
'lastRetriedAt', 'null',
|
|
1981
|
+
'lastFailedAt', 'null',
|
|
1982
|
+
'lastCancelledAt', 'null',
|
|
1983
|
+
'tags', tagsJson,
|
|
1984
|
+
'idempotencyKey', idempotencyKey
|
|
1985
|
+
)
|
|
1986
|
+
|
|
1987
|
+
-- Status index
|
|
1988
|
+
redis.call('SADD', prefix .. 'status:pending', id)
|
|
1989
|
+
|
|
1990
|
+
-- Type index
|
|
1991
|
+
redis.call('SADD', prefix .. 'type:' .. jobType, id)
|
|
1992
|
+
|
|
1993
|
+
-- Tag indexes
|
|
1994
|
+
if tagsJson ~= "null" then
|
|
1995
|
+
local tags = cjson.decode(tagsJson)
|
|
1996
|
+
for _, tag in ipairs(tags) do
|
|
1997
|
+
redis.call('SADD', prefix .. 'tag:' .. tag, id)
|
|
1998
|
+
end
|
|
1999
|
+
-- Store tags for exact-match queries
|
|
2000
|
+
for _, tag in ipairs(tags) do
|
|
2001
|
+
redis.call('SADD', prefix .. 'job:' .. id .. ':tags', tag)
|
|
2002
|
+
end
|
|
2003
|
+
end
|
|
2004
|
+
|
|
2005
|
+
-- Idempotency mapping
|
|
2006
|
+
if idempotencyKey ~= "null" then
|
|
2007
|
+
redis.call('SET', prefix .. 'idempotency:' .. idempotencyKey, id)
|
|
2008
|
+
end
|
|
2009
|
+
|
|
2010
|
+
-- All-jobs sorted set (for ordering by createdAt)
|
|
2011
|
+
redis.call('ZADD', prefix .. 'all', nowMs, id)
|
|
2012
|
+
|
|
2013
|
+
-- Queue or delayed
|
|
2014
|
+
if runAt <= nowMs then
|
|
2015
|
+
-- Ready now: add to queue with priority score
|
|
2016
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - nowMs)
|
|
2017
|
+
redis.call('ZADD', prefix .. 'queue', score, id)
|
|
2018
|
+
else
|
|
2019
|
+
-- Future: add to delayed set
|
|
2020
|
+
redis.call('ZADD', prefix .. 'delayed', runAt, id)
|
|
2021
|
+
end
|
|
2022
|
+
|
|
2023
|
+
return id
|
|
2024
|
+
`;
|
|
2025
|
+
var GET_NEXT_BATCH_SCRIPT = `
|
|
2026
|
+
local prefix = KEYS[1]
|
|
2027
|
+
local workerId = ARGV[1]
|
|
2028
|
+
local batchSize = tonumber(ARGV[2])
|
|
2029
|
+
local nowMs = tonumber(ARGV[3])
|
|
2030
|
+
local jobTypeFilter = ARGV[4] -- "null" or JSON array or single string
|
|
2031
|
+
|
|
2032
|
+
-- 1. Move ready delayed jobs into queue
|
|
2033
|
+
local delayed = redis.call('ZRANGEBYSCORE', prefix .. 'delayed', '-inf', nowMs, 'LIMIT', 0, 200)
|
|
2034
|
+
for _, jobId in ipairs(delayed) do
|
|
2035
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2036
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2037
|
+
local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
|
|
2038
|
+
local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
|
|
2039
|
+
if status == 'pending' and attempts < maxAttempts then
|
|
2040
|
+
local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2041
|
+
local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2042
|
+
local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
|
|
2043
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2044
|
+
end
|
|
2045
|
+
redis.call('ZREM', prefix .. 'delayed', jobId)
|
|
2046
|
+
end
|
|
2047
|
+
|
|
2048
|
+
-- 2. Move ready retry jobs into queue
|
|
2049
|
+
local retries = redis.call('ZRANGEBYSCORE', prefix .. 'retry', '-inf', nowMs, 'LIMIT', 0, 200)
|
|
2050
|
+
for _, jobId in ipairs(retries) do
|
|
2051
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2052
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2053
|
+
local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
|
|
2054
|
+
local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
|
|
2055
|
+
if status == 'failed' and attempts < maxAttempts then
|
|
2056
|
+
local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2057
|
+
local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2058
|
+
local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
|
|
2059
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2060
|
+
redis.call('SREM', prefix .. 'status:failed', jobId)
|
|
2061
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2062
|
+
redis.call('HMSET', jk, 'status', 'pending')
|
|
2063
|
+
end
|
|
2064
|
+
redis.call('ZREM', prefix .. 'retry', jobId)
|
|
2065
|
+
end
|
|
2066
|
+
|
|
2067
|
+
-- 3. Parse job type filter
|
|
2068
|
+
local filterTypes = nil
|
|
2069
|
+
if jobTypeFilter ~= "null" then
|
|
2070
|
+
-- Could be a JSON array or a plain string
|
|
2071
|
+
local ok, decoded = pcall(cjson.decode, jobTypeFilter)
|
|
2072
|
+
if ok and type(decoded) == 'table' then
|
|
2073
|
+
filterTypes = {}
|
|
2074
|
+
for _, t in ipairs(decoded) do filterTypes[t] = true end
|
|
2075
|
+
else
|
|
2076
|
+
filterTypes = { [jobTypeFilter] = true }
|
|
2077
|
+
end
|
|
2078
|
+
end
|
|
2079
|
+
|
|
2080
|
+
-- 4. Pop candidates from queue (highest score first)
|
|
2081
|
+
-- We pop more than batchSize because some may be filtered out
|
|
2082
|
+
local popCount = batchSize * 3
|
|
2083
|
+
local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
|
|
2084
|
+
-- candidates: [member1, score1, member2, score2, ...]
|
|
2085
|
+
|
|
2086
|
+
local results = {}
|
|
2087
|
+
local jobsClaimed = 0
|
|
2088
|
+
local putBack = {} -- {score, id} pairs to put back
|
|
2089
|
+
|
|
2090
|
+
for i = 1, #candidates, 2 do
|
|
2091
|
+
local jobId = candidates[i]
|
|
2092
|
+
local score = candidates[i + 1]
|
|
2093
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2094
|
+
|
|
2095
|
+
if jobsClaimed >= batchSize then
|
|
2096
|
+
-- We have enough; put the rest back
|
|
2097
|
+
table.insert(putBack, score)
|
|
2098
|
+
table.insert(putBack, jobId)
|
|
2099
|
+
else
|
|
2100
|
+
-- Check job type filter
|
|
2101
|
+
local jt = redis.call('HGET', jk, 'jobType')
|
|
2102
|
+
if filterTypes and not filterTypes[jt] then
|
|
2103
|
+
-- Doesn't match filter: put back
|
|
2104
|
+
table.insert(putBack, score)
|
|
2105
|
+
table.insert(putBack, jobId)
|
|
2106
|
+
else
|
|
2107
|
+
-- Check run_at
|
|
2108
|
+
local runAt = tonumber(redis.call('HGET', jk, 'runAt'))
|
|
2109
|
+
if runAt > nowMs then
|
|
2110
|
+
-- Not ready yet: move to delayed
|
|
2111
|
+
redis.call('ZADD', prefix .. 'delayed', runAt, jobId)
|
|
2112
|
+
else
|
|
2113
|
+
-- Claim this job
|
|
2114
|
+
local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
|
|
2115
|
+
local startedAt = redis.call('HGET', jk, 'startedAt')
|
|
2116
|
+
local lastRetriedAt = redis.call('HGET', jk, 'lastRetriedAt')
|
|
2117
|
+
if startedAt == 'null' then startedAt = nowMs end
|
|
2118
|
+
if attempts > 0 then lastRetriedAt = nowMs end
|
|
2119
|
+
|
|
2120
|
+
redis.call('HMSET', jk,
|
|
2121
|
+
'status', 'processing',
|
|
2122
|
+
'lockedAt', nowMs,
|
|
2123
|
+
'lockedBy', workerId,
|
|
2124
|
+
'attempts', attempts + 1,
|
|
2125
|
+
'updatedAt', nowMs,
|
|
2126
|
+
'pendingReason', 'null',
|
|
2127
|
+
'startedAt', startedAt,
|
|
2128
|
+
'lastRetriedAt', lastRetriedAt
|
|
2129
|
+
)
|
|
2130
|
+
|
|
2131
|
+
-- Update status sets
|
|
2132
|
+
redis.call('SREM', prefix .. 'status:pending', jobId)
|
|
2133
|
+
redis.call('SADD', prefix .. 'status:processing', jobId)
|
|
2134
|
+
|
|
2135
|
+
-- Return job data as flat array
|
|
2136
|
+
local data = redis.call('HGETALL', jk)
|
|
2137
|
+
for _, v in ipairs(data) do
|
|
2138
|
+
table.insert(results, v)
|
|
2139
|
+
end
|
|
2140
|
+
-- Separator
|
|
2141
|
+
table.insert(results, '__JOB_SEP__')
|
|
2142
|
+
jobsClaimed = jobsClaimed + 1
|
|
2143
|
+
end
|
|
2144
|
+
end
|
|
2145
|
+
end
|
|
2146
|
+
end
|
|
2147
|
+
|
|
2148
|
+
-- Put back jobs we didn't claim
|
|
2149
|
+
if #putBack > 0 then
|
|
2150
|
+
redis.call('ZADD', prefix .. 'queue', unpack(putBack))
|
|
2151
|
+
end
|
|
2152
|
+
|
|
2153
|
+
return results
|
|
2154
|
+
`;
|
|
2155
|
+
var COMPLETE_JOB_SCRIPT = `
|
|
2156
|
+
local prefix = KEYS[1]
|
|
2157
|
+
local jobId = ARGV[1]
|
|
2158
|
+
local nowMs = ARGV[2]
|
|
2159
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2160
|
+
|
|
2161
|
+
redis.call('HMSET', jk,
|
|
2162
|
+
'status', 'completed',
|
|
2163
|
+
'updatedAt', nowMs,
|
|
2164
|
+
'completedAt', nowMs
|
|
2165
|
+
)
|
|
2166
|
+
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
2167
|
+
redis.call('SADD', prefix .. 'status:completed', jobId)
|
|
2168
|
+
|
|
2169
|
+
return 1
|
|
2170
|
+
`;
|
|
2171
|
+
var FAIL_JOB_SCRIPT = `
|
|
2172
|
+
local prefix = KEYS[1]
|
|
2173
|
+
local jobId = ARGV[1]
|
|
2174
|
+
local errorJson = ARGV[2]
|
|
2175
|
+
local failureReason = ARGV[3]
|
|
2176
|
+
local nowMs = tonumber(ARGV[4])
|
|
2177
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2178
|
+
|
|
2179
|
+
local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
|
|
2180
|
+
local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
|
|
2181
|
+
|
|
2182
|
+
-- Compute next_attempt_at: 2^attempts minutes from now
|
|
2183
|
+
local nextAttemptAt = 'null'
|
|
2184
|
+
if attempts < maxAttempts then
|
|
2185
|
+
local delayMs = math.pow(2, attempts) * 60000
|
|
2186
|
+
nextAttemptAt = nowMs + delayMs
|
|
2187
|
+
end
|
|
2188
|
+
|
|
2189
|
+
-- Append to error_history
|
|
2190
|
+
local history = redis.call('HGET', jk, 'errorHistory') or '[]'
|
|
2191
|
+
local ok, arr = pcall(cjson.decode, history)
|
|
2192
|
+
if not ok then arr = {} end
|
|
2193
|
+
local newErrors = cjson.decode(errorJson)
|
|
2194
|
+
for _, e in ipairs(newErrors) do
|
|
2195
|
+
table.insert(arr, e)
|
|
2196
|
+
end
|
|
2197
|
+
|
|
2198
|
+
redis.call('HMSET', jk,
|
|
2199
|
+
'status', 'failed',
|
|
2200
|
+
'updatedAt', nowMs,
|
|
2201
|
+
'nextAttemptAt', tostring(nextAttemptAt),
|
|
2202
|
+
'errorHistory', cjson.encode(arr),
|
|
2203
|
+
'failureReason', failureReason,
|
|
2204
|
+
'lastFailedAt', nowMs
|
|
2205
|
+
)
|
|
2206
|
+
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
2207
|
+
redis.call('SADD', prefix .. 'status:failed', jobId)
|
|
2208
|
+
|
|
2209
|
+
-- Schedule retry if applicable
|
|
2210
|
+
if nextAttemptAt ~= 'null' then
|
|
2211
|
+
redis.call('ZADD', prefix .. 'retry', nextAttemptAt, jobId)
|
|
2212
|
+
end
|
|
2213
|
+
|
|
2214
|
+
return 1
|
|
2215
|
+
`;
|
|
2216
|
+
var RETRY_JOB_SCRIPT = `
|
|
2217
|
+
local prefix = KEYS[1]
|
|
2218
|
+
local jobId = ARGV[1]
|
|
2219
|
+
local nowMs = tonumber(ARGV[2])
|
|
2220
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2221
|
+
|
|
2222
|
+
local oldStatus = redis.call('HGET', jk, 'status')
|
|
2223
|
+
|
|
2224
|
+
redis.call('HMSET', jk,
|
|
2225
|
+
'status', 'pending',
|
|
2226
|
+
'updatedAt', nowMs,
|
|
2227
|
+
'lockedAt', 'null',
|
|
2228
|
+
'lockedBy', 'null',
|
|
2229
|
+
'nextAttemptAt', nowMs,
|
|
2230
|
+
'lastRetriedAt', nowMs
|
|
2231
|
+
)
|
|
2232
|
+
|
|
2233
|
+
-- Remove from old status, add to pending
|
|
2234
|
+
if oldStatus then
|
|
2235
|
+
redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
|
|
2236
|
+
end
|
|
2237
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2238
|
+
|
|
2239
|
+
-- Remove from retry sorted set if present
|
|
2240
|
+
redis.call('ZREM', prefix .. 'retry', jobId)
|
|
2241
|
+
|
|
2242
|
+
-- Add to queue (ready now)
|
|
2243
|
+
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2244
|
+
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2245
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
|
|
2246
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2247
|
+
|
|
2248
|
+
return 1
|
|
2249
|
+
`;
|
|
2250
|
+
var CANCEL_JOB_SCRIPT = `
|
|
2251
|
+
local prefix = KEYS[1]
|
|
2252
|
+
local jobId = ARGV[1]
|
|
2253
|
+
local nowMs = ARGV[2]
|
|
2254
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2255
|
+
|
|
2256
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2257
|
+
if status ~= 'pending' then return 0 end
|
|
2258
|
+
|
|
2259
|
+
redis.call('HMSET', jk,
|
|
2260
|
+
'status', 'cancelled',
|
|
2261
|
+
'updatedAt', nowMs,
|
|
2262
|
+
'lastCancelledAt', nowMs
|
|
2263
|
+
)
|
|
2264
|
+
redis.call('SREM', prefix .. 'status:pending', jobId)
|
|
2265
|
+
redis.call('SADD', prefix .. 'status:cancelled', jobId)
|
|
2266
|
+
-- Remove from queue / delayed
|
|
2267
|
+
redis.call('ZREM', prefix .. 'queue', jobId)
|
|
2268
|
+
redis.call('ZREM', prefix .. 'delayed', jobId)
|
|
2269
|
+
|
|
2270
|
+
return 1
|
|
2271
|
+
`;
|
|
2272
|
+
var PROLONG_JOB_SCRIPT = `
|
|
2273
|
+
local prefix = KEYS[1]
|
|
2274
|
+
local jobId = ARGV[1]
|
|
2275
|
+
local nowMs = ARGV[2]
|
|
2276
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2277
|
+
|
|
2278
|
+
local status = redis.call('HGET', jk, 'status')
|
|
2279
|
+
if status ~= 'processing' then return 0 end
|
|
2280
|
+
|
|
2281
|
+
redis.call('HMSET', jk,
|
|
2282
|
+
'lockedAt', nowMs,
|
|
2283
|
+
'updatedAt', nowMs
|
|
2284
|
+
)
|
|
2285
|
+
|
|
2286
|
+
return 1
|
|
2287
|
+
`;
|
|
2288
|
+
var RECLAIM_STUCK_JOBS_SCRIPT = `
|
|
2289
|
+
local prefix = KEYS[1]
|
|
2290
|
+
local maxAgeMs = tonumber(ARGV[1])
|
|
2291
|
+
local nowMs = tonumber(ARGV[2])
|
|
2292
|
+
|
|
2293
|
+
local processing = redis.call('SMEMBERS', prefix .. 'status:processing')
|
|
2294
|
+
local count = 0
|
|
2295
|
+
|
|
2296
|
+
for _, jobId in ipairs(processing) do
|
|
2297
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2298
|
+
local lockedAt = redis.call('HGET', jk, 'lockedAt')
|
|
2299
|
+
if lockedAt and lockedAt ~= 'null' then
|
|
2300
|
+
local lockedAtNum = tonumber(lockedAt)
|
|
2301
|
+
if lockedAtNum then
|
|
2302
|
+
-- Use the greater of maxAgeMs and the job's own timeoutMs
|
|
2303
|
+
local jobMaxAge = maxAgeMs
|
|
2304
|
+
local timeoutMs = redis.call('HGET', jk, 'timeoutMs')
|
|
2305
|
+
if timeoutMs and timeoutMs ~= 'null' then
|
|
2306
|
+
local tMs = tonumber(timeoutMs)
|
|
2307
|
+
if tMs and tMs > jobMaxAge then
|
|
2308
|
+
jobMaxAge = tMs
|
|
2309
|
+
end
|
|
2310
|
+
end
|
|
2311
|
+
local cutoff = nowMs - jobMaxAge
|
|
2312
|
+
if lockedAtNum < cutoff then
|
|
2313
|
+
redis.call('HMSET', jk,
|
|
2314
|
+
'status', 'pending',
|
|
2315
|
+
'lockedAt', 'null',
|
|
2316
|
+
'lockedBy', 'null',
|
|
2317
|
+
'updatedAt', nowMs
|
|
2318
|
+
)
|
|
2319
|
+
redis.call('SREM', prefix .. 'status:processing', jobId)
|
|
2320
|
+
redis.call('SADD', prefix .. 'status:pending', jobId)
|
|
2321
|
+
|
|
2322
|
+
-- Re-add to queue
|
|
2323
|
+
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
|
|
2324
|
+
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
|
|
2325
|
+
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
|
|
2326
|
+
redis.call('ZADD', prefix .. 'queue', score, jobId)
|
|
2327
|
+
|
|
2328
|
+
count = count + 1
|
|
2329
|
+
end
|
|
2330
|
+
end
|
|
2331
|
+
end
|
|
2332
|
+
end
|
|
2333
|
+
|
|
2334
|
+
return count
|
|
2335
|
+
`;
|
|
2336
|
+
var CLEANUP_OLD_JOBS_SCRIPT = `
|
|
2337
|
+
local prefix = KEYS[1]
|
|
2338
|
+
local cutoffMs = tonumber(ARGV[1])
|
|
2339
|
+
|
|
2340
|
+
local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
|
|
2341
|
+
local count = 0
|
|
2342
|
+
|
|
2343
|
+
for _, jobId in ipairs(completed) do
|
|
2344
|
+
local jk = prefix .. 'job:' .. jobId
|
|
2345
|
+
local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
|
|
2346
|
+
if updatedAt and updatedAt < cutoffMs then
|
|
2347
|
+
-- Remove all indexes
|
|
2348
|
+
local jobType = redis.call('HGET', jk, 'jobType')
|
|
2349
|
+
local tagsJson = redis.call('HGET', jk, 'tags')
|
|
2350
|
+
local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')
|
|
2351
|
+
|
|
2352
|
+
redis.call('DEL', jk)
|
|
2353
|
+
redis.call('SREM', prefix .. 'status:completed', jobId)
|
|
2354
|
+
redis.call('ZREM', prefix .. 'all', jobId)
|
|
2355
|
+
if jobType then
|
|
2356
|
+
redis.call('SREM', prefix .. 'type:' .. jobType, jobId)
|
|
2357
|
+
end
|
|
2358
|
+
if tagsJson and tagsJson ~= 'null' then
|
|
2359
|
+
local ok, tags = pcall(cjson.decode, tagsJson)
|
|
2360
|
+
if ok and type(tags) == 'table' then
|
|
2361
|
+
for _, tag in ipairs(tags) do
|
|
2362
|
+
redis.call('SREM', prefix .. 'tag:' .. tag, jobId)
|
|
2363
|
+
end
|
|
2364
|
+
end
|
|
2365
|
+
redis.call('DEL', prefix .. 'job:' .. jobId .. ':tags')
|
|
2366
|
+
end
|
|
2367
|
+
if idempotencyKey and idempotencyKey ~= 'null' then
|
|
2368
|
+
redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
|
|
2369
|
+
end
|
|
2370
|
+
-- Delete events
|
|
2371
|
+
redis.call('DEL', prefix .. 'events:' .. jobId)
|
|
2372
|
+
|
|
2373
|
+
count = count + 1
|
|
2374
|
+
end
|
|
2375
|
+
end
|
|
2376
|
+
|
|
2377
|
+
return count
|
|
2378
|
+
`;
|
|
2379
|
+
|
|
2380
|
+
// src/backends/redis.ts
|
|
2381
|
+
// Converts a flat [field, value, field, value, ...] array (the shape of an
// HGETALL reply) into a plain { field: value } object.
function hashToObject(arr) {
  const obj = {};
  let i = 0;
  while (i < arr.length) {
    obj[arr[i]] = arr[i + 1];
    i += 2;
  }
  return obj;
}
|
|
2388
|
+
// Rebuilds a Job object from its Redis hash representation. Redis stores
// every field as a string; the literal "null" (or an absent/empty field)
// encodes a missing value, timestamps are epoch milliseconds, and
// payload/tags/errorHistory are JSON-encoded strings.
function deserializeJob(h) {
  const asValue = (v) => (v === void 0 || v === "null" || v === "" ? null : v);
  const asNumber = (v) => {
    const raw = asValue(v);
    return raw === null ? null : Number(raw);
  };
  const asDate = (v) => {
    const ms = asNumber(v);
    return ms === null ? null : new Date(ms);
  };
  // errorHistory: default to [] on absent/empty/unparseable values.
  let errorHistory = [];
  try {
    if (h.errorHistory && h.errorHistory !== "[]") {
      errorHistory = JSON.parse(h.errorHistory);
    }
  } catch {
  }
  // tags: undefined (not null) when absent or unparseable.
  let tags;
  try {
    if (h.tags && h.tags !== "null") {
      tags = JSON.parse(h.tags);
    }
  } catch {
  }
  // payload: fall back to the raw string when it is not valid JSON.
  let payload;
  try {
    payload = JSON.parse(h.payload);
  } catch {
    payload = h.payload;
  }
  // Booleans arrive as "true"/"1" or "false"/"0"; anything else is null.
  let forceKillOnTimeout = null;
  if (h.forceKillOnTimeout === "true" || h.forceKillOnTimeout === "1") {
    forceKillOnTimeout = true;
  } else if (h.forceKillOnTimeout === "false" || h.forceKillOnTimeout === "0") {
    forceKillOnTimeout = false;
  }
  return {
    id: Number(h.id),
    jobType: h.jobType,
    payload,
    status: h.status,
    createdAt: new Date(Number(h.createdAt)),
    updatedAt: new Date(Number(h.updatedAt)),
    lockedAt: asDate(h.lockedAt),
    lockedBy: asValue(h.lockedBy),
    attempts: Number(h.attempts),
    maxAttempts: Number(h.maxAttempts),
    nextAttemptAt: asDate(h.nextAttemptAt),
    priority: Number(h.priority),
    runAt: new Date(Number(h.runAt)),
    pendingReason: asValue(h.pendingReason),
    errorHistory,
    timeoutMs: asNumber(h.timeoutMs),
    forceKillOnTimeout,
    failureReason: asValue(h.failureReason),
    completedAt: asDate(h.completedAt),
    startedAt: asDate(h.startedAt),
    lastRetriedAt: asDate(h.lastRetriedAt),
    lastFailedAt: asDate(h.lastFailedAt),
    lastCancelledAt: asDate(h.lastCancelledAt),
    tags,
    idempotencyKey: asValue(h.idempotencyKey),
    progress: asNumber(h.progress)
  };
}
|
|
2449
|
+
var RedisBackend = class {
|
|
2450
|
+
  /**
   * Creates the backend and connects an ioredis client.
   * "ioredis" is loaded lazily at construction time so it can remain an
   * optional dependency; a clear install hint is thrown when it is missing.
   * Accepts either `url` (plus optional tls/db) or discrete
   * host/port/password/db fields. Key prefix defaults to "dq:".
   */
  constructor(redisConfig) {
    let IORedis;
    try {
      // Bundler-generated createRequire shim: resolves this module's own URL
      // both under Node (no `document`) and in browser-like environments.
      const _require = module$1.createRequire((typeof document === 'undefined' ? require('u' + 'rl').pathToFileURL(__filename).href : (_documentCurrentScript && _documentCurrentScript.tagName.toUpperCase() === 'SCRIPT' && _documentCurrentScript.src || new URL('index.cjs', document.baseURI).href)));
      IORedis = _require("ioredis");
    } catch {
      throw new Error(
        'Redis backend requires the "ioredis" package. Install it with: npm install ioredis'
      );
    }
    this.prefix = redisConfig.keyPrefix ?? "dq:";
    if (redisConfig.url) {
      // URL-based connection; tls/db are only passed when explicitly set.
      this.client = new IORedis(redisConfig.url, {
        ...redisConfig.tls ? { tls: redisConfig.tls } : {},
        ...redisConfig.db !== void 0 ? { db: redisConfig.db } : {}
      });
    } else {
      // Discrete-field connection with local defaults.
      this.client = new IORedis({
        host: redisConfig.host ?? "127.0.0.1",
        port: redisConfig.port ?? 6379,
        password: redisConfig.password,
        db: redisConfig.db ?? 0,
        ...redisConfig.tls ? { tls: redisConfig.tls } : {}
      });
    }
  }
|
|
2476
|
+
  /**
   * Expose the raw ioredis client for advanced usage.
   * @returns the underlying ioredis connection created in the constructor.
   */
  getClient() {
    return this.client;
  }
|
|
2480
|
+
  /** Current wall-clock time in epoch milliseconds — the single time source for this backend. */
  nowMs() {
    return Date.now();
  }
|
|
2483
|
+
// ── Events ──────────────────────────────────────────────────────────
|
|
2484
|
+
async recordJobEvent(jobId, eventType, metadata) {
|
|
2485
|
+
try {
|
|
2486
|
+
const eventId = await this.client.incr(`${this.prefix}event_id_seq`);
|
|
2487
|
+
const event = JSON.stringify({
|
|
2488
|
+
id: eventId,
|
|
2489
|
+
jobId,
|
|
2490
|
+
eventType,
|
|
2491
|
+
createdAt: this.nowMs(),
|
|
2492
|
+
metadata: metadata ?? null
|
|
2493
|
+
});
|
|
2494
|
+
await this.client.rpush(`${this.prefix}events:${jobId}`, event);
|
|
2495
|
+
} catch (error) {
|
|
2496
|
+
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
2497
|
+
}
|
|
2498
|
+
}
|
|
2499
|
+
async getJobEvents(jobId) {
|
|
2500
|
+
const raw = await this.client.lrange(
|
|
2501
|
+
`${this.prefix}events:${jobId}`,
|
|
2502
|
+
0,
|
|
2503
|
+
-1
|
|
2504
|
+
);
|
|
2505
|
+
return raw.map((r) => {
|
|
2506
|
+
const e = JSON.parse(r);
|
|
2507
|
+
return {
|
|
2508
|
+
...e,
|
|
2509
|
+
createdAt: new Date(e.createdAt)
|
|
2510
|
+
};
|
|
2511
|
+
});
|
|
2512
|
+
}
|
|
2513
|
+
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
2514
|
+
  /**
   * Enqueues a new job via ADD_JOB_SCRIPT and returns its numeric id.
   * runAt === null is sent as 0 ("run immediately"); optional arguments are
   * marshalled as the literal string "null" so the script can distinguish
   * "absent" from a real value. Records an "added" audit event.
   * NOTE(review): duplicate-idempotencyKey handling appears to live inside
   * ADD_JOB_SCRIPT (not visible here) — confirm against that script.
   */
  async addJob({
    jobType,
    payload,
    maxAttempts = 3,
    priority = 0,
    runAt = null,
    timeoutMs = void 0,
    forceKillOnTimeout = false,
    tags = void 0,
    idempotencyKey = void 0
  }) {
    const now = this.nowMs();
    const runAtMs = runAt ? runAt.getTime() : 0;
    const result = await this.client.eval(
      ADD_JOB_SCRIPT,
      1,
      this.prefix,
      jobType,
      JSON.stringify(payload),
      maxAttempts,
      priority,
      runAtMs.toString(),
      timeoutMs !== void 0 ? timeoutMs.toString() : "null",
      forceKillOnTimeout ? "true" : "false",
      tags ? JSON.stringify(tags) : "null",
      idempotencyKey ?? "null",
      now
    );
    const jobId = Number(result);
    log(
      `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
    );
    // Best-effort audit trail (recordJobEvent never throws).
    await this.recordJobEvent(jobId, "added" /* Added */, {
      jobType,
      payload,
      tags,
      idempotencyKey
    });
    return jobId;
  }
|
|
2554
|
+
async getJob(id) {
|
|
2555
|
+
const data = await this.client.hgetall(`${this.prefix}job:${id}`);
|
|
2556
|
+
if (!data || Object.keys(data).length === 0) {
|
|
2557
|
+
log(`Job ${id} not found`);
|
|
2558
|
+
return null;
|
|
2559
|
+
}
|
|
2560
|
+
log(`Found job ${id}`);
|
|
2561
|
+
return deserializeJob(data);
|
|
2562
|
+
}
|
|
2563
|
+
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
2564
|
+
const ids = await this.client.smembers(`${this.prefix}status:${status}`);
|
|
2565
|
+
if (ids.length === 0) return [];
|
|
2566
|
+
const jobs = await this.loadJobsByIds(ids);
|
|
2567
|
+
jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
|
|
2568
|
+
return jobs.slice(offset, offset + limit);
|
|
2569
|
+
}
|
|
2570
|
+
async getAllJobs(limit = 100, offset = 0) {
|
|
2571
|
+
const ids = await this.client.zrevrange(
|
|
2572
|
+
`${this.prefix}all`,
|
|
2573
|
+
offset,
|
|
2574
|
+
offset + limit - 1
|
|
2575
|
+
);
|
|
2576
|
+
if (ids.length === 0) return [];
|
|
2577
|
+
return this.loadJobsByIds(ids);
|
|
2578
|
+
}
|
|
2579
|
+
  /**
   * Lists jobs matching optional filters, newest first.
   * The candidate set comes from the type index (when filters.jobType is
   * given) or the global 'all' sorted set. Tag filtering runs against Redis
   * tag sets; priority and runAt filters are applied in memory after the
   * jobs are loaded, then the result is sorted and paginated.
   */
  async getJobs(filters, limit = 100, offset = 0) {
    let candidateIds;
    if (filters?.jobType) {
      candidateIds = await this.client.smembers(
        `${this.prefix}type:${filters.jobType}`
      );
    } else {
      candidateIds = await this.client.zrevrange(`${this.prefix}all`, 0, -1);
    }
    if (candidateIds.length === 0) return [];
    if (filters?.tags && filters.tags.values.length > 0) {
      // mode "all" (default) = job must carry every tag; see filterByTags.
      candidateIds = await this.filterByTags(
        candidateIds,
        filters.tags.values,
        filters.tags.mode || "all"
      );
    }
    let jobs = await this.loadJobsByIds(candidateIds);
    if (filters) {
      if (filters.priority !== void 0) {
        jobs = jobs.filter((j) => j.priority === filters.priority);
      }
      if (filters.runAt) {
        jobs = this.filterByRunAt(jobs, filters.runAt);
      }
    }
    // Newest first, then in-memory pagination.
    jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
    return jobs.slice(offset, offset + limit);
  }
|
|
2608
|
+
async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
|
|
2609
|
+
const allIds = await this.client.zrevrange(`${this.prefix}all`, 0, -1);
|
|
2610
|
+
if (allIds.length === 0) return [];
|
|
2611
|
+
const filtered = await this.filterByTags(allIds, tags, mode);
|
|
2612
|
+
if (filtered.length === 0) return [];
|
|
2613
|
+
const jobs = await this.loadJobsByIds(filtered);
|
|
2614
|
+
jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
|
|
2615
|
+
return jobs.slice(offset, offset + limit);
|
|
2616
|
+
}
|
|
2617
|
+
// ── Processing lifecycle ──────────────────────────────────────────────
|
|
2618
|
+
  /**
   * Atomically claims up to batchSize due jobs for this worker, optionally
   * restricted to one job type or an array of types. The Lua script returns
   * all claimed jobs' hash fields in one flat array separated by the
   * '__JOB_SEP__' sentinel; this method re-assembles and deserializes them,
   * then records a 'processing' event per claimed job.
   */
  async getNextBatch(workerId, batchSize = 10, jobType) {
    const now = this.nowMs();
    // "null" = no filter; arrays are JSON-encoded, a single type is passed raw.
    const jobTypeFilter = jobType === void 0 ? "null" : Array.isArray(jobType) ? JSON.stringify(jobType) : jobType;
    const result = await this.client.eval(
      GET_NEXT_BATCH_SCRIPT,
      1,
      this.prefix,
      workerId,
      batchSize,
      now,
      jobTypeFilter
    );
    if (!result || result.length === 0) {
      log("Found 0 jobs to process");
      return [];
    }
    // Split the flat reply on the sentinel; each segment is one job's
    // [field, value, ...] pairs.
    const jobs = [];
    let current = [];
    for (const item of result) {
      if (item === "__JOB_SEP__") {
        if (current.length > 0) {
          const h = hashToObject(current);
          jobs.push(deserializeJob(h));
        }
        current = [];
      } else {
        current.push(item);
      }
    }
    log(`Found ${jobs.length} jobs to process`);
    for (const job of jobs) {
      await this.recordJobEvent(job.id, "processing" /* Processing */);
    }
    return jobs;
  }
|
|
2653
|
+
async completeJob(jobId) {
|
|
2654
|
+
const now = this.nowMs();
|
|
2655
|
+
await this.client.eval(COMPLETE_JOB_SCRIPT, 1, this.prefix, jobId, now);
|
|
2656
|
+
await this.recordJobEvent(jobId, "completed" /* Completed */);
|
|
2657
|
+
log(`Completed job ${jobId}`);
|
|
2658
|
+
}
|
|
2659
|
+
async failJob(jobId, error, failureReason) {
|
|
2660
|
+
const now = this.nowMs();
|
|
2661
|
+
const errorJson = JSON.stringify([
|
|
2662
|
+
{
|
|
2663
|
+
message: error.message || String(error),
|
|
2664
|
+
timestamp: new Date(now).toISOString()
|
|
2665
|
+
}
|
|
2666
|
+
]);
|
|
2667
|
+
await this.client.eval(
|
|
2668
|
+
FAIL_JOB_SCRIPT,
|
|
2669
|
+
1,
|
|
2670
|
+
this.prefix,
|
|
2671
|
+
jobId,
|
|
2672
|
+
errorJson,
|
|
2673
|
+
failureReason ?? "null",
|
|
2674
|
+
now
|
|
2675
|
+
);
|
|
2676
|
+
await this.recordJobEvent(jobId, "failed" /* Failed */, {
|
|
2677
|
+
message: error.message || String(error),
|
|
2678
|
+
failureReason
|
|
2679
|
+
});
|
|
2680
|
+
log(`Failed job ${jobId}`);
|
|
2681
|
+
}
|
|
2682
|
+
async prolongJob(jobId) {
|
|
2683
|
+
try {
|
|
2684
|
+
const now = this.nowMs();
|
|
2685
|
+
await this.client.eval(PROLONG_JOB_SCRIPT, 1, this.prefix, jobId, now);
|
|
2686
|
+
await this.recordJobEvent(jobId, "prolonged" /* Prolonged */);
|
|
2687
|
+
log(`Prolonged job ${jobId}`);
|
|
2688
|
+
} catch (error) {
|
|
2689
|
+
log(`Error prolonging job ${jobId}: ${error}`);
|
|
2690
|
+
}
|
|
2691
|
+
}
|
|
2692
|
+
// ── Progress ──────────────────────────────────────────────────────────
|
|
2693
|
+
async updateProgress(jobId, progress) {
|
|
2694
|
+
try {
|
|
2695
|
+
const now = this.nowMs();
|
|
2696
|
+
await this.client.hset(
|
|
2697
|
+
`${this.prefix}job:${jobId}`,
|
|
2698
|
+
"progress",
|
|
2699
|
+
progress.toString(),
|
|
2700
|
+
"updatedAt",
|
|
2701
|
+
now.toString()
|
|
2702
|
+
);
|
|
2703
|
+
log(`Updated progress for job ${jobId}: ${progress}%`);
|
|
2704
|
+
} catch (error) {
|
|
2705
|
+
log(`Error updating progress for job ${jobId}: ${error}`);
|
|
2706
|
+
}
|
|
2707
|
+
}
|
|
2708
|
+
// ── Job management ────────────────────────────────────────────────────
|
|
2709
|
+
async retryJob(jobId) {
|
|
2710
|
+
const now = this.nowMs();
|
|
2711
|
+
await this.client.eval(RETRY_JOB_SCRIPT, 1, this.prefix, jobId, now);
|
|
2712
|
+
await this.recordJobEvent(jobId, "retried" /* Retried */);
|
|
2713
|
+
log(`Retried job ${jobId}`);
|
|
2714
|
+
}
|
|
2715
|
+
async cancelJob(jobId) {
|
|
2716
|
+
const now = this.nowMs();
|
|
2717
|
+
await this.client.eval(CANCEL_JOB_SCRIPT, 1, this.prefix, jobId, now);
|
|
2718
|
+
await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
|
|
2719
|
+
log(`Cancelled job ${jobId}`);
|
|
2720
|
+
}
|
|
2721
|
+
  /**
   * Cancels every pending job, optionally narrowed by filters. Each id goes
   * through CANCEL_JOB_SCRIPT, which returns 1 only when the job was still
   * pending — so the returned count excludes jobs a worker claimed first.
   * Ids are cancelled sequentially (deliberate: keeps the count exact).
   */
  async cancelAllUpcomingJobs(filters) {
    let ids = await this.client.smembers(`${this.prefix}status:pending`);
    if (ids.length === 0) return 0;
    if (filters) {
      ids = await this.applyFilters(ids, filters);
    }
    const now = this.nowMs();
    let count = 0;
    for (const id of ids) {
      const result = await this.client.eval(
        CANCEL_JOB_SCRIPT,
        1,
        this.prefix,
        id,
        now
      );
      if (Number(result) === 1) count++;
    }
    log(`Cancelled ${count} jobs`);
    return count;
  }
|
|
2742
|
+
async editJob(jobId, updates) {
|
|
2743
|
+
const jk = `${this.prefix}job:${jobId}`;
|
|
2744
|
+
const status = await this.client.hget(jk, "status");
|
|
2745
|
+
if (status !== "pending") {
|
|
2746
|
+
log(`Job ${jobId} is not pending (status: ${status}), skipping edit`);
|
|
2747
|
+
return;
|
|
2748
|
+
}
|
|
2749
|
+
const now = this.nowMs();
|
|
2750
|
+
const fields = [];
|
|
2751
|
+
const metadata = {};
|
|
2752
|
+
if (updates.payload !== void 0) {
|
|
2753
|
+
fields.push("payload", JSON.stringify(updates.payload));
|
|
2754
|
+
metadata.payload = updates.payload;
|
|
2755
|
+
}
|
|
2756
|
+
if (updates.maxAttempts !== void 0) {
|
|
2757
|
+
fields.push("maxAttempts", updates.maxAttempts.toString());
|
|
2758
|
+
metadata.maxAttempts = updates.maxAttempts;
|
|
2759
|
+
}
|
|
2760
|
+
if (updates.priority !== void 0) {
|
|
2761
|
+
fields.push("priority", updates.priority.toString());
|
|
2762
|
+
metadata.priority = updates.priority;
|
|
2763
|
+
const createdAt = await this.client.hget(jk, "createdAt");
|
|
2764
|
+
const score = updates.priority * 1e15 + (1e15 - Number(createdAt));
|
|
2765
|
+
const inQueue = await this.client.zscore(
|
|
2766
|
+
`${this.prefix}queue`,
|
|
2767
|
+
jobId.toString()
|
|
2768
|
+
);
|
|
2769
|
+
if (inQueue !== null) {
|
|
2770
|
+
await this.client.zadd(`${this.prefix}queue`, score, jobId.toString());
|
|
2771
|
+
}
|
|
2772
|
+
}
|
|
2773
|
+
if (updates.runAt !== void 0) {
|
|
2774
|
+
if (updates.runAt === null) {
|
|
2775
|
+
fields.push("runAt", now.toString());
|
|
2776
|
+
} else {
|
|
2777
|
+
fields.push("runAt", updates.runAt.getTime().toString());
|
|
2778
|
+
}
|
|
2779
|
+
metadata.runAt = updates.runAt;
|
|
2780
|
+
}
|
|
2781
|
+
if (updates.timeoutMs !== void 0) {
|
|
2782
|
+
fields.push(
|
|
2783
|
+
"timeoutMs",
|
|
2784
|
+
updates.timeoutMs !== null ? updates.timeoutMs.toString() : "null"
|
|
2785
|
+
);
|
|
2786
|
+
metadata.timeoutMs = updates.timeoutMs;
|
|
2787
|
+
}
|
|
2788
|
+
if (updates.tags !== void 0) {
|
|
2789
|
+
const oldTagsJson = await this.client.hget(jk, "tags");
|
|
2790
|
+
if (oldTagsJson && oldTagsJson !== "null") {
|
|
2791
|
+
try {
|
|
2792
|
+
const oldTags = JSON.parse(oldTagsJson);
|
|
2793
|
+
for (const tag of oldTags) {
|
|
2794
|
+
await this.client.srem(
|
|
2795
|
+
`${this.prefix}tag:${tag}`,
|
|
2796
|
+
jobId.toString()
|
|
2797
|
+
);
|
|
2798
|
+
}
|
|
2799
|
+
} catch {
|
|
2800
|
+
}
|
|
2801
|
+
}
|
|
2802
|
+
await this.client.del(`${this.prefix}job:${jobId}:tags`);
|
|
2803
|
+
if (updates.tags !== null) {
|
|
2804
|
+
for (const tag of updates.tags) {
|
|
2805
|
+
await this.client.sadd(`${this.prefix}tag:${tag}`, jobId.toString());
|
|
2806
|
+
await this.client.sadd(`${this.prefix}job:${jobId}:tags`, tag);
|
|
2807
|
+
}
|
|
2808
|
+
fields.push("tags", JSON.stringify(updates.tags));
|
|
2809
|
+
} else {
|
|
2810
|
+
fields.push("tags", "null");
|
|
2811
|
+
}
|
|
2812
|
+
metadata.tags = updates.tags;
|
|
2813
|
+
}
|
|
2814
|
+
if (fields.length === 0) {
|
|
2815
|
+
log(`No fields to update for job ${jobId}`);
|
|
2816
|
+
return;
|
|
2817
|
+
}
|
|
2818
|
+
fields.push("updatedAt", now.toString());
|
|
2819
|
+
await this.client.hmset(jk, ...fields);
|
|
2820
|
+
await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
|
|
2821
|
+
log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
|
|
2822
|
+
}
|
|
2823
|
+
async editAllPendingJobs(filters, updates) {
|
|
2824
|
+
let ids = await this.client.smembers(`${this.prefix}status:pending`);
|
|
2825
|
+
if (ids.length === 0) return 0;
|
|
2826
|
+
if (filters) {
|
|
2827
|
+
ids = await this.applyFilters(ids, filters);
|
|
2828
|
+
}
|
|
2829
|
+
let count = 0;
|
|
2830
|
+
for (const id of ids) {
|
|
2831
|
+
await this.editJob(Number(id), updates);
|
|
2832
|
+
count++;
|
|
2833
|
+
}
|
|
2834
|
+
log(`Edited ${count} pending jobs`);
|
|
2835
|
+
return count;
|
|
2836
|
+
}
|
|
2837
|
+
async cleanupOldJobs(daysToKeep = 30) {
|
|
2838
|
+
const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
|
|
2839
|
+
const result = await this.client.eval(
|
|
2840
|
+
CLEANUP_OLD_JOBS_SCRIPT,
|
|
2841
|
+
1,
|
|
2842
|
+
this.prefix,
|
|
2843
|
+
cutoffMs
|
|
2844
|
+
);
|
|
2845
|
+
log(`Deleted ${result} old jobs`);
|
|
2846
|
+
return Number(result);
|
|
2847
|
+
}
|
|
2848
|
+
// Intentional no-op for the Redis backend: as the log message states, job
// events are cleaned up together with their jobs (see cleanupOldJobs).
// Kept so the backend interface stays uniform; `daysToKeep` is accepted
// but unused, and the method always reports 0 deletions.
async cleanupOldJobEvents(daysToKeep = 30) {
  log(
    `cleanupOldJobEvents is a no-op for Redis backend (events are cleaned up with their jobs)`
  );
  return 0;
}
|
|
2854
|
+
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
2855
|
+
const maxAgeMs = maxProcessingTimeMinutes * 60 * 1e3;
|
|
2856
|
+
const now = this.nowMs();
|
|
2857
|
+
const result = await this.client.eval(
|
|
2858
|
+
RECLAIM_STUCK_JOBS_SCRIPT,
|
|
2859
|
+
1,
|
|
2860
|
+
this.prefix,
|
|
2861
|
+
maxAgeMs,
|
|
2862
|
+
now
|
|
2863
|
+
);
|
|
2864
|
+
log(`Reclaimed ${result} stuck jobs`);
|
|
2865
|
+
return Number(result);
|
|
2866
|
+
}
|
|
2867
|
+
// ── Internal helpers ──────────────────────────────────────────────────
|
|
2868
|
+
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
2869
|
+
let ids = await this.client.smembers(`${this.prefix}status:pending`);
|
|
2870
|
+
if (ids.length === 0) return;
|
|
2871
|
+
if (jobType) {
|
|
2872
|
+
const types = Array.isArray(jobType) ? jobType : [jobType];
|
|
2873
|
+
const typeSet = /* @__PURE__ */ new Set();
|
|
2874
|
+
for (const t of types) {
|
|
2875
|
+
const typeIds = await this.client.smembers(`${this.prefix}type:${t}`);
|
|
2876
|
+
for (const id of typeIds) typeSet.add(id);
|
|
2877
|
+
}
|
|
2878
|
+
ids = ids.filter((id) => typeSet.has(id));
|
|
2879
|
+
}
|
|
2880
|
+
for (const id of ids) {
|
|
2881
|
+
await this.client.hset(
|
|
2882
|
+
`${this.prefix}job:${id}`,
|
|
2883
|
+
"pendingReason",
|
|
2884
|
+
reason
|
|
2885
|
+
);
|
|
2886
|
+
}
|
|
2887
|
+
}
|
|
2888
|
+
// ── Private helpers ───────────────────────────────────────────────────
|
|
2889
|
+
async loadJobsByIds(ids) {
|
|
2890
|
+
const pipeline = this.client.pipeline();
|
|
2891
|
+
for (const id of ids) {
|
|
2892
|
+
pipeline.hgetall(`${this.prefix}job:${id}`);
|
|
2893
|
+
}
|
|
2894
|
+
const results = await pipeline.exec();
|
|
2895
|
+
const jobs = [];
|
|
2896
|
+
if (results) {
|
|
2897
|
+
for (const [err, data] of results) {
|
|
2898
|
+
if (!err && data && typeof data === "object" && Object.keys(data).length > 0) {
|
|
2899
|
+
jobs.push(
|
|
2900
|
+
deserializeJob(data)
|
|
2901
|
+
);
|
|
2902
|
+
}
|
|
2903
|
+
}
|
|
2904
|
+
}
|
|
2905
|
+
return jobs;
|
|
2906
|
+
}
|
|
2907
|
+
// Narrow a list of candidate job ids by tag membership.
// Modes:
//   "exact" - the job's own tag set equals `tags` exactly (same size, same members)
//   "all"   - the job carries every tag in `tags` (set intersection)
//   "any"   - the job carries at least one tag in `tags` (set union)
//   "none"  - the job carries none of the tags in `tags`
// Any other mode value falls through to "all" via the tail recursion.
// Tag membership is read from the per-tag sets (`tag:<tag>`) except in
// "exact" mode, which reads each job's own set (`job:<id>:tags`).
async filterByTags(candidateIds, tags, mode) {
  // Ids are normalized to strings so Set lookups match smembers output.
  const candidateSet = new Set(candidateIds.map(String));
  if (mode === "exact") {
    const tagSet = new Set(tags);
    const result = [];
    for (const id of candidateIds) {
      const jobTags = await this.client.smembers(
        `${this.prefix}job:${id}:tags`
      );
      // Equal cardinality + full containment <=> set equality.
      if (jobTags.length === tagSet.size && jobTags.every((t) => tagSet.has(t))) {
        result.push(id);
      }
    }
    return result;
  }
  if (mode === "all") {
    // Start from all candidates and intersect with each tag's member set.
    let intersection = new Set(candidateIds.map(String));
    for (const tag of tags) {
      const tagMembers = await this.client.smembers(
        `${this.prefix}tag:${tag}`
      );
      const tagSet = new Set(tagMembers.map(String));
      intersection = new Set(
        [...intersection].filter((id) => tagSet.has(id))
      );
    }
    return [...intersection].filter((id) => candidateSet.has(id));
  }
  if (mode === "any") {
    // Union every tag's member set, then keep only candidates.
    const union = /* @__PURE__ */ new Set();
    for (const tag of tags) {
      const tagMembers = await this.client.smembers(
        `${this.prefix}tag:${tag}`
      );
      for (const id of tagMembers) union.add(String(id));
    }
    return [...union].filter((id) => candidateSet.has(id));
  }
  if (mode === "none") {
    // Build the exclusion union, then drop candidates that appear in it.
    const exclude = /* @__PURE__ */ new Set();
    for (const tag of tags) {
      const tagMembers = await this.client.smembers(
        `${this.prefix}tag:${tag}`
      );
      for (const id of tagMembers) exclude.add(String(id));
    }
    return candidateIds.filter((id) => !exclude.has(String(id)));
  }
  // Unknown mode: default to "all" semantics.
  return this.filterByTags(candidateIds, tags, "all");
}
|
|
2957
|
+
filterByRunAt(jobs, runAt) {
|
|
2958
|
+
if (runAt instanceof Date) {
|
|
2959
|
+
return jobs.filter((j) => j.runAt.getTime() === runAt.getTime());
|
|
2960
|
+
}
|
|
2961
|
+
return jobs.filter((j) => {
|
|
2962
|
+
const t = j.runAt.getTime();
|
|
2963
|
+
if (runAt.gt && !(t > runAt.gt.getTime())) return false;
|
|
2964
|
+
if (runAt.gte && !(t >= runAt.gte.getTime())) return false;
|
|
2965
|
+
if (runAt.lt && !(t < runAt.lt.getTime())) return false;
|
|
2966
|
+
if (runAt.lte && !(t <= runAt.lte.getTime())) return false;
|
|
2967
|
+
if (runAt.eq && t !== runAt.eq.getTime()) return false;
|
|
2968
|
+
return true;
|
|
2969
|
+
});
|
|
2970
|
+
}
|
|
2971
|
+
// Narrow a list of job ids by the supplied filters, applying each filter
// stage in sequence: job type (set membership), tags (delegated to
// filterByTags), and finally priority / runAt, which require loading the
// full job hashes. Returns the surviving ids as strings.
async applyFilters(ids, filters) {
  let result = ids;
  if (filters.jobType) {
    // Keep only ids that belong to the requested type's member set.
    const typeIds = new Set(
      await this.client.smembers(`${this.prefix}type:${filters.jobType}`)
    );
    result = result.filter((id) => typeIds.has(id));
  }
  if (filters.tags && filters.tags.values.length > 0) {
    // Tag matching mode defaults to "all" (job must carry every tag).
    result = await this.filterByTags(
      result,
      filters.tags.values,
      filters.tags.mode || "all"
    );
  }
  if (filters.priority !== void 0 || filters.runAt) {
    // These filters need the job bodies, so load them in one batch and
    // map back to id strings afterwards.
    const jobs = await this.loadJobsByIds(result);
    let filtered = jobs;
    if (filters.priority !== void 0) {
      filtered = filtered.filter((j) => j.priority === filters.priority);
    }
    if (filters.runAt) {
      filtered = this.filterByRunAt(filtered, filters.runAt);
    }
    result = filtered.map((j) => j.id.toString());
  }
  return result;
}
|
|
2999
|
+
};
|
|
3000
|
+
|
|
3001
|
+
// src/handler-validation.ts
|
|
3002
|
+
/**
 * Heuristically check whether a job handler can be serialized (via
 * Function.prototype.toString) for execution in a worker thread when
 * forceKillOnTimeout is enabled.
 * @param {Function} handler - the job handler to inspect
 * @param {string} [jobType] - optional job type used in messages
 * @returns {{isSerializable: boolean, error?: string}} result; `error`
 *   may also carry a non-fatal warning when isSerializable is true
 */
function validateHandlerSerializable2(handler, jobType) {
  try {
    const source = handler.toString();
    const typeLabel = jobType ? `job type "${jobType}"` : "handler";
    // 'this.' usage outside a parenthesized (parameter-like) position
    // will not survive serialization into a worker.
    const usesThisContext = source.includes("this.") && !source.match(/\([^)]*this[^)]*\)/);
    if (usesThisContext) {
      return {
        isSerializable: false,
        error: `Handler for ${typeLabel} uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
      };
    }
    // Built-ins and bound functions stringify to "[native code]".
    if (source.includes("[native code]")) {
      return {
        isSerializable: false,
        error: `Handler for ${typeLabel} contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
      };
    }
    // Round-trip the source through the Function constructor to verify
    // it parses as a standalone expression (deliberate use of
    // new Function: we only parse, we do not execute here).
    try {
      new Function("return " + source);
    } catch (parseError) {
      return {
        isSerializable: false,
        error: `Handler for ${typeLabel} cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
      };
    }
    // Heuristic: a const/let binding immediately followed by an async
    // arrow suggests the handler closes over external state.
    const constClosure = /const\s+\w+\s*=\s*[^;]+;\s*async\s*\(/.test(source);
    const letClosure = /let\s+\w+\s*=\s*[^;]+;\s*async\s*\(/.test(source);
    if (constClosure || letClosure) {
      return {
        isSerializable: true,
        // Still serializable, but might have issues
        error: `Warning: Handler for ${typeLabel} may have closures over external variables. Test thoroughly with forceKillOnTimeout enabled. If the handler fails to execute in a worker thread, ensure all dependencies are imported within the handler function.`
      };
    }
    return { isSerializable: true };
  } catch (error) {
    return {
      isSerializable: false,
      error: `Failed to validate handler serialization${jobType ? ` for job type "${jobType}"` : ""}: ${error instanceof Error ? error.message : String(error)}`
    };
  }
}
|
|
3042
|
+
/**
 * Stronger serialization check: after the static validation passes,
 * reconstruct the handler from its source and actually invoke it with an
 * empty payload, racing it against a 100ms timeout. A timeout counts as
 * success (the handler started executing); a synchronous throw or an
 * immediate rejection counts as failure to serialize/execute.
 * @param {Function} handler - the job handler to test
 * @param {string} [jobType] - optional job type used in messages
 * @returns {Promise<{isSerializable: boolean, error?: string}>}
 */
async function testHandlerSerialization(handler, jobType) {
  const basicValidation = validateHandlerSerializable2(handler, jobType);
  if (!basicValidation.isSerializable) {
    return basicValidation;
  }
  try {
    const handlerString = handler.toString();
    // Deliberate use of new Function: rebuild the handler from source the
    // same way a worker thread would.
    const handlerFn = new Function("return " + handlerString)();
    const testPromise = Promise.resolve(
      handlerFn({}, new AbortController().signal)
    );
    let timer;
    const timeoutPromise = new Promise(
      (_, reject) => {
        timer = setTimeout(() => reject(new Error("Handler test timeout")), 100);
      }
    );
    // Whichever promise loses the race would otherwise reject with no
    // listener and trigger an unhandled-rejection warning; park a no-op
    // handler on both.
    testPromise.catch(() => {});
    timeoutPromise.catch(() => {});
    try {
      await Promise.race([testPromise, timeoutPromise]);
    } catch (execError) {
      if (execError instanceof Error && execError.message === "Handler test timeout") {
        return { isSerializable: true };
      }
    } finally {
      // Don't leave the 100ms timer keeping the event loop alive.
      clearTimeout(timer);
    }
    return { isSerializable: true };
  } catch (error) {
    return {
      isSerializable: false,
      error: `Handler failed serialization test: ${error instanceof Error ? error.message : String(error)}`
    };
  }
}
|
|
3069
|
+
|
|
898
3070
|
// src/index.ts
|
|
899
3071
|
// src/index.ts
/**
 * Build a job-queue API bound to the configured backend.
 * `config.backend` selects "postgres" (default) or "redis"; wait/token
 * helpers and getPool() are PostgreSQL-only, getRedisClient() Redis-only.
 */
var initJobQueue = (config) => {
  const backendType = config.backend ?? "postgres";
  const verbose = config.verbose ?? false;
  setLogContext(verbose);
  let backend;
  let pool;
  if (backendType === "postgres") {
    const pgConfig = config;
    pool = createPool(pgConfig.databaseConfig);
    backend = new PostgresBackend(pool);
  } else if (backendType === "redis") {
    backend = new RedisBackend(config.redisConfig);
  } else {
    throw new Error(`Unknown backend: ${backendType}`);
  }
  // Wait/Token helpers need direct pool access, which only the
  // PostgreSQL backend provides.
  const requirePool = () => {
    if (!pool) {
      throw new Error(
        'Wait/Token features require the PostgreSQL backend. Configure with backend: "postgres" to use these features.'
      );
    }
    return pool;
  };
  return {
    // Job queue operations
    addJob: withLogContext((job) => backend.addJob(job), verbose),
    getJob: withLogContext((id) => backend.getJob(id), verbose),
    getJobsByStatus: withLogContext(
      (status, limit, offset) => backend.getJobsByStatus(status, limit, offset),
      verbose
    ),
    getAllJobs: withLogContext((limit, offset) => backend.getAllJobs(limit, offset), verbose),
    getJobs: withLogContext(
      (filters, limit, offset) => backend.getJobs(filters, limit, offset),
      verbose
    ),
    retryJob: (jobId) => backend.retryJob(jobId),
    cleanupOldJobs: (daysToKeep) => backend.cleanupOldJobs(daysToKeep),
    cleanupOldJobEvents: (daysToKeep) => backend.cleanupOldJobEvents(daysToKeep),
    cancelJob: withLogContext((jobId) => backend.cancelJob(jobId), verbose),
    editJob: withLogContext((jobId, updates) => backend.editJob(jobId, updates), verbose),
    editAllPendingJobs: withLogContext(
      (filters, updates) => backend.editAllPendingJobs(filters, updates),
      verbose
    ),
    cancelAllUpcomingJobs: withLogContext(
      (filters) => backend.cancelAllUpcomingJobs(filters),
      verbose
    ),
    reclaimStuckJobs: withLogContext(
      (maxProcessingTimeMinutes) => backend.reclaimStuckJobs(maxProcessingTimeMinutes),
      verbose
    ),
    getJobsByTags: withLogContext(
      (tags, mode = "all", limit, offset) => backend.getJobsByTags(tags, mode, limit, offset),
      verbose
    ),
    // Job processing
    createProcessor: (handlers, options) => createProcessor(backend, handlers, options),
    // Job events
    getJobEvents: withLogContext((jobId) => backend.getJobEvents(jobId), verbose),
    // Wait / Token support (PostgreSQL-only for now)
    createToken: withLogContext(
      (options) => createWaitpoint(requirePool(), null, options),
      verbose
    ),
    completeToken: withLogContext(
      (tokenId, data) => completeWaitpoint(requirePool(), tokenId, data),
      verbose
    ),
    getToken: withLogContext((tokenId) => getWaitpoint(requirePool(), tokenId), verbose),
    expireTimedOutTokens: withLogContext(
      () => expireTimedOutWaitpoints(requirePool()),
      verbose
    ),
    // Advanced access
    getPool: () => {
      if (backendType !== "postgres") {
        throw new Error(
          "getPool() is only available with the PostgreSQL backend."
        );
      }
      return backend.getPool();
    },
    getRedisClient: () => {
      if (backendType !== "redis") {
        throw new Error(
          "getRedisClient() is only available with the Redis backend."
        );
      }
      return backend.getClient();
    }
  };
};
|
|
954
3189
|
var withLogContext = (fn, verbose) => (...args) => {
|
|
@@ -958,6 +3193,10 @@ var withLogContext = (fn, verbose) => (...args) => {
|
|
|
958
3193
|
|
|
959
3194
|
exports.FailureReason = FailureReason;
exports.JobEventType = JobEventType;
exports.PostgresBackend = PostgresBackend;
exports.WaitSignal = WaitSignal;
exports.initJobQueue = initJobQueue;
exports.testHandlerSerialization = testHandlerSerialization;
// Internal name carries the `2` suffix from bundling; exported under the
// public name.
exports.validateHandlerSerializable = validateHandlerSerializable2;
//# sourceMappingURL=index.cjs.map
|