@nicnocquee/dataqueue 1.25.0 → 1.26.0
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/dist/cli.cjs +0 -88
- package/dist/cli.cjs.map +0 -1
- package/dist/cli.d.cts +0 -12
- package/dist/cli.d.ts +0 -12
- package/dist/cli.js +0 -81
- package/dist/cli.js.map +0 -1
- package/dist/index.cjs +0 -3202
- package/dist/index.cjs.map +0 -1
- package/dist/index.d.cts +0 -873
- package/dist/index.d.ts +0 -873
- package/dist/index.js +0 -3189
- package/dist/index.js.map +0 -1
package/dist/index.js
DELETED
|
@@ -1,3189 +0,0 @@
|
|
|
1
|
-
import { AsyncLocalStorage } from 'async_hooks';
|
|
2
|
-
import { randomUUID } from 'crypto';
|
|
3
|
-
import { Worker } from 'worker_threads';
|
|
4
|
-
import { Pool } from 'pg';
|
|
5
|
-
import { parse } from 'pg-connection-string';
|
|
6
|
-
import fs from 'fs';
|
|
7
|
-
import { createRequire } from 'module';
|
|
8
|
-
|
|
9
|
-
// src/types.ts
|
|
10
|
-
var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
|
|
11
|
-
JobEventType2["Added"] = "added";
|
|
12
|
-
JobEventType2["Processing"] = "processing";
|
|
13
|
-
JobEventType2["Completed"] = "completed";
|
|
14
|
-
JobEventType2["Failed"] = "failed";
|
|
15
|
-
JobEventType2["Cancelled"] = "cancelled";
|
|
16
|
-
JobEventType2["Retried"] = "retried";
|
|
17
|
-
JobEventType2["Edited"] = "edited";
|
|
18
|
-
JobEventType2["Prolonged"] = "prolonged";
|
|
19
|
-
JobEventType2["Waiting"] = "waiting";
|
|
20
|
-
return JobEventType2;
|
|
21
|
-
})(JobEventType || {});
|
|
22
|
-
var FailureReason = /* @__PURE__ */ ((FailureReason5) => {
|
|
23
|
-
FailureReason5["Timeout"] = "timeout";
|
|
24
|
-
FailureReason5["HandlerError"] = "handler_error";
|
|
25
|
-
FailureReason5["NoHandler"] = "no_handler";
|
|
26
|
-
return FailureReason5;
|
|
27
|
-
})(FailureReason || {});
|
|
28
|
-
var WaitSignal = class extends Error {
|
|
29
|
-
constructor(type, waitUntil, tokenId, stepData) {
|
|
30
|
-
super("WaitSignal");
|
|
31
|
-
this.type = type;
|
|
32
|
-
this.waitUntil = waitUntil;
|
|
33
|
-
this.tokenId = tokenId;
|
|
34
|
-
this.stepData = stepData;
|
|
35
|
-
this.isWaitSignal = true;
|
|
36
|
-
this.name = "WaitSignal";
|
|
37
|
-
}
|
|
38
|
-
};
|
|
39
|
-
var logStorage = new AsyncLocalStorage();
|
|
40
|
-
var setLogContext = (verbose) => {
|
|
41
|
-
logStorage.enterWith({ verbose });
|
|
42
|
-
};
|
|
43
|
-
var getLogContext = () => {
|
|
44
|
-
return logStorage.getStore();
|
|
45
|
-
};
|
|
46
|
-
var log = (message) => {
|
|
47
|
-
const context = getLogContext();
|
|
48
|
-
if (context?.verbose) {
|
|
49
|
-
console.log(message);
|
|
50
|
-
}
|
|
51
|
-
};
|
|
52
|
-
|
|
53
|
-
// src/backends/postgres.ts
|
|
54
|
-
var PostgresBackend = class {
|
|
55
|
-
constructor(pool) {
|
|
56
|
-
this.pool = pool;
|
|
57
|
-
}
|
|
58
|
-
/** Expose the raw pool for advanced usage. */
|
|
59
|
-
getPool() {
|
|
60
|
-
return this.pool;
|
|
61
|
-
}
|
|
62
|
-
// ── Events ──────────────────────────────────────────────────────────
|
|
63
|
-
async recordJobEvent(jobId, eventType, metadata) {
|
|
64
|
-
const client = await this.pool.connect();
|
|
65
|
-
try {
|
|
66
|
-
await client.query(
|
|
67
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
|
|
68
|
-
[jobId, eventType, metadata ? JSON.stringify(metadata) : null]
|
|
69
|
-
);
|
|
70
|
-
} catch (error) {
|
|
71
|
-
log(`Error recording job event for job ${jobId}: ${error}`);
|
|
72
|
-
} finally {
|
|
73
|
-
client.release();
|
|
74
|
-
}
|
|
75
|
-
}
|
|
76
|
-
async getJobEvents(jobId) {
|
|
77
|
-
const client = await this.pool.connect();
|
|
78
|
-
try {
|
|
79
|
-
const res = await client.query(
|
|
80
|
-
`SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
|
|
81
|
-
[jobId]
|
|
82
|
-
);
|
|
83
|
-
return res.rows;
|
|
84
|
-
} finally {
|
|
85
|
-
client.release();
|
|
86
|
-
}
|
|
87
|
-
}
|
|
88
|
-
// ── Job CRUD ──────────────────────────────────────────────────────────
|
|
89
|
-
async addJob({
|
|
90
|
-
jobType,
|
|
91
|
-
payload,
|
|
92
|
-
maxAttempts = 3,
|
|
93
|
-
priority = 0,
|
|
94
|
-
runAt = null,
|
|
95
|
-
timeoutMs = void 0,
|
|
96
|
-
forceKillOnTimeout = false,
|
|
97
|
-
tags = void 0,
|
|
98
|
-
idempotencyKey = void 0
|
|
99
|
-
}) {
|
|
100
|
-
const client = await this.pool.connect();
|
|
101
|
-
try {
|
|
102
|
-
let result;
|
|
103
|
-
const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
|
|
104
|
-
if (runAt) {
|
|
105
|
-
result = await client.query(
|
|
106
|
-
`INSERT INTO job_queue
|
|
107
|
-
(job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
108
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
109
|
-
${onConflict}
|
|
110
|
-
RETURNING id`,
|
|
111
|
-
[
|
|
112
|
-
jobType,
|
|
113
|
-
payload,
|
|
114
|
-
maxAttempts,
|
|
115
|
-
priority,
|
|
116
|
-
runAt,
|
|
117
|
-
timeoutMs ?? null,
|
|
118
|
-
forceKillOnTimeout ?? false,
|
|
119
|
-
tags ?? null,
|
|
120
|
-
idempotencyKey ?? null
|
|
121
|
-
]
|
|
122
|
-
);
|
|
123
|
-
} else {
|
|
124
|
-
result = await client.query(
|
|
125
|
-
`INSERT INTO job_queue
|
|
126
|
-
(job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
|
|
127
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
128
|
-
${onConflict}
|
|
129
|
-
RETURNING id`,
|
|
130
|
-
[
|
|
131
|
-
jobType,
|
|
132
|
-
payload,
|
|
133
|
-
maxAttempts,
|
|
134
|
-
priority,
|
|
135
|
-
timeoutMs ?? null,
|
|
136
|
-
forceKillOnTimeout ?? false,
|
|
137
|
-
tags ?? null,
|
|
138
|
-
idempotencyKey ?? null
|
|
139
|
-
]
|
|
140
|
-
);
|
|
141
|
-
}
|
|
142
|
-
if (result.rows.length === 0 && idempotencyKey) {
|
|
143
|
-
const existing = await client.query(
|
|
144
|
-
`SELECT id FROM job_queue WHERE idempotency_key = $1`,
|
|
145
|
-
[idempotencyKey]
|
|
146
|
-
);
|
|
147
|
-
if (existing.rows.length > 0) {
|
|
148
|
-
log(
|
|
149
|
-
`Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
|
|
150
|
-
);
|
|
151
|
-
return existing.rows[0].id;
|
|
152
|
-
}
|
|
153
|
-
throw new Error(
|
|
154
|
-
`Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
|
|
155
|
-
);
|
|
156
|
-
}
|
|
157
|
-
const jobId = result.rows[0].id;
|
|
158
|
-
log(
|
|
159
|
-
`Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
|
|
160
|
-
);
|
|
161
|
-
await this.recordJobEvent(jobId, "added" /* Added */, {
|
|
162
|
-
jobType,
|
|
163
|
-
payload,
|
|
164
|
-
tags,
|
|
165
|
-
idempotencyKey
|
|
166
|
-
});
|
|
167
|
-
return jobId;
|
|
168
|
-
} catch (error) {
|
|
169
|
-
log(`Error adding job: ${error}`);
|
|
170
|
-
throw error;
|
|
171
|
-
} finally {
|
|
172
|
-
client.release();
|
|
173
|
-
}
|
|
174
|
-
}
|
|
175
|
-
async getJob(id) {
|
|
176
|
-
const client = await this.pool.connect();
|
|
177
|
-
try {
|
|
178
|
-
const result = await client.query(
|
|
179
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
|
|
180
|
-
[id]
|
|
181
|
-
);
|
|
182
|
-
if (result.rows.length === 0) {
|
|
183
|
-
log(`Job ${id} not found`);
|
|
184
|
-
return null;
|
|
185
|
-
}
|
|
186
|
-
log(`Found job ${id}`);
|
|
187
|
-
const job = result.rows[0];
|
|
188
|
-
return {
|
|
189
|
-
...job,
|
|
190
|
-
payload: job.payload,
|
|
191
|
-
timeoutMs: job.timeoutMs,
|
|
192
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
193
|
-
failureReason: job.failureReason
|
|
194
|
-
};
|
|
195
|
-
} catch (error) {
|
|
196
|
-
log(`Error getting job ${id}: ${error}`);
|
|
197
|
-
throw error;
|
|
198
|
-
} finally {
|
|
199
|
-
client.release();
|
|
200
|
-
}
|
|
201
|
-
}
|
|
202
|
-
async getJobsByStatus(status, limit = 100, offset = 0) {
|
|
203
|
-
const client = await this.pool.connect();
|
|
204
|
-
try {
|
|
205
|
-
const result = await client.query(
|
|
206
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
|
|
207
|
-
[status, limit, offset]
|
|
208
|
-
);
|
|
209
|
-
log(`Found ${result.rows.length} jobs by status ${status}`);
|
|
210
|
-
return result.rows.map((job) => ({
|
|
211
|
-
...job,
|
|
212
|
-
payload: job.payload,
|
|
213
|
-
timeoutMs: job.timeoutMs,
|
|
214
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
215
|
-
failureReason: job.failureReason
|
|
216
|
-
}));
|
|
217
|
-
} catch (error) {
|
|
218
|
-
log(`Error getting jobs by status ${status}: ${error}`);
|
|
219
|
-
throw error;
|
|
220
|
-
} finally {
|
|
221
|
-
client.release();
|
|
222
|
-
}
|
|
223
|
-
}
|
|
224
|
-
async getAllJobs(limit = 100, offset = 0) {
|
|
225
|
-
const client = await this.pool.connect();
|
|
226
|
-
try {
|
|
227
|
-
const result = await client.query(
|
|
228
|
-
`SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
|
|
229
|
-
[limit, offset]
|
|
230
|
-
);
|
|
231
|
-
log(`Found ${result.rows.length} jobs (all)`);
|
|
232
|
-
return result.rows.map((job) => ({
|
|
233
|
-
...job,
|
|
234
|
-
payload: job.payload,
|
|
235
|
-
timeoutMs: job.timeoutMs,
|
|
236
|
-
forceKillOnTimeout: job.forceKillOnTimeout
|
|
237
|
-
}));
|
|
238
|
-
} catch (error) {
|
|
239
|
-
log(`Error getting all jobs: ${error}`);
|
|
240
|
-
throw error;
|
|
241
|
-
} finally {
|
|
242
|
-
client.release();
|
|
243
|
-
}
|
|
244
|
-
}
|
|
245
|
-
async getJobs(filters, limit = 100, offset = 0) {
|
|
246
|
-
const client = await this.pool.connect();
|
|
247
|
-
try {
|
|
248
|
-
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue`;
|
|
249
|
-
const params = [];
|
|
250
|
-
const where = [];
|
|
251
|
-
let paramIdx = 1;
|
|
252
|
-
if (filters) {
|
|
253
|
-
if (filters.jobType) {
|
|
254
|
-
where.push(`job_type = $${paramIdx++}`);
|
|
255
|
-
params.push(filters.jobType);
|
|
256
|
-
}
|
|
257
|
-
if (filters.priority !== void 0) {
|
|
258
|
-
where.push(`priority = $${paramIdx++}`);
|
|
259
|
-
params.push(filters.priority);
|
|
260
|
-
}
|
|
261
|
-
if (filters.runAt) {
|
|
262
|
-
if (filters.runAt instanceof Date) {
|
|
263
|
-
where.push(`run_at = $${paramIdx++}`);
|
|
264
|
-
params.push(filters.runAt);
|
|
265
|
-
} else if (typeof filters.runAt === "object" && (filters.runAt.gt !== void 0 || filters.runAt.gte !== void 0 || filters.runAt.lt !== void 0 || filters.runAt.lte !== void 0 || filters.runAt.eq !== void 0)) {
|
|
266
|
-
const ops = filters.runAt;
|
|
267
|
-
if (ops.gt) {
|
|
268
|
-
where.push(`run_at > $${paramIdx++}`);
|
|
269
|
-
params.push(ops.gt);
|
|
270
|
-
}
|
|
271
|
-
if (ops.gte) {
|
|
272
|
-
where.push(`run_at >= $${paramIdx++}`);
|
|
273
|
-
params.push(ops.gte);
|
|
274
|
-
}
|
|
275
|
-
if (ops.lt) {
|
|
276
|
-
where.push(`run_at < $${paramIdx++}`);
|
|
277
|
-
params.push(ops.lt);
|
|
278
|
-
}
|
|
279
|
-
if (ops.lte) {
|
|
280
|
-
where.push(`run_at <= $${paramIdx++}`);
|
|
281
|
-
params.push(ops.lte);
|
|
282
|
-
}
|
|
283
|
-
if (ops.eq) {
|
|
284
|
-
where.push(`run_at = $${paramIdx++}`);
|
|
285
|
-
params.push(ops.eq);
|
|
286
|
-
}
|
|
287
|
-
}
|
|
288
|
-
}
|
|
289
|
-
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
290
|
-
const mode = filters.tags.mode || "all";
|
|
291
|
-
const tagValues = filters.tags.values;
|
|
292
|
-
switch (mode) {
|
|
293
|
-
case "exact":
|
|
294
|
-
where.push(`tags = $${paramIdx++}`);
|
|
295
|
-
params.push(tagValues);
|
|
296
|
-
break;
|
|
297
|
-
case "all":
|
|
298
|
-
where.push(`tags @> $${paramIdx++}`);
|
|
299
|
-
params.push(tagValues);
|
|
300
|
-
break;
|
|
301
|
-
case "any":
|
|
302
|
-
where.push(`tags && $${paramIdx++}`);
|
|
303
|
-
params.push(tagValues);
|
|
304
|
-
break;
|
|
305
|
-
case "none":
|
|
306
|
-
where.push(`NOT (tags && $${paramIdx++})`);
|
|
307
|
-
params.push(tagValues);
|
|
308
|
-
break;
|
|
309
|
-
default:
|
|
310
|
-
where.push(`tags @> $${paramIdx++}`);
|
|
311
|
-
params.push(tagValues);
|
|
312
|
-
}
|
|
313
|
-
}
|
|
314
|
-
if (filters.cursor !== void 0) {
|
|
315
|
-
where.push(`id < $${paramIdx++}`);
|
|
316
|
-
params.push(filters.cursor);
|
|
317
|
-
}
|
|
318
|
-
}
|
|
319
|
-
if (where.length > 0) {
|
|
320
|
-
query += ` WHERE ${where.join(" AND ")}`;
|
|
321
|
-
}
|
|
322
|
-
paramIdx = params.length + 1;
|
|
323
|
-
query += ` ORDER BY id DESC LIMIT $${paramIdx++}`;
|
|
324
|
-
if (!filters?.cursor) {
|
|
325
|
-
query += ` OFFSET $${paramIdx}`;
|
|
326
|
-
params.push(limit, offset);
|
|
327
|
-
} else {
|
|
328
|
-
params.push(limit);
|
|
329
|
-
}
|
|
330
|
-
const result = await client.query(query, params);
|
|
331
|
-
log(`Found ${result.rows.length} jobs`);
|
|
332
|
-
return result.rows.map((job) => ({
|
|
333
|
-
...job,
|
|
334
|
-
payload: job.payload,
|
|
335
|
-
timeoutMs: job.timeoutMs,
|
|
336
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
337
|
-
failureReason: job.failureReason
|
|
338
|
-
}));
|
|
339
|
-
} catch (error) {
|
|
340
|
-
log(`Error getting jobs: ${error}`);
|
|
341
|
-
throw error;
|
|
342
|
-
} finally {
|
|
343
|
-
client.release();
|
|
344
|
-
}
|
|
345
|
-
}
|
|
346
|
-
async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
|
|
347
|
-
const client = await this.pool.connect();
|
|
348
|
-
try {
|
|
349
|
-
let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
|
|
350
|
-
FROM job_queue`;
|
|
351
|
-
let params = [];
|
|
352
|
-
switch (mode) {
|
|
353
|
-
case "exact":
|
|
354
|
-
query += " WHERE tags = $1";
|
|
355
|
-
params = [tags];
|
|
356
|
-
break;
|
|
357
|
-
case "all":
|
|
358
|
-
query += " WHERE tags @> $1";
|
|
359
|
-
params = [tags];
|
|
360
|
-
break;
|
|
361
|
-
case "any":
|
|
362
|
-
query += " WHERE tags && $1";
|
|
363
|
-
params = [tags];
|
|
364
|
-
break;
|
|
365
|
-
case "none":
|
|
366
|
-
query += " WHERE NOT (tags && $1)";
|
|
367
|
-
params = [tags];
|
|
368
|
-
break;
|
|
369
|
-
default:
|
|
370
|
-
query += " WHERE tags @> $1";
|
|
371
|
-
params = [tags];
|
|
372
|
-
}
|
|
373
|
-
query += " ORDER BY created_at DESC LIMIT $2 OFFSET $3";
|
|
374
|
-
params.push(limit, offset);
|
|
375
|
-
const result = await client.query(query, params);
|
|
376
|
-
log(
|
|
377
|
-
`Found ${result.rows.length} jobs by tags ${JSON.stringify(tags)} (mode: ${mode})`
|
|
378
|
-
);
|
|
379
|
-
return result.rows.map((job) => ({
|
|
380
|
-
...job,
|
|
381
|
-
payload: job.payload,
|
|
382
|
-
timeoutMs: job.timeoutMs,
|
|
383
|
-
forceKillOnTimeout: job.forceKillOnTimeout,
|
|
384
|
-
failureReason: job.failureReason
|
|
385
|
-
}));
|
|
386
|
-
} catch (error) {
|
|
387
|
-
log(
|
|
388
|
-
`Error getting jobs by tags ${JSON.stringify(tags)} (mode: ${mode}): ${error}`
|
|
389
|
-
);
|
|
390
|
-
throw error;
|
|
391
|
-
} finally {
|
|
392
|
-
client.release();
|
|
393
|
-
}
|
|
394
|
-
}
|
|
395
|
-
// ── Processing lifecycle ──────────────────────────────────────────────
|
|
396
|
-
async getNextBatch(workerId, batchSize = 10, jobType) {
|
|
397
|
-
const client = await this.pool.connect();
|
|
398
|
-
try {
|
|
399
|
-
await client.query("BEGIN");
|
|
400
|
-
let jobTypeFilter = "";
|
|
401
|
-
const params = [workerId, batchSize];
|
|
402
|
-
if (jobType) {
|
|
403
|
-
if (Array.isArray(jobType)) {
|
|
404
|
-
jobTypeFilter = ` AND job_type = ANY($3)`;
|
|
405
|
-
params.push(jobType);
|
|
406
|
-
} else {
|
|
407
|
-
jobTypeFilter = ` AND job_type = $3`;
|
|
408
|
-
params.push(jobType);
|
|
409
|
-
}
|
|
410
|
-
}
|
|
411
|
-
const result = await client.query(
|
|
412
|
-
`
|
|
413
|
-
UPDATE job_queue
|
|
414
|
-
SET status = 'processing',
|
|
415
|
-
locked_at = NOW(),
|
|
416
|
-
locked_by = $1,
|
|
417
|
-
attempts = CASE WHEN status = 'waiting' THEN attempts ELSE attempts + 1 END,
|
|
418
|
-
updated_at = NOW(),
|
|
419
|
-
pending_reason = NULL,
|
|
420
|
-
started_at = COALESCE(started_at, NOW()),
|
|
421
|
-
last_retried_at = CASE WHEN status != 'waiting' AND attempts > 0 THEN NOW() ELSE last_retried_at END,
|
|
422
|
-
wait_until = NULL
|
|
423
|
-
WHERE id IN (
|
|
424
|
-
SELECT id FROM job_queue
|
|
425
|
-
WHERE (
|
|
426
|
-
(
|
|
427
|
-
(status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
|
|
428
|
-
AND (attempts < max_attempts)
|
|
429
|
-
AND run_at <= NOW()
|
|
430
|
-
)
|
|
431
|
-
OR (
|
|
432
|
-
status = 'waiting'
|
|
433
|
-
AND wait_until IS NOT NULL
|
|
434
|
-
AND wait_until <= NOW()
|
|
435
|
-
AND wait_token_id IS NULL
|
|
436
|
-
)
|
|
437
|
-
)
|
|
438
|
-
${jobTypeFilter}
|
|
439
|
-
ORDER BY priority DESC, created_at ASC
|
|
440
|
-
LIMIT $2
|
|
441
|
-
FOR UPDATE SKIP LOCKED
|
|
442
|
-
)
|
|
443
|
-
RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
|
|
444
|
-
`,
|
|
445
|
-
params
|
|
446
|
-
);
|
|
447
|
-
log(`Found ${result.rows.length} jobs to process`);
|
|
448
|
-
await client.query("COMMIT");
|
|
449
|
-
if (result.rows.length > 0) {
|
|
450
|
-
await this.recordJobEventsBatch(
|
|
451
|
-
result.rows.map((row) => ({
|
|
452
|
-
jobId: row.id,
|
|
453
|
-
eventType: "processing" /* Processing */
|
|
454
|
-
}))
|
|
455
|
-
);
|
|
456
|
-
}
|
|
457
|
-
return result.rows.map((job) => ({
|
|
458
|
-
...job,
|
|
459
|
-
payload: job.payload,
|
|
460
|
-
timeoutMs: job.timeoutMs,
|
|
461
|
-
forceKillOnTimeout: job.forceKillOnTimeout
|
|
462
|
-
}));
|
|
463
|
-
} catch (error) {
|
|
464
|
-
log(`Error getting next batch: ${error}`);
|
|
465
|
-
await client.query("ROLLBACK");
|
|
466
|
-
throw error;
|
|
467
|
-
} finally {
|
|
468
|
-
client.release();
|
|
469
|
-
}
|
|
470
|
-
}
|
|
471
|
-
async completeJob(jobId) {
|
|
472
|
-
const client = await this.pool.connect();
|
|
473
|
-
try {
|
|
474
|
-
const result = await client.query(
|
|
475
|
-
`
|
|
476
|
-
UPDATE job_queue
|
|
477
|
-
SET status = 'completed', updated_at = NOW(), completed_at = NOW(),
|
|
478
|
-
step_data = NULL, wait_until = NULL, wait_token_id = NULL
|
|
479
|
-
WHERE id = $1 AND status = 'processing'
|
|
480
|
-
`,
|
|
481
|
-
[jobId]
|
|
482
|
-
);
|
|
483
|
-
if (result.rowCount === 0) {
|
|
484
|
-
log(
|
|
485
|
-
`Job ${jobId} could not be completed (not in processing state or does not exist)`
|
|
486
|
-
);
|
|
487
|
-
}
|
|
488
|
-
await this.recordJobEvent(jobId, "completed" /* Completed */);
|
|
489
|
-
log(`Completed job ${jobId}`);
|
|
490
|
-
} catch (error) {
|
|
491
|
-
log(`Error completing job ${jobId}: ${error}`);
|
|
492
|
-
throw error;
|
|
493
|
-
} finally {
|
|
494
|
-
client.release();
|
|
495
|
-
}
|
|
496
|
-
}
|
|
497
|
-
async failJob(jobId, error, failureReason) {
|
|
498
|
-
const client = await this.pool.connect();
|
|
499
|
-
try {
|
|
500
|
-
const result = await client.query(
|
|
501
|
-
`
|
|
502
|
-
UPDATE job_queue
|
|
503
|
-
SET status = 'failed',
|
|
504
|
-
updated_at = NOW(),
|
|
505
|
-
next_attempt_at = CASE
|
|
506
|
-
WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
|
|
507
|
-
ELSE NULL
|
|
508
|
-
END,
|
|
509
|
-
error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
|
|
510
|
-
failure_reason = $3,
|
|
511
|
-
last_failed_at = NOW()
|
|
512
|
-
WHERE id = $1 AND status IN ('processing', 'pending')
|
|
513
|
-
`,
|
|
514
|
-
[
|
|
515
|
-
jobId,
|
|
516
|
-
JSON.stringify([
|
|
517
|
-
{
|
|
518
|
-
message: error.message || String(error),
|
|
519
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
520
|
-
}
|
|
521
|
-
]),
|
|
522
|
-
failureReason ?? null
|
|
523
|
-
]
|
|
524
|
-
);
|
|
525
|
-
if (result.rowCount === 0) {
|
|
526
|
-
log(
|
|
527
|
-
`Job ${jobId} could not be failed (not in processing/pending state or does not exist)`
|
|
528
|
-
);
|
|
529
|
-
}
|
|
530
|
-
await this.recordJobEvent(jobId, "failed" /* Failed */, {
|
|
531
|
-
message: error.message || String(error),
|
|
532
|
-
failureReason
|
|
533
|
-
});
|
|
534
|
-
log(`Failed job ${jobId}`);
|
|
535
|
-
} catch (err) {
|
|
536
|
-
log(`Error failing job ${jobId}: ${err}`);
|
|
537
|
-
throw err;
|
|
538
|
-
} finally {
|
|
539
|
-
client.release();
|
|
540
|
-
}
|
|
541
|
-
}
|
|
542
|
-
async prolongJob(jobId) {
|
|
543
|
-
const client = await this.pool.connect();
|
|
544
|
-
try {
|
|
545
|
-
await client.query(
|
|
546
|
-
`
|
|
547
|
-
UPDATE job_queue
|
|
548
|
-
SET locked_at = NOW(), updated_at = NOW()
|
|
549
|
-
WHERE id = $1 AND status = 'processing'
|
|
550
|
-
`,
|
|
551
|
-
[jobId]
|
|
552
|
-
);
|
|
553
|
-
await this.recordJobEvent(jobId, "prolonged" /* Prolonged */);
|
|
554
|
-
log(`Prolonged job ${jobId}`);
|
|
555
|
-
} catch (error) {
|
|
556
|
-
log(`Error prolonging job ${jobId}: ${error}`);
|
|
557
|
-
} finally {
|
|
558
|
-
client.release();
|
|
559
|
-
}
|
|
560
|
-
}
|
|
561
|
-
// ── Progress ──────────────────────────────────────────────────────────
|
|
562
|
-
async updateProgress(jobId, progress) {
|
|
563
|
-
const client = await this.pool.connect();
|
|
564
|
-
try {
|
|
565
|
-
await client.query(
|
|
566
|
-
`UPDATE job_queue SET progress = $2, updated_at = NOW() WHERE id = $1`,
|
|
567
|
-
[jobId, progress]
|
|
568
|
-
);
|
|
569
|
-
log(`Updated progress for job ${jobId}: ${progress}%`);
|
|
570
|
-
} catch (error) {
|
|
571
|
-
log(`Error updating progress for job ${jobId}: ${error}`);
|
|
572
|
-
} finally {
|
|
573
|
-
client.release();
|
|
574
|
-
}
|
|
575
|
-
}
|
|
576
|
-
// ── Job management ────────────────────────────────────────────────────
|
|
577
|
-
async retryJob(jobId) {
|
|
578
|
-
const client = await this.pool.connect();
|
|
579
|
-
try {
|
|
580
|
-
const result = await client.query(
|
|
581
|
-
`
|
|
582
|
-
UPDATE job_queue
|
|
583
|
-
SET status = 'pending',
|
|
584
|
-
updated_at = NOW(),
|
|
585
|
-
locked_at = NULL,
|
|
586
|
-
locked_by = NULL,
|
|
587
|
-
next_attempt_at = NOW(),
|
|
588
|
-
last_retried_at = NOW()
|
|
589
|
-
WHERE id = $1 AND status IN ('failed', 'processing')
|
|
590
|
-
`,
|
|
591
|
-
[jobId]
|
|
592
|
-
);
|
|
593
|
-
if (result.rowCount === 0) {
|
|
594
|
-
log(
|
|
595
|
-
`Job ${jobId} could not be retried (not in failed/processing state or does not exist)`
|
|
596
|
-
);
|
|
597
|
-
}
|
|
598
|
-
await this.recordJobEvent(jobId, "retried" /* Retried */);
|
|
599
|
-
log(`Retried job ${jobId}`);
|
|
600
|
-
} catch (error) {
|
|
601
|
-
log(`Error retrying job ${jobId}: ${error}`);
|
|
602
|
-
throw error;
|
|
603
|
-
} finally {
|
|
604
|
-
client.release();
|
|
605
|
-
}
|
|
606
|
-
}
|
|
607
|
-
async cancelJob(jobId) {
|
|
608
|
-
const client = await this.pool.connect();
|
|
609
|
-
try {
|
|
610
|
-
await client.query(
|
|
611
|
-
`
|
|
612
|
-
UPDATE job_queue
|
|
613
|
-
SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW(),
|
|
614
|
-
wait_until = NULL, wait_token_id = NULL
|
|
615
|
-
WHERE id = $1 AND status IN ('pending', 'waiting')
|
|
616
|
-
`,
|
|
617
|
-
[jobId]
|
|
618
|
-
);
|
|
619
|
-
await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
|
|
620
|
-
log(`Cancelled job ${jobId}`);
|
|
621
|
-
} catch (error) {
|
|
622
|
-
log(`Error cancelling job ${jobId}: ${error}`);
|
|
623
|
-
throw error;
|
|
624
|
-
} finally {
|
|
625
|
-
client.release();
|
|
626
|
-
}
|
|
627
|
-
}
|
|
628
|
-
async cancelAllUpcomingJobs(filters) {
|
|
629
|
-
const client = await this.pool.connect();
|
|
630
|
-
try {
|
|
631
|
-
let query = `
|
|
632
|
-
UPDATE job_queue
|
|
633
|
-
SET status = 'cancelled', updated_at = NOW()
|
|
634
|
-
WHERE status = 'pending'`;
|
|
635
|
-
const params = [];
|
|
636
|
-
let paramIdx = 1;
|
|
637
|
-
if (filters) {
|
|
638
|
-
if (filters.jobType) {
|
|
639
|
-
query += ` AND job_type = $${paramIdx++}`;
|
|
640
|
-
params.push(filters.jobType);
|
|
641
|
-
}
|
|
642
|
-
if (filters.priority !== void 0) {
|
|
643
|
-
query += ` AND priority = $${paramIdx++}`;
|
|
644
|
-
params.push(filters.priority);
|
|
645
|
-
}
|
|
646
|
-
if (filters.runAt) {
|
|
647
|
-
if (filters.runAt instanceof Date) {
|
|
648
|
-
query += ` AND run_at = $${paramIdx++}`;
|
|
649
|
-
params.push(filters.runAt);
|
|
650
|
-
} else if (typeof filters.runAt === "object") {
|
|
651
|
-
const ops = filters.runAt;
|
|
652
|
-
if (ops.gt) {
|
|
653
|
-
query += ` AND run_at > $${paramIdx++}`;
|
|
654
|
-
params.push(ops.gt);
|
|
655
|
-
}
|
|
656
|
-
if (ops.gte) {
|
|
657
|
-
query += ` AND run_at >= $${paramIdx++}`;
|
|
658
|
-
params.push(ops.gte);
|
|
659
|
-
}
|
|
660
|
-
if (ops.lt) {
|
|
661
|
-
query += ` AND run_at < $${paramIdx++}`;
|
|
662
|
-
params.push(ops.lt);
|
|
663
|
-
}
|
|
664
|
-
if (ops.lte) {
|
|
665
|
-
query += ` AND run_at <= $${paramIdx++}`;
|
|
666
|
-
params.push(ops.lte);
|
|
667
|
-
}
|
|
668
|
-
if (ops.eq) {
|
|
669
|
-
query += ` AND run_at = $${paramIdx++}`;
|
|
670
|
-
params.push(ops.eq);
|
|
671
|
-
}
|
|
672
|
-
}
|
|
673
|
-
}
|
|
674
|
-
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
675
|
-
const mode = filters.tags.mode || "all";
|
|
676
|
-
const tagValues = filters.tags.values;
|
|
677
|
-
switch (mode) {
|
|
678
|
-
case "exact":
|
|
679
|
-
query += ` AND tags = $${paramIdx++}`;
|
|
680
|
-
params.push(tagValues);
|
|
681
|
-
break;
|
|
682
|
-
case "all":
|
|
683
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
684
|
-
params.push(tagValues);
|
|
685
|
-
break;
|
|
686
|
-
case "any":
|
|
687
|
-
query += ` AND tags && $${paramIdx++}`;
|
|
688
|
-
params.push(tagValues);
|
|
689
|
-
break;
|
|
690
|
-
case "none":
|
|
691
|
-
query += ` AND NOT (tags && $${paramIdx++})`;
|
|
692
|
-
params.push(tagValues);
|
|
693
|
-
break;
|
|
694
|
-
default:
|
|
695
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
696
|
-
params.push(tagValues);
|
|
697
|
-
}
|
|
698
|
-
}
|
|
699
|
-
}
|
|
700
|
-
query += "\nRETURNING id";
|
|
701
|
-
const result = await client.query(query, params);
|
|
702
|
-
log(`Cancelled ${result.rowCount} jobs`);
|
|
703
|
-
return result.rowCount || 0;
|
|
704
|
-
} catch (error) {
|
|
705
|
-
log(`Error cancelling upcoming jobs: ${error}`);
|
|
706
|
-
throw error;
|
|
707
|
-
} finally {
|
|
708
|
-
client.release();
|
|
709
|
-
}
|
|
710
|
-
}
|
|
711
|
-
async editJob(jobId, updates) {
|
|
712
|
-
const client = await this.pool.connect();
|
|
713
|
-
try {
|
|
714
|
-
const updateFields = [];
|
|
715
|
-
const params = [];
|
|
716
|
-
let paramIdx = 1;
|
|
717
|
-
if (updates.payload !== void 0) {
|
|
718
|
-
updateFields.push(`payload = $${paramIdx++}`);
|
|
719
|
-
params.push(updates.payload);
|
|
720
|
-
}
|
|
721
|
-
if (updates.maxAttempts !== void 0) {
|
|
722
|
-
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
723
|
-
params.push(updates.maxAttempts);
|
|
724
|
-
}
|
|
725
|
-
if (updates.priority !== void 0) {
|
|
726
|
-
updateFields.push(`priority = $${paramIdx++}`);
|
|
727
|
-
params.push(updates.priority);
|
|
728
|
-
}
|
|
729
|
-
if (updates.runAt !== void 0) {
|
|
730
|
-
if (updates.runAt === null) {
|
|
731
|
-
updateFields.push(`run_at = NOW()`);
|
|
732
|
-
} else {
|
|
733
|
-
updateFields.push(`run_at = $${paramIdx++}`);
|
|
734
|
-
params.push(updates.runAt);
|
|
735
|
-
}
|
|
736
|
-
}
|
|
737
|
-
if (updates.timeoutMs !== void 0) {
|
|
738
|
-
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
739
|
-
params.push(updates.timeoutMs ?? null);
|
|
740
|
-
}
|
|
741
|
-
if (updates.tags !== void 0) {
|
|
742
|
-
updateFields.push(`tags = $${paramIdx++}`);
|
|
743
|
-
params.push(updates.tags ?? null);
|
|
744
|
-
}
|
|
745
|
-
if (updateFields.length === 0) {
|
|
746
|
-
log(`No fields to update for job ${jobId}`);
|
|
747
|
-
return;
|
|
748
|
-
}
|
|
749
|
-
updateFields.push(`updated_at = NOW()`);
|
|
750
|
-
params.push(jobId);
|
|
751
|
-
const query = `
|
|
752
|
-
UPDATE job_queue
|
|
753
|
-
SET ${updateFields.join(", ")}
|
|
754
|
-
WHERE id = $${paramIdx} AND status = 'pending'
|
|
755
|
-
`;
|
|
756
|
-
await client.query(query, params);
|
|
757
|
-
const metadata = {};
|
|
758
|
-
if (updates.payload !== void 0) metadata.payload = updates.payload;
|
|
759
|
-
if (updates.maxAttempts !== void 0)
|
|
760
|
-
metadata.maxAttempts = updates.maxAttempts;
|
|
761
|
-
if (updates.priority !== void 0) metadata.priority = updates.priority;
|
|
762
|
-
if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
|
|
763
|
-
if (updates.timeoutMs !== void 0)
|
|
764
|
-
metadata.timeoutMs = updates.timeoutMs;
|
|
765
|
-
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
766
|
-
await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
|
|
767
|
-
log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
|
|
768
|
-
} catch (error) {
|
|
769
|
-
log(`Error editing job ${jobId}: ${error}`);
|
|
770
|
-
throw error;
|
|
771
|
-
} finally {
|
|
772
|
-
client.release();
|
|
773
|
-
}
|
|
774
|
-
}
|
|
775
|
-
async editAllPendingJobs(filters = void 0, updates) {
|
|
776
|
-
const client = await this.pool.connect();
|
|
777
|
-
try {
|
|
778
|
-
const updateFields = [];
|
|
779
|
-
const params = [];
|
|
780
|
-
let paramIdx = 1;
|
|
781
|
-
if (updates.payload !== void 0) {
|
|
782
|
-
updateFields.push(`payload = $${paramIdx++}`);
|
|
783
|
-
params.push(updates.payload);
|
|
784
|
-
}
|
|
785
|
-
if (updates.maxAttempts !== void 0) {
|
|
786
|
-
updateFields.push(`max_attempts = $${paramIdx++}`);
|
|
787
|
-
params.push(updates.maxAttempts);
|
|
788
|
-
}
|
|
789
|
-
if (updates.priority !== void 0) {
|
|
790
|
-
updateFields.push(`priority = $${paramIdx++}`);
|
|
791
|
-
params.push(updates.priority);
|
|
792
|
-
}
|
|
793
|
-
if (updates.runAt !== void 0) {
|
|
794
|
-
if (updates.runAt === null) {
|
|
795
|
-
updateFields.push(`run_at = NOW()`);
|
|
796
|
-
} else {
|
|
797
|
-
updateFields.push(`run_at = $${paramIdx++}`);
|
|
798
|
-
params.push(updates.runAt);
|
|
799
|
-
}
|
|
800
|
-
}
|
|
801
|
-
if (updates.timeoutMs !== void 0) {
|
|
802
|
-
updateFields.push(`timeout_ms = $${paramIdx++}`);
|
|
803
|
-
params.push(updates.timeoutMs ?? null);
|
|
804
|
-
}
|
|
805
|
-
if (updates.tags !== void 0) {
|
|
806
|
-
updateFields.push(`tags = $${paramIdx++}`);
|
|
807
|
-
params.push(updates.tags ?? null);
|
|
808
|
-
}
|
|
809
|
-
if (updateFields.length === 0) {
|
|
810
|
-
log(`No fields to update for batch edit`);
|
|
811
|
-
return 0;
|
|
812
|
-
}
|
|
813
|
-
updateFields.push(`updated_at = NOW()`);
|
|
814
|
-
let query = `
|
|
815
|
-
UPDATE job_queue
|
|
816
|
-
SET ${updateFields.join(", ")}
|
|
817
|
-
WHERE status = 'pending'`;
|
|
818
|
-
if (filters) {
|
|
819
|
-
if (filters.jobType) {
|
|
820
|
-
query += ` AND job_type = $${paramIdx++}`;
|
|
821
|
-
params.push(filters.jobType);
|
|
822
|
-
}
|
|
823
|
-
if (filters.priority !== void 0) {
|
|
824
|
-
query += ` AND priority = $${paramIdx++}`;
|
|
825
|
-
params.push(filters.priority);
|
|
826
|
-
}
|
|
827
|
-
if (filters.runAt) {
|
|
828
|
-
if (filters.runAt instanceof Date) {
|
|
829
|
-
query += ` AND run_at = $${paramIdx++}`;
|
|
830
|
-
params.push(filters.runAt);
|
|
831
|
-
} else if (typeof filters.runAt === "object") {
|
|
832
|
-
const ops = filters.runAt;
|
|
833
|
-
if (ops.gt) {
|
|
834
|
-
query += ` AND run_at > $${paramIdx++}`;
|
|
835
|
-
params.push(ops.gt);
|
|
836
|
-
}
|
|
837
|
-
if (ops.gte) {
|
|
838
|
-
query += ` AND run_at >= $${paramIdx++}`;
|
|
839
|
-
params.push(ops.gte);
|
|
840
|
-
}
|
|
841
|
-
if (ops.lt) {
|
|
842
|
-
query += ` AND run_at < $${paramIdx++}`;
|
|
843
|
-
params.push(ops.lt);
|
|
844
|
-
}
|
|
845
|
-
if (ops.lte) {
|
|
846
|
-
query += ` AND run_at <= $${paramIdx++}`;
|
|
847
|
-
params.push(ops.lte);
|
|
848
|
-
}
|
|
849
|
-
if (ops.eq) {
|
|
850
|
-
query += ` AND run_at = $${paramIdx++}`;
|
|
851
|
-
params.push(ops.eq);
|
|
852
|
-
}
|
|
853
|
-
}
|
|
854
|
-
}
|
|
855
|
-
if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
|
|
856
|
-
const mode = filters.tags.mode || "all";
|
|
857
|
-
const tagValues = filters.tags.values;
|
|
858
|
-
switch (mode) {
|
|
859
|
-
case "exact":
|
|
860
|
-
query += ` AND tags = $${paramIdx++}`;
|
|
861
|
-
params.push(tagValues);
|
|
862
|
-
break;
|
|
863
|
-
case "all":
|
|
864
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
865
|
-
params.push(tagValues);
|
|
866
|
-
break;
|
|
867
|
-
case "any":
|
|
868
|
-
query += ` AND tags && $${paramIdx++}`;
|
|
869
|
-
params.push(tagValues);
|
|
870
|
-
break;
|
|
871
|
-
case "none":
|
|
872
|
-
query += ` AND NOT (tags && $${paramIdx++})`;
|
|
873
|
-
params.push(tagValues);
|
|
874
|
-
break;
|
|
875
|
-
default:
|
|
876
|
-
query += ` AND tags @> $${paramIdx++}`;
|
|
877
|
-
params.push(tagValues);
|
|
878
|
-
}
|
|
879
|
-
}
|
|
880
|
-
}
|
|
881
|
-
query += "\nRETURNING id";
|
|
882
|
-
const result = await client.query(query, params);
|
|
883
|
-
const editedCount = result.rowCount || 0;
|
|
884
|
-
const metadata = {};
|
|
885
|
-
if (updates.payload !== void 0) metadata.payload = updates.payload;
|
|
886
|
-
if (updates.maxAttempts !== void 0)
|
|
887
|
-
metadata.maxAttempts = updates.maxAttempts;
|
|
888
|
-
if (updates.priority !== void 0) metadata.priority = updates.priority;
|
|
889
|
-
if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
|
|
890
|
-
if (updates.timeoutMs !== void 0)
|
|
891
|
-
metadata.timeoutMs = updates.timeoutMs;
|
|
892
|
-
if (updates.tags !== void 0) metadata.tags = updates.tags;
|
|
893
|
-
for (const row of result.rows) {
|
|
894
|
-
await this.recordJobEvent(row.id, "edited" /* Edited */, metadata);
|
|
895
|
-
}
|
|
896
|
-
log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
|
|
897
|
-
return editedCount;
|
|
898
|
-
} catch (error) {
|
|
899
|
-
log(`Error editing pending jobs: ${error}`);
|
|
900
|
-
throw error;
|
|
901
|
-
} finally {
|
|
902
|
-
client.release();
|
|
903
|
-
}
|
|
904
|
-
}
|
|
905
|
-
async cleanupOldJobs(daysToKeep = 30) {
|
|
906
|
-
const client = await this.pool.connect();
|
|
907
|
-
try {
|
|
908
|
-
const result = await client.query(
|
|
909
|
-
`
|
|
910
|
-
DELETE FROM job_queue
|
|
911
|
-
WHERE status = 'completed'
|
|
912
|
-
AND updated_at < NOW() - INTERVAL '1 day' * $1::int
|
|
913
|
-
RETURNING id
|
|
914
|
-
`,
|
|
915
|
-
[daysToKeep]
|
|
916
|
-
);
|
|
917
|
-
log(`Deleted ${result.rowCount} old jobs`);
|
|
918
|
-
return result.rowCount || 0;
|
|
919
|
-
} catch (error) {
|
|
920
|
-
log(`Error cleaning up old jobs: ${error}`);
|
|
921
|
-
throw error;
|
|
922
|
-
} finally {
|
|
923
|
-
client.release();
|
|
924
|
-
}
|
|
925
|
-
}
|
|
926
|
-
async cleanupOldJobEvents(daysToKeep = 30) {
|
|
927
|
-
const client = await this.pool.connect();
|
|
928
|
-
try {
|
|
929
|
-
const result = await client.query(
|
|
930
|
-
`
|
|
931
|
-
DELETE FROM job_events
|
|
932
|
-
WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
|
|
933
|
-
RETURNING id
|
|
934
|
-
`,
|
|
935
|
-
[daysToKeep]
|
|
936
|
-
);
|
|
937
|
-
log(`Deleted ${result.rowCount} old job events`);
|
|
938
|
-
return result.rowCount || 0;
|
|
939
|
-
} catch (error) {
|
|
940
|
-
log(`Error cleaning up old job events: ${error}`);
|
|
941
|
-
throw error;
|
|
942
|
-
} finally {
|
|
943
|
-
client.release();
|
|
944
|
-
}
|
|
945
|
-
}
|
|
946
|
-
async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
|
|
947
|
-
const client = await this.pool.connect();
|
|
948
|
-
try {
|
|
949
|
-
const result = await client.query(
|
|
950
|
-
`
|
|
951
|
-
UPDATE job_queue
|
|
952
|
-
SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
|
|
953
|
-
WHERE status = 'processing'
|
|
954
|
-
AND locked_at < NOW() - GREATEST(
|
|
955
|
-
INTERVAL '1 minute' * $1::int,
|
|
956
|
-
INTERVAL '1 millisecond' * COALESCE(timeout_ms, 0)
|
|
957
|
-
)
|
|
958
|
-
RETURNING id
|
|
959
|
-
`,
|
|
960
|
-
[maxProcessingTimeMinutes]
|
|
961
|
-
);
|
|
962
|
-
log(`Reclaimed ${result.rowCount} stuck jobs`);
|
|
963
|
-
return result.rowCount || 0;
|
|
964
|
-
} catch (error) {
|
|
965
|
-
log(`Error reclaiming stuck jobs: ${error}`);
|
|
966
|
-
throw error;
|
|
967
|
-
} finally {
|
|
968
|
-
client.release();
|
|
969
|
-
}
|
|
970
|
-
}
|
|
971
|
-
// ── Internal helpers ──────────────────────────────────────────────────
|
|
972
|
-
/**
|
|
973
|
-
* Batch-insert multiple job events in a single query.
|
|
974
|
-
* More efficient than individual recordJobEvent calls.
|
|
975
|
-
*/
|
|
976
|
-
async recordJobEventsBatch(events) {
|
|
977
|
-
if (events.length === 0) return;
|
|
978
|
-
const client = await this.pool.connect();
|
|
979
|
-
try {
|
|
980
|
-
const values = [];
|
|
981
|
-
const params = [];
|
|
982
|
-
let paramIdx = 1;
|
|
983
|
-
for (const event of events) {
|
|
984
|
-
values.push(`($${paramIdx++}, $${paramIdx++}, $${paramIdx++})`);
|
|
985
|
-
params.push(
|
|
986
|
-
event.jobId,
|
|
987
|
-
event.eventType,
|
|
988
|
-
event.metadata ? JSON.stringify(event.metadata) : null
|
|
989
|
-
);
|
|
990
|
-
}
|
|
991
|
-
await client.query(
|
|
992
|
-
`INSERT INTO job_events (job_id, event_type, metadata) VALUES ${values.join(", ")}`,
|
|
993
|
-
params
|
|
994
|
-
);
|
|
995
|
-
} catch (error) {
|
|
996
|
-
log(`Error recording batch job events: ${error}`);
|
|
997
|
-
} finally {
|
|
998
|
-
client.release();
|
|
999
|
-
}
|
|
1000
|
-
}
|
|
1001
|
-
async setPendingReasonForUnpickedJobs(reason, jobType) {
|
|
1002
|
-
const client = await this.pool.connect();
|
|
1003
|
-
try {
|
|
1004
|
-
let jobTypeFilter = "";
|
|
1005
|
-
const params = [reason];
|
|
1006
|
-
if (jobType) {
|
|
1007
|
-
if (Array.isArray(jobType)) {
|
|
1008
|
-
jobTypeFilter = ` AND job_type = ANY($2)`;
|
|
1009
|
-
params.push(jobType);
|
|
1010
|
-
} else {
|
|
1011
|
-
jobTypeFilter = ` AND job_type = $2`;
|
|
1012
|
-
params.push(jobType);
|
|
1013
|
-
}
|
|
1014
|
-
}
|
|
1015
|
-
await client.query(
|
|
1016
|
-
`UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
|
|
1017
|
-
params
|
|
1018
|
-
);
|
|
1019
|
-
} finally {
|
|
1020
|
-
client.release();
|
|
1021
|
-
}
|
|
1022
|
-
}
|
|
1023
|
-
};
|
|
1024
|
-
var recordJobEvent = async (pool, jobId, eventType, metadata) => new PostgresBackend(pool).recordJobEvent(jobId, eventType, metadata);
|
|
1025
|
-
var waitJob = async (pool, jobId, options) => {
|
|
1026
|
-
const client = await pool.connect();
|
|
1027
|
-
try {
|
|
1028
|
-
const result = await client.query(
|
|
1029
|
-
`
|
|
1030
|
-
UPDATE job_queue
|
|
1031
|
-
SET status = 'waiting',
|
|
1032
|
-
wait_until = $2,
|
|
1033
|
-
wait_token_id = $3,
|
|
1034
|
-
step_data = $4,
|
|
1035
|
-
locked_at = NULL,
|
|
1036
|
-
locked_by = NULL,
|
|
1037
|
-
updated_at = NOW()
|
|
1038
|
-
WHERE id = $1 AND status = 'processing'
|
|
1039
|
-
`,
|
|
1040
|
-
[
|
|
1041
|
-
jobId,
|
|
1042
|
-
options.waitUntil ?? null,
|
|
1043
|
-
options.waitTokenId ?? null,
|
|
1044
|
-
JSON.stringify(options.stepData)
|
|
1045
|
-
]
|
|
1046
|
-
);
|
|
1047
|
-
if (result.rowCount === 0) {
|
|
1048
|
-
log(
|
|
1049
|
-
`Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
|
|
1050
|
-
);
|
|
1051
|
-
return;
|
|
1052
|
-
}
|
|
1053
|
-
await recordJobEvent(pool, jobId, "waiting" /* Waiting */, {
|
|
1054
|
-
waitUntil: options.waitUntil?.toISOString() ?? null,
|
|
1055
|
-
waitTokenId: options.waitTokenId ?? null
|
|
1056
|
-
});
|
|
1057
|
-
log(`Job ${jobId} set to waiting`);
|
|
1058
|
-
} catch (error) {
|
|
1059
|
-
log(`Error setting job ${jobId} to waiting: ${error}`);
|
|
1060
|
-
throw error;
|
|
1061
|
-
} finally {
|
|
1062
|
-
client.release();
|
|
1063
|
-
}
|
|
1064
|
-
};
|
|
1065
|
-
var updateStepData = async (pool, jobId, stepData) => {
|
|
1066
|
-
const client = await pool.connect();
|
|
1067
|
-
try {
|
|
1068
|
-
await client.query(
|
|
1069
|
-
`UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
|
|
1070
|
-
[jobId, JSON.stringify(stepData)]
|
|
1071
|
-
);
|
|
1072
|
-
} catch (error) {
|
|
1073
|
-
log(`Error updating step_data for job ${jobId}: ${error}`);
|
|
1074
|
-
} finally {
|
|
1075
|
-
client.release();
|
|
1076
|
-
}
|
|
1077
|
-
};
|
|
1078
|
-
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
|
|
1079
|
-
function parseTimeoutString(timeout) {
|
|
1080
|
-
const match = timeout.match(/^(\d+)(s|m|h|d)$/);
|
|
1081
|
-
if (!match) {
|
|
1082
|
-
throw new Error(
|
|
1083
|
-
`Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
|
|
1084
|
-
);
|
|
1085
|
-
}
|
|
1086
|
-
const value = parseInt(match[1], 10);
|
|
1087
|
-
const unit = match[2];
|
|
1088
|
-
let ms;
|
|
1089
|
-
switch (unit) {
|
|
1090
|
-
case "s":
|
|
1091
|
-
ms = value * 1e3;
|
|
1092
|
-
break;
|
|
1093
|
-
case "m":
|
|
1094
|
-
ms = value * 60 * 1e3;
|
|
1095
|
-
break;
|
|
1096
|
-
case "h":
|
|
1097
|
-
ms = value * 60 * 60 * 1e3;
|
|
1098
|
-
break;
|
|
1099
|
-
case "d":
|
|
1100
|
-
ms = value * 24 * 60 * 60 * 1e3;
|
|
1101
|
-
break;
|
|
1102
|
-
default:
|
|
1103
|
-
throw new Error(`Unknown timeout unit: "${unit}"`);
|
|
1104
|
-
}
|
|
1105
|
-
if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
|
|
1106
|
-
throw new Error(
|
|
1107
|
-
`Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
|
|
1108
|
-
);
|
|
1109
|
-
}
|
|
1110
|
-
return ms;
|
|
1111
|
-
}
|
|
1112
|
-
var createWaitpoint = async (pool, jobId, options) => {
|
|
1113
|
-
const client = await pool.connect();
|
|
1114
|
-
try {
|
|
1115
|
-
const id = `wp_${randomUUID()}`;
|
|
1116
|
-
let timeoutAt = null;
|
|
1117
|
-
if (options?.timeout) {
|
|
1118
|
-
const ms = parseTimeoutString(options.timeout);
|
|
1119
|
-
timeoutAt = new Date(Date.now() + ms);
|
|
1120
|
-
}
|
|
1121
|
-
await client.query(
|
|
1122
|
-
`INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
|
|
1123
|
-
[id, jobId, timeoutAt, options?.tags ?? null]
|
|
1124
|
-
);
|
|
1125
|
-
log(`Created waitpoint ${id} for job ${jobId}`);
|
|
1126
|
-
return { id };
|
|
1127
|
-
} catch (error) {
|
|
1128
|
-
log(`Error creating waitpoint: ${error}`);
|
|
1129
|
-
throw error;
|
|
1130
|
-
} finally {
|
|
1131
|
-
client.release();
|
|
1132
|
-
}
|
|
1133
|
-
};
|
|
1134
|
-
var completeWaitpoint = async (pool, tokenId, data) => {
|
|
1135
|
-
const client = await pool.connect();
|
|
1136
|
-
try {
|
|
1137
|
-
await client.query("BEGIN");
|
|
1138
|
-
const wpResult = await client.query(
|
|
1139
|
-
`UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
|
|
1140
|
-
WHERE id = $1 AND status = 'waiting'
|
|
1141
|
-
RETURNING job_id`,
|
|
1142
|
-
[tokenId, data != null ? JSON.stringify(data) : null]
|
|
1143
|
-
);
|
|
1144
|
-
if (wpResult.rows.length === 0) {
|
|
1145
|
-
await client.query("ROLLBACK");
|
|
1146
|
-
log(`Waitpoint ${tokenId} not found or already completed`);
|
|
1147
|
-
return;
|
|
1148
|
-
}
|
|
1149
|
-
const jobId = wpResult.rows[0].job_id;
|
|
1150
|
-
if (jobId != null) {
|
|
1151
|
-
await client.query(
|
|
1152
|
-
`UPDATE job_queue
|
|
1153
|
-
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1154
|
-
WHERE id = $1 AND status = 'waiting'`,
|
|
1155
|
-
[jobId]
|
|
1156
|
-
);
|
|
1157
|
-
}
|
|
1158
|
-
await client.query("COMMIT");
|
|
1159
|
-
log(`Completed waitpoint ${tokenId} for job ${jobId}`);
|
|
1160
|
-
} catch (error) {
|
|
1161
|
-
await client.query("ROLLBACK");
|
|
1162
|
-
log(`Error completing waitpoint ${tokenId}: ${error}`);
|
|
1163
|
-
throw error;
|
|
1164
|
-
} finally {
|
|
1165
|
-
client.release();
|
|
1166
|
-
}
|
|
1167
|
-
};
|
|
1168
|
-
var getWaitpoint = async (pool, tokenId) => {
|
|
1169
|
-
const client = await pool.connect();
|
|
1170
|
-
try {
|
|
1171
|
-
const result = await client.query(
|
|
1172
|
-
`SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
|
|
1173
|
-
[tokenId]
|
|
1174
|
-
);
|
|
1175
|
-
if (result.rows.length === 0) return null;
|
|
1176
|
-
return result.rows[0];
|
|
1177
|
-
} catch (error) {
|
|
1178
|
-
log(`Error getting waitpoint ${tokenId}: ${error}`);
|
|
1179
|
-
throw error;
|
|
1180
|
-
} finally {
|
|
1181
|
-
client.release();
|
|
1182
|
-
}
|
|
1183
|
-
};
|
|
1184
|
-
var expireTimedOutWaitpoints = async (pool) => {
|
|
1185
|
-
const client = await pool.connect();
|
|
1186
|
-
try {
|
|
1187
|
-
await client.query("BEGIN");
|
|
1188
|
-
const result = await client.query(
|
|
1189
|
-
`UPDATE waitpoints
|
|
1190
|
-
SET status = 'timed_out'
|
|
1191
|
-
WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
|
|
1192
|
-
RETURNING id, job_id`
|
|
1193
|
-
);
|
|
1194
|
-
for (const row of result.rows) {
|
|
1195
|
-
if (row.job_id != null) {
|
|
1196
|
-
await client.query(
|
|
1197
|
-
`UPDATE job_queue
|
|
1198
|
-
SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
|
|
1199
|
-
WHERE id = $1 AND status = 'waiting'`,
|
|
1200
|
-
[row.job_id]
|
|
1201
|
-
);
|
|
1202
|
-
}
|
|
1203
|
-
}
|
|
1204
|
-
await client.query("COMMIT");
|
|
1205
|
-
const count = result.rowCount || 0;
|
|
1206
|
-
if (count > 0) {
|
|
1207
|
-
log(`Expired ${count} timed-out waitpoints`);
|
|
1208
|
-
}
|
|
1209
|
-
return count;
|
|
1210
|
-
} catch (error) {
|
|
1211
|
-
await client.query("ROLLBACK");
|
|
1212
|
-
log(`Error expiring timed-out waitpoints: ${error}`);
|
|
1213
|
-
throw error;
|
|
1214
|
-
} finally {
|
|
1215
|
-
client.release();
|
|
1216
|
-
}
|
|
1217
|
-
};
|
|
1218
|
-
function tryExtractPool(backend) {
|
|
1219
|
-
if (backend instanceof PostgresBackend) {
|
|
1220
|
-
return backend.getPool();
|
|
1221
|
-
}
|
|
1222
|
-
return null;
|
|
1223
|
-
}
|
|
1224
|
-
function buildBasicContext(backend, jobId, baseCtx) {
|
|
1225
|
-
const waitError = () => new Error(
|
|
1226
|
-
"Wait features (waitFor, waitUntil, createToken, waitForToken, ctx.run) are currently only supported with the PostgreSQL backend."
|
|
1227
|
-
);
|
|
1228
|
-
return {
|
|
1229
|
-
prolong: baseCtx.prolong,
|
|
1230
|
-
onTimeout: baseCtx.onTimeout,
|
|
1231
|
-
run: async (_stepName, fn) => {
|
|
1232
|
-
return fn();
|
|
1233
|
-
},
|
|
1234
|
-
waitFor: async () => {
|
|
1235
|
-
throw waitError();
|
|
1236
|
-
},
|
|
1237
|
-
waitUntil: async () => {
|
|
1238
|
-
throw waitError();
|
|
1239
|
-
},
|
|
1240
|
-
createToken: async () => {
|
|
1241
|
-
throw waitError();
|
|
1242
|
-
},
|
|
1243
|
-
waitForToken: async () => {
|
|
1244
|
-
throw waitError();
|
|
1245
|
-
},
|
|
1246
|
-
setProgress: async (percent) => {
|
|
1247
|
-
if (percent < 0 || percent > 100)
|
|
1248
|
-
throw new Error("Progress must be between 0 and 100");
|
|
1249
|
-
await backend.updateProgress(jobId, Math.round(percent));
|
|
1250
|
-
}
|
|
1251
|
-
};
|
|
1252
|
-
}
|
|
1253
|
-
function validateHandlerSerializable(handler, jobType) {
|
|
1254
|
-
try {
|
|
1255
|
-
const handlerString = handler.toString();
|
|
1256
|
-
if (handlerString.includes("this.") && !handlerString.match(/\([^)]*this[^)]*\)/)) {
|
|
1257
|
-
throw new Error(
|
|
1258
|
-
`Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
|
|
1259
|
-
);
|
|
1260
|
-
}
|
|
1261
|
-
if (handlerString.includes("[native code]")) {
|
|
1262
|
-
throw new Error(
|
|
1263
|
-
`Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
|
|
1264
|
-
);
|
|
1265
|
-
}
|
|
1266
|
-
try {
|
|
1267
|
-
new Function("return " + handlerString);
|
|
1268
|
-
} catch (parseError) {
|
|
1269
|
-
throw new Error(
|
|
1270
|
-
`Handler for job type "${jobType}" cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
|
|
1271
|
-
);
|
|
1272
|
-
}
|
|
1273
|
-
} catch (error) {
|
|
1274
|
-
if (error instanceof Error) {
|
|
1275
|
-
throw error;
|
|
1276
|
-
}
|
|
1277
|
-
throw new Error(
|
|
1278
|
-
`Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
|
|
1279
|
-
);
|
|
1280
|
-
}
|
|
1281
|
-
}
|
|
1282
|
-
async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
|
|
1283
|
-
validateHandlerSerializable(handler, jobType);
|
|
1284
|
-
return new Promise((resolve, reject) => {
|
|
1285
|
    const workerCode = `
      (function() {
        const { parentPort, workerData } = require('worker_threads');
        const { handlerCode, payload, timeoutMs } = workerData;

        // Create an AbortController for the handler
        const controller = new AbortController();
        const signal = controller.signal;

        // Set up timeout
        const timeoutId = setTimeout(() => {
          controller.abort();
          parentPort.postMessage({ type: 'timeout' });
        }, timeoutMs);

        try {
          // Execute the handler
          // Note: This uses Function constructor which requires the handler to be serializable.
          // The handler should be validated before reaching this point.
          let handlerFn;
          try {
            // Wrap handlerCode in parentheses to ensure it's treated as an expression
            // This handles both arrow functions and regular functions
            const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
              ? handlerCode
              : '(' + handlerCode + ')';
            handlerFn = new Function('return ' + wrappedCode)();
          } catch (parseError) {
            clearTimeout(timeoutId);
            parentPort.postMessage({
              type: 'error',
              error: {
                message: 'Handler cannot be deserialized in worker thread. ' +
                  'Ensure your handler is a standalone function without closures over external variables. ' +
                  'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
                stack: parseError instanceof Error ? parseError.stack : undefined,
                name: 'SerializationError',
              },
            });
            return;
          }

          // Ensure handlerFn is actually a function
          if (typeof handlerFn !== 'function') {
            clearTimeout(timeoutId);
            parentPort.postMessage({
              type: 'error',
              error: {
                message: 'Handler deserialization did not produce a function. ' +
                  'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
                name: 'SerializationError',
              },
            });
            return;
          }

          handlerFn(payload, signal)
            .then(() => {
              clearTimeout(timeoutId);
              parentPort.postMessage({ type: 'success' });
            })
            .catch((error) => {
              clearTimeout(timeoutId);
              parentPort.postMessage({
                type: 'error',
                error: {
                  message: error.message,
                  stack: error.stack,
                  name: error.name,
                },
              });
            });
        } catch (error) {
          clearTimeout(timeoutId);
          parentPort.postMessage({
            type: 'error',
            error: {
              message: error.message,
              stack: error.stack,
              name: error.name,
            },
          });
        }
      })();
    `;
    const worker = new Worker(workerCode, {
      eval: true,
      workerData: {
        handlerCode: handler.toString(),
        payload,
        timeoutMs
      }
    });
    let resolved = false;
    worker.on("message", (message) => {
      if (resolved) return;
      resolved = true;
      if (message.type === "success") {
        resolve();
      } else if (message.type === "timeout") {
        const timeoutError = new Error(
          `Job timed out after ${timeoutMs} ms and was forcefully terminated`
        );
        timeoutError.failureReason = "timeout" /* Timeout */;
        reject(timeoutError);
      } else if (message.type === "error") {
        const error = new Error(message.error.message);
        error.stack = message.error.stack;
        error.name = message.error.name;
        reject(error);
      }
    });
    worker.on("error", (error) => {
      if (resolved) return;
      resolved = true;
      reject(error);
    });
    worker.on("exit", (code) => {
      if (resolved) return;
      if (code !== 0) {
        resolved = true;
        reject(new Error(`Worker stopped with exit code ${code}`));
      }
    });
    setTimeout(() => {
      if (!resolved) {
        resolved = true;
        worker.terminate().then(() => {
          const timeoutError = new Error(
            `Job timed out after ${timeoutMs} ms and was forcefully terminated`
          );
          timeoutError.failureReason = "timeout" /* Timeout */;
          reject(timeoutError);
        }).catch((err) => {
          reject(err);
        });
      }
    }, timeoutMs + 100);
  });
}
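The error message in the worker code above points at a real constraint worth spelling out: when forceKillOnTimeout is enabled, the handler travels into the worker thread as source text via handler.toString() and is rebuilt with new Function, so it must be a standalone function. A minimal sketch, with a hypothetical job handler and payload shape invented for illustration:

// Hypothetical handler; the payload shape is an assumption for illustration.
// Safe for forceKillOnTimeout: it references nothing outside its own body,
// so new Function('return ' + handler.toString())() recreates it intact.
const resizePhoto = async (payload: { url: string }, signal: AbortSignal) => {
  const res = await fetch(payload.url, { signal }); // global fetch, Node 18+
  if (!res.ok) throw new Error(`Fetch failed: ${res.status}`);
};

// Unsafe: `logger` is captured from module scope, so the copy deserialized
// in the worker thread throws a ReferenceError when it runs.
// const logger = console;
// const bad = async (payload: { url: string }) => logger.log(payload.url);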
function calculateWaitUntil(duration) {
  const now = Date.now();
  let ms = 0;
  if (duration.seconds) ms += duration.seconds * 1e3;
  if (duration.minutes) ms += duration.minutes * 60 * 1e3;
  if (duration.hours) ms += duration.hours * 60 * 60 * 1e3;
  if (duration.days) ms += duration.days * 24 * 60 * 60 * 1e3;
  if (duration.weeks) ms += duration.weeks * 7 * 24 * 60 * 60 * 1e3;
  if (duration.months) ms += duration.months * 30 * 24 * 60 * 60 * 1e3;
  if (duration.years) ms += duration.years * 365 * 24 * 60 * 60 * 1e3;
  if (ms <= 0) {
    throw new Error(
      "waitFor duration must be positive. Provide at least one positive duration field."
    );
  }
  return new Date(now + ms);
}
async function resolveCompletedWaits(pool, stepData) {
  for (const key of Object.keys(stepData)) {
    if (!key.startsWith("__wait_")) continue;
    const entry = stepData[key];
    if (!entry || typeof entry !== "object" || entry.completed) continue;
    if (entry.type === "duration" || entry.type === "date") {
      stepData[key] = { ...entry, completed: true };
    } else if (entry.type === "token" && entry.tokenId) {
      const wp = await getWaitpoint(pool, entry.tokenId);
      if (wp && wp.status === "completed") {
        stepData[key] = {
          ...entry,
          completed: true,
          result: { ok: true, output: wp.output }
        };
      } else if (wp && wp.status === "timed_out") {
        stepData[key] = {
          ...entry,
          completed: true,
          result: { ok: false, error: "Token timed out" }
        };
      }
    }
  }
}
function buildWaitContext(backend, pool, jobId, stepData, baseCtx) {
  let waitCounter = 0;
  const ctx = {
    prolong: baseCtx.prolong,
    onTimeout: baseCtx.onTimeout,
    run: async (stepName, fn) => {
      const cached = stepData[stepName];
      if (cached && typeof cached === "object" && cached.__completed) {
        log(`Step "${stepName}" replayed from cache for job ${jobId}`);
        return cached.result;
      }
      const result = await fn();
      stepData[stepName] = { __completed: true, result };
      await updateStepData(pool, jobId, stepData);
      return result;
    },
    waitFor: async (duration) => {
      const waitKey = `__wait_${waitCounter++}`;
      const cached = stepData[waitKey];
      if (cached && typeof cached === "object" && cached.completed) {
        log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
        return;
      }
      const waitUntilDate = calculateWaitUntil(duration);
      stepData[waitKey] = { type: "duration", completed: false };
      throw new WaitSignal("duration", waitUntilDate, void 0, stepData);
    },
    waitUntil: async (date) => {
      const waitKey = `__wait_${waitCounter++}`;
      const cached = stepData[waitKey];
      if (cached && typeof cached === "object" && cached.completed) {
        log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
        return;
      }
      stepData[waitKey] = { type: "date", completed: false };
      throw new WaitSignal("date", date, void 0, stepData);
    },
    createToken: async (options) => {
      const token = await createWaitpoint(pool, jobId, options);
      return token;
    },
    waitForToken: async (tokenId) => {
      const waitKey = `__wait_${waitCounter++}`;
      const cached = stepData[waitKey];
      if (cached && typeof cached === "object" && cached.completed) {
        log(
          `Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
        );
        return cached.result;
      }
      const wp = await getWaitpoint(pool, tokenId);
      if (wp && wp.status === "completed") {
        const result = {
          ok: true,
          output: wp.output
        };
        stepData[waitKey] = {
          type: "token",
          tokenId,
          completed: true,
          result
        };
        await updateStepData(pool, jobId, stepData);
        return result;
      }
      if (wp && wp.status === "timed_out") {
        const result = {
          ok: false,
          error: "Token timed out"
        };
        stepData[waitKey] = {
          type: "token",
          tokenId,
          completed: true,
          result
        };
        await updateStepData(pool, jobId, stepData);
        return result;
      }
      stepData[waitKey] = { type: "token", tokenId, completed: false };
      throw new WaitSignal("token", void 0, tokenId, stepData);
    },
    setProgress: async (percent) => {
      if (percent < 0 || percent > 100)
        throw new Error("Progress must be between 0 and 100");
      await backend.updateProgress(jobId, Math.round(percent));
    }
  };
  return ctx;
}
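A sketch of what a handler using this wait context might look like, assuming a hypothetical reminder job; sendEmail and the WaitCtx type are illustrative stand-ins, not the package's exported names. ctx.run persists each step's result into stepData, and ctx.waitFor throws a WaitSignal, so on the re-run after the wait, completed steps replay from cache instead of executing again:

declare function sendEmail(to: string, body: string): Promise<string>; // assumed helper

// Structural sketch of the context built above, not the library's own type.
type WaitCtx = {
  run<T>(step: string, fn: () => Promise<T>): Promise<T>;
  waitFor(d: { seconds?: number; minutes?: number; hours?: number; days?: number }): Promise<void>;
};

const sendReminder = async (
  payload: { email: string },
  _signal: AbortSignal,
  ctx: WaitCtx
) => {
  // Runs once; on re-execution the cached result is replayed from stepData.
  const messageId = await ctx.run("send-welcome", () =>
    sendEmail(payload.email, "Welcome!")
  );

  // Throws a WaitSignal internally; the job parks until the wait elapses.
  await ctx.waitFor({ days: 1 });

  // Reached only on the replay run, with messageId restored from the cache.
  await ctx.run("send-follow-up", () =>
    sendEmail(payload.email, `Re: ${messageId}`)
  );
};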
async function processJobWithHandlers(backend, job, jobHandlers) {
  const handler = jobHandlers[job.jobType];
  if (!handler) {
    await backend.setPendingReasonForUnpickedJobs(
      `No handler registered for job type: ${job.jobType}`,
      job.jobType
    );
    await backend.failJob(
      job.id,
      new Error(`No handler registered for job type: ${job.jobType}`),
      "no_handler" /* NoHandler */
    );
    return;
  }
  const stepData = { ...job.stepData || {} };
  const pool = tryExtractPool(backend);
  const hasStepHistory = Object.keys(stepData).some(
    (k) => k.startsWith("__wait_")
  );
  if (hasStepHistory && pool) {
    await resolveCompletedWaits(pool, stepData);
    await updateStepData(pool, job.id, stepData);
  }
  const timeoutMs = job.timeoutMs ?? void 0;
  const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
  let timeoutId;
  const controller = new AbortController();
  try {
    if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
      await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
    } else {
      let onTimeoutCallback;
      let timeoutReject;
      const armTimeout = (ms) => {
        if (timeoutId) clearTimeout(timeoutId);
        timeoutId = setTimeout(() => {
          if (onTimeoutCallback) {
            try {
              const extension = onTimeoutCallback();
              if (typeof extension === "number" && extension > 0) {
                backend.prolongJob(job.id).catch(() => {
                });
                armTimeout(extension);
                return;
              }
            } catch (callbackError) {
              log(
                `onTimeout callback threw for job ${job.id}: ${callbackError}`
              );
            }
          }
          controller.abort();
          const timeoutError = new Error(`Job timed out after ${ms} ms`);
          timeoutError.failureReason = "timeout" /* Timeout */;
          if (timeoutReject) {
            timeoutReject(timeoutError);
          }
        }, ms);
      };
      const hasTimeout = timeoutMs != null && timeoutMs > 0;
      const baseCtx = hasTimeout ? {
        prolong: (ms) => {
          const duration = ms ?? timeoutMs;
          if (duration != null && duration > 0) {
            armTimeout(duration);
            backend.prolongJob(job.id).catch(() => {
            });
          }
        },
        onTimeout: (callback) => {
          onTimeoutCallback = callback;
        }
      } : {
        prolong: () => {
          log("prolong() called but ignored: job has no timeout set");
        },
        onTimeout: () => {
          log("onTimeout() called but ignored: job has no timeout set");
        }
      };
      const ctx = pool ? buildWaitContext(backend, pool, job.id, stepData, baseCtx) : buildBasicContext(backend, job.id, baseCtx);
      if (forceKillOnTimeout && !hasTimeout) {
        log(
          `forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
        );
      }
      const jobPromise = handler(job.payload, controller.signal, ctx);
      if (hasTimeout) {
        await Promise.race([
          jobPromise,
          new Promise((_, reject) => {
            timeoutReject = reject;
            armTimeout(timeoutMs);
          })
        ]);
      } else {
        await jobPromise;
      }
    }
    if (timeoutId) clearTimeout(timeoutId);
    await backend.completeJob(job.id);
  } catch (error) {
    if (timeoutId) clearTimeout(timeoutId);
    if (error instanceof WaitSignal) {
      if (!pool) {
        await backend.failJob(
          job.id,
          new Error(
            "WaitSignal received but wait features require the PostgreSQL backend."
          ),
          "handler_error" /* HandlerError */
        );
        return;
      }
      log(
        `Job ${job.id} entering wait: type=${error.type}, waitUntil=${error.waitUntil?.toISOString() ?? "none"}, tokenId=${error.tokenId ?? "none"}`
      );
      await waitJob(pool, job.id, {
        waitUntil: error.waitUntil,
        waitTokenId: error.tokenId,
        stepData: error.stepData
      });
      return;
    }
    console.error(`Error processing job ${job.id}:`, error);
    let failureReason = "handler_error" /* HandlerError */;
    if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
      failureReason = "timeout" /* Timeout */;
    }
    await backend.failJob(
      job.id,
      error instanceof Error ? error : new Error(String(error)),
      failureReason
    );
  }
}
async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError) {
  const jobs = await backend.getNextBatch(
    workerId,
    batchSize,
    jobType
  );
  if (!concurrency || concurrency >= jobs.length) {
    await Promise.all(
      jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers))
    );
    return jobs.length;
  }
  let idx = 0;
  let running = 0;
  let finished = 0;
  return new Promise((resolve, reject) => {
    const next = () => {
      if (finished === jobs.length) return resolve(jobs.length);
      while (running < concurrency && idx < jobs.length) {
        const job = jobs[idx++];
        running++;
        processJobWithHandlers(backend, job, jobHandlers).then(() => {
          running--;
          finished++;
          next();
        }).catch((err) => {
          running--;
          finished++;
          if (onError) {
            onError(err instanceof Error ? err : new Error(String(err)));
          }
          next();
        });
      }
    };
    next();
  });
}
var createProcessor = (backend, handlers, options = {}) => {
  const {
    workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
    batchSize = 10,
    pollInterval = 5e3,
    onError = (error) => console.error("Job processor error:", error),
    jobType,
    concurrency = 3
  } = options;
  let running = false;
  let intervalId = null;
  let currentBatchPromise = null;
  setLogContext(options.verbose ?? false);
  const processJobs = async () => {
    if (!running) return 0;
    log(
      `Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
    );
    try {
      const processed = await processBatchWithHandlers(
        backend,
        workerId,
        batchSize,
        jobType,
        handlers,
        concurrency,
        onError
      );
      return processed;
    } catch (error) {
      onError(error instanceof Error ? error : new Error(String(error)));
    }
    return 0;
  };
  return {
    /**
     * Start the job processor in the background.
     * - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
     * - You have to call the stop method to stop the processor.
     */
    startInBackground: () => {
      if (running) return;
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      const scheduleNext = (immediate) => {
        if (!running) return;
        if (immediate) {
          intervalId = setTimeout(loop, 0);
        } else {
          intervalId = setTimeout(loop, pollInterval);
        }
      };
      const loop = async () => {
        if (!running) return;
        currentBatchPromise = processJobs();
        const processed = await currentBatchPromise;
        currentBatchPromise = null;
        scheduleNext(processed === batchSize);
      };
      loop();
    },
    /**
     * Stop the job processor that runs in the background.
     * Does not wait for in-flight jobs.
     */
    stop: () => {
      log(`Stopping job processor with workerId: ${workerId}`);
      running = false;
      if (intervalId) {
        clearTimeout(intervalId);
        intervalId = null;
      }
    },
    /**
     * Stop the job processor and wait for all in-flight jobs to complete.
     * Useful for graceful shutdown (e.g., SIGTERM handling).
     */
    stopAndDrain: async (drainTimeoutMs = 3e4) => {
      log(`Stopping and draining job processor with workerId: ${workerId}`);
      running = false;
      if (intervalId) {
        clearTimeout(intervalId);
        intervalId = null;
      }
      if (currentBatchPromise) {
        await Promise.race([
          currentBatchPromise.catch(() => {
          }),
          new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
        ]);
        currentBatchPromise = null;
      }
      log(`Job processor ${workerId} drained`);
    },
    /**
     * Start the job processor synchronously.
     * - This will process all jobs immediately and then stop.
     * - The pollInterval is ignored.
     */
    start: async () => {
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      const processed = await processJobs();
      running = false;
      return processed;
    },
    isRunning: () => running
  };
};
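A usage sketch for the processor returned above; backend and handlers are assumed to be constructed elsewhere and the option values are illustrative. startInBackground polls every pollInterval ms, re-polling immediately whenever a batch came back full, and stopAndDrain is the piece to wire into a shutdown signal:

// `backend` and `handlers` are assumed to exist; the options are illustrative.
const processor = createProcessor(backend, handlers, {
  batchSize: 10,
  pollInterval: 5000,
  concurrency: 3,
  onError: (err) => console.error("processor error:", err),
});

processor.startInBackground();

// Graceful shutdown: stop polling, then wait up to 30s for in-flight jobs.
process.on("SIGTERM", async () => {
  await processor.stopAndDrain(30_000);
  process.exit(0);
});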
function loadPemOrFile(value) {
  if (!value) return void 0;
  if (value.startsWith("file://")) {
    const filePath = value.slice(7);
    return fs.readFileSync(filePath, "utf8");
  }
  return value;
}
var createPool = (config) => {
  let searchPath;
  let ssl = void 0;
  let customCA;
  let sslmode;
  if (config.connectionString) {
    try {
      const url = new URL(config.connectionString);
      searchPath = url.searchParams.get("search_path") || void 0;
      sslmode = url.searchParams.get("sslmode") || void 0;
      if (sslmode === "no-verify") {
        ssl = { rejectUnauthorized: false };
      }
    } catch (e) {
      const parsed = parse(config.connectionString);
      if (parsed.options) {
        const match = parsed.options.match(/search_path=([^\s]+)/);
        if (match) {
          searchPath = match[1];
        }
      }
      sslmode = typeof parsed.sslmode === "string" ? parsed.sslmode : void 0;
      if (sslmode === "no-verify") {
        ssl = { rejectUnauthorized: false };
      }
    }
  }
  if (config.ssl) {
    if (typeof config.ssl.ca === "string") {
      customCA = config.ssl.ca;
    } else if (typeof process.env.PGSSLROOTCERT === "string") {
      customCA = process.env.PGSSLROOTCERT;
    } else {
      customCA = void 0;
    }
    const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
    ssl = {
      ...ssl,
      ...caValue ? { ca: caValue } : {},
      cert: loadPemOrFile(
        typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
      ),
      key: loadPemOrFile(
        typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
      ),
      rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
    };
  }
  if (sslmode && customCA) {
    const warning = `
\x1B[33m**************************************************
\u26A0\uFE0F WARNING: SSL CONFIGURATION ISSUE
**************************************************
Both sslmode ('${sslmode}') is set in the connection string
and a custom CA is provided (via config.ssl.ca or PGSSLROOTCERT).
This combination may cause connection failures or unexpected behavior.

Recommended: Remove sslmode from the connection string when using a custom CA.
**************************************************\x1B[0m
`;
    console.warn(warning);
  }
  const pool = new Pool({
    ...config,
    ...ssl ? { ssl } : {}
  });
  if (searchPath) {
    pool.on("connect", (client) => {
      client.query(`SET search_path TO ${searchPath}`);
    });
  }
  return pool;
};

|
|
1924
|
-
var SCORE_RANGE = "1000000000000000";
|
|
1925
|
-
var ADD_JOB_SCRIPT = `
|
|
1926
|
-
local prefix = KEYS[1]
|
|
1927
|
-
local jobType = ARGV[1]
|
|
1928
|
-
local payloadJson = ARGV[2]
|
|
1929
|
-
local maxAttempts = tonumber(ARGV[3])
|
|
1930
|
-
local priority = tonumber(ARGV[4])
|
|
1931
|
-
local runAtMs = ARGV[5] -- "0" means now
|
|
1932
|
-
local timeoutMs = ARGV[6] -- "null" string if not set
|
|
1933
|
-
local forceKillOnTimeout = ARGV[7]
|
|
1934
|
-
local tagsJson = ARGV[8] -- "null" or JSON array string
|
|
1935
|
-
local idempotencyKey = ARGV[9] -- "null" string if not set
|
|
1936
|
-
local nowMs = tonumber(ARGV[10])
|
|
1937
|
-
|
|
1938
|
-
-- Idempotency check
|
|
1939
|
-
if idempotencyKey ~= "null" then
|
|
1940
|
-
local existing = redis.call('GET', prefix .. 'idempotency:' .. idempotencyKey)
|
|
1941
|
-
if existing then
|
|
1942
|
-
return existing
|
|
1943
|
-
end
|
|
1944
|
-
end
|
|
1945
|
-
|
|
1946
|
-
-- Generate ID
|
|
1947
|
-
local id = redis.call('INCR', prefix .. 'id_seq')
|
|
1948
|
-
local jobKey = prefix .. 'job:' .. id
|
|
1949
|
-
local runAt = runAtMs ~= "0" and tonumber(runAtMs) or nowMs
|
|
1950
|
-
|
|
1951
|
-
-- Store the job hash
|
|
1952
|
-
redis.call('HMSET', jobKey,
|
|
1953
|
-
'id', id,
|
|
1954
|
-
'jobType', jobType,
|
|
1955
|
-
'payload', payloadJson,
|
|
1956
|
-
'status', 'pending',
|
|
1957
|
-
'maxAttempts', maxAttempts,
|
|
1958
|
-
'attempts', 0,
|
|
1959
|
-
'priority', priority,
|
|
1960
|
-
'runAt', runAt,
|
|
1961
|
-
'timeoutMs', timeoutMs,
|
|
1962
|
-
'forceKillOnTimeout', forceKillOnTimeout,
|
|
1963
|
-
'createdAt', nowMs,
|
|
1964
|
-
'updatedAt', nowMs,
|
|
1965
|
-
'lockedAt', 'null',
|
|
1966
|
-
'lockedBy', 'null',
|
|
1967
|
-
'nextAttemptAt', 'null',
|
|
1968
|
-
'pendingReason', 'null',
|
|
1969
|
-
'errorHistory', '[]',
|
|
1970
|
-
'failureReason', 'null',
|
|
1971
|
-
'completedAt', 'null',
|
|
1972
|
-
'startedAt', 'null',
|
|
1973
|
-
'lastRetriedAt', 'null',
|
|
1974
|
-
'lastFailedAt', 'null',
|
|
1975
|
-
'lastCancelledAt', 'null',
|
|
1976
|
-
'tags', tagsJson,
|
|
1977
|
-
'idempotencyKey', idempotencyKey
|
|
1978
|
-
)
|
|
1979
|
-
|
|
1980
|
-
-- Status index
|
|
1981
|
-
redis.call('SADD', prefix .. 'status:pending', id)
|
|
1982
|
-
|
|
1983
|
-
-- Type index
|
|
1984
|
-
redis.call('SADD', prefix .. 'type:' .. jobType, id)
|
|
1985
|
-
|
|
1986
|
-
-- Tag indexes
|
|
1987
|
-
if tagsJson ~= "null" then
|
|
1988
|
-
local tags = cjson.decode(tagsJson)
|
|
1989
|
-
for _, tag in ipairs(tags) do
|
|
1990
|
-
redis.call('SADD', prefix .. 'tag:' .. tag, id)
|
|
1991
|
-
end
|
|
1992
|
-
-- Store tags for exact-match queries
|
|
1993
|
-
for _, tag in ipairs(tags) do
|
|
1994
|
-
redis.call('SADD', prefix .. 'job:' .. id .. ':tags', tag)
|
|
1995
|
-
end
|
|
1996
|
-
end
|
|
1997
|
-
|
|
1998
|
-
-- Idempotency mapping
|
|
1999
|
-
if idempotencyKey ~= "null" then
|
|
2000
|
-
redis.call('SET', prefix .. 'idempotency:' .. idempotencyKey, id)
|
|
2001
|
-
end
|
|
2002
|
-
|
|
2003
|
-
-- All-jobs sorted set (for ordering by createdAt)
|
|
2004
|
-
redis.call('ZADD', prefix .. 'all', nowMs, id)
|
|
2005
|
-
|
|
2006
|
-
-- Queue or delayed
|
|
2007
|
-
if runAt <= nowMs then
|
|
2008
|
-
-- Ready now: add to queue with priority score
|
|
2009
|
-
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - nowMs)
|
|
2010
|
-
redis.call('ZADD', prefix .. 'queue', score, id)
|
|
2011
|
-
else
|
|
2012
|
-
-- Future: add to delayed set
|
|
2013
|
-
redis.call('ZADD', prefix .. 'delayed', runAt, id)
|
|
2014
|
-
end
|
|
2015
|
-
|
|
2016
|
-
return id
|
|
2017
|
-
`;
|
|
2018
|
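The queue score in the script above packs priority and enqueue time into a single number so that one ZPOPMAX call drains the queue in the right order: score = priority * 1e15 + (1e15 - nowMs). The priority term dominates, so a higher priority always pops first; within a priority, an earlier timestamp leaves a larger remainder, so older jobs pop first. A worked example with illustrative timestamps (all results stay below Number.MAX_SAFE_INTEGER):

const SCORE_RANGE = 1e15;
const score = (priority: number, enqueuedAtMs: number) =>
  priority * SCORE_RANGE + (SCORE_RANGE - enqueuedAtMs);

// Two jobs at priority 0, one second apart: the older job scores higher.
score(0, 1_700_000_000_000); // 998_300_000_000_000
score(0, 1_700_000_001_000); // 998_299_999_999_000

// A priority-1 job outranks both, regardless of age.
score(1, 1_700_000_002_000); // 1_998_299_999_998_000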
var GET_NEXT_BATCH_SCRIPT = `
local prefix = KEYS[1]
local workerId = ARGV[1]
local batchSize = tonumber(ARGV[2])
local nowMs = tonumber(ARGV[3])
local jobTypeFilter = ARGV[4] -- "null" or JSON array or single string

-- 1. Move ready delayed jobs into queue
local delayed = redis.call('ZRANGEBYSCORE', prefix .. 'delayed', '-inf', nowMs, 'LIMIT', 0, 200)
for _, jobId in ipairs(delayed) do
  local jk = prefix .. 'job:' .. jobId
  local status = redis.call('HGET', jk, 'status')
  local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
  local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
  if status == 'pending' and attempts < maxAttempts then
    local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
    local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
    local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
    redis.call('ZADD', prefix .. 'queue', score, jobId)
  end
  redis.call('ZREM', prefix .. 'delayed', jobId)
end

-- 2. Move ready retry jobs into queue
local retries = redis.call('ZRANGEBYSCORE', prefix .. 'retry', '-inf', nowMs, 'LIMIT', 0, 200)
for _, jobId in ipairs(retries) do
  local jk = prefix .. 'job:' .. jobId
  local status = redis.call('HGET', jk, 'status')
  local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
  local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
  if status == 'failed' and attempts < maxAttempts then
    local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
    local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
    local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
    redis.call('ZADD', prefix .. 'queue', score, jobId)
    redis.call('SREM', prefix .. 'status:failed', jobId)
    redis.call('SADD', prefix .. 'status:pending', jobId)
    redis.call('HMSET', jk, 'status', 'pending')
  end
  redis.call('ZREM', prefix .. 'retry', jobId)
end

-- 3. Parse job type filter
local filterTypes = nil
if jobTypeFilter ~= "null" then
  -- Could be a JSON array or a plain string
  local ok, decoded = pcall(cjson.decode, jobTypeFilter)
  if ok and type(decoded) == 'table' then
    filterTypes = {}
    for _, t in ipairs(decoded) do filterTypes[t] = true end
  else
    filterTypes = { [jobTypeFilter] = true }
  end
end

-- 4. Pop candidates from queue (highest score first)
-- We pop more than batchSize because some may be filtered out
local popCount = batchSize * 3
local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
-- candidates: [member1, score1, member2, score2, ...]

local results = {}
local jobsClaimed = 0
local putBack = {} -- {score, id} pairs to put back

for i = 1, #candidates, 2 do
  local jobId = candidates[i]
  local score = candidates[i + 1]
  local jk = prefix .. 'job:' .. jobId

  if jobsClaimed >= batchSize then
    -- We have enough; put the rest back
    table.insert(putBack, score)
    table.insert(putBack, jobId)
  else
    -- Check job type filter
    local jt = redis.call('HGET', jk, 'jobType')
    if filterTypes and not filterTypes[jt] then
      -- Doesn't match filter: put back
      table.insert(putBack, score)
      table.insert(putBack, jobId)
    else
      -- Check run_at
      local runAt = tonumber(redis.call('HGET', jk, 'runAt'))
      if runAt > nowMs then
        -- Not ready yet: move to delayed
        redis.call('ZADD', prefix .. 'delayed', runAt, jobId)
      else
        -- Claim this job
        local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
        local startedAt = redis.call('HGET', jk, 'startedAt')
        local lastRetriedAt = redis.call('HGET', jk, 'lastRetriedAt')
        if startedAt == 'null' then startedAt = nowMs end
        if attempts > 0 then lastRetriedAt = nowMs end

        redis.call('HMSET', jk,
          'status', 'processing',
          'lockedAt', nowMs,
          'lockedBy', workerId,
          'attempts', attempts + 1,
          'updatedAt', nowMs,
          'pendingReason', 'null',
          'startedAt', startedAt,
          'lastRetriedAt', lastRetriedAt
        )

        -- Update status sets
        redis.call('SREM', prefix .. 'status:pending', jobId)
        redis.call('SADD', prefix .. 'status:processing', jobId)

        -- Return job data as flat array
        local data = redis.call('HGETALL', jk)
        for _, v in ipairs(data) do
          table.insert(results, v)
        end
        -- Separator
        table.insert(results, '__JOB_SEP__')
        jobsClaimed = jobsClaimed + 1
      end
    end
  end
end

-- Put back jobs we didn't claim
if #putBack > 0 then
  redis.call('ZADD', prefix .. 'queue', unpack(putBack))
end

return results
`;
var COMPLETE_JOB_SCRIPT = `
local prefix = KEYS[1]
local jobId = ARGV[1]
local nowMs = ARGV[2]
local jk = prefix .. 'job:' .. jobId

redis.call('HMSET', jk,
  'status', 'completed',
  'updatedAt', nowMs,
  'completedAt', nowMs
)
redis.call('SREM', prefix .. 'status:processing', jobId)
redis.call('SADD', prefix .. 'status:completed', jobId)

return 1
`;
var FAIL_JOB_SCRIPT = `
local prefix = KEYS[1]
local jobId = ARGV[1]
local errorJson = ARGV[2]
local failureReason = ARGV[3]
local nowMs = tonumber(ARGV[4])
local jk = prefix .. 'job:' .. jobId

local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))

-- Compute next_attempt_at: 2^attempts minutes from now
local nextAttemptAt = 'null'
if attempts < maxAttempts then
  local delayMs = math.pow(2, attempts) * 60000
  nextAttemptAt = nowMs + delayMs
end

-- Append to error_history
local history = redis.call('HGET', jk, 'errorHistory') or '[]'
local ok, arr = pcall(cjson.decode, history)
if not ok then arr = {} end
local newErrors = cjson.decode(errorJson)
for _, e in ipairs(newErrors) do
  table.insert(arr, e)
end

redis.call('HMSET', jk,
  'status', 'failed',
  'updatedAt', nowMs,
  'nextAttemptAt', tostring(nextAttemptAt),
  'errorHistory', cjson.encode(arr),
  'failureReason', failureReason,
  'lastFailedAt', nowMs
)
redis.call('SREM', prefix .. 'status:processing', jobId)
redis.call('SADD', prefix .. 'status:failed', jobId)

-- Schedule retry if applicable
if nextAttemptAt ~= 'null' then
  redis.call('ZADD', prefix .. 'retry', nextAttemptAt, jobId)
end

return 1
`;
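The retry delay computed in the fail script above is exponential: 2^attempts minutes after the failure, and only while attempts remain. With the default maxAttempts of 3, a persistently failing job retries about 2 minutes after its first failure (attempts = 1) and 4 minutes after its second (attempts = 2), then stays failed. The same computation as a sketch:

// Mirrors the Lua: math.pow(2, attempts) * 60000, only while attempts < maxAttempts.
const nextAttemptDelayMs = (attempts: number, maxAttempts: number): number | null =>
  attempts < maxAttempts ? 2 ** attempts * 60_000 : null;

nextAttemptDelayMs(1, 3); // 120000 -> retry ~2 minutes after the 1st failure
nextAttemptDelayMs(2, 3); // 240000 -> retry ~4 minutes after the 2nd failure
nextAttemptDelayMs(3, 3); // null   -> attempts exhausted; the job stays failed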
var RETRY_JOB_SCRIPT = `
local prefix = KEYS[1]
local jobId = ARGV[1]
local nowMs = tonumber(ARGV[2])
local jk = prefix .. 'job:' .. jobId

local oldStatus = redis.call('HGET', jk, 'status')

redis.call('HMSET', jk,
  'status', 'pending',
  'updatedAt', nowMs,
  'lockedAt', 'null',
  'lockedBy', 'null',
  'nextAttemptAt', nowMs,
  'lastRetriedAt', nowMs
)

-- Remove from old status, add to pending
if oldStatus then
  redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
end
redis.call('SADD', prefix .. 'status:pending', jobId)

-- Remove from retry sorted set if present
redis.call('ZREM', prefix .. 'retry', jobId)

-- Add to queue (ready now)
local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
redis.call('ZADD', prefix .. 'queue', score, jobId)

return 1
`;
var CANCEL_JOB_SCRIPT = `
local prefix = KEYS[1]
local jobId = ARGV[1]
local nowMs = ARGV[2]
local jk = prefix .. 'job:' .. jobId

local status = redis.call('HGET', jk, 'status')
if status ~= 'pending' then return 0 end

redis.call('HMSET', jk,
  'status', 'cancelled',
  'updatedAt', nowMs,
  'lastCancelledAt', nowMs
)
redis.call('SREM', prefix .. 'status:pending', jobId)
redis.call('SADD', prefix .. 'status:cancelled', jobId)
-- Remove from queue / delayed
redis.call('ZREM', prefix .. 'queue', jobId)
redis.call('ZREM', prefix .. 'delayed', jobId)

return 1
`;
var PROLONG_JOB_SCRIPT = `
local prefix = KEYS[1]
local jobId = ARGV[1]
local nowMs = ARGV[2]
local jk = prefix .. 'job:' .. jobId

local status = redis.call('HGET', jk, 'status')
if status ~= 'processing' then return 0 end

redis.call('HMSET', jk,
  'lockedAt', nowMs,
  'updatedAt', nowMs
)

return 1
`;
var RECLAIM_STUCK_JOBS_SCRIPT = `
local prefix = KEYS[1]
local maxAgeMs = tonumber(ARGV[1])
local nowMs = tonumber(ARGV[2])

local processing = redis.call('SMEMBERS', prefix .. 'status:processing')
local count = 0

for _, jobId in ipairs(processing) do
  local jk = prefix .. 'job:' .. jobId
  local lockedAt = redis.call('HGET', jk, 'lockedAt')
  if lockedAt and lockedAt ~= 'null' then
    local lockedAtNum = tonumber(lockedAt)
    if lockedAtNum then
      -- Use the greater of maxAgeMs and the job's own timeoutMs
      local jobMaxAge = maxAgeMs
      local timeoutMs = redis.call('HGET', jk, 'timeoutMs')
      if timeoutMs and timeoutMs ~= 'null' then
        local tMs = tonumber(timeoutMs)
        if tMs and tMs > jobMaxAge then
          jobMaxAge = tMs
        end
      end
      local cutoff = nowMs - jobMaxAge
      if lockedAtNum < cutoff then
        redis.call('HMSET', jk,
          'status', 'pending',
          'lockedAt', 'null',
          'lockedBy', 'null',
          'updatedAt', nowMs
        )
        redis.call('SREM', prefix .. 'status:processing', jobId)
        redis.call('SADD', prefix .. 'status:pending', jobId)

        -- Re-add to queue
        local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
        local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
        local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
        redis.call('ZADD', prefix .. 'queue', score, jobId)

        count = count + 1
      end
    end
  end
end

return count
`;
var CLEANUP_OLD_JOBS_SCRIPT = `
local prefix = KEYS[1]
local cutoffMs = tonumber(ARGV[1])

local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
local count = 0

for _, jobId in ipairs(completed) do
  local jk = prefix .. 'job:' .. jobId
  local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
  if updatedAt and updatedAt < cutoffMs then
    -- Remove all indexes
    local jobType = redis.call('HGET', jk, 'jobType')
    local tagsJson = redis.call('HGET', jk, 'tags')
    local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')

    redis.call('DEL', jk)
    redis.call('SREM', prefix .. 'status:completed', jobId)
    redis.call('ZREM', prefix .. 'all', jobId)
    if jobType then
      redis.call('SREM', prefix .. 'type:' .. jobType, jobId)
    end
    if tagsJson and tagsJson ~= 'null' then
      local ok, tags = pcall(cjson.decode, tagsJson)
      if ok and type(tags) == 'table' then
        for _, tag in ipairs(tags) do
          redis.call('SREM', prefix .. 'tag:' .. tag, jobId)
        end
      end
      redis.call('DEL', prefix .. 'job:' .. jobId .. ':tags')
    end
    if idempotencyKey and idempotencyKey ~= 'null' then
      redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
    end
    -- Delete events
    redis.call('DEL', prefix .. 'events:' .. jobId)

    count = count + 1
  end
end

return count
`;

// src/backends/redis.ts
function hashToObject(arr) {
  const obj = {};
  for (let i = 0; i < arr.length; i += 2) {
    obj[arr[i]] = arr[i + 1];
  }
  return obj;
}
function deserializeJob(h) {
  const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
  const numOrNull = (v) => {
    const n = nullish(v);
    return n === null ? null : Number(n);
  };
  const dateOrNull = (v) => {
    const n = numOrNull(v);
    return n === null ? null : new Date(n);
  };
  let errorHistory = [];
  try {
    const raw = h.errorHistory;
    if (raw && raw !== "[]") {
      errorHistory = JSON.parse(raw);
    }
  } catch {
  }
  let tags;
  try {
    const raw = h.tags;
    if (raw && raw !== "null") {
      tags = JSON.parse(raw);
    }
  } catch {
  }
  let payload;
  try {
    payload = JSON.parse(h.payload);
  } catch {
    payload = h.payload;
  }
  return {
    id: Number(h.id),
    jobType: h.jobType,
    payload,
    status: h.status,
    createdAt: new Date(Number(h.createdAt)),
    updatedAt: new Date(Number(h.updatedAt)),
    lockedAt: dateOrNull(h.lockedAt),
    lockedBy: nullish(h.lockedBy),
    attempts: Number(h.attempts),
    maxAttempts: Number(h.maxAttempts),
    nextAttemptAt: dateOrNull(h.nextAttemptAt),
    priority: Number(h.priority),
    runAt: new Date(Number(h.runAt)),
    pendingReason: nullish(h.pendingReason),
    errorHistory,
    timeoutMs: numOrNull(h.timeoutMs),
    forceKillOnTimeout: h.forceKillOnTimeout === "true" || h.forceKillOnTimeout === "1" ? true : h.forceKillOnTimeout === "false" || h.forceKillOnTimeout === "0" ? false : null,
    failureReason: nullish(h.failureReason) ?? null,
    completedAt: dateOrNull(h.completedAt),
    startedAt: dateOrNull(h.startedAt),
    lastRetriedAt: dateOrNull(h.lastRetriedAt),
    lastFailedAt: dateOrNull(h.lastFailedAt),
    lastCancelledAt: dateOrNull(h.lastCancelledAt),
    tags,
    idempotencyKey: nullish(h.idempotencyKey),
    progress: numOrNull(h.progress)
  };
}
var RedisBackend = class {
  constructor(redisConfig) {
    let IORedis;
    try {
      const _require = createRequire(import.meta.url);
      IORedis = _require("ioredis");
    } catch {
      throw new Error(
        'Redis backend requires the "ioredis" package. Install it with: npm install ioredis'
      );
    }
    this.prefix = redisConfig.keyPrefix ?? "dq:";
    if (redisConfig.url) {
      this.client = new IORedis(redisConfig.url, {
        ...redisConfig.tls ? { tls: redisConfig.tls } : {},
        ...redisConfig.db !== void 0 ? { db: redisConfig.db } : {}
      });
    } else {
      this.client = new IORedis({
        host: redisConfig.host ?? "127.0.0.1",
        port: redisConfig.port ?? 6379,
        password: redisConfig.password,
        db: redisConfig.db ?? 0,
        ...redisConfig.tls ? { tls: redisConfig.tls } : {}
      });
    }
  }
  /** Expose the raw ioredis client for advanced usage. */
  getClient() {
    return this.client;
  }
  nowMs() {
    return Date.now();
  }
  // ── Events ──────────────────────────────────────────────────────────
  async recordJobEvent(jobId, eventType, metadata) {
    try {
      const eventId = await this.client.incr(`${this.prefix}event_id_seq`);
      const event = JSON.stringify({
        id: eventId,
        jobId,
        eventType,
        createdAt: this.nowMs(),
        metadata: metadata ?? null
      });
      await this.client.rpush(`${this.prefix}events:${jobId}`, event);
    } catch (error) {
      log(`Error recording job event for job ${jobId}: ${error}`);
    }
  }
  async getJobEvents(jobId) {
    const raw = await this.client.lrange(
      `${this.prefix}events:${jobId}`,
      0,
      -1
    );
    return raw.map((r) => {
      const e = JSON.parse(r);
      return {
        ...e,
        createdAt: new Date(e.createdAt)
      };
    });
  }
  // ── Job CRUD ──────────────────────────────────────────────────────────
  async addJob({
    jobType,
    payload,
    maxAttempts = 3,
    priority = 0,
    runAt = null,
    timeoutMs = void 0,
    forceKillOnTimeout = false,
    tags = void 0,
    idempotencyKey = void 0
  }) {
    const now = this.nowMs();
    const runAtMs = runAt ? runAt.getTime() : 0;
    const result = await this.client.eval(
      ADD_JOB_SCRIPT,
      1,
      this.prefix,
      jobType,
      JSON.stringify(payload),
      maxAttempts,
      priority,
      runAtMs.toString(),
      timeoutMs !== void 0 ? timeoutMs.toString() : "null",
      forceKillOnTimeout ? "true" : "false",
      tags ? JSON.stringify(tags) : "null",
      idempotencyKey ?? "null",
      now
    );
    const jobId = Number(result);
    log(
      `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
    );
    await this.recordJobEvent(jobId, "added" /* Added */, {
      jobType,
      payload,
      tags,
      idempotencyKey
    });
    return jobId;
  }
  async getJob(id) {
    const data = await this.client.hgetall(`${this.prefix}job:${id}`);
    if (!data || Object.keys(data).length === 0) {
      log(`Job ${id} not found`);
      return null;
    }
    log(`Found job ${id}`);
    return deserializeJob(data);
  }
  async getJobsByStatus(status, limit = 100, offset = 0) {
    const ids = await this.client.smembers(`${this.prefix}status:${status}`);
    if (ids.length === 0) return [];
    const jobs = await this.loadJobsByIds(ids);
    jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
    return jobs.slice(offset, offset + limit);
  }
  async getAllJobs(limit = 100, offset = 0) {
    const ids = await this.client.zrevrange(
      `${this.prefix}all`,
      offset,
      offset + limit - 1
    );
    if (ids.length === 0) return [];
    return this.loadJobsByIds(ids);
  }
  async getJobs(filters, limit = 100, offset = 0) {
    let candidateIds;
    if (filters?.jobType) {
      candidateIds = await this.client.smembers(
        `${this.prefix}type:${filters.jobType}`
      );
    } else {
      candidateIds = await this.client.zrevrange(`${this.prefix}all`, 0, -1);
    }
    if (candidateIds.length === 0) return [];
    if (filters?.tags && filters.tags.values.length > 0) {
      candidateIds = await this.filterByTags(
        candidateIds,
        filters.tags.values,
        filters.tags.mode || "all"
      );
    }
    let jobs = await this.loadJobsByIds(candidateIds);
    if (filters) {
      if (filters.priority !== void 0) {
        jobs = jobs.filter((j) => j.priority === filters.priority);
      }
      if (filters.runAt) {
        jobs = this.filterByRunAt(jobs, filters.runAt);
      }
    }
    jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
    return jobs.slice(offset, offset + limit);
  }
  async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
    const allIds = await this.client.zrevrange(`${this.prefix}all`, 0, -1);
    if (allIds.length === 0) return [];
    const filtered = await this.filterByTags(allIds, tags, mode);
    if (filtered.length === 0) return [];
    const jobs = await this.loadJobsByIds(filtered);
    jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
    return jobs.slice(offset, offset + limit);
  }
  // ── Processing lifecycle ──────────────────────────────────────────────
  async getNextBatch(workerId, batchSize = 10, jobType) {
    const now = this.nowMs();
    const jobTypeFilter = jobType === void 0 ? "null" : Array.isArray(jobType) ? JSON.stringify(jobType) : jobType;
    const result = await this.client.eval(
      GET_NEXT_BATCH_SCRIPT,
      1,
      this.prefix,
      workerId,
      batchSize,
      now,
      jobTypeFilter
    );
    if (!result || result.length === 0) {
      log("Found 0 jobs to process");
      return [];
    }
    const jobs = [];
    let current = [];
    for (const item of result) {
      if (item === "__JOB_SEP__") {
        if (current.length > 0) {
          const h = hashToObject(current);
          jobs.push(deserializeJob(h));
        }
        current = [];
      } else {
        current.push(item);
      }
    }
    log(`Found ${jobs.length} jobs to process`);
    for (const job of jobs) {
      await this.recordJobEvent(job.id, "processing" /* Processing */);
    }
    return jobs;
  }
  async completeJob(jobId) {
    const now = this.nowMs();
    await this.client.eval(COMPLETE_JOB_SCRIPT, 1, this.prefix, jobId, now);
    await this.recordJobEvent(jobId, "completed" /* Completed */);
    log(`Completed job ${jobId}`);
  }
  async failJob(jobId, error, failureReason) {
    const now = this.nowMs();
    const errorJson = JSON.stringify([
      {
        message: error.message || String(error),
        timestamp: new Date(now).toISOString()
      }
    ]);
    await this.client.eval(
      FAIL_JOB_SCRIPT,
      1,
      this.prefix,
      jobId,
      errorJson,
      failureReason ?? "null",
      now
    );
    await this.recordJobEvent(jobId, "failed" /* Failed */, {
      message: error.message || String(error),
      failureReason
    });
    log(`Failed job ${jobId}`);
  }
  async prolongJob(jobId) {
    try {
      const now = this.nowMs();
      await this.client.eval(PROLONG_JOB_SCRIPT, 1, this.prefix, jobId, now);
      await this.recordJobEvent(jobId, "prolonged" /* Prolonged */);
      log(`Prolonged job ${jobId}`);
    } catch (error) {
      log(`Error prolonging job ${jobId}: ${error}`);
    }
  }
  // ── Progress ──────────────────────────────────────────────────────────
  async updateProgress(jobId, progress) {
    try {
      const now = this.nowMs();
      await this.client.hset(
        `${this.prefix}job:${jobId}`,
        "progress",
        progress.toString(),
        "updatedAt",
        now.toString()
      );
      log(`Updated progress for job ${jobId}: ${progress}%`);
    } catch (error) {
      log(`Error updating progress for job ${jobId}: ${error}`);
    }
  }
  // ── Job management ────────────────────────────────────────────────────
  async retryJob(jobId) {
    const now = this.nowMs();
    await this.client.eval(RETRY_JOB_SCRIPT, 1, this.prefix, jobId, now);
    await this.recordJobEvent(jobId, "retried" /* Retried */);
    log(`Retried job ${jobId}`);
  }
  async cancelJob(jobId) {
    const now = this.nowMs();
    await this.client.eval(CANCEL_JOB_SCRIPT, 1, this.prefix, jobId, now);
    await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
    log(`Cancelled job ${jobId}`);
  }
  async cancelAllUpcomingJobs(filters) {
    let ids = await this.client.smembers(`${this.prefix}status:pending`);
    if (ids.length === 0) return 0;
    if (filters) {
      ids = await this.applyFilters(ids, filters);
    }
    const now = this.nowMs();
    let count = 0;
    for (const id of ids) {
      const result = await this.client.eval(
        CANCEL_JOB_SCRIPT,
        1,
        this.prefix,
        id,
        now
      );
      if (Number(result) === 1) count++;
    }
    log(`Cancelled ${count} jobs`);
    return count;
  }
  async editJob(jobId, updates) {
    const jk = `${this.prefix}job:${jobId}`;
    const status = await this.client.hget(jk, "status");
    if (status !== "pending") {
      log(`Job ${jobId} is not pending (status: ${status}), skipping edit`);
      return;
    }
    const now = this.nowMs();
    const fields = [];
    const metadata = {};
    if (updates.payload !== void 0) {
      fields.push("payload", JSON.stringify(updates.payload));
      metadata.payload = updates.payload;
    }
    if (updates.maxAttempts !== void 0) {
      fields.push("maxAttempts", updates.maxAttempts.toString());
      metadata.maxAttempts = updates.maxAttempts;
    }
    if (updates.priority !== void 0) {
      fields.push("priority", updates.priority.toString());
      metadata.priority = updates.priority;
      const createdAt = await this.client.hget(jk, "createdAt");
      const score = updates.priority * 1e15 + (1e15 - Number(createdAt));
      const inQueue = await this.client.zscore(
        `${this.prefix}queue`,
        jobId.toString()
      );
      if (inQueue !== null) {
        await this.client.zadd(`${this.prefix}queue`, score, jobId.toString());
      }
    }
    if (updates.runAt !== void 0) {
      if (updates.runAt === null) {
        fields.push("runAt", now.toString());
      } else {
        fields.push("runAt", updates.runAt.getTime().toString());
      }
      metadata.runAt = updates.runAt;
    }
    if (updates.timeoutMs !== void 0) {
      fields.push(
        "timeoutMs",
        updates.timeoutMs !== null ? updates.timeoutMs.toString() : "null"
      );
      metadata.timeoutMs = updates.timeoutMs;
    }
    if (updates.tags !== void 0) {
      const oldTagsJson = await this.client.hget(jk, "tags");
      if (oldTagsJson && oldTagsJson !== "null") {
        try {
          const oldTags = JSON.parse(oldTagsJson);
          for (const tag of oldTags) {
            await this.client.srem(
              `${this.prefix}tag:${tag}`,
              jobId.toString()
            );
          }
        } catch {
        }
      }
      await this.client.del(`${this.prefix}job:${jobId}:tags`);
      if (updates.tags !== null) {
        for (const tag of updates.tags) {
          await this.client.sadd(`${this.prefix}tag:${tag}`, jobId.toString());
          await this.client.sadd(`${this.prefix}job:${jobId}:tags`, tag);
        }
        fields.push("tags", JSON.stringify(updates.tags));
      } else {
        fields.push("tags", "null");
      }
      metadata.tags = updates.tags;
    }
    if (fields.length === 0) {
      log(`No fields to update for job ${jobId}`);
      return;
    }
    fields.push("updatedAt", now.toString());
    await this.client.hmset(jk, ...fields);
    await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
    log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
  }
  async editAllPendingJobs(filters, updates) {
    let ids = await this.client.smembers(`${this.prefix}status:pending`);
    if (ids.length === 0) return 0;
    if (filters) {
      ids = await this.applyFilters(ids, filters);
    }
    let count = 0;
    for (const id of ids) {
      await this.editJob(Number(id), updates);
      count++;
    }
    log(`Edited ${count} pending jobs`);
    return count;
  }
  async cleanupOldJobs(daysToKeep = 30) {
    const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
    const result = await this.client.eval(
      CLEANUP_OLD_JOBS_SCRIPT,
      1,
      this.prefix,
      cutoffMs
    );
    log(`Deleted ${result} old jobs`);
    return Number(result);
  }
  async cleanupOldJobEvents(daysToKeep = 30) {
    log(
      `cleanupOldJobEvents is a no-op for Redis backend (events are cleaned up with their jobs)`
    );
    return 0;
  }
  async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
    const maxAgeMs = maxProcessingTimeMinutes * 60 * 1e3;
    const now = this.nowMs();
    const result = await this.client.eval(
      RECLAIM_STUCK_JOBS_SCRIPT,
      1,
      this.prefix,
      maxAgeMs,
      now
    );
    log(`Reclaimed ${result} stuck jobs`);
    return Number(result);
  }
  // ── Internal helpers ──────────────────────────────────────────────────
  async setPendingReasonForUnpickedJobs(reason, jobType) {
    let ids = await this.client.smembers(`${this.prefix}status:pending`);
    if (ids.length === 0) return;
    if (jobType) {
      const types = Array.isArray(jobType) ? jobType : [jobType];
      const typeSet = /* @__PURE__ */ new Set();
      for (const t of types) {
        const typeIds = await this.client.smembers(`${this.prefix}type:${t}`);
        for (const id of typeIds) typeSet.add(id);
      }
      ids = ids.filter((id) => typeSet.has(id));
    }
    for (const id of ids) {
      await this.client.hset(
        `${this.prefix}job:${id}`,
        "pendingReason",
        reason
      );
    }
  }
  // ── Private helpers ───────────────────────────────────────────────────
  async loadJobsByIds(ids) {
    const pipeline = this.client.pipeline();
    for (const id of ids) {
      pipeline.hgetall(`${this.prefix}job:${id}`);
    }
const results = await pipeline.exec();
|
|
2888
|
-
const jobs = [];
|
|
2889
|
-
if (results) {
|
|
2890
|
-
for (const [err, data] of results) {
|
|
2891
|
-
if (!err && data && typeof data === "object" && Object.keys(data).length > 0) {
|
|
2892
|
-
jobs.push(
|
|
2893
|
-
deserializeJob(data)
|
|
2894
|
-
);
|
|
2895
|
-
}
|
|
2896
|
-
}
|
|
2897
|
-
}
|
|
2898
|
-
return jobs;
|
|
2899
|
-
}
|
|
2900
|
-
async filterByTags(candidateIds, tags, mode) {
|
|
2901
|
-
const candidateSet = new Set(candidateIds.map(String));
|
|
2902
|
-
if (mode === "exact") {
|
|
2903
|
-
const tagSet = new Set(tags);
|
|
2904
|
-
const result = [];
|
|
2905
|
-
for (const id of candidateIds) {
|
|
2906
|
-
const jobTags = await this.client.smembers(
|
|
2907
|
-
`${this.prefix}job:${id}:tags`
|
|
2908
|
-
);
|
|
2909
|
-
if (jobTags.length === tagSet.size && jobTags.every((t) => tagSet.has(t))) {
|
|
2910
|
-
result.push(id);
|
|
2911
|
-
}
|
|
2912
|
-
}
|
|
2913
|
-
return result;
|
|
2914
|
-
}
|
|
2915
|
-
if (mode === "all") {
|
|
2916
|
-
let intersection = new Set(candidateIds.map(String));
|
|
2917
|
-
for (const tag of tags) {
|
|
2918
|
-
const tagMembers = await this.client.smembers(
|
|
2919
|
-
`${this.prefix}tag:${tag}`
|
|
2920
|
-
);
|
|
2921
|
-
const tagSet = new Set(tagMembers.map(String));
|
|
2922
|
-
intersection = new Set(
|
|
2923
|
-
[...intersection].filter((id) => tagSet.has(id))
|
|
2924
|
-
);
|
|
2925
|
-
}
|
|
2926
|
-
return [...intersection].filter((id) => candidateSet.has(id));
|
|
2927
|
-
}
|
|
2928
|
-
if (mode === "any") {
|
|
2929
|
-
const union = /* @__PURE__ */ new Set();
|
|
2930
|
-
for (const tag of tags) {
|
|
2931
|
-
const tagMembers = await this.client.smembers(
|
|
2932
|
-
`${this.prefix}tag:${tag}`
|
|
2933
|
-
);
|
|
2934
|
-
for (const id of tagMembers) union.add(String(id));
|
|
2935
|
-
}
|
|
2936
|
-
return [...union].filter((id) => candidateSet.has(id));
|
|
2937
|
-
}
|
|
2938
|
-
if (mode === "none") {
|
|
2939
|
-
const exclude = /* @__PURE__ */ new Set();
|
|
2940
|
-
for (const tag of tags) {
|
|
2941
|
-
const tagMembers = await this.client.smembers(
|
|
2942
|
-
`${this.prefix}tag:${tag}`
|
|
2943
|
-
);
|
|
2944
|
-
for (const id of tagMembers) exclude.add(String(id));
|
|
2945
|
-
}
|
|
2946
|
-
return candidateIds.filter((id) => !exclude.has(String(id)));
|
|
2947
|
-
}
|
|
2948
|
-
return this.filterByTags(candidateIds, tags, "all");
|
|
2949
|
-
}
|
|
2950
|
-
filterByRunAt(jobs, runAt) {
|
|
2951
|
-
if (runAt instanceof Date) {
|
|
2952
|
-
return jobs.filter((j) => j.runAt.getTime() === runAt.getTime());
|
|
2953
|
-
}
|
|
2954
|
-
return jobs.filter((j) => {
|
|
2955
|
-
const t = j.runAt.getTime();
|
|
2956
|
-
if (runAt.gt && !(t > runAt.gt.getTime())) return false;
|
|
2957
|
-
if (runAt.gte && !(t >= runAt.gte.getTime())) return false;
|
|
2958
|
-
if (runAt.lt && !(t < runAt.lt.getTime())) return false;
|
|
2959
|
-
if (runAt.lte && !(t <= runAt.lte.getTime())) return false;
|
|
2960
|
-
if (runAt.eq && t !== runAt.eq.getTime()) return false;
|
|
2961
|
-
return true;
|
|
2962
|
-
});
|
|
2963
|
-
}
|
|
2964
|
-
async applyFilters(ids, filters) {
|
|
2965
|
-
let result = ids;
|
|
2966
|
-
if (filters.jobType) {
|
|
2967
|
-
const typeIds = new Set(
|
|
2968
|
-
await this.client.smembers(`${this.prefix}type:${filters.jobType}`)
|
|
2969
|
-
);
|
|
2970
|
-
result = result.filter((id) => typeIds.has(id));
|
|
2971
|
-
}
|
|
2972
|
-
if (filters.tags && filters.tags.values.length > 0) {
|
|
2973
|
-
result = await this.filterByTags(
|
|
2974
|
-
result,
|
|
2975
|
-
filters.tags.values,
|
|
2976
|
-
filters.tags.mode || "all"
|
|
2977
|
-
);
|
|
2978
|
-
}
|
|
2979
|
-
if (filters.priority !== void 0 || filters.runAt) {
|
|
2980
|
-
const jobs = await this.loadJobsByIds(result);
|
|
2981
|
-
let filtered = jobs;
|
|
2982
|
-
if (filters.priority !== void 0) {
|
|
2983
|
-
filtered = filtered.filter((j) => j.priority === filters.priority);
|
|
2984
|
-
}
|
|
2985
|
-
if (filters.runAt) {
|
|
2986
|
-
filtered = this.filterByRunAt(filtered, filters.runAt);
|
|
2987
|
-
}
|
|
2988
|
-
result = filtered.map((j) => j.id.toString());
|
|
2989
|
-
}
|
|
2990
|
-
return result;
|
|
2991
|
-
}
|
|
2992
|
-
};
|
|
2993
|
-
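The score written at line 2757 above packs priority and creation time into a single sorted-set score: priority * 1e15 + (1e15 - createdAtMs). A minimal sketch of the ordering this produces, assuming the dequeue side pops the member with the highest score; the `score` helper below is hypothetical, not part of the package:

// Hypothetical helper mirroring the formula at diff line 2757.
const score = (priority, createdAtMs) => priority * 1e15 + (1e15 - createdAtMs);

const older  = score(5, 1_700_000_000_000); // priority 5, created first
const newer  = score(5, 1_700_000_001_000); // priority 5, created later
const urgent = score(9, 1_700_000_001_000); // higher priority, created later

console.log(urgent > older); // true -> higher priority dequeues first
console.log(older > newer);  // true -> earlier createdAt wins within a priority (FIFO)
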
2994 - // src/handler-validation.ts
2995 - function validateHandlerSerializable2(handler, jobType) {
2996 -   try {
2997 -     const handlerString = handler.toString();
2998 -     const typeLabel = jobType ? `job type "${jobType}"` : "handler";
2999 -     if (handlerString.includes("this.") && !handlerString.match(/\([^)]*this[^)]*\)/)) {
3000 -       return {
3001 -         isSerializable: false,
3002 -         error: `Handler for ${typeLabel} uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
3003 -       };
3004 -     }
3005 -     if (handlerString.includes("[native code]")) {
3006 -       return {
3007 -         isSerializable: false,
3008 -         error: `Handler for ${typeLabel} contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
3009 -       };
3010 -     }
3011 -     try {
3012 -       new Function("return " + handlerString);
3013 -     } catch (parseError) {
3014 -       return {
3015 -         isSerializable: false,
3016 -         error: `Handler for ${typeLabel} cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
3017 -       };
3018 -     }
3019 -     const hasPotentialClosure = /const\s+\w+\s*=\s*[^;]+;\s*async\s*\(/.test(handlerString) || /let\s+\w+\s*=\s*[^;]+;\s*async\s*\(/.test(handlerString);
3020 -     if (hasPotentialClosure) {
3021 -       return {
3022 -         isSerializable: true,
3023 -         // Still serializable, but might have issues
3024 -         error: `Warning: Handler for ${typeLabel} may have closures over external variables. Test thoroughly with forceKillOnTimeout enabled. If the handler fails to execute in a worker thread, ensure all dependencies are imported within the handler function.`
3025 -       };
3026 -     }
3027 -     return { isSerializable: true };
3028 -   } catch (error) {
3029 -     return {
3030 -       isSerializable: false,
3031 -       error: `Failed to validate handler serialization${jobType ? ` for job type "${jobType}"` : ""}: ${error instanceof Error ? error.message : String(error)}`
3032 -     };
3033 -   }
3034 - }
3035 - async function testHandlerSerialization(handler, jobType) {
3036 -   const basicValidation = validateHandlerSerializable2(handler, jobType);
3037 -   if (!basicValidation.isSerializable) {
3038 -     return basicValidation;
3039 -   }
3040 -   try {
3041 -     const handlerString = handler.toString();
3042 -     const handlerFn = new Function("return " + handlerString)();
3043 -     const testPromise = handlerFn({}, new AbortController().signal);
3044 -     const timeoutPromise = new Promise(
3045 -       (_, reject) => setTimeout(() => reject(new Error("Handler test timeout")), 100)
3046 -     );
3047 -     try {
3048 -       await Promise.race([testPromise, timeoutPromise]);
3049 -     } catch (execError) {
3050 -       if (execError instanceof Error && execError.message === "Handler test timeout") {
3051 -         return { isSerializable: true };
3052 -       }
3053 -     }
3054 -     return { isSerializable: true };
3055 -   } catch (error) {
3056 -     return {
3057 -       isSerializable: false,
3058 -       error: `Handler failed serialization test: ${error instanceof Error ? error.message : String(error)}`
3059 -     };
3060 -   }
3061 - }
3062 -
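Both validators above ship from this bundle; line 3187 below re-exports validateHandlerSerializable2 as validateHandlerSerializable. A minimal sketch of the `this` check in action, assuming the 1.25.0 package is installed; the Mailer class and the "send_email" job type are illustrative, not from the package:

import { validateHandlerSerializable } from '@nicnocquee/dataqueue';

// A plain handler: no `this`, nothing closed over -> passes validation.
const plain = async (payload, signal) => {
  const res = await fetch(payload.url, { signal });
  return res.status;
};

// A handler that captures `this` -> rejected by the check at diff line 2999.
class Mailer {
  send = async (payload) => this.deliver(payload);
  async deliver(payload) {}
}

console.log(validateHandlerSerializable(plain));
// { isSerializable: true }
console.log(validateHandlerSerializable(new Mailer().send, 'send_email').error);
// Handler for job type "send_email" uses 'this' context which cannot be serialized. ...
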
3063 - // src/index.ts
3064 - var initJobQueue = (config) => {
3065 -   const backendType = config.backend ?? "postgres";
3066 -   setLogContext(config.verbose ?? false);
3067 -   let backend;
3068 -   let pool;
3069 -   if (backendType === "postgres") {
3070 -     const pgConfig = config;
3071 -     pool = createPool(pgConfig.databaseConfig);
3072 -     backend = new PostgresBackend(pool);
3073 -   } else if (backendType === "redis") {
3074 -     const redisConfig = config.redisConfig;
3075 -     backend = new RedisBackend(redisConfig);
3076 -   } else {
3077 -     throw new Error(`Unknown backend: ${backendType}`);
3078 -   }
3079 -   const requirePool = () => {
3080 -     if (!pool) {
3081 -       throw new Error(
3082 -         'Wait/Token features require the PostgreSQL backend. Configure with backend: "postgres" to use these features.'
3083 -       );
3084 -     }
3085 -     return pool;
3086 -   };
3087 -   return {
3088 -     // Job queue operations
3089 -     addJob: withLogContext(
3090 -       (job) => backend.addJob(job),
3091 -       config.verbose ?? false
3092 -     ),
3093 -     getJob: withLogContext(
3094 -       (id) => backend.getJob(id),
3095 -       config.verbose ?? false
3096 -     ),
3097 -     getJobsByStatus: withLogContext(
3098 -       (status, limit, offset) => backend.getJobsByStatus(status, limit, offset),
3099 -       config.verbose ?? false
3100 -     ),
3101 -     getAllJobs: withLogContext(
3102 -       (limit, offset) => backend.getAllJobs(limit, offset),
3103 -       config.verbose ?? false
3104 -     ),
3105 -     getJobs: withLogContext(
3106 -       (filters, limit, offset) => backend.getJobs(filters, limit, offset),
3107 -       config.verbose ?? false
3108 -     ),
3109 -     retryJob: (jobId) => backend.retryJob(jobId),
3110 -     cleanupOldJobs: (daysToKeep) => backend.cleanupOldJobs(daysToKeep),
3111 -     cleanupOldJobEvents: (daysToKeep) => backend.cleanupOldJobEvents(daysToKeep),
3112 -     cancelJob: withLogContext(
3113 -       (jobId) => backend.cancelJob(jobId),
3114 -       config.verbose ?? false
3115 -     ),
3116 -     editJob: withLogContext(
3117 -       (jobId, updates) => backend.editJob(jobId, updates),
3118 -       config.verbose ?? false
3119 -     ),
3120 -     editAllPendingJobs: withLogContext(
3121 -       (filters, updates) => backend.editAllPendingJobs(
3122 -         filters,
3123 -         updates
3124 -       ),
3125 -       config.verbose ?? false
3126 -     ),
3127 -     cancelAllUpcomingJobs: withLogContext(
3128 -       (filters) => backend.cancelAllUpcomingJobs(filters),
3129 -       config.verbose ?? false
3130 -     ),
3131 -     reclaimStuckJobs: withLogContext(
3132 -       (maxProcessingTimeMinutes) => backend.reclaimStuckJobs(maxProcessingTimeMinutes),
3133 -       config.verbose ?? false
3134 -     ),
3135 -     getJobsByTags: withLogContext(
3136 -       (tags, mode = "all", limit, offset) => backend.getJobsByTags(tags, mode, limit, offset),
3137 -       config.verbose ?? false
3138 -     ),
3139 -     // Job processing
3140 -     createProcessor: (handlers, options) => createProcessor(backend, handlers, options),
3141 -     // Job events
3142 -     getJobEvents: withLogContext(
3143 -       (jobId) => backend.getJobEvents(jobId),
3144 -       config.verbose ?? false
3145 -     ),
3146 -     // Wait / Token support (PostgreSQL-only for now)
3147 -     createToken: withLogContext(
3148 -       (options) => createWaitpoint(requirePool(), null, options),
3149 -       config.verbose ?? false
3150 -     ),
3151 -     completeToken: withLogContext(
3152 -       (tokenId, data) => completeWaitpoint(requirePool(), tokenId, data),
3153 -       config.verbose ?? false
3154 -     ),
3155 -     getToken: withLogContext(
3156 -       (tokenId) => getWaitpoint(requirePool(), tokenId),
3157 -       config.verbose ?? false
3158 -     ),
3159 -     expireTimedOutTokens: withLogContext(
3160 -       () => expireTimedOutWaitpoints(requirePool()),
3161 -       config.verbose ?? false
3162 -     ),
3163 -     // Advanced access
3164 -     getPool: () => {
3165 -       if (backendType !== "postgres") {
3166 -         throw new Error(
3167 -           "getPool() is only available with the PostgreSQL backend."
3168 -         );
3169 -       }
3170 -       return backend.getPool();
3171 -     },
3172 -     getRedisClient: () => {
3173 -       if (backendType !== "redis") {
3174 -         throw new Error(
3175 -           "getRedisClient() is only available with the Redis backend."
3176 -         );
3177 -       }
3178 -       return backend.getClient();
3179 -     }
3180 -   };
3181 - };
3182 - var withLogContext = (fn, verbose) => (...args) => {
3183 -   setLogContext(verbose);
3184 -   return fn(...args);
3185 - };
3186 -
3187 - export { FailureReason, JobEventType, PostgresBackend, WaitSignal, initJobQueue, testHandlerSerialization, validateHandlerSerializable2 as validateHandlerSerializable };
3188 - //# sourceMappingURL=index.js.map
3189 - //# sourceMappingURL=index.js.map
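Taken together, the bundle wires up as follows. This is a minimal sketch assembled only from the API surface visible in this diff (initJobQueue, databaseConfig, addJob, createProcessor); the job field names (jobType, payload) and the databaseConfig shape are assumptions, so consult the package README rather than copying this verbatim:

import { initJobQueue } from '@nicnocquee/dataqueue';

// backend defaults to "postgres" (diff line 3065); pass backend: 'redis'
// together with redisConfig to select the RedisBackend instead.
const queue = initJobQueue({
  databaseConfig: { connectionString: process.env.DATABASE_URL }, // assumed shape
  verbose: true,
});

// Enqueue a job; the field names (jobType, payload) are assumed for illustration.
await queue.addJob({
  jobType: 'send_email',
  payload: { to: 'someone@example.com' },
});

// One handler per job type; createProcessor binds them to the chosen backend
// (diff line 3140).
const processor = queue.createProcessor({
  send_email: async (payload) => {
    console.log('sending to', payload.to);
  },
});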