@nicnocquee/dataqueue 1.26.0 → 1.31.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs ADDED
@@ -0,0 +1,3968 @@
1
+ 'use strict';
2
+
3
+ var async_hooks = require('async_hooks');
4
+ var crypto = require('crypto');
5
+ var worker_threads = require('worker_threads');
6
+ var pg = require('pg');
7
+ var pgConnectionString = require('pg-connection-string');
8
+ var fs = require('fs');
9
+ var module$1 = require('module');
10
+ var croner = require('croner');
11
+
12
// Reference to the executing <script> tag when bundled for the browser; null in Node.
var _documentCurrentScript = typeof document !== 'undefined' ? document.currentScript : null;
// Normalize a required module: real ES-module namespaces pass through,
// plain CJS exports are wrapped in a { default } shell.
function _interopDefault(e) {
  if (e && e.__esModule) {
    return e;
  }
  return { default: e };
}

var fs__default = /*#__PURE__*/_interopDefault(fs);
16
+
17
+ // src/types.ts
18
// Lifecycle event names recorded in the job_events table.
var JobEventType = /* @__PURE__ */ ((enumObj) => {
  return Object.assign(enumObj, {
    Added: "added",
    Processing: "processing",
    Completed: "completed",
    Failed: "failed",
    Cancelled: "cancelled",
    Retried: "retried",
    Edited: "edited",
    Prolonged: "prolonged",
    Waiting: "waiting"
  });
})(JobEventType || {});
30
// Why a job ended up in the 'failed' state.
var FailureReason = /* @__PURE__ */ ((enumObj) => {
  return Object.assign(enumObj, {
    Timeout: "timeout",
    HandlerError: "handler_error",
    NoHandler: "no_handler"
  });
})(FailureReason || {});
36
// Control-flow signal thrown by wait/step helpers. It extends Error so it can
// travel through normal throw/catch machinery; consumers detect it via the
// `isWaitSignal` marker rather than instanceof.
var WaitSignal = class extends Error {
  constructor(type, waitUntil, tokenId, stepData) {
    super("WaitSignal");
    Object.assign(this, {
      type,
      waitUntil,
      tokenId,
      stepData,
      isWaitSignal: true,
      name: "WaitSignal"
    });
  }
};
47
// Per-async-context verbosity flag that backs the log() helper, so nested
// awaits inside one job run share a single verbose setting.
var logStorage = new async_hooks.AsyncLocalStorage();
var setLogContext = (verbose) => {
  logStorage.enterWith({ verbose });
};
var getLogContext = () => logStorage.getStore();
// Emit only when the surrounding async context opted into verbose logging.
var log = (message) => {
  if (getLogContext()?.verbose) {
    console.log(message);
  }
};
60
+
61
+ // src/backends/postgres.ts
62
+ var PostgresBackend = class {
63
+ constructor(pool) {
64
+ this.pool = pool;
65
+ }
66
+ /** Expose the raw pool for advanced usage. */
67
+ getPool() {
68
+ return this.pool;
69
+ }
70
+ // ── Events ──────────────────────────────────────────────────────────
71
+ async recordJobEvent(jobId, eventType, metadata) {
72
+ const client = await this.pool.connect();
73
+ try {
74
+ await client.query(
75
+ `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
76
+ [jobId, eventType, metadata ? JSON.stringify(metadata) : null]
77
+ );
78
+ } catch (error) {
79
+ log(`Error recording job event for job ${jobId}: ${error}`);
80
+ } finally {
81
+ client.release();
82
+ }
83
+ }
84
+ async getJobEvents(jobId) {
85
+ const client = await this.pool.connect();
86
+ try {
87
+ const res = await client.query(
88
+ `SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
89
+ [jobId]
90
+ );
91
+ return res.rows;
92
+ } finally {
93
+ client.release();
94
+ }
95
+ }
96
+ // ── Job CRUD ──────────────────────────────────────────────────────────
97
+ async addJob({
98
+ jobType,
99
+ payload,
100
+ maxAttempts = 3,
101
+ priority = 0,
102
+ runAt = null,
103
+ timeoutMs = void 0,
104
+ forceKillOnTimeout = false,
105
+ tags = void 0,
106
+ idempotencyKey = void 0
107
+ }) {
108
+ const client = await this.pool.connect();
109
+ try {
110
+ let result;
111
+ const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
112
+ if (runAt) {
113
+ result = await client.query(
114
+ `INSERT INTO job_queue
115
+ (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
116
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
117
+ ${onConflict}
118
+ RETURNING id`,
119
+ [
120
+ jobType,
121
+ payload,
122
+ maxAttempts,
123
+ priority,
124
+ runAt,
125
+ timeoutMs ?? null,
126
+ forceKillOnTimeout ?? false,
127
+ tags ?? null,
128
+ idempotencyKey ?? null
129
+ ]
130
+ );
131
+ } else {
132
+ result = await client.query(
133
+ `INSERT INTO job_queue
134
+ (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
135
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
136
+ ${onConflict}
137
+ RETURNING id`,
138
+ [
139
+ jobType,
140
+ payload,
141
+ maxAttempts,
142
+ priority,
143
+ timeoutMs ?? null,
144
+ forceKillOnTimeout ?? false,
145
+ tags ?? null,
146
+ idempotencyKey ?? null
147
+ ]
148
+ );
149
+ }
150
+ if (result.rows.length === 0 && idempotencyKey) {
151
+ const existing = await client.query(
152
+ `SELECT id FROM job_queue WHERE idempotency_key = $1`,
153
+ [idempotencyKey]
154
+ );
155
+ if (existing.rows.length > 0) {
156
+ log(
157
+ `Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`
158
+ );
159
+ return existing.rows[0].id;
160
+ }
161
+ throw new Error(
162
+ `Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
163
+ );
164
+ }
165
+ const jobId = result.rows[0].id;
166
+ log(
167
+ `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
168
+ );
169
+ await this.recordJobEvent(jobId, "added" /* Added */, {
170
+ jobType,
171
+ payload,
172
+ tags,
173
+ idempotencyKey
174
+ });
175
+ return jobId;
176
+ } catch (error) {
177
+ log(`Error adding job: ${error}`);
178
+ throw error;
179
+ } finally {
180
+ client.release();
181
+ }
182
+ }
183
+ async getJob(id) {
184
+ const client = await this.pool.connect();
185
+ try {
186
+ const result = await client.query(
187
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
188
+ [id]
189
+ );
190
+ if (result.rows.length === 0) {
191
+ log(`Job ${id} not found`);
192
+ return null;
193
+ }
194
+ log(`Found job ${id}`);
195
+ const job = result.rows[0];
196
+ return {
197
+ ...job,
198
+ payload: job.payload,
199
+ timeoutMs: job.timeoutMs,
200
+ forceKillOnTimeout: job.forceKillOnTimeout,
201
+ failureReason: job.failureReason
202
+ };
203
+ } catch (error) {
204
+ log(`Error getting job ${id}: ${error}`);
205
+ throw error;
206
+ } finally {
207
+ client.release();
208
+ }
209
+ }
210
+ async getJobsByStatus(status, limit = 100, offset = 0) {
211
+ const client = await this.pool.connect();
212
+ try {
213
+ const result = await client.query(
214
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
215
+ [status, limit, offset]
216
+ );
217
+ log(`Found ${result.rows.length} jobs by status ${status}`);
218
+ return result.rows.map((job) => ({
219
+ ...job,
220
+ payload: job.payload,
221
+ timeoutMs: job.timeoutMs,
222
+ forceKillOnTimeout: job.forceKillOnTimeout,
223
+ failureReason: job.failureReason
224
+ }));
225
+ } catch (error) {
226
+ log(`Error getting jobs by status ${status}: ${error}`);
227
+ throw error;
228
+ } finally {
229
+ client.release();
230
+ }
231
+ }
232
+ async getAllJobs(limit = 100, offset = 0) {
233
+ const client = await this.pool.connect();
234
+ try {
235
+ const result = await client.query(
236
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
237
+ [limit, offset]
238
+ );
239
+ log(`Found ${result.rows.length} jobs (all)`);
240
+ return result.rows.map((job) => ({
241
+ ...job,
242
+ payload: job.payload,
243
+ timeoutMs: job.timeoutMs,
244
+ forceKillOnTimeout: job.forceKillOnTimeout
245
+ }));
246
+ } catch (error) {
247
+ log(`Error getting all jobs: ${error}`);
248
+ throw error;
249
+ } finally {
250
+ client.release();
251
+ }
252
+ }
253
+ async getJobs(filters, limit = 100, offset = 0) {
254
+ const client = await this.pool.connect();
255
+ try {
256
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue`;
257
+ const params = [];
258
+ const where = [];
259
+ let paramIdx = 1;
260
+ if (filters) {
261
+ if (filters.jobType) {
262
+ where.push(`job_type = $${paramIdx++}`);
263
+ params.push(filters.jobType);
264
+ }
265
+ if (filters.priority !== void 0) {
266
+ where.push(`priority = $${paramIdx++}`);
267
+ params.push(filters.priority);
268
+ }
269
+ if (filters.runAt) {
270
+ if (filters.runAt instanceof Date) {
271
+ where.push(`run_at = $${paramIdx++}`);
272
+ params.push(filters.runAt);
273
+ } else if (typeof filters.runAt === "object" && (filters.runAt.gt !== void 0 || filters.runAt.gte !== void 0 || filters.runAt.lt !== void 0 || filters.runAt.lte !== void 0 || filters.runAt.eq !== void 0)) {
274
+ const ops = filters.runAt;
275
+ if (ops.gt) {
276
+ where.push(`run_at > $${paramIdx++}`);
277
+ params.push(ops.gt);
278
+ }
279
+ if (ops.gte) {
280
+ where.push(`run_at >= $${paramIdx++}`);
281
+ params.push(ops.gte);
282
+ }
283
+ if (ops.lt) {
284
+ where.push(`run_at < $${paramIdx++}`);
285
+ params.push(ops.lt);
286
+ }
287
+ if (ops.lte) {
288
+ where.push(`run_at <= $${paramIdx++}`);
289
+ params.push(ops.lte);
290
+ }
291
+ if (ops.eq) {
292
+ where.push(`run_at = $${paramIdx++}`);
293
+ params.push(ops.eq);
294
+ }
295
+ }
296
+ }
297
+ if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
298
+ const mode = filters.tags.mode || "all";
299
+ const tagValues = filters.tags.values;
300
+ switch (mode) {
301
+ case "exact":
302
+ where.push(`tags = $${paramIdx++}`);
303
+ params.push(tagValues);
304
+ break;
305
+ case "all":
306
+ where.push(`tags @> $${paramIdx++}`);
307
+ params.push(tagValues);
308
+ break;
309
+ case "any":
310
+ where.push(`tags && $${paramIdx++}`);
311
+ params.push(tagValues);
312
+ break;
313
+ case "none":
314
+ where.push(`NOT (tags && $${paramIdx++})`);
315
+ params.push(tagValues);
316
+ break;
317
+ default:
318
+ where.push(`tags @> $${paramIdx++}`);
319
+ params.push(tagValues);
320
+ }
321
+ }
322
+ if (filters.cursor !== void 0) {
323
+ where.push(`id < $${paramIdx++}`);
324
+ params.push(filters.cursor);
325
+ }
326
+ }
327
+ if (where.length > 0) {
328
+ query += ` WHERE ${where.join(" AND ")}`;
329
+ }
330
+ paramIdx = params.length + 1;
331
+ query += ` ORDER BY id DESC LIMIT $${paramIdx++}`;
332
+ if (!filters?.cursor) {
333
+ query += ` OFFSET $${paramIdx}`;
334
+ params.push(limit, offset);
335
+ } else {
336
+ params.push(limit);
337
+ }
338
+ const result = await client.query(query, params);
339
+ log(`Found ${result.rows.length} jobs`);
340
+ return result.rows.map((job) => ({
341
+ ...job,
342
+ payload: job.payload,
343
+ timeoutMs: job.timeoutMs,
344
+ forceKillOnTimeout: job.forceKillOnTimeout,
345
+ failureReason: job.failureReason
346
+ }));
347
+ } catch (error) {
348
+ log(`Error getting jobs: ${error}`);
349
+ throw error;
350
+ } finally {
351
+ client.release();
352
+ }
353
+ }
354
+ async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
355
+ const client = await this.pool.connect();
356
+ try {
357
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
358
+ FROM job_queue`;
359
+ let params = [];
360
+ switch (mode) {
361
+ case "exact":
362
+ query += " WHERE tags = $1";
363
+ params = [tags];
364
+ break;
365
+ case "all":
366
+ query += " WHERE tags @> $1";
367
+ params = [tags];
368
+ break;
369
+ case "any":
370
+ query += " WHERE tags && $1";
371
+ params = [tags];
372
+ break;
373
+ case "none":
374
+ query += " WHERE NOT (tags && $1)";
375
+ params = [tags];
376
+ break;
377
+ default:
378
+ query += " WHERE tags @> $1";
379
+ params = [tags];
380
+ }
381
+ query += " ORDER BY created_at DESC LIMIT $2 OFFSET $3";
382
+ params.push(limit, offset);
383
+ const result = await client.query(query, params);
384
+ log(
385
+ `Found ${result.rows.length} jobs by tags ${JSON.stringify(tags)} (mode: ${mode})`
386
+ );
387
+ return result.rows.map((job) => ({
388
+ ...job,
389
+ payload: job.payload,
390
+ timeoutMs: job.timeoutMs,
391
+ forceKillOnTimeout: job.forceKillOnTimeout,
392
+ failureReason: job.failureReason
393
+ }));
394
+ } catch (error) {
395
+ log(
396
+ `Error getting jobs by tags ${JSON.stringify(tags)} (mode: ${mode}): ${error}`
397
+ );
398
+ throw error;
399
+ } finally {
400
+ client.release();
401
+ }
402
+ }
403
+ // ── Processing lifecycle ──────────────────────────────────────────────
404
+ async getNextBatch(workerId, batchSize = 10, jobType) {
405
+ const client = await this.pool.connect();
406
+ try {
407
+ await client.query("BEGIN");
408
+ let jobTypeFilter = "";
409
+ const params = [workerId, batchSize];
410
+ if (jobType) {
411
+ if (Array.isArray(jobType)) {
412
+ jobTypeFilter = ` AND job_type = ANY($3)`;
413
+ params.push(jobType);
414
+ } else {
415
+ jobTypeFilter = ` AND job_type = $3`;
416
+ params.push(jobType);
417
+ }
418
+ }
419
+ const result = await client.query(
420
+ `
421
+ UPDATE job_queue
422
+ SET status = 'processing',
423
+ locked_at = NOW(),
424
+ locked_by = $1,
425
+ attempts = CASE WHEN status = 'waiting' THEN attempts ELSE attempts + 1 END,
426
+ updated_at = NOW(),
427
+ pending_reason = NULL,
428
+ started_at = COALESCE(started_at, NOW()),
429
+ last_retried_at = CASE WHEN status != 'waiting' AND attempts > 0 THEN NOW() ELSE last_retried_at END,
430
+ wait_until = NULL
431
+ WHERE id IN (
432
+ SELECT id FROM job_queue
433
+ WHERE (
434
+ (
435
+ (status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
436
+ AND (attempts < max_attempts)
437
+ AND run_at <= NOW()
438
+ )
439
+ OR (
440
+ status = 'waiting'
441
+ AND wait_until IS NOT NULL
442
+ AND wait_until <= NOW()
443
+ AND wait_token_id IS NULL
444
+ )
445
+ )
446
+ ${jobTypeFilter}
447
+ ORDER BY priority DESC, created_at ASC
448
+ LIMIT $2
449
+ FOR UPDATE SKIP LOCKED
450
+ )
451
+ RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
452
+ `,
453
+ params
454
+ );
455
+ log(`Found ${result.rows.length} jobs to process`);
456
+ await client.query("COMMIT");
457
+ if (result.rows.length > 0) {
458
+ await this.recordJobEventsBatch(
459
+ result.rows.map((row) => ({
460
+ jobId: row.id,
461
+ eventType: "processing" /* Processing */
462
+ }))
463
+ );
464
+ }
465
+ return result.rows.map((job) => ({
466
+ ...job,
467
+ payload: job.payload,
468
+ timeoutMs: job.timeoutMs,
469
+ forceKillOnTimeout: job.forceKillOnTimeout
470
+ }));
471
+ } catch (error) {
472
+ log(`Error getting next batch: ${error}`);
473
+ await client.query("ROLLBACK");
474
+ throw error;
475
+ } finally {
476
+ client.release();
477
+ }
478
+ }
479
+ async completeJob(jobId) {
480
+ const client = await this.pool.connect();
481
+ try {
482
+ const result = await client.query(
483
+ `
484
+ UPDATE job_queue
485
+ SET status = 'completed', updated_at = NOW(), completed_at = NOW(),
486
+ step_data = NULL, wait_until = NULL, wait_token_id = NULL
487
+ WHERE id = $1 AND status = 'processing'
488
+ `,
489
+ [jobId]
490
+ );
491
+ if (result.rowCount === 0) {
492
+ log(
493
+ `Job ${jobId} could not be completed (not in processing state or does not exist)`
494
+ );
495
+ }
496
+ await this.recordJobEvent(jobId, "completed" /* Completed */);
497
+ log(`Completed job ${jobId}`);
498
+ } catch (error) {
499
+ log(`Error completing job ${jobId}: ${error}`);
500
+ throw error;
501
+ } finally {
502
+ client.release();
503
+ }
504
+ }
505
+ async failJob(jobId, error, failureReason) {
506
+ const client = await this.pool.connect();
507
+ try {
508
+ const result = await client.query(
509
+ `
510
+ UPDATE job_queue
511
+ SET status = 'failed',
512
+ updated_at = NOW(),
513
+ next_attempt_at = CASE
514
+ WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
515
+ ELSE NULL
516
+ END,
517
+ error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
518
+ failure_reason = $3,
519
+ last_failed_at = NOW()
520
+ WHERE id = $1 AND status IN ('processing', 'pending')
521
+ `,
522
+ [
523
+ jobId,
524
+ JSON.stringify([
525
+ {
526
+ message: error.message || String(error),
527
+ timestamp: (/* @__PURE__ */ new Date()).toISOString()
528
+ }
529
+ ]),
530
+ failureReason ?? null
531
+ ]
532
+ );
533
+ if (result.rowCount === 0) {
534
+ log(
535
+ `Job ${jobId} could not be failed (not in processing/pending state or does not exist)`
536
+ );
537
+ }
538
+ await this.recordJobEvent(jobId, "failed" /* Failed */, {
539
+ message: error.message || String(error),
540
+ failureReason
541
+ });
542
+ log(`Failed job ${jobId}`);
543
+ } catch (err) {
544
+ log(`Error failing job ${jobId}: ${err}`);
545
+ throw err;
546
+ } finally {
547
+ client.release();
548
+ }
549
+ }
550
+ async prolongJob(jobId) {
551
+ const client = await this.pool.connect();
552
+ try {
553
+ await client.query(
554
+ `
555
+ UPDATE job_queue
556
+ SET locked_at = NOW(), updated_at = NOW()
557
+ WHERE id = $1 AND status = 'processing'
558
+ `,
559
+ [jobId]
560
+ );
561
+ await this.recordJobEvent(jobId, "prolonged" /* Prolonged */);
562
+ log(`Prolonged job ${jobId}`);
563
+ } catch (error) {
564
+ log(`Error prolonging job ${jobId}: ${error}`);
565
+ } finally {
566
+ client.release();
567
+ }
568
+ }
569
+ // ── Progress ──────────────────────────────────────────────────────────
570
+ async updateProgress(jobId, progress) {
571
+ const client = await this.pool.connect();
572
+ try {
573
+ await client.query(
574
+ `UPDATE job_queue SET progress = $2, updated_at = NOW() WHERE id = $1`,
575
+ [jobId, progress]
576
+ );
577
+ log(`Updated progress for job ${jobId}: ${progress}%`);
578
+ } catch (error) {
579
+ log(`Error updating progress for job ${jobId}: ${error}`);
580
+ } finally {
581
+ client.release();
582
+ }
583
+ }
584
+ // ── Job management ────────────────────────────────────────────────────
585
+ async retryJob(jobId) {
586
+ const client = await this.pool.connect();
587
+ try {
588
+ const result = await client.query(
589
+ `
590
+ UPDATE job_queue
591
+ SET status = 'pending',
592
+ updated_at = NOW(),
593
+ locked_at = NULL,
594
+ locked_by = NULL,
595
+ next_attempt_at = NOW(),
596
+ last_retried_at = NOW()
597
+ WHERE id = $1 AND status IN ('failed', 'processing')
598
+ `,
599
+ [jobId]
600
+ );
601
+ if (result.rowCount === 0) {
602
+ log(
603
+ `Job ${jobId} could not be retried (not in failed/processing state or does not exist)`
604
+ );
605
+ }
606
+ await this.recordJobEvent(jobId, "retried" /* Retried */);
607
+ log(`Retried job ${jobId}`);
608
+ } catch (error) {
609
+ log(`Error retrying job ${jobId}: ${error}`);
610
+ throw error;
611
+ } finally {
612
+ client.release();
613
+ }
614
+ }
615
+ async cancelJob(jobId) {
616
+ const client = await this.pool.connect();
617
+ try {
618
+ await client.query(
619
+ `
620
+ UPDATE job_queue
621
+ SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW(),
622
+ wait_until = NULL, wait_token_id = NULL
623
+ WHERE id = $1 AND status IN ('pending', 'waiting')
624
+ `,
625
+ [jobId]
626
+ );
627
+ await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
628
+ log(`Cancelled job ${jobId}`);
629
+ } catch (error) {
630
+ log(`Error cancelling job ${jobId}: ${error}`);
631
+ throw error;
632
+ } finally {
633
+ client.release();
634
+ }
635
+ }
636
+ async cancelAllUpcomingJobs(filters) {
637
+ const client = await this.pool.connect();
638
+ try {
639
+ let query = `
640
+ UPDATE job_queue
641
+ SET status = 'cancelled', updated_at = NOW()
642
+ WHERE status = 'pending'`;
643
+ const params = [];
644
+ let paramIdx = 1;
645
+ if (filters) {
646
+ if (filters.jobType) {
647
+ query += ` AND job_type = $${paramIdx++}`;
648
+ params.push(filters.jobType);
649
+ }
650
+ if (filters.priority !== void 0) {
651
+ query += ` AND priority = $${paramIdx++}`;
652
+ params.push(filters.priority);
653
+ }
654
+ if (filters.runAt) {
655
+ if (filters.runAt instanceof Date) {
656
+ query += ` AND run_at = $${paramIdx++}`;
657
+ params.push(filters.runAt);
658
+ } else if (typeof filters.runAt === "object") {
659
+ const ops = filters.runAt;
660
+ if (ops.gt) {
661
+ query += ` AND run_at > $${paramIdx++}`;
662
+ params.push(ops.gt);
663
+ }
664
+ if (ops.gte) {
665
+ query += ` AND run_at >= $${paramIdx++}`;
666
+ params.push(ops.gte);
667
+ }
668
+ if (ops.lt) {
669
+ query += ` AND run_at < $${paramIdx++}`;
670
+ params.push(ops.lt);
671
+ }
672
+ if (ops.lte) {
673
+ query += ` AND run_at <= $${paramIdx++}`;
674
+ params.push(ops.lte);
675
+ }
676
+ if (ops.eq) {
677
+ query += ` AND run_at = $${paramIdx++}`;
678
+ params.push(ops.eq);
679
+ }
680
+ }
681
+ }
682
+ if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
683
+ const mode = filters.tags.mode || "all";
684
+ const tagValues = filters.tags.values;
685
+ switch (mode) {
686
+ case "exact":
687
+ query += ` AND tags = $${paramIdx++}`;
688
+ params.push(tagValues);
689
+ break;
690
+ case "all":
691
+ query += ` AND tags @> $${paramIdx++}`;
692
+ params.push(tagValues);
693
+ break;
694
+ case "any":
695
+ query += ` AND tags && $${paramIdx++}`;
696
+ params.push(tagValues);
697
+ break;
698
+ case "none":
699
+ query += ` AND NOT (tags && $${paramIdx++})`;
700
+ params.push(tagValues);
701
+ break;
702
+ default:
703
+ query += ` AND tags @> $${paramIdx++}`;
704
+ params.push(tagValues);
705
+ }
706
+ }
707
+ }
708
+ query += "\nRETURNING id";
709
+ const result = await client.query(query, params);
710
+ log(`Cancelled ${result.rowCount} jobs`);
711
+ return result.rowCount || 0;
712
+ } catch (error) {
713
+ log(`Error cancelling upcoming jobs: ${error}`);
714
+ throw error;
715
+ } finally {
716
+ client.release();
717
+ }
718
+ }
719
+ async editJob(jobId, updates) {
720
+ const client = await this.pool.connect();
721
+ try {
722
+ const updateFields = [];
723
+ const params = [];
724
+ let paramIdx = 1;
725
+ if (updates.payload !== void 0) {
726
+ updateFields.push(`payload = $${paramIdx++}`);
727
+ params.push(updates.payload);
728
+ }
729
+ if (updates.maxAttempts !== void 0) {
730
+ updateFields.push(`max_attempts = $${paramIdx++}`);
731
+ params.push(updates.maxAttempts);
732
+ }
733
+ if (updates.priority !== void 0) {
734
+ updateFields.push(`priority = $${paramIdx++}`);
735
+ params.push(updates.priority);
736
+ }
737
+ if (updates.runAt !== void 0) {
738
+ if (updates.runAt === null) {
739
+ updateFields.push(`run_at = NOW()`);
740
+ } else {
741
+ updateFields.push(`run_at = $${paramIdx++}`);
742
+ params.push(updates.runAt);
743
+ }
744
+ }
745
+ if (updates.timeoutMs !== void 0) {
746
+ updateFields.push(`timeout_ms = $${paramIdx++}`);
747
+ params.push(updates.timeoutMs ?? null);
748
+ }
749
+ if (updates.tags !== void 0) {
750
+ updateFields.push(`tags = $${paramIdx++}`);
751
+ params.push(updates.tags ?? null);
752
+ }
753
+ if (updateFields.length === 0) {
754
+ log(`No fields to update for job ${jobId}`);
755
+ return;
756
+ }
757
+ updateFields.push(`updated_at = NOW()`);
758
+ params.push(jobId);
759
+ const query = `
760
+ UPDATE job_queue
761
+ SET ${updateFields.join(", ")}
762
+ WHERE id = $${paramIdx} AND status = 'pending'
763
+ `;
764
+ await client.query(query, params);
765
+ const metadata = {};
766
+ if (updates.payload !== void 0) metadata.payload = updates.payload;
767
+ if (updates.maxAttempts !== void 0)
768
+ metadata.maxAttempts = updates.maxAttempts;
769
+ if (updates.priority !== void 0) metadata.priority = updates.priority;
770
+ if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
771
+ if (updates.timeoutMs !== void 0)
772
+ metadata.timeoutMs = updates.timeoutMs;
773
+ if (updates.tags !== void 0) metadata.tags = updates.tags;
774
+ await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
775
+ log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
776
+ } catch (error) {
777
+ log(`Error editing job ${jobId}: ${error}`);
778
+ throw error;
779
+ } finally {
780
+ client.release();
781
+ }
782
+ }
783
+ async editAllPendingJobs(filters = void 0, updates) {
784
+ const client = await this.pool.connect();
785
+ try {
786
+ const updateFields = [];
787
+ const params = [];
788
+ let paramIdx = 1;
789
+ if (updates.payload !== void 0) {
790
+ updateFields.push(`payload = $${paramIdx++}`);
791
+ params.push(updates.payload);
792
+ }
793
+ if (updates.maxAttempts !== void 0) {
794
+ updateFields.push(`max_attempts = $${paramIdx++}`);
795
+ params.push(updates.maxAttempts);
796
+ }
797
+ if (updates.priority !== void 0) {
798
+ updateFields.push(`priority = $${paramIdx++}`);
799
+ params.push(updates.priority);
800
+ }
801
+ if (updates.runAt !== void 0) {
802
+ if (updates.runAt === null) {
803
+ updateFields.push(`run_at = NOW()`);
804
+ } else {
805
+ updateFields.push(`run_at = $${paramIdx++}`);
806
+ params.push(updates.runAt);
807
+ }
808
+ }
809
+ if (updates.timeoutMs !== void 0) {
810
+ updateFields.push(`timeout_ms = $${paramIdx++}`);
811
+ params.push(updates.timeoutMs ?? null);
812
+ }
813
+ if (updates.tags !== void 0) {
814
+ updateFields.push(`tags = $${paramIdx++}`);
815
+ params.push(updates.tags ?? null);
816
+ }
817
+ if (updateFields.length === 0) {
818
+ log(`No fields to update for batch edit`);
819
+ return 0;
820
+ }
821
+ updateFields.push(`updated_at = NOW()`);
822
+ let query = `
823
+ UPDATE job_queue
824
+ SET ${updateFields.join(", ")}
825
+ WHERE status = 'pending'`;
826
+ if (filters) {
827
+ if (filters.jobType) {
828
+ query += ` AND job_type = $${paramIdx++}`;
829
+ params.push(filters.jobType);
830
+ }
831
+ if (filters.priority !== void 0) {
832
+ query += ` AND priority = $${paramIdx++}`;
833
+ params.push(filters.priority);
834
+ }
835
+ if (filters.runAt) {
836
+ if (filters.runAt instanceof Date) {
837
+ query += ` AND run_at = $${paramIdx++}`;
838
+ params.push(filters.runAt);
839
+ } else if (typeof filters.runAt === "object") {
840
+ const ops = filters.runAt;
841
+ if (ops.gt) {
842
+ query += ` AND run_at > $${paramIdx++}`;
843
+ params.push(ops.gt);
844
+ }
845
+ if (ops.gte) {
846
+ query += ` AND run_at >= $${paramIdx++}`;
847
+ params.push(ops.gte);
848
+ }
849
+ if (ops.lt) {
850
+ query += ` AND run_at < $${paramIdx++}`;
851
+ params.push(ops.lt);
852
+ }
853
+ if (ops.lte) {
854
+ query += ` AND run_at <= $${paramIdx++}`;
855
+ params.push(ops.lte);
856
+ }
857
+ if (ops.eq) {
858
+ query += ` AND run_at = $${paramIdx++}`;
859
+ params.push(ops.eq);
860
+ }
861
+ }
862
+ }
863
+ if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
864
+ const mode = filters.tags.mode || "all";
865
+ const tagValues = filters.tags.values;
866
+ switch (mode) {
867
+ case "exact":
868
+ query += ` AND tags = $${paramIdx++}`;
869
+ params.push(tagValues);
870
+ break;
871
+ case "all":
872
+ query += ` AND tags @> $${paramIdx++}`;
873
+ params.push(tagValues);
874
+ break;
875
+ case "any":
876
+ query += ` AND tags && $${paramIdx++}`;
877
+ params.push(tagValues);
878
+ break;
879
+ case "none":
880
+ query += ` AND NOT (tags && $${paramIdx++})`;
881
+ params.push(tagValues);
882
+ break;
883
+ default:
884
+ query += ` AND tags @> $${paramIdx++}`;
885
+ params.push(tagValues);
886
+ }
887
+ }
888
+ }
889
+ query += "\nRETURNING id";
890
+ const result = await client.query(query, params);
891
+ const editedCount = result.rowCount || 0;
892
+ const metadata = {};
893
+ if (updates.payload !== void 0) metadata.payload = updates.payload;
894
+ if (updates.maxAttempts !== void 0)
895
+ metadata.maxAttempts = updates.maxAttempts;
896
+ if (updates.priority !== void 0) metadata.priority = updates.priority;
897
+ if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
898
+ if (updates.timeoutMs !== void 0)
899
+ metadata.timeoutMs = updates.timeoutMs;
900
+ if (updates.tags !== void 0) metadata.tags = updates.tags;
901
+ for (const row of result.rows) {
902
+ await this.recordJobEvent(row.id, "edited" /* Edited */, metadata);
903
+ }
904
+ log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
905
+ return editedCount;
906
+ } catch (error) {
907
+ log(`Error editing pending jobs: ${error}`);
908
+ throw error;
909
+ } finally {
910
+ client.release();
911
+ }
912
+ }
913
+ async cleanupOldJobs(daysToKeep = 30) {
914
+ const client = await this.pool.connect();
915
+ try {
916
+ const result = await client.query(
917
+ `
918
+ DELETE FROM job_queue
919
+ WHERE status = 'completed'
920
+ AND updated_at < NOW() - INTERVAL '1 day' * $1::int
921
+ RETURNING id
922
+ `,
923
+ [daysToKeep]
924
+ );
925
+ log(`Deleted ${result.rowCount} old jobs`);
926
+ return result.rowCount || 0;
927
+ } catch (error) {
928
+ log(`Error cleaning up old jobs: ${error}`);
929
+ throw error;
930
+ } finally {
931
+ client.release();
932
+ }
933
+ }
934
+ async cleanupOldJobEvents(daysToKeep = 30) {
935
+ const client = await this.pool.connect();
936
+ try {
937
+ const result = await client.query(
938
+ `
939
+ DELETE FROM job_events
940
+ WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
941
+ RETURNING id
942
+ `,
943
+ [daysToKeep]
944
+ );
945
+ log(`Deleted ${result.rowCount} old job events`);
946
+ return result.rowCount || 0;
947
+ } catch (error) {
948
+ log(`Error cleaning up old job events: ${error}`);
949
+ throw error;
950
+ } finally {
951
+ client.release();
952
+ }
953
+ }
954
  /**
   * Requeue jobs stuck in 'processing' (e.g. after a worker crash).
   *
   * A job is considered stuck once its lock is older than the LARGER of:
   *   - the maxProcessingTimeMinutes floor, and
   *   - the job's own timeout_ms (so a long per-job timeout is never
   *     undercut by the global floor; NULL timeout_ms counts as 0).
   *
   * @param {number} [maxProcessingTimeMinutes=10] - Global reclaim floor.
   * @returns {Promise<number>} Number of jobs returned to 'pending'.
   */
  async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
        `
      UPDATE job_queue
      SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
      WHERE status = 'processing'
      AND locked_at < NOW() - GREATEST(
        INTERVAL '1 minute' * $1::int,
        INTERVAL '1 millisecond' * COALESCE(timeout_ms, 0)
      )
      RETURNING id
      `,
        [maxProcessingTimeMinutes]
      );
      log(`Reclaimed ${result.rowCount} stuck jobs`);
      // rowCount can be null per the pg driver contract; normalize to 0.
      return result.rowCount || 0;
    } catch (error) {
      log(`Error reclaiming stuck jobs: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
979
+ // ── Internal helpers ──────────────────────────────────────────────────
980
+ /**
981
+ * Batch-insert multiple job events in a single query.
982
+ * More efficient than individual recordJobEvent calls.
983
+ */
984
+ async recordJobEventsBatch(events) {
985
+ if (events.length === 0) return;
986
+ const client = await this.pool.connect();
987
+ try {
988
+ const values = [];
989
+ const params = [];
990
+ let paramIdx = 1;
991
+ for (const event of events) {
992
+ values.push(`($${paramIdx++}, $${paramIdx++}, $${paramIdx++})`);
993
+ params.push(
994
+ event.jobId,
995
+ event.eventType,
996
+ event.metadata ? JSON.stringify(event.metadata) : null
997
+ );
998
+ }
999
+ await client.query(
1000
+ `INSERT INTO job_events (job_id, event_type, metadata) VALUES ${values.join(", ")}`,
1001
+ params
1002
+ );
1003
+ } catch (error) {
1004
+ log(`Error recording batch job events: ${error}`);
1005
+ } finally {
1006
+ client.release();
1007
+ }
1008
+ }
1009
+ // ── Cron schedules ──────────────────────────────────────────────────
1010
+ /** Create a cron schedule and return its ID. */
1011
+ async addCronSchedule(input) {
1012
+ const client = await this.pool.connect();
1013
+ try {
1014
+ const result = await client.query(
1015
+ `INSERT INTO cron_schedules
1016
+ (schedule_name, cron_expression, job_type, payload, max_attempts,
1017
+ priority, timeout_ms, force_kill_on_timeout, tags, timezone,
1018
+ allow_overlap, next_run_at)
1019
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
1020
+ RETURNING id`,
1021
+ [
1022
+ input.scheduleName,
1023
+ input.cronExpression,
1024
+ input.jobType,
1025
+ input.payload,
1026
+ input.maxAttempts,
1027
+ input.priority,
1028
+ input.timeoutMs,
1029
+ input.forceKillOnTimeout,
1030
+ input.tags ?? null,
1031
+ input.timezone,
1032
+ input.allowOverlap,
1033
+ input.nextRunAt
1034
+ ]
1035
+ );
1036
+ const id = result.rows[0].id;
1037
+ log(`Added cron schedule ${id}: "${input.scheduleName}"`);
1038
+ return id;
1039
+ } catch (error) {
1040
+ if (error?.code === "23505") {
1041
+ throw new Error(
1042
+ `Cron schedule with name "${input.scheduleName}" already exists`
1043
+ );
1044
+ }
1045
+ log(`Error adding cron schedule: ${error}`);
1046
+ throw error;
1047
+ } finally {
1048
+ client.release();
1049
+ }
1050
+ }
1051
+ /** Get a cron schedule by ID. */
1052
+ async getCronSchedule(id) {
1053
+ const client = await this.pool.connect();
1054
+ try {
1055
+ const result = await client.query(
1056
+ `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
1057
+ job_type AS "jobType", payload, max_attempts AS "maxAttempts",
1058
+ priority, timeout_ms AS "timeoutMs",
1059
+ force_kill_on_timeout AS "forceKillOnTimeout", tags,
1060
+ timezone, allow_overlap AS "allowOverlap", status,
1061
+ last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
1062
+ next_run_at AS "nextRunAt",
1063
+ created_at AS "createdAt", updated_at AS "updatedAt"
1064
+ FROM cron_schedules WHERE id = $1`,
1065
+ [id]
1066
+ );
1067
+ if (result.rows.length === 0) return null;
1068
+ return result.rows[0];
1069
+ } catch (error) {
1070
+ log(`Error getting cron schedule ${id}: ${error}`);
1071
+ throw error;
1072
+ } finally {
1073
+ client.release();
1074
+ }
1075
+ }
1076
+ /** Get a cron schedule by its unique name. */
1077
+ async getCronScheduleByName(name) {
1078
+ const client = await this.pool.connect();
1079
+ try {
1080
+ const result = await client.query(
1081
+ `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
1082
+ job_type AS "jobType", payload, max_attempts AS "maxAttempts",
1083
+ priority, timeout_ms AS "timeoutMs",
1084
+ force_kill_on_timeout AS "forceKillOnTimeout", tags,
1085
+ timezone, allow_overlap AS "allowOverlap", status,
1086
+ last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
1087
+ next_run_at AS "nextRunAt",
1088
+ created_at AS "createdAt", updated_at AS "updatedAt"
1089
+ FROM cron_schedules WHERE schedule_name = $1`,
1090
+ [name]
1091
+ );
1092
+ if (result.rows.length === 0) return null;
1093
+ return result.rows[0];
1094
+ } catch (error) {
1095
+ log(`Error getting cron schedule by name "${name}": ${error}`);
1096
+ throw error;
1097
+ } finally {
1098
+ client.release();
1099
+ }
1100
+ }
1101
+ /** List cron schedules, optionally filtered by status. */
1102
+ async listCronSchedules(status) {
1103
+ const client = await this.pool.connect();
1104
+ try {
1105
+ let query = `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
1106
+ job_type AS "jobType", payload, max_attempts AS "maxAttempts",
1107
+ priority, timeout_ms AS "timeoutMs",
1108
+ force_kill_on_timeout AS "forceKillOnTimeout", tags,
1109
+ timezone, allow_overlap AS "allowOverlap", status,
1110
+ last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
1111
+ next_run_at AS "nextRunAt",
1112
+ created_at AS "createdAt", updated_at AS "updatedAt"
1113
+ FROM cron_schedules`;
1114
+ const params = [];
1115
+ if (status) {
1116
+ query += ` WHERE status = $1`;
1117
+ params.push(status);
1118
+ }
1119
+ query += ` ORDER BY created_at ASC`;
1120
+ const result = await client.query(query, params);
1121
+ return result.rows;
1122
+ } catch (error) {
1123
+ log(`Error listing cron schedules: ${error}`);
1124
+ throw error;
1125
+ } finally {
1126
+ client.release();
1127
+ }
1128
+ }
1129
+ /** Delete a cron schedule by ID. */
1130
+ async removeCronSchedule(id) {
1131
+ const client = await this.pool.connect();
1132
+ try {
1133
+ await client.query(`DELETE FROM cron_schedules WHERE id = $1`, [id]);
1134
+ log(`Removed cron schedule ${id}`);
1135
+ } catch (error) {
1136
+ log(`Error removing cron schedule ${id}: ${error}`);
1137
+ throw error;
1138
+ } finally {
1139
+ client.release();
1140
+ }
1141
+ }
1142
+ /** Pause a cron schedule. */
1143
+ async pauseCronSchedule(id) {
1144
+ const client = await this.pool.connect();
1145
+ try {
1146
+ await client.query(
1147
+ `UPDATE cron_schedules SET status = 'paused', updated_at = NOW() WHERE id = $1`,
1148
+ [id]
1149
+ );
1150
+ log(`Paused cron schedule ${id}`);
1151
+ } catch (error) {
1152
+ log(`Error pausing cron schedule ${id}: ${error}`);
1153
+ throw error;
1154
+ } finally {
1155
+ client.release();
1156
+ }
1157
+ }
1158
+ /** Resume a paused cron schedule. */
1159
+ async resumeCronSchedule(id) {
1160
+ const client = await this.pool.connect();
1161
+ try {
1162
+ await client.query(
1163
+ `UPDATE cron_schedules SET status = 'active', updated_at = NOW() WHERE id = $1`,
1164
+ [id]
1165
+ );
1166
+ log(`Resumed cron schedule ${id}`);
1167
+ } catch (error) {
1168
+ log(`Error resuming cron schedule ${id}: ${error}`);
1169
+ throw error;
1170
+ } finally {
1171
+ client.release();
1172
+ }
1173
+ }
1174
+ /** Edit a cron schedule. */
1175
+ async editCronSchedule(id, updates, nextRunAt) {
1176
+ const client = await this.pool.connect();
1177
+ try {
1178
+ const updateFields = [];
1179
+ const params = [];
1180
+ let paramIdx = 1;
1181
+ if (updates.cronExpression !== void 0) {
1182
+ updateFields.push(`cron_expression = $${paramIdx++}`);
1183
+ params.push(updates.cronExpression);
1184
+ }
1185
+ if (updates.payload !== void 0) {
1186
+ updateFields.push(`payload = $${paramIdx++}`);
1187
+ params.push(updates.payload);
1188
+ }
1189
+ if (updates.maxAttempts !== void 0) {
1190
+ updateFields.push(`max_attempts = $${paramIdx++}`);
1191
+ params.push(updates.maxAttempts);
1192
+ }
1193
+ if (updates.priority !== void 0) {
1194
+ updateFields.push(`priority = $${paramIdx++}`);
1195
+ params.push(updates.priority);
1196
+ }
1197
+ if (updates.timeoutMs !== void 0) {
1198
+ updateFields.push(`timeout_ms = $${paramIdx++}`);
1199
+ params.push(updates.timeoutMs);
1200
+ }
1201
+ if (updates.forceKillOnTimeout !== void 0) {
1202
+ updateFields.push(`force_kill_on_timeout = $${paramIdx++}`);
1203
+ params.push(updates.forceKillOnTimeout);
1204
+ }
1205
+ if (updates.tags !== void 0) {
1206
+ updateFields.push(`tags = $${paramIdx++}`);
1207
+ params.push(updates.tags);
1208
+ }
1209
+ if (updates.timezone !== void 0) {
1210
+ updateFields.push(`timezone = $${paramIdx++}`);
1211
+ params.push(updates.timezone);
1212
+ }
1213
+ if (updates.allowOverlap !== void 0) {
1214
+ updateFields.push(`allow_overlap = $${paramIdx++}`);
1215
+ params.push(updates.allowOverlap);
1216
+ }
1217
+ if (nextRunAt !== void 0) {
1218
+ updateFields.push(`next_run_at = $${paramIdx++}`);
1219
+ params.push(nextRunAt);
1220
+ }
1221
+ if (updateFields.length === 0) {
1222
+ log(`No fields to update for cron schedule ${id}`);
1223
+ return;
1224
+ }
1225
+ updateFields.push(`updated_at = NOW()`);
1226
+ params.push(id);
1227
+ const query = `UPDATE cron_schedules SET ${updateFields.join(", ")} WHERE id = $${paramIdx}`;
1228
+ await client.query(query, params);
1229
+ log(`Edited cron schedule ${id}`);
1230
+ } catch (error) {
1231
+ log(`Error editing cron schedule ${id}: ${error}`);
1232
+ throw error;
1233
+ } finally {
1234
+ client.release();
1235
+ }
1236
+ }
1237
  /**
   * Atomically fetch all active cron schedules whose nextRunAt <= NOW().
   * Uses FOR UPDATE SKIP LOCKED to prevent duplicate enqueuing across workers.
   *
   * Returns [] instead of throwing when the cron_schedules table has not been
   * migrated yet (Postgres error 42P01 = undefined_table).
   *
   * NOTE(review): this statement runs on its own (no explicit BEGIN), so in
   * autocommit mode the FOR UPDATE locks are released when the statement
   * completes — confirm callers do not rely on rows staying locked while the
   * corresponding jobs are enqueued.
   */
  async getDueCronSchedules() {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
        `SELECT id, schedule_name AS "scheduleName", cron_expression AS "cronExpression",
            job_type AS "jobType", payload, max_attempts AS "maxAttempts",
            priority, timeout_ms AS "timeoutMs",
            force_kill_on_timeout AS "forceKillOnTimeout", tags,
            timezone, allow_overlap AS "allowOverlap", status,
            last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
            next_run_at AS "nextRunAt",
            created_at AS "createdAt", updated_at AS "updatedAt"
     FROM cron_schedules
     WHERE status = 'active'
       AND next_run_at IS NOT NULL
       AND next_run_at <= NOW()
     ORDER BY next_run_at ASC
     FOR UPDATE SKIP LOCKED`
      );
      log(`Found ${result.rows.length} due cron schedules`);
      return result.rows;
    } catch (error) {
      // 42P01: relation does not exist — treat a missing table as "no work".
      if (error?.code === "42P01") {
        log("cron_schedules table does not exist, skipping cron enqueue");
        return [];
      }
      log(`Error getting due cron schedules: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
1273
+ /**
1274
+ * Update a cron schedule after a job has been enqueued.
1275
+ * Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
1276
+ */
1277
+ async updateCronScheduleAfterEnqueue(id, lastEnqueuedAt, lastJobId, nextRunAt) {
1278
+ const client = await this.pool.connect();
1279
+ try {
1280
+ await client.query(
1281
+ `UPDATE cron_schedules
1282
+ SET last_enqueued_at = $2,
1283
+ last_job_id = $3,
1284
+ next_run_at = $4,
1285
+ updated_at = NOW()
1286
+ WHERE id = $1`,
1287
+ [id, lastEnqueuedAt, lastJobId, nextRunAt]
1288
+ );
1289
+ log(
1290
+ `Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? "null"}`
1291
+ );
1292
+ } catch (error) {
1293
+ log(`Error updating cron schedule ${id} after enqueue: ${error}`);
1294
+ throw error;
1295
+ } finally {
1296
+ client.release();
1297
+ }
1298
+ }
1299
+ // ── Internal helpers ──────────────────────────────────────────────────
1300
+ async setPendingReasonForUnpickedJobs(reason, jobType) {
1301
+ const client = await this.pool.connect();
1302
+ try {
1303
+ let jobTypeFilter = "";
1304
+ const params = [reason];
1305
+ if (jobType) {
1306
+ if (Array.isArray(jobType)) {
1307
+ jobTypeFilter = ` AND job_type = ANY($2)`;
1308
+ params.push(jobType);
1309
+ } else {
1310
+ jobTypeFilter = ` AND job_type = $2`;
1311
+ params.push(jobType);
1312
+ }
1313
+ }
1314
+ await client.query(
1315
+ `UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
1316
+ params
1317
+ );
1318
+ } finally {
1319
+ client.release();
1320
+ }
1321
+ }
1322
+ };
1323
/** Record a single job event by delegating to a throwaway PostgresBackend. */
var recordJobEvent = async (pool, jobId, eventType, metadata) => {
  return new PostgresBackend(pool).recordJobEvent(jobId, eventType, metadata);
};
1324
/**
 * Transition a currently-processing job into the 'waiting' state: release its
 * lock and persist the wait metadata plus step data. Logs and returns without
 * error when the job is no longer in 'processing' (e.g. it was reclaimed).
 */
var waitJob = async (pool, jobId, options) => {
  const conn = await pool.connect();
  try {
    const outcome = await conn.query(
      `
    UPDATE job_queue
    SET status = 'waiting',
        wait_until = $2,
        wait_token_id = $3,
        step_data = $4,
        locked_at = NULL,
        locked_by = NULL,
        updated_at = NOW()
    WHERE id = $1 AND status = 'processing'
    `,
      [
        jobId,
        options.waitUntil ?? null,
        options.waitTokenId ?? null,
        JSON.stringify(options.stepData)
      ]
    );
    if (outcome.rowCount === 0) {
      log(
        `Job ${jobId} could not be set to waiting (may have been reclaimed or is no longer processing)`
      );
      return;
    }
    await recordJobEvent(pool, jobId, "waiting" /* Waiting */, {
      waitUntil: options.waitUntil?.toISOString() ?? null,
      waitTokenId: options.waitTokenId ?? null
    });
    log(`Job ${jobId} set to waiting`);
  } catch (err) {
    log(`Error setting job ${jobId} to waiting: ${err}`);
    throw err;
  } finally {
    conn.release();
  }
};
1364
/**
 * Persist the latest step_data blob for a job. Best-effort: failures are
 * logged but never thrown, so step caching cannot fail the job itself.
 */
var updateStepData = async (pool, jobId, stepData) => {
  const conn = await pool.connect();
  try {
    await conn.query(
      `UPDATE job_queue SET step_data = $2, updated_at = NOW() WHERE id = $1`,
      [jobId, JSON.stringify(stepData)]
    );
  } catch (err) {
    log(`Error updating step_data for job ${jobId}: ${err}`);
  } finally {
    conn.release();
  }
};
1377
// Hard cap on any parsed timeout: 365 days in milliseconds.
var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
/**
 * Parse a human-friendly timeout string ("10m", "1h", "7d") into milliseconds.
 * Supported units: s (seconds), m (minutes), h (hours), d (days).
 * @param {string} timeout - e.g. "45s", "10m", "24h", "7d".
 * @returns {number} Timeout in milliseconds.
 * @throws {Error} On malformed input or a value above the 365-day cap.
 */
function parseTimeoutString(timeout) {
  const parsed = timeout.match(/^(\d+)(s|m|h|d)$/);
  if (!parsed) {
    throw new Error(
      `Invalid timeout format: "${timeout}". Expected format like "10m", "1h", "24h", "7d".`
    );
  }
  const amount = parseInt(parsed[1], 10);
  const unit = parsed[2];
  const unitMs = {
    s: 1e3,
    m: 60 * 1e3,
    h: 60 * 60 * 1e3,
    d: 24 * 60 * 60 * 1e3
  }[unit];
  if (unitMs === void 0) {
    // Unreachable given the regex, kept as a defensive guard.
    throw new Error(`Unknown timeout unit: "${unit}"`);
  }
  const ms = amount * unitMs;
  if (!Number.isFinite(ms) || ms > MAX_TIMEOUT_MS) {
    throw new Error(
      `Timeout value "${timeout}" is too large. Maximum allowed is 365 days.`
    );
  }
  return ms;
}
1411
/**
 * Insert a waitpoint token row for a job and return its generated ID.
 * @param pool - pg pool.
 * @param jobId - Owning job.
 * @param [options] - Optional `timeout` (e.g. "10m") and `tags`.
 * @returns {Promise<{id: string}>} The new token handle.
 */
var createWaitpoint = async (pool, jobId, options) => {
  const conn = await pool.connect();
  try {
    const tokenId = `wp_${crypto.randomUUID()}`;
    // Convert the human-readable timeout (if any) into an absolute deadline.
    const expiresAt = options?.timeout
      ? new Date(Date.now() + parseTimeoutString(options.timeout))
      : null;
    await conn.query(
      `INSERT INTO waitpoints (id, job_id, status, timeout_at, tags) VALUES ($1, $2, 'waiting', $3, $4)`,
      [tokenId, jobId, expiresAt, options?.tags ?? null]
    );
    log(`Created waitpoint ${tokenId} for job ${jobId}`);
    return { id: tokenId };
  } catch (err) {
    log(`Error creating waitpoint: ${err}`);
    throw err;
  } finally {
    conn.release();
  }
};
1433
/**
 * Complete a waitpoint token: store the caller-supplied output and, in the
 * same transaction, release the owning job (if any) back to 'pending'.
 * No-op (with a log) when the token does not exist or was already completed.
 *
 * Fix: only issue ROLLBACK when BEGIN actually succeeded, and never let a
 * failing ROLLBACK mask the original error.
 */
var completeWaitpoint = async (pool, tokenId, data) => {
  const client = await pool.connect();
  // Tracks whether an open transaction needs rolling back on error.
  let inTransaction = false;
  try {
    await client.query("BEGIN");
    inTransaction = true;
    const wpResult = await client.query(
      `UPDATE waitpoints SET status = 'completed', output = $2, completed_at = NOW()
     WHERE id = $1 AND status = 'waiting'
     RETURNING job_id`,
      [tokenId, data != null ? JSON.stringify(data) : null]
    );
    if (wpResult.rows.length === 0) {
      await client.query("ROLLBACK");
      inTransaction = false;
      log(`Waitpoint ${tokenId} not found or already completed`);
      return;
    }
    const jobId = wpResult.rows[0].job_id;
    if (jobId != null) {
      await client.query(
        `UPDATE job_queue
       SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
       WHERE id = $1 AND status = 'waiting'`,
        [jobId]
      );
    }
    await client.query("COMMIT");
    inTransaction = false;
    log(`Completed waitpoint ${tokenId} for job ${jobId}`);
  } catch (error) {
    if (inTransaction) {
      try {
        await client.query("ROLLBACK");
      } catch {
        // Connection is already unusable; surface the original error instead.
      }
    }
    log(`Error completing waitpoint ${tokenId}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
1467
/**
 * Fetch a waitpoint row by token ID.
 * @returns The waitpoint record, or null when no row matches.
 */
var getWaitpoint = async (pool, tokenId) => {
  const conn = await pool.connect();
  try {
    const { rows } = await conn.query(
      `SELECT id, job_id AS "jobId", status, output, timeout_at AS "timeoutAt", created_at AS "createdAt", completed_at AS "completedAt", tags FROM waitpoints WHERE id = $1`,
      [tokenId]
    );
    return rows.length === 0 ? null : rows[0];
  } catch (err) {
    log(`Error getting waitpoint ${tokenId}: ${err}`);
    throw err;
  } finally {
    conn.release();
  }
};
1483
/**
 * Expire waitpoints whose timeout has elapsed and, in the same transaction,
 * release the jobs that were blocked on them back to 'pending'.
 * @returns {Promise<number>} Number of waitpoints transitioned to 'timed_out'.
 *
 * Fix: only issue ROLLBACK when BEGIN actually succeeded, and never let a
 * failing ROLLBACK mask the original error.
 */
var expireTimedOutWaitpoints = async (pool) => {
  const client = await pool.connect();
  // Tracks whether an open transaction needs rolling back on error.
  let inTransaction = false;
  try {
    await client.query("BEGIN");
    inTransaction = true;
    const result = await client.query(
      `UPDATE waitpoints
     SET status = 'timed_out'
     WHERE status = 'waiting' AND timeout_at IS NOT NULL AND timeout_at <= NOW()
     RETURNING id, job_id`
    );
    for (const row of result.rows) {
      if (row.job_id != null) {
        await client.query(
          `UPDATE job_queue
       SET status = 'pending', wait_token_id = NULL, wait_until = NULL, updated_at = NOW()
       WHERE id = $1 AND status = 'waiting'`,
          [row.job_id]
        );
      }
    }
    await client.query("COMMIT");
    inTransaction = false;
    const count = result.rowCount || 0;
    if (count > 0) {
      log(`Expired ${count} timed-out waitpoints`);
    }
    return count;
  } catch (error) {
    if (inTransaction) {
      try {
        await client.query("ROLLBACK");
      } catch {
        // Connection is already unusable; surface the original error instead.
      }
    }
    log(`Error expiring timed-out waitpoints: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
1517
/**
 * Return the underlying pg Pool when the backend is Postgres-based;
 * otherwise null (wait features are unavailable for other backends).
 */
function tryExtractPool(backend) {
  return backend instanceof PostgresBackend ? backend.getPool() : null;
}
1523
/**
 * Build a minimal job context for non-Postgres backends: ctx.run degrades to
 * a plain invocation (no step persistence), and all wait primitives reject.
 */
function buildBasicContext(backend, jobId, baseCtx) {
  const rejectWaitFeature = async () => {
    throw new Error(
      "Wait features (waitFor, waitUntil, createToken, waitForToken, ctx.run) are currently only supported with the PostgreSQL backend."
    );
  };
  return {
    prolong: baseCtx.prolong,
    onTimeout: baseCtx.onTimeout,
    // No step cache available: just execute the step body.
    run: async (_stepName, fn) => fn(),
    waitFor: rejectWaitFeature,
    waitUntil: rejectWaitFeature,
    createToken: rejectWaitFeature,
    waitForToken: rejectWaitFeature,
    setProgress: async (percent) => {
      if (percent < 0 || percent > 100)
        throw new Error("Progress must be between 0 and 100");
      await backend.updateProgress(jobId, Math.round(percent));
    }
  };
}
1552
/**
 * Best-effort check that a handler can survive the toString()/Function
 * round-trip into a worker thread (required for forceKillOnTimeout).
 * Throws a descriptive Error when serialization would fail; returns
 * nothing on success.
 */
function validateHandlerSerializable(handler, jobType) {
  try {
    const source = handler.toString();
    // 'this.' usage outside a parameter list cannot survive serialization.
    const usesThisContext = source.includes("this.") && !source.match(/\([^)]*this[^)]*\)/);
    if (usesThisContext) {
      throw new Error(
        `Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
      );
    }
    if (source.includes("[native code]")) {
      throw new Error(
        `Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
      );
    }
    try {
      // Parse-only round trip; the rebuilt function is never invoked here.
      new Function("return " + source);
    } catch (parseError) {
      throw new Error(
        `Handler for job type "${jobType}" cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
      );
    }
  } catch (error) {
    if (error instanceof Error) {
      throw error;
    }
    throw new Error(
      `Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
    );
  }
}
1581
/**
 * Run a serializable handler inside a disposable worker thread so it can be
 * force-killed on timeout.
 *
 * The handler is transported as source text (handler.toString()) and rebuilt
 * in the worker via the Function constructor, so it must be a standalone
 * function with no closures; validateHandlerSerializable enforces this.
 *
 * Rejects with `failureReason = FailureReason.Timeout` when either the
 * in-worker timer fires or the outer watchdog terminates the worker.
 *
 * Fix: the outer watchdog timer is now cleared once the worker settles, so a
 * finished job no longer leaves a live timer keeping the event loop (and
 * process) alive for up to timeoutMs + 100ms.
 */
async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
  validateHandlerSerializable(handler, jobType);
  return new Promise((resolve, reject) => {
    const workerCode = `
    (function() {
      const { parentPort, workerData } = require('worker_threads');
      const { handlerCode, payload, timeoutMs } = workerData;

      // Create an AbortController for the handler
      const controller = new AbortController();
      const signal = controller.signal;

      // Set up timeout
      const timeoutId = setTimeout(() => {
        controller.abort();
        parentPort.postMessage({ type: 'timeout' });
      }, timeoutMs);

      try {
        // Execute the handler
        // Note: This uses Function constructor which requires the handler to be serializable.
        // The handler should be validated before reaching this point.
        let handlerFn;
        try {
          // Wrap handlerCode in parentheses to ensure it's treated as an expression
          // This handles both arrow functions and regular functions
          const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
            ? handlerCode
            : '(' + handlerCode + ')';
          handlerFn = new Function('return ' + wrappedCode)();
        } catch (parseError) {
          clearTimeout(timeoutId);
          parentPort.postMessage({
            type: 'error',
            error: {
              message: 'Handler cannot be deserialized in worker thread. ' +
                'Ensure your handler is a standalone function without closures over external variables. ' +
                'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
              stack: parseError instanceof Error ? parseError.stack : undefined,
              name: 'SerializationError',
            },
          });
          return;
        }

        // Ensure handlerFn is actually a function
        if (typeof handlerFn !== 'function') {
          clearTimeout(timeoutId);
          parentPort.postMessage({
            type: 'error',
            error: {
              message: 'Handler deserialization did not produce a function. ' +
                'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
              name: 'SerializationError',
            },
          });
          return;
        }

        handlerFn(payload, signal)
          .then(() => {
            clearTimeout(timeoutId);
            parentPort.postMessage({ type: 'success' });
          })
          .catch((error) => {
            clearTimeout(timeoutId);
            parentPort.postMessage({
              type: 'error',
              error: {
                message: error.message,
                stack: error.stack,
                name: error.name,
              },
            });
          });
      } catch (error) {
        clearTimeout(timeoutId);
        parentPort.postMessage({
          type: 'error',
          error: {
            message: error.message,
            stack: error.stack,
            name: error.name,
          },
        });
      }
    })();
    `;
    const worker = new worker_threads.Worker(workerCode, {
      eval: true,
      workerData: {
        handlerCode: handler.toString(),
        payload,
        timeoutMs
      }
    });
    let resolved = false;
    // Watchdog handle, cleared on every settle path (see fix above).
    let watchdog;
    worker.on("message", (message) => {
      if (resolved) return;
      resolved = true;
      clearTimeout(watchdog);
      if (message.type === "success") {
        resolve();
      } else if (message.type === "timeout") {
        const timeoutError = new Error(
          `Job timed out after ${timeoutMs} ms and was forcefully terminated`
        );
        timeoutError.failureReason = "timeout" /* Timeout */;
        reject(timeoutError);
      } else if (message.type === "error") {
        const error = new Error(message.error.message);
        error.stack = message.error.stack;
        error.name = message.error.name;
        reject(error);
      }
    });
    worker.on("error", (error) => {
      if (resolved) return;
      resolved = true;
      clearTimeout(watchdog);
      reject(error);
    });
    worker.on("exit", (code) => {
      if (resolved) return;
      if (code !== 0) {
        resolved = true;
        clearTimeout(watchdog);
        reject(new Error(`Worker stopped with exit code ${code}`));
      }
    });
    // Outer watchdog: if the worker never reports (e.g. the handler blocks
    // the worker's event loop so the in-worker timer cannot fire), terminate
    // it forcefully after a small grace period.
    watchdog = setTimeout(() => {
      if (!resolved) {
        resolved = true;
        worker.terminate().then(() => {
          const timeoutError = new Error(
            `Job timed out after ${timeoutMs} ms and was forcefully terminated`
          );
          timeoutError.failureReason = "timeout" /* Timeout */;
          reject(timeoutError);
        }).catch((err) => {
          reject(err);
        });
      }
    }, timeoutMs + 100);
  });
}
1724
/**
 * Convert a duration object into an absolute Date relative to now.
 * Months are approximated as 30 days and years as 365 days.
 * @throws {Error} When the combined duration is not strictly positive.
 */
function calculateWaitUntil(duration) {
  const now = Date.now();
  const UNIT_MS = {
    seconds: 1e3,
    minutes: 60 * 1e3,
    hours: 60 * 60 * 1e3,
    days: 24 * 60 * 60 * 1e3,
    weeks: 7 * 24 * 60 * 60 * 1e3,
    months: 30 * 24 * 60 * 60 * 1e3,
    years: 365 * 24 * 60 * 60 * 1e3
  };
  let total = 0;
  for (const [unit, factor] of Object.entries(UNIT_MS)) {
    if (duration[unit]) total += duration[unit] * factor;
  }
  if (total <= 0) {
    throw new Error(
      "waitFor duration must be positive. Provide at least one positive duration field."
    );
  }
  return new Date(now + total);
}
1741
/**
 * Mark elapsed duration/date waits as completed and pull results for token
 * waits whose waitpoint has since completed or timed out.
 * Mutates stepData in place; non-wait keys are left untouched.
 */
async function resolveCompletedWaits(pool, stepData) {
  const waitKeys = Object.keys(stepData).filter((k) => k.startsWith("__wait_"));
  for (const key of waitKeys) {
    const entry = stepData[key];
    if (!entry || typeof entry !== "object" || entry.completed) continue;
    if (entry.type === "duration" || entry.type === "date") {
      // Time-based waits are done by definition once the job is re-fetched.
      stepData[key] = { ...entry, completed: true };
      continue;
    }
    if (entry.type === "token" && entry.tokenId) {
      const wp = await getWaitpoint(pool, entry.tokenId);
      if (wp && wp.status === "completed") {
        stepData[key] = {
          ...entry,
          completed: true,
          result: { ok: true, output: wp.output }
        };
      } else if (wp && wp.status === "timed_out") {
        stepData[key] = {
          ...entry,
          completed: true,
          result: { ok: false, error: "Token timed out" }
        };
      }
      // Still-waiting tokens are left pending for the handler to re-check.
    }
  }
}
1766
/**
 * Build the full job context for the PostgreSQL backend, with durable step
 * caching (ctx.run) and wait primitives (waitFor / waitUntil / tokens).
 *
 * Waits are implemented by throwing a WaitSignal that the job processor
 * catches; the handler later replays from the top, and already-completed
 * steps/waits are skipped via the persisted stepData.
 *
 * NOTE(review): wait sites are keyed by a call-order counter (__wait_0,
 * __wait_1, ...), so handlers must invoke waits in the same order on every
 * replay for the cache to line up — confirm this is documented for users.
 */
function buildWaitContext(backend, pool, jobId, stepData, baseCtx) {
  // Monotonic counter identifying each wait call site within one execution.
  let waitCounter = 0;
  const ctx = {
    prolong: baseCtx.prolong,
    onTimeout: baseCtx.onTimeout,
    // Run a named step at most once; replays return the cached result.
    run: async (stepName, fn) => {
      const cached = stepData[stepName];
      if (cached && typeof cached === "object" && cached.__completed) {
        log(`Step "${stepName}" replayed from cache for job ${jobId}`);
        return cached.result;
      }
      const result = await fn();
      // Persist immediately so a later crash/wait does not re-run the step.
      stepData[stepName] = { __completed: true, result };
      await updateStepData(pool, jobId, stepData);
      return result;
    },
    // Suspend for a relative duration (throws WaitSignal; never returns here).
    waitFor: async (duration) => {
      const waitKey = `__wait_${waitCounter++}`;
      const cached = stepData[waitKey];
      if (cached && typeof cached === "object" && cached.completed) {
        log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
        return;
      }
      const waitUntilDate = calculateWaitUntil(duration);
      stepData[waitKey] = { type: "duration", completed: false };
      throw new WaitSignal("duration", waitUntilDate, void 0, stepData);
    },
    // Suspend until an absolute Date (throws WaitSignal unless replayed).
    waitUntil: async (date) => {
      const waitKey = `__wait_${waitCounter++}`;
      const cached = stepData[waitKey];
      if (cached && typeof cached === "object" && cached.completed) {
        log(`Wait "${waitKey}" already completed for job ${jobId}, skipping`);
        return;
      }
      stepData[waitKey] = { type: "date", completed: false };
      throw new WaitSignal("date", date, void 0, stepData);
    },
    // Create an externally-completable waitpoint token for this job.
    createToken: async (options) => {
      const token = await createWaitpoint(pool, jobId, options);
      return token;
    },
    // Wait for a token: returns the cached/settled result when available,
    // otherwise suspends via WaitSignal until the token is completed.
    waitForToken: async (tokenId) => {
      const waitKey = `__wait_${waitCounter++}`;
      const cached = stepData[waitKey];
      if (cached && typeof cached === "object" && cached.completed) {
        log(
          `Token wait "${waitKey}" already completed for job ${jobId}, returning cached result`
        );
        return cached.result;
      }
      const wp = await getWaitpoint(pool, tokenId);
      if (wp && wp.status === "completed") {
        const result = {
          ok: true,
          output: wp.output
        };
        stepData[waitKey] = {
          type: "token",
          tokenId,
          completed: true,
          result
        };
        await updateStepData(pool, jobId, stepData);
        return result;
      }
      if (wp && wp.status === "timed_out") {
        const result = {
          ok: false,
          error: "Token timed out"
        };
        stepData[waitKey] = {
          type: "token",
          tokenId,
          completed: true,
          result
        };
        await updateStepData(pool, jobId, stepData);
        return result;
      }
      stepData[waitKey] = { type: "token", tokenId, completed: false };
      throw new WaitSignal("token", void 0, tokenId, stepData);
    },
    // Report 0-100 progress; rounded before persisting.
    setProgress: async (percent) => {
      if (percent < 0 || percent > 100)
        throw new Error("Progress must be between 0 and 100");
      await backend.updateProgress(jobId, Math.round(percent));
    }
  };
  return ctx;
}
1856
+ async function processJobWithHandlers(backend, job, jobHandlers) {
1857
+ const handler = jobHandlers[job.jobType];
1858
+ if (!handler) {
1859
+ await backend.setPendingReasonForUnpickedJobs(
1860
+ `No handler registered for job type: ${job.jobType}`,
1861
+ job.jobType
1862
+ );
1863
+ await backend.failJob(
1864
+ job.id,
1865
+ new Error(`No handler registered for job type: ${job.jobType}`),
1866
+ "no_handler" /* NoHandler */
1867
+ );
1868
+ return;
1869
+ }
1870
+ const stepData = { ...job.stepData || {} };
1871
+ const pool = tryExtractPool(backend);
1872
+ const hasStepHistory = Object.keys(stepData).some(
1873
+ (k) => k.startsWith("__wait_")
1874
+ );
1875
+ if (hasStepHistory && pool) {
1876
+ await resolveCompletedWaits(pool, stepData);
1877
+ await updateStepData(pool, job.id, stepData);
1878
+ }
1879
+ const timeoutMs = job.timeoutMs ?? void 0;
1880
+ const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
1881
+ let timeoutId;
1882
+ const controller = new AbortController();
1883
+ try {
1884
+ if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
1885
+ await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
1886
+ } else {
1887
+ let onTimeoutCallback;
1888
+ let timeoutReject;
1889
+ const armTimeout = (ms) => {
1890
+ if (timeoutId) clearTimeout(timeoutId);
1891
+ timeoutId = setTimeout(() => {
1892
+ if (onTimeoutCallback) {
1893
+ try {
1894
+ const extension = onTimeoutCallback();
1895
+ if (typeof extension === "number" && extension > 0) {
1896
+ backend.prolongJob(job.id).catch(() => {
1897
+ });
1898
+ armTimeout(extension);
1899
+ return;
1900
+ }
1901
+ } catch (callbackError) {
1902
+ log(
1903
+ `onTimeout callback threw for job ${job.id}: ${callbackError}`
1904
+ );
1905
+ }
1906
+ }
1907
+ controller.abort();
1908
+ const timeoutError = new Error(`Job timed out after ${ms} ms`);
1909
+ timeoutError.failureReason = "timeout" /* Timeout */;
1910
+ if (timeoutReject) {
1911
+ timeoutReject(timeoutError);
1912
+ }
1913
+ }, ms);
1914
+ };
1915
+ const hasTimeout = timeoutMs != null && timeoutMs > 0;
1916
+ const baseCtx = hasTimeout ? {
1917
+ prolong: (ms) => {
1918
+ const duration = ms ?? timeoutMs;
1919
+ if (duration != null && duration > 0) {
1920
+ armTimeout(duration);
1921
+ backend.prolongJob(job.id).catch(() => {
1922
+ });
1923
+ }
1924
+ },
1925
+ onTimeout: (callback) => {
1926
+ onTimeoutCallback = callback;
1927
+ }
1928
+ } : {
1929
+ prolong: () => {
1930
+ log("prolong() called but ignored: job has no timeout set");
1931
+ },
1932
+ onTimeout: () => {
1933
+ log("onTimeout() called but ignored: job has no timeout set");
1934
+ }
1935
+ };
1936
+ const ctx = pool ? buildWaitContext(backend, pool, job.id, stepData, baseCtx) : buildBasicContext(backend, job.id, baseCtx);
1937
+ if (forceKillOnTimeout && !hasTimeout) {
1938
+ log(
1939
+ `forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
1940
+ );
1941
+ }
1942
+ const jobPromise = handler(job.payload, controller.signal, ctx);
1943
+ if (hasTimeout) {
1944
+ await Promise.race([
1945
+ jobPromise,
1946
+ new Promise((_, reject) => {
1947
+ timeoutReject = reject;
1948
+ armTimeout(timeoutMs);
1949
+ })
1950
+ ]);
1951
+ } else {
1952
+ await jobPromise;
1953
+ }
1954
+ }
1955
+ if (timeoutId) clearTimeout(timeoutId);
1956
+ await backend.completeJob(job.id);
1957
+ } catch (error) {
1958
+ if (timeoutId) clearTimeout(timeoutId);
1959
+ if (error instanceof WaitSignal) {
1960
+ if (!pool) {
1961
+ await backend.failJob(
1962
+ job.id,
1963
+ new Error(
1964
+ "WaitSignal received but wait features require the PostgreSQL backend."
1965
+ ),
1966
+ "handler_error" /* HandlerError */
1967
+ );
1968
+ return;
1969
+ }
1970
+ log(
1971
+ `Job ${job.id} entering wait: type=${error.type}, waitUntil=${error.waitUntil?.toISOString() ?? "none"}, tokenId=${error.tokenId ?? "none"}`
1972
+ );
1973
+ await waitJob(pool, job.id, {
1974
+ waitUntil: error.waitUntil,
1975
+ waitTokenId: error.tokenId,
1976
+ stepData: error.stepData
1977
+ });
1978
+ return;
1979
+ }
1980
+ console.error(`Error processing job ${job.id}:`, error);
1981
+ let failureReason = "handler_error" /* HandlerError */;
1982
+ if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
1983
+ failureReason = "timeout" /* Timeout */;
1984
+ }
1985
+ await backend.failJob(
1986
+ job.id,
1987
+ error instanceof Error ? error : new Error(String(error)),
1988
+ failureReason
1989
+ );
1990
+ }
1991
+ }
1992
+ async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError) {
1993
+ const jobs = await backend.getNextBatch(
1994
+ workerId,
1995
+ batchSize,
1996
+ jobType
1997
+ );
1998
+ if (!concurrency || concurrency >= jobs.length) {
1999
+ await Promise.all(
2000
+ jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers))
2001
+ );
2002
+ return jobs.length;
2003
+ }
2004
+ let idx = 0;
2005
+ let running = 0;
2006
+ let finished = 0;
2007
+ return new Promise((resolve, reject) => {
2008
+ const next = () => {
2009
+ if (finished === jobs.length) return resolve(jobs.length);
2010
+ while (running < concurrency && idx < jobs.length) {
2011
+ const job = jobs[idx++];
2012
+ running++;
2013
+ processJobWithHandlers(backend, job, jobHandlers).then(() => {
2014
+ running--;
2015
+ finished++;
2016
+ next();
2017
+ }).catch((err) => {
2018
+ running--;
2019
+ finished++;
2020
+ if (onError) {
2021
+ onError(err instanceof Error ? err : new Error(String(err)));
2022
+ }
2023
+ next();
2024
+ });
2025
+ }
2026
+ };
2027
+ next();
2028
+ });
2029
+ }
2030
+ var createProcessor = (backend, handlers, options = {}, onBeforeBatch) => {
2031
+ const {
2032
+ workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
2033
+ batchSize = 10,
2034
+ pollInterval = 5e3,
2035
+ onError = (error) => console.error("Job processor error:", error),
2036
+ jobType,
2037
+ concurrency = 3
2038
+ } = options;
2039
+ let running = false;
2040
+ let intervalId = null;
2041
+ let currentBatchPromise = null;
2042
+ setLogContext(options.verbose ?? false);
2043
+ const processJobs = async () => {
2044
+ if (!running) return 0;
2045
+ if (onBeforeBatch) {
2046
+ try {
2047
+ await onBeforeBatch();
2048
+ } catch (hookError) {
2049
+ log(`onBeforeBatch hook error: ${hookError}`);
2050
+ if (onError) {
2051
+ onError(
2052
+ hookError instanceof Error ? hookError : new Error(String(hookError))
2053
+ );
2054
+ }
2055
+ }
2056
+ }
2057
+ log(
2058
+ `Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
2059
+ );
2060
+ try {
2061
+ const processed = await processBatchWithHandlers(
2062
+ backend,
2063
+ workerId,
2064
+ batchSize,
2065
+ jobType,
2066
+ handlers,
2067
+ concurrency,
2068
+ onError
2069
+ );
2070
+ return processed;
2071
+ } catch (error) {
2072
+ onError(error instanceof Error ? error : new Error(String(error)));
2073
+ }
2074
+ return 0;
2075
+ };
2076
+ return {
2077
+ /**
2078
+ * Start the job processor in the background.
2079
+ * - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
2080
+ * - You have to call the stop method to stop the processor.
2081
+ */
2082
+ startInBackground: () => {
2083
+ if (running) return;
2084
+ log(`Starting job processor with workerId: ${workerId}`);
2085
+ running = true;
2086
+ const scheduleNext = (immediate) => {
2087
+ if (!running) return;
2088
+ if (immediate) {
2089
+ intervalId = setTimeout(loop, 0);
2090
+ } else {
2091
+ intervalId = setTimeout(loop, pollInterval);
2092
+ }
2093
+ };
2094
+ const loop = async () => {
2095
+ if (!running) return;
2096
+ currentBatchPromise = processJobs();
2097
+ const processed = await currentBatchPromise;
2098
+ currentBatchPromise = null;
2099
+ scheduleNext(processed === batchSize);
2100
+ };
2101
+ loop();
2102
+ },
2103
+ /**
2104
+ * Stop the job processor that runs in the background.
2105
+ * Does not wait for in-flight jobs.
2106
+ */
2107
+ stop: () => {
2108
+ log(`Stopping job processor with workerId: ${workerId}`);
2109
+ running = false;
2110
+ if (intervalId) {
2111
+ clearTimeout(intervalId);
2112
+ intervalId = null;
2113
+ }
2114
+ },
2115
+ /**
2116
+ * Stop the job processor and wait for all in-flight jobs to complete.
2117
+ * Useful for graceful shutdown (e.g., SIGTERM handling).
2118
+ */
2119
+ stopAndDrain: async (drainTimeoutMs = 3e4) => {
2120
+ log(`Stopping and draining job processor with workerId: ${workerId}`);
2121
+ running = false;
2122
+ if (intervalId) {
2123
+ clearTimeout(intervalId);
2124
+ intervalId = null;
2125
+ }
2126
+ if (currentBatchPromise) {
2127
+ await Promise.race([
2128
+ currentBatchPromise.catch(() => {
2129
+ }),
2130
+ new Promise((resolve) => setTimeout(resolve, drainTimeoutMs))
2131
+ ]);
2132
+ currentBatchPromise = null;
2133
+ }
2134
+ log(`Job processor ${workerId} drained`);
2135
+ },
2136
+ /**
2137
+ * Start the job processor synchronously.
2138
+ * - This will process all jobs immediately and then stop.
2139
+ * - The pollInterval is ignored.
2140
+ */
2141
+ start: async () => {
2142
+ log(`Starting job processor with workerId: ${workerId}`);
2143
+ running = true;
2144
+ const processed = await processJobs();
2145
+ running = false;
2146
+ return processed;
2147
+ },
2148
+ isRunning: () => running
2149
+ };
2150
+ };
2151
+ function loadPemOrFile(value) {
2152
+ if (!value) return void 0;
2153
+ if (value.startsWith("file://")) {
2154
+ const filePath = value.slice(7);
2155
+ return fs__default.default.readFileSync(filePath, "utf8");
2156
+ }
2157
+ return value;
2158
+ }
2159
+ var createPool = (config) => {
2160
+ let searchPath;
2161
+ let ssl = void 0;
2162
+ let customCA;
2163
+ let sslmode;
2164
+ if (config.connectionString) {
2165
+ try {
2166
+ const url = new URL(config.connectionString);
2167
+ searchPath = url.searchParams.get("search_path") || void 0;
2168
+ sslmode = url.searchParams.get("sslmode") || void 0;
2169
+ if (sslmode === "no-verify") {
2170
+ ssl = { rejectUnauthorized: false };
2171
+ }
2172
+ } catch (e) {
2173
+ const parsed = pgConnectionString.parse(config.connectionString);
2174
+ if (parsed.options) {
2175
+ const match = parsed.options.match(/search_path=([^\s]+)/);
2176
+ if (match) {
2177
+ searchPath = match[1];
2178
+ }
2179
+ }
2180
+ sslmode = typeof parsed.sslmode === "string" ? parsed.sslmode : void 0;
2181
+ if (sslmode === "no-verify") {
2182
+ ssl = { rejectUnauthorized: false };
2183
+ }
2184
+ }
2185
+ }
2186
+ if (config.ssl) {
2187
+ if (typeof config.ssl.ca === "string") {
2188
+ customCA = config.ssl.ca;
2189
+ } else if (typeof process.env.PGSSLROOTCERT === "string") {
2190
+ customCA = process.env.PGSSLROOTCERT;
2191
+ } else {
2192
+ customCA = void 0;
2193
+ }
2194
+ const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
2195
+ ssl = {
2196
+ ...ssl,
2197
+ ...caValue ? { ca: caValue } : {},
2198
+ cert: loadPemOrFile(
2199
+ typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
2200
+ ),
2201
+ key: loadPemOrFile(
2202
+ typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
2203
+ ),
2204
+ rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
2205
+ };
2206
+ }
2207
+ if (sslmode && customCA) {
2208
+ const warning = `
2209
+
2210
+ \x1B[33m**************************************************
2211
+ \u26A0\uFE0F WARNING: SSL CONFIGURATION ISSUE
2212
+ **************************************************
2213
+ Both sslmode ('${sslmode}') is set in the connection string
2214
+ and a custom CA is provided (via config.ssl.ca or PGSSLROOTCERT).
2215
+ This combination may cause connection failures or unexpected behavior.
2216
+
2217
+ Recommended: Remove sslmode from the connection string when using a custom CA.
2218
+ **************************************************\x1B[0m
2219
+ `;
2220
+ console.warn(warning);
2221
+ }
2222
+ const pool = new pg.Pool({
2223
+ ...config,
2224
+ ...ssl ? { ssl } : {}
2225
+ });
2226
+ if (searchPath) {
2227
+ pool.on("connect", (client) => {
2228
+ client.query(`SET search_path TO ${searchPath}`);
2229
+ });
2230
+ }
2231
+ return pool;
2232
+ };
2233
+
2234
+ // src/backends/redis-scripts.ts
2235
+ var SCORE_RANGE = "1000000000000000";
2236
+ var ADD_JOB_SCRIPT = `
2237
+ local prefix = KEYS[1]
2238
+ local jobType = ARGV[1]
2239
+ local payloadJson = ARGV[2]
2240
+ local maxAttempts = tonumber(ARGV[3])
2241
+ local priority = tonumber(ARGV[4])
2242
+ local runAtMs = ARGV[5] -- "0" means now
2243
+ local timeoutMs = ARGV[6] -- "null" string if not set
2244
+ local forceKillOnTimeout = ARGV[7]
2245
+ local tagsJson = ARGV[8] -- "null" or JSON array string
2246
+ local idempotencyKey = ARGV[9] -- "null" string if not set
2247
+ local nowMs = tonumber(ARGV[10])
2248
+
2249
+ -- Idempotency check
2250
+ if idempotencyKey ~= "null" then
2251
+ local existing = redis.call('GET', prefix .. 'idempotency:' .. idempotencyKey)
2252
+ if existing then
2253
+ return existing
2254
+ end
2255
+ end
2256
+
2257
+ -- Generate ID
2258
+ local id = redis.call('INCR', prefix .. 'id_seq')
2259
+ local jobKey = prefix .. 'job:' .. id
2260
+ local runAt = runAtMs ~= "0" and tonumber(runAtMs) or nowMs
2261
+
2262
+ -- Store the job hash
2263
+ redis.call('HMSET', jobKey,
2264
+ 'id', id,
2265
+ 'jobType', jobType,
2266
+ 'payload', payloadJson,
2267
+ 'status', 'pending',
2268
+ 'maxAttempts', maxAttempts,
2269
+ 'attempts', 0,
2270
+ 'priority', priority,
2271
+ 'runAt', runAt,
2272
+ 'timeoutMs', timeoutMs,
2273
+ 'forceKillOnTimeout', forceKillOnTimeout,
2274
+ 'createdAt', nowMs,
2275
+ 'updatedAt', nowMs,
2276
+ 'lockedAt', 'null',
2277
+ 'lockedBy', 'null',
2278
+ 'nextAttemptAt', 'null',
2279
+ 'pendingReason', 'null',
2280
+ 'errorHistory', '[]',
2281
+ 'failureReason', 'null',
2282
+ 'completedAt', 'null',
2283
+ 'startedAt', 'null',
2284
+ 'lastRetriedAt', 'null',
2285
+ 'lastFailedAt', 'null',
2286
+ 'lastCancelledAt', 'null',
2287
+ 'tags', tagsJson,
2288
+ 'idempotencyKey', idempotencyKey
2289
+ )
2290
+
2291
+ -- Status index
2292
+ redis.call('SADD', prefix .. 'status:pending', id)
2293
+
2294
+ -- Type index
2295
+ redis.call('SADD', prefix .. 'type:' .. jobType, id)
2296
+
2297
+ -- Tag indexes
2298
+ if tagsJson ~= "null" then
2299
+ local tags = cjson.decode(tagsJson)
2300
+ for _, tag in ipairs(tags) do
2301
+ redis.call('SADD', prefix .. 'tag:' .. tag, id)
2302
+ end
2303
+ -- Store tags for exact-match queries
2304
+ for _, tag in ipairs(tags) do
2305
+ redis.call('SADD', prefix .. 'job:' .. id .. ':tags', tag)
2306
+ end
2307
+ end
2308
+
2309
+ -- Idempotency mapping
2310
+ if idempotencyKey ~= "null" then
2311
+ redis.call('SET', prefix .. 'idempotency:' .. idempotencyKey, id)
2312
+ end
2313
+
2314
+ -- All-jobs sorted set (for ordering by createdAt)
2315
+ redis.call('ZADD', prefix .. 'all', nowMs, id)
2316
+
2317
+ -- Queue or delayed
2318
+ if runAt <= nowMs then
2319
+ -- Ready now: add to queue with priority score
2320
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - nowMs)
2321
+ redis.call('ZADD', prefix .. 'queue', score, id)
2322
+ else
2323
+ -- Future: add to delayed set
2324
+ redis.call('ZADD', prefix .. 'delayed', runAt, id)
2325
+ end
2326
+
2327
+ return id
2328
+ `;
2329
+ var GET_NEXT_BATCH_SCRIPT = `
2330
+ local prefix = KEYS[1]
2331
+ local workerId = ARGV[1]
2332
+ local batchSize = tonumber(ARGV[2])
2333
+ local nowMs = tonumber(ARGV[3])
2334
+ local jobTypeFilter = ARGV[4] -- "null" or JSON array or single string
2335
+
2336
+ -- 1. Move ready delayed jobs into queue
2337
+ local delayed = redis.call('ZRANGEBYSCORE', prefix .. 'delayed', '-inf', nowMs, 'LIMIT', 0, 200)
2338
+ for _, jobId in ipairs(delayed) do
2339
+ local jk = prefix .. 'job:' .. jobId
2340
+ local status = redis.call('HGET', jk, 'status')
2341
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
2342
+ local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
2343
+ if status == 'pending' and attempts < maxAttempts then
2344
+ local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
2345
+ local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
2346
+ local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
2347
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
2348
+ end
2349
+ redis.call('ZREM', prefix .. 'delayed', jobId)
2350
+ end
2351
+
2352
+ -- 2. Move ready retry jobs into queue
2353
+ local retries = redis.call('ZRANGEBYSCORE', prefix .. 'retry', '-inf', nowMs, 'LIMIT', 0, 200)
2354
+ for _, jobId in ipairs(retries) do
2355
+ local jk = prefix .. 'job:' .. jobId
2356
+ local status = redis.call('HGET', jk, 'status')
2357
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
2358
+ local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
2359
+ if status == 'failed' and attempts < maxAttempts then
2360
+ local pri = tonumber(redis.call('HGET', jk, 'priority') or '0')
2361
+ local ca = tonumber(redis.call('HGET', jk, 'createdAt'))
2362
+ local score = pri * ${SCORE_RANGE} + (${SCORE_RANGE} - ca)
2363
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
2364
+ redis.call('SREM', prefix .. 'status:failed', jobId)
2365
+ redis.call('SADD', prefix .. 'status:pending', jobId)
2366
+ redis.call('HMSET', jk, 'status', 'pending')
2367
+ end
2368
+ redis.call('ZREM', prefix .. 'retry', jobId)
2369
+ end
2370
+
2371
+ -- 3. Parse job type filter
2372
+ local filterTypes = nil
2373
+ if jobTypeFilter ~= "null" then
2374
+ -- Could be a JSON array or a plain string
2375
+ local ok, decoded = pcall(cjson.decode, jobTypeFilter)
2376
+ if ok and type(decoded) == 'table' then
2377
+ filterTypes = {}
2378
+ for _, t in ipairs(decoded) do filterTypes[t] = true end
2379
+ else
2380
+ filterTypes = { [jobTypeFilter] = true }
2381
+ end
2382
+ end
2383
+
2384
+ -- 4. Pop candidates from queue (highest score first)
2385
+ -- We pop more than batchSize because some may be filtered out
2386
+ local popCount = batchSize * 3
2387
+ local candidates = redis.call('ZPOPMAX', prefix .. 'queue', popCount)
2388
+ -- candidates: [member1, score1, member2, score2, ...]
2389
+
2390
+ local results = {}
2391
+ local jobsClaimed = 0
2392
+ local putBack = {} -- {score, id} pairs to put back
2393
+
2394
+ for i = 1, #candidates, 2 do
2395
+ local jobId = candidates[i]
2396
+ local score = candidates[i + 1]
2397
+ local jk = prefix .. 'job:' .. jobId
2398
+
2399
+ if jobsClaimed >= batchSize then
2400
+ -- We have enough; put the rest back
2401
+ table.insert(putBack, score)
2402
+ table.insert(putBack, jobId)
2403
+ else
2404
+ -- Check job type filter
2405
+ local jt = redis.call('HGET', jk, 'jobType')
2406
+ if filterTypes and not filterTypes[jt] then
2407
+ -- Doesn't match filter: put back
2408
+ table.insert(putBack, score)
2409
+ table.insert(putBack, jobId)
2410
+ else
2411
+ -- Check run_at
2412
+ local runAt = tonumber(redis.call('HGET', jk, 'runAt'))
2413
+ if runAt > nowMs then
2414
+ -- Not ready yet: move to delayed
2415
+ redis.call('ZADD', prefix .. 'delayed', runAt, jobId)
2416
+ else
2417
+ -- Claim this job
2418
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
2419
+ local startedAt = redis.call('HGET', jk, 'startedAt')
2420
+ local lastRetriedAt = redis.call('HGET', jk, 'lastRetriedAt')
2421
+ if startedAt == 'null' then startedAt = nowMs end
2422
+ if attempts > 0 then lastRetriedAt = nowMs end
2423
+
2424
+ redis.call('HMSET', jk,
2425
+ 'status', 'processing',
2426
+ 'lockedAt', nowMs,
2427
+ 'lockedBy', workerId,
2428
+ 'attempts', attempts + 1,
2429
+ 'updatedAt', nowMs,
2430
+ 'pendingReason', 'null',
2431
+ 'startedAt', startedAt,
2432
+ 'lastRetriedAt', lastRetriedAt
2433
+ )
2434
+
2435
+ -- Update status sets
2436
+ redis.call('SREM', prefix .. 'status:pending', jobId)
2437
+ redis.call('SADD', prefix .. 'status:processing', jobId)
2438
+
2439
+ -- Return job data as flat array
2440
+ local data = redis.call('HGETALL', jk)
2441
+ for _, v in ipairs(data) do
2442
+ table.insert(results, v)
2443
+ end
2444
+ -- Separator
2445
+ table.insert(results, '__JOB_SEP__')
2446
+ jobsClaimed = jobsClaimed + 1
2447
+ end
2448
+ end
2449
+ end
2450
+ end
2451
+
2452
+ -- Put back jobs we didn't claim
2453
+ if #putBack > 0 then
2454
+ redis.call('ZADD', prefix .. 'queue', unpack(putBack))
2455
+ end
2456
+
2457
+ return results
2458
+ `;
2459
+ var COMPLETE_JOB_SCRIPT = `
2460
+ local prefix = KEYS[1]
2461
+ local jobId = ARGV[1]
2462
+ local nowMs = ARGV[2]
2463
+ local jk = prefix .. 'job:' .. jobId
2464
+
2465
+ redis.call('HMSET', jk,
2466
+ 'status', 'completed',
2467
+ 'updatedAt', nowMs,
2468
+ 'completedAt', nowMs
2469
+ )
2470
+ redis.call('SREM', prefix .. 'status:processing', jobId)
2471
+ redis.call('SADD', prefix .. 'status:completed', jobId)
2472
+
2473
+ return 1
2474
+ `;
2475
+ var FAIL_JOB_SCRIPT = `
2476
+ local prefix = KEYS[1]
2477
+ local jobId = ARGV[1]
2478
+ local errorJson = ARGV[2]
2479
+ local failureReason = ARGV[3]
2480
+ local nowMs = tonumber(ARGV[4])
2481
+ local jk = prefix .. 'job:' .. jobId
2482
+
2483
+ local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
2484
+ local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))
2485
+
2486
+ -- Compute next_attempt_at: 2^attempts minutes from now
2487
+ local nextAttemptAt = 'null'
2488
+ if attempts < maxAttempts then
2489
+ local delayMs = math.pow(2, attempts) * 60000
2490
+ nextAttemptAt = nowMs + delayMs
2491
+ end
2492
+
2493
+ -- Append to error_history
2494
+ local history = redis.call('HGET', jk, 'errorHistory') or '[]'
2495
+ local ok, arr = pcall(cjson.decode, history)
2496
+ if not ok then arr = {} end
2497
+ local newErrors = cjson.decode(errorJson)
2498
+ for _, e in ipairs(newErrors) do
2499
+ table.insert(arr, e)
2500
+ end
2501
+
2502
+ redis.call('HMSET', jk,
2503
+ 'status', 'failed',
2504
+ 'updatedAt', nowMs,
2505
+ 'nextAttemptAt', tostring(nextAttemptAt),
2506
+ 'errorHistory', cjson.encode(arr),
2507
+ 'failureReason', failureReason,
2508
+ 'lastFailedAt', nowMs
2509
+ )
2510
+ redis.call('SREM', prefix .. 'status:processing', jobId)
2511
+ redis.call('SADD', prefix .. 'status:failed', jobId)
2512
+
2513
+ -- Schedule retry if applicable
2514
+ if nextAttemptAt ~= 'null' then
2515
+ redis.call('ZADD', prefix .. 'retry', nextAttemptAt, jobId)
2516
+ end
2517
+
2518
+ return 1
2519
+ `;
2520
+ var RETRY_JOB_SCRIPT = `
2521
+ local prefix = KEYS[1]
2522
+ local jobId = ARGV[1]
2523
+ local nowMs = tonumber(ARGV[2])
2524
+ local jk = prefix .. 'job:' .. jobId
2525
+
2526
+ local oldStatus = redis.call('HGET', jk, 'status')
2527
+
2528
+ redis.call('HMSET', jk,
2529
+ 'status', 'pending',
2530
+ 'updatedAt', nowMs,
2531
+ 'lockedAt', 'null',
2532
+ 'lockedBy', 'null',
2533
+ 'nextAttemptAt', nowMs,
2534
+ 'lastRetriedAt', nowMs
2535
+ )
2536
+
2537
+ -- Remove from old status, add to pending
2538
+ if oldStatus then
2539
+ redis.call('SREM', prefix .. 'status:' .. oldStatus, jobId)
2540
+ end
2541
+ redis.call('SADD', prefix .. 'status:pending', jobId)
2542
+
2543
+ -- Remove from retry sorted set if present
2544
+ redis.call('ZREM', prefix .. 'retry', jobId)
2545
+
2546
+ -- Add to queue (ready now)
2547
+ local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
2548
+ local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
2549
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
2550
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
2551
+
2552
+ return 1
2553
+ `;
2554
+ var CANCEL_JOB_SCRIPT = `
2555
+ local prefix = KEYS[1]
2556
+ local jobId = ARGV[1]
2557
+ local nowMs = ARGV[2]
2558
+ local jk = prefix .. 'job:' .. jobId
2559
+
2560
+ local status = redis.call('HGET', jk, 'status')
2561
+ if status ~= 'pending' then return 0 end
2562
+
2563
+ redis.call('HMSET', jk,
2564
+ 'status', 'cancelled',
2565
+ 'updatedAt', nowMs,
2566
+ 'lastCancelledAt', nowMs
2567
+ )
2568
+ redis.call('SREM', prefix .. 'status:pending', jobId)
2569
+ redis.call('SADD', prefix .. 'status:cancelled', jobId)
2570
+ -- Remove from queue / delayed
2571
+ redis.call('ZREM', prefix .. 'queue', jobId)
2572
+ redis.call('ZREM', prefix .. 'delayed', jobId)
2573
+
2574
+ return 1
2575
+ `;
2576
+ var PROLONG_JOB_SCRIPT = `
2577
+ local prefix = KEYS[1]
2578
+ local jobId = ARGV[1]
2579
+ local nowMs = ARGV[2]
2580
+ local jk = prefix .. 'job:' .. jobId
2581
+
2582
+ local status = redis.call('HGET', jk, 'status')
2583
+ if status ~= 'processing' then return 0 end
2584
+
2585
+ redis.call('HMSET', jk,
2586
+ 'lockedAt', nowMs,
2587
+ 'updatedAt', nowMs
2588
+ )
2589
+
2590
+ return 1
2591
+ `;
2592
+ var RECLAIM_STUCK_JOBS_SCRIPT = `
2593
+ local prefix = KEYS[1]
2594
+ local maxAgeMs = tonumber(ARGV[1])
2595
+ local nowMs = tonumber(ARGV[2])
2596
+
2597
+ local processing = redis.call('SMEMBERS', prefix .. 'status:processing')
2598
+ local count = 0
2599
+
2600
+ for _, jobId in ipairs(processing) do
2601
+ local jk = prefix .. 'job:' .. jobId
2602
+ local lockedAt = redis.call('HGET', jk, 'lockedAt')
2603
+ if lockedAt and lockedAt ~= 'null' then
2604
+ local lockedAtNum = tonumber(lockedAt)
2605
+ if lockedAtNum then
2606
+ -- Use the greater of maxAgeMs and the job's own timeoutMs
2607
+ local jobMaxAge = maxAgeMs
2608
+ local timeoutMs = redis.call('HGET', jk, 'timeoutMs')
2609
+ if timeoutMs and timeoutMs ~= 'null' then
2610
+ local tMs = tonumber(timeoutMs)
2611
+ if tMs and tMs > jobMaxAge then
2612
+ jobMaxAge = tMs
2613
+ end
2614
+ end
2615
+ local cutoff = nowMs - jobMaxAge
2616
+ if lockedAtNum < cutoff then
2617
+ redis.call('HMSET', jk,
2618
+ 'status', 'pending',
2619
+ 'lockedAt', 'null',
2620
+ 'lockedBy', 'null',
2621
+ 'updatedAt', nowMs
2622
+ )
2623
+ redis.call('SREM', prefix .. 'status:processing', jobId)
2624
+ redis.call('SADD', prefix .. 'status:pending', jobId)
2625
+
2626
+ -- Re-add to queue
2627
+ local priority = tonumber(redis.call('HGET', jk, 'priority') or '0')
2628
+ local createdAt = tonumber(redis.call('HGET', jk, 'createdAt'))
2629
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - createdAt)
2630
+ redis.call('ZADD', prefix .. 'queue', score, jobId)
2631
+
2632
+ count = count + 1
2633
+ end
2634
+ end
2635
+ end
2636
+ end
2637
+
2638
+ return count
2639
+ `;
2640
+ var CLEANUP_OLD_JOBS_SCRIPT = `
2641
+ local prefix = KEYS[1]
2642
+ local cutoffMs = tonumber(ARGV[1])
2643
+
2644
+ local completed = redis.call('SMEMBERS', prefix .. 'status:completed')
2645
+ local count = 0
2646
+
2647
+ for _, jobId in ipairs(completed) do
2648
+ local jk = prefix .. 'job:' .. jobId
2649
+ local updatedAt = tonumber(redis.call('HGET', jk, 'updatedAt'))
2650
+ if updatedAt and updatedAt < cutoffMs then
2651
+ -- Remove all indexes
2652
+ local jobType = redis.call('HGET', jk, 'jobType')
2653
+ local tagsJson = redis.call('HGET', jk, 'tags')
2654
+ local idempotencyKey = redis.call('HGET', jk, 'idempotencyKey')
2655
+
2656
+ redis.call('DEL', jk)
2657
+ redis.call('SREM', prefix .. 'status:completed', jobId)
2658
+ redis.call('ZREM', prefix .. 'all', jobId)
2659
+ if jobType then
2660
+ redis.call('SREM', prefix .. 'type:' .. jobType, jobId)
2661
+ end
2662
+ if tagsJson and tagsJson ~= 'null' then
2663
+ local ok, tags = pcall(cjson.decode, tagsJson)
2664
+ if ok and type(tags) == 'table' then
2665
+ for _, tag in ipairs(tags) do
2666
+ redis.call('SREM', prefix .. 'tag:' .. tag, jobId)
2667
+ end
2668
+ end
2669
+ redis.call('DEL', prefix .. 'job:' .. jobId .. ':tags')
2670
+ end
2671
+ if idempotencyKey and idempotencyKey ~= 'null' then
2672
+ redis.call('DEL', prefix .. 'idempotency:' .. idempotencyKey)
2673
+ end
2674
+ -- Delete events
2675
+ redis.call('DEL', prefix .. 'events:' .. jobId)
2676
+
2677
+ count = count + 1
2678
+ end
2679
+ end
2680
+
2681
+ return count
2682
+ `;
2683
+
2684
+ // src/backends/redis.ts
2685
+ function hashToObject(arr) {
2686
+ const obj = {};
2687
+ for (let i = 0; i < arr.length; i += 2) {
2688
+ obj[arr[i]] = arr[i + 1];
2689
+ }
2690
+ return obj;
2691
+ }
2692
+ function deserializeJob(h) {
2693
+ const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
2694
+ const numOrNull = (v) => {
2695
+ const n = nullish(v);
2696
+ return n === null ? null : Number(n);
2697
+ };
2698
+ const dateOrNull = (v) => {
2699
+ const n = numOrNull(v);
2700
+ return n === null ? null : new Date(n);
2701
+ };
2702
+ let errorHistory = [];
2703
+ try {
2704
+ const raw = h.errorHistory;
2705
+ if (raw && raw !== "[]") {
2706
+ errorHistory = JSON.parse(raw);
2707
+ }
2708
+ } catch {
2709
+ }
2710
+ let tags;
2711
+ try {
2712
+ const raw = h.tags;
2713
+ if (raw && raw !== "null") {
2714
+ tags = JSON.parse(raw);
2715
+ }
2716
+ } catch {
2717
+ }
2718
+ let payload;
2719
+ try {
2720
+ payload = JSON.parse(h.payload);
2721
+ } catch {
2722
+ payload = h.payload;
2723
+ }
2724
+ return {
2725
+ id: Number(h.id),
2726
+ jobType: h.jobType,
2727
+ payload,
2728
+ status: h.status,
2729
+ createdAt: new Date(Number(h.createdAt)),
2730
+ updatedAt: new Date(Number(h.updatedAt)),
2731
+ lockedAt: dateOrNull(h.lockedAt),
2732
+ lockedBy: nullish(h.lockedBy),
2733
+ attempts: Number(h.attempts),
2734
+ maxAttempts: Number(h.maxAttempts),
2735
+ nextAttemptAt: dateOrNull(h.nextAttemptAt),
2736
+ priority: Number(h.priority),
2737
+ runAt: new Date(Number(h.runAt)),
2738
+ pendingReason: nullish(h.pendingReason),
2739
+ errorHistory,
2740
+ timeoutMs: numOrNull(h.timeoutMs),
2741
+ forceKillOnTimeout: h.forceKillOnTimeout === "true" || h.forceKillOnTimeout === "1" ? true : h.forceKillOnTimeout === "false" || h.forceKillOnTimeout === "0" ? false : null,
2742
+ failureReason: nullish(h.failureReason) ?? null,
2743
+ completedAt: dateOrNull(h.completedAt),
2744
+ startedAt: dateOrNull(h.startedAt),
2745
+ lastRetriedAt: dateOrNull(h.lastRetriedAt),
2746
+ lastFailedAt: dateOrNull(h.lastFailedAt),
2747
+ lastCancelledAt: dateOrNull(h.lastCancelledAt),
2748
+ tags,
2749
+ idempotencyKey: nullish(h.idempotencyKey),
2750
+ progress: numOrNull(h.progress)
2751
+ };
2752
+ }
2753
+ var RedisBackend = class {
2754
+ constructor(redisConfig) {
2755
+ let IORedis;
2756
+ try {
2757
+ const _require = module$1.createRequire((typeof document === 'undefined' ? require('u' + 'rl').pathToFileURL(__filename).href : (_documentCurrentScript && _documentCurrentScript.tagName.toUpperCase() === 'SCRIPT' && _documentCurrentScript.src || new URL('index.cjs', document.baseURI).href)));
2758
+ IORedis = _require("ioredis");
2759
+ } catch {
2760
+ throw new Error(
2761
+ 'Redis backend requires the "ioredis" package. Install it with: npm install ioredis'
2762
+ );
2763
+ }
2764
+ this.prefix = redisConfig.keyPrefix ?? "dq:";
2765
+ if (redisConfig.url) {
2766
+ this.client = new IORedis(redisConfig.url, {
2767
+ ...redisConfig.tls ? { tls: redisConfig.tls } : {},
2768
+ ...redisConfig.db !== void 0 ? { db: redisConfig.db } : {}
2769
+ });
2770
+ } else {
2771
+ this.client = new IORedis({
2772
+ host: redisConfig.host ?? "127.0.0.1",
2773
+ port: redisConfig.port ?? 6379,
2774
+ password: redisConfig.password,
2775
+ db: redisConfig.db ?? 0,
2776
+ ...redisConfig.tls ? { tls: redisConfig.tls } : {}
2777
+ });
2778
+ }
2779
+ }
2780
+ /** Expose the raw ioredis client for advanced usage. */
2781
+ getClient() {
2782
+ return this.client;
2783
+ }
2784
+ nowMs() {
2785
+ return Date.now();
2786
+ }
2787
+ // ── Events ──────────────────────────────────────────────────────────
2788
+ async recordJobEvent(jobId, eventType, metadata) {
2789
+ try {
2790
+ const eventId = await this.client.incr(`${this.prefix}event_id_seq`);
2791
+ const event = JSON.stringify({
2792
+ id: eventId,
2793
+ jobId,
2794
+ eventType,
2795
+ createdAt: this.nowMs(),
2796
+ metadata: metadata ?? null
2797
+ });
2798
+ await this.client.rpush(`${this.prefix}events:${jobId}`, event);
2799
+ } catch (error) {
2800
+ log(`Error recording job event for job ${jobId}: ${error}`);
2801
+ }
2802
+ }
2803
+ async getJobEvents(jobId) {
2804
+ const raw = await this.client.lrange(
2805
+ `${this.prefix}events:${jobId}`,
2806
+ 0,
2807
+ -1
2808
+ );
2809
+ return raw.map((r) => {
2810
+ const e = JSON.parse(r);
2811
+ return {
2812
+ ...e,
2813
+ createdAt: new Date(e.createdAt)
2814
+ };
2815
+ });
2816
+ }
2817
+ // ── Job CRUD ──────────────────────────────────────────────────────────
2818
+ async addJob({
2819
+ jobType,
2820
+ payload,
2821
+ maxAttempts = 3,
2822
+ priority = 0,
2823
+ runAt = null,
2824
+ timeoutMs = void 0,
2825
+ forceKillOnTimeout = false,
2826
+ tags = void 0,
2827
+ idempotencyKey = void 0
2828
+ }) {
2829
+ const now = this.nowMs();
2830
+ const runAtMs = runAt ? runAt.getTime() : 0;
2831
+ const result = await this.client.eval(
2832
+ ADD_JOB_SCRIPT,
2833
+ 1,
2834
+ this.prefix,
2835
+ jobType,
2836
+ JSON.stringify(payload),
2837
+ maxAttempts,
2838
+ priority,
2839
+ runAtMs.toString(),
2840
+ timeoutMs !== void 0 ? timeoutMs.toString() : "null",
2841
+ forceKillOnTimeout ? "true" : "false",
2842
+ tags ? JSON.stringify(tags) : "null",
2843
+ idempotencyKey ?? "null",
2844
+ now
2845
+ );
2846
+ const jobId = Number(result);
2847
+ log(
2848
+ `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
2849
+ );
2850
+ await this.recordJobEvent(jobId, "added" /* Added */, {
2851
+ jobType,
2852
+ payload,
2853
+ tags,
2854
+ idempotencyKey
2855
+ });
2856
+ return jobId;
2857
+ }
2858
+ async getJob(id) {
2859
+ const data = await this.client.hgetall(`${this.prefix}job:${id}`);
2860
+ if (!data || Object.keys(data).length === 0) {
2861
+ log(`Job ${id} not found`);
2862
+ return null;
2863
+ }
2864
+ log(`Found job ${id}`);
2865
+ return deserializeJob(data);
2866
+ }
2867
+ async getJobsByStatus(status, limit = 100, offset = 0) {
2868
+ const ids = await this.client.smembers(`${this.prefix}status:${status}`);
2869
+ if (ids.length === 0) return [];
2870
+ const jobs = await this.loadJobsByIds(ids);
2871
+ jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
2872
+ return jobs.slice(offset, offset + limit);
2873
+ }
2874
+ async getAllJobs(limit = 100, offset = 0) {
2875
+ const ids = await this.client.zrevrange(
2876
+ `${this.prefix}all`,
2877
+ offset,
2878
+ offset + limit - 1
2879
+ );
2880
+ if (ids.length === 0) return [];
2881
+ return this.loadJobsByIds(ids);
2882
+ }
2883
+ async getJobs(filters, limit = 100, offset = 0) {
2884
+ let candidateIds;
2885
+ if (filters?.jobType) {
2886
+ candidateIds = await this.client.smembers(
2887
+ `${this.prefix}type:${filters.jobType}`
2888
+ );
2889
+ } else {
2890
+ candidateIds = await this.client.zrevrange(`${this.prefix}all`, 0, -1);
2891
+ }
2892
+ if (candidateIds.length === 0) return [];
2893
+ if (filters?.tags && filters.tags.values.length > 0) {
2894
+ candidateIds = await this.filterByTags(
2895
+ candidateIds,
2896
+ filters.tags.values,
2897
+ filters.tags.mode || "all"
2898
+ );
2899
+ }
2900
+ let jobs = await this.loadJobsByIds(candidateIds);
2901
+ if (filters) {
2902
+ if (filters.priority !== void 0) {
2903
+ jobs = jobs.filter((j) => j.priority === filters.priority);
2904
+ }
2905
+ if (filters.runAt) {
2906
+ jobs = this.filterByRunAt(jobs, filters.runAt);
2907
+ }
2908
+ }
2909
+ jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
2910
+ return jobs.slice(offset, offset + limit);
2911
+ }
2912
+ async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
2913
+ const allIds = await this.client.zrevrange(`${this.prefix}all`, 0, -1);
2914
+ if (allIds.length === 0) return [];
2915
+ const filtered = await this.filterByTags(allIds, tags, mode);
2916
+ if (filtered.length === 0) return [];
2917
+ const jobs = await this.loadJobsByIds(filtered);
2918
+ jobs.sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime());
2919
+ return jobs.slice(offset, offset + limit);
2920
+ }
2921
+ // ── Processing lifecycle ──────────────────────────────────────────────
2922
+ async getNextBatch(workerId, batchSize = 10, jobType) {
2923
+ const now = this.nowMs();
2924
+ const jobTypeFilter = jobType === void 0 ? "null" : Array.isArray(jobType) ? JSON.stringify(jobType) : jobType;
2925
+ const result = await this.client.eval(
2926
+ GET_NEXT_BATCH_SCRIPT,
2927
+ 1,
2928
+ this.prefix,
2929
+ workerId,
2930
+ batchSize,
2931
+ now,
2932
+ jobTypeFilter
2933
+ );
2934
+ if (!result || result.length === 0) {
2935
+ log("Found 0 jobs to process");
2936
+ return [];
2937
+ }
2938
+ const jobs = [];
2939
+ let current = [];
2940
+ for (const item of result) {
2941
+ if (item === "__JOB_SEP__") {
2942
+ if (current.length > 0) {
2943
+ const h = hashToObject(current);
2944
+ jobs.push(deserializeJob(h));
2945
+ }
2946
+ current = [];
2947
+ } else {
2948
+ current.push(item);
2949
+ }
2950
+ }
2951
+ log(`Found ${jobs.length} jobs to process`);
2952
+ for (const job of jobs) {
2953
+ await this.recordJobEvent(job.id, "processing" /* Processing */);
2954
+ }
2955
+ return jobs;
2956
+ }
2957
+ async completeJob(jobId) {
2958
+ const now = this.nowMs();
2959
+ await this.client.eval(COMPLETE_JOB_SCRIPT, 1, this.prefix, jobId, now);
2960
+ await this.recordJobEvent(jobId, "completed" /* Completed */);
2961
+ log(`Completed job ${jobId}`);
2962
+ }
2963
+ async failJob(jobId, error, failureReason) {
2964
+ const now = this.nowMs();
2965
+ const errorJson = JSON.stringify([
2966
+ {
2967
+ message: error.message || String(error),
2968
+ timestamp: new Date(now).toISOString()
2969
+ }
2970
+ ]);
2971
+ await this.client.eval(
2972
+ FAIL_JOB_SCRIPT,
2973
+ 1,
2974
+ this.prefix,
2975
+ jobId,
2976
+ errorJson,
2977
+ failureReason ?? "null",
2978
+ now
2979
+ );
2980
+ await this.recordJobEvent(jobId, "failed" /* Failed */, {
2981
+ message: error.message || String(error),
2982
+ failureReason
2983
+ });
2984
+ log(`Failed job ${jobId}`);
2985
+ }
2986
+ async prolongJob(jobId) {
2987
+ try {
2988
+ const now = this.nowMs();
2989
+ await this.client.eval(PROLONG_JOB_SCRIPT, 1, this.prefix, jobId, now);
2990
+ await this.recordJobEvent(jobId, "prolonged" /* Prolonged */);
2991
+ log(`Prolonged job ${jobId}`);
2992
+ } catch (error) {
2993
+ log(`Error prolonging job ${jobId}: ${error}`);
2994
+ }
2995
+ }
2996
+ // ── Progress ──────────────────────────────────────────────────────────
2997
+ async updateProgress(jobId, progress) {
2998
+ try {
2999
+ const now = this.nowMs();
3000
+ await this.client.hset(
3001
+ `${this.prefix}job:${jobId}`,
3002
+ "progress",
3003
+ progress.toString(),
3004
+ "updatedAt",
3005
+ now.toString()
3006
+ );
3007
+ log(`Updated progress for job ${jobId}: ${progress}%`);
3008
+ } catch (error) {
3009
+ log(`Error updating progress for job ${jobId}: ${error}`);
3010
+ }
3011
+ }
3012
+ // ── Job management ────────────────────────────────────────────────────
3013
+ async retryJob(jobId) {
3014
+ const now = this.nowMs();
3015
+ await this.client.eval(RETRY_JOB_SCRIPT, 1, this.prefix, jobId, now);
3016
+ await this.recordJobEvent(jobId, "retried" /* Retried */);
3017
+ log(`Retried job ${jobId}`);
3018
+ }
3019
+ async cancelJob(jobId) {
3020
+ const now = this.nowMs();
3021
+ await this.client.eval(CANCEL_JOB_SCRIPT, 1, this.prefix, jobId, now);
3022
+ await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
3023
+ log(`Cancelled job ${jobId}`);
3024
+ }
3025
+ async cancelAllUpcomingJobs(filters) {
3026
+ let ids = await this.client.smembers(`${this.prefix}status:pending`);
3027
+ if (ids.length === 0) return 0;
3028
+ if (filters) {
3029
+ ids = await this.applyFilters(ids, filters);
3030
+ }
3031
+ const now = this.nowMs();
3032
+ let count = 0;
3033
+ for (const id of ids) {
3034
+ const result = await this.client.eval(
3035
+ CANCEL_JOB_SCRIPT,
3036
+ 1,
3037
+ this.prefix,
3038
+ id,
3039
+ now
3040
+ );
3041
+ if (Number(result) === 1) count++;
3042
+ }
3043
+ log(`Cancelled ${count} jobs`);
3044
+ return count;
3045
+ }
3046
+ async editJob(jobId, updates) {
3047
+ const jk = `${this.prefix}job:${jobId}`;
3048
+ const status = await this.client.hget(jk, "status");
3049
+ if (status !== "pending") {
3050
+ log(`Job ${jobId} is not pending (status: ${status}), skipping edit`);
3051
+ return;
3052
+ }
3053
+ const now = this.nowMs();
3054
+ const fields = [];
3055
+ const metadata = {};
3056
+ if (updates.payload !== void 0) {
3057
+ fields.push("payload", JSON.stringify(updates.payload));
3058
+ metadata.payload = updates.payload;
3059
+ }
3060
+ if (updates.maxAttempts !== void 0) {
3061
+ fields.push("maxAttempts", updates.maxAttempts.toString());
3062
+ metadata.maxAttempts = updates.maxAttempts;
3063
+ }
3064
+ if (updates.priority !== void 0) {
3065
+ fields.push("priority", updates.priority.toString());
3066
+ metadata.priority = updates.priority;
3067
+ const createdAt = await this.client.hget(jk, "createdAt");
3068
+ const score = updates.priority * 1e15 + (1e15 - Number(createdAt));
3069
+ const inQueue = await this.client.zscore(
3070
+ `${this.prefix}queue`,
3071
+ jobId.toString()
3072
+ );
3073
+ if (inQueue !== null) {
3074
+ await this.client.zadd(`${this.prefix}queue`, score, jobId.toString());
3075
+ }
3076
+ }
3077
+ if (updates.runAt !== void 0) {
3078
+ if (updates.runAt === null) {
3079
+ fields.push("runAt", now.toString());
3080
+ } else {
3081
+ fields.push("runAt", updates.runAt.getTime().toString());
3082
+ }
3083
+ metadata.runAt = updates.runAt;
3084
+ }
3085
+ if (updates.timeoutMs !== void 0) {
3086
+ fields.push(
3087
+ "timeoutMs",
3088
+ updates.timeoutMs !== null ? updates.timeoutMs.toString() : "null"
3089
+ );
3090
+ metadata.timeoutMs = updates.timeoutMs;
3091
+ }
3092
+ if (updates.tags !== void 0) {
3093
+ const oldTagsJson = await this.client.hget(jk, "tags");
3094
+ if (oldTagsJson && oldTagsJson !== "null") {
3095
+ try {
3096
+ const oldTags = JSON.parse(oldTagsJson);
3097
+ for (const tag of oldTags) {
3098
+ await this.client.srem(
3099
+ `${this.prefix}tag:${tag}`,
3100
+ jobId.toString()
3101
+ );
3102
+ }
3103
+ } catch {
3104
+ }
3105
+ }
3106
+ await this.client.del(`${this.prefix}job:${jobId}:tags`);
3107
+ if (updates.tags !== null) {
3108
+ for (const tag of updates.tags) {
3109
+ await this.client.sadd(`${this.prefix}tag:${tag}`, jobId.toString());
3110
+ await this.client.sadd(`${this.prefix}job:${jobId}:tags`, tag);
3111
+ }
3112
+ fields.push("tags", JSON.stringify(updates.tags));
3113
+ } else {
3114
+ fields.push("tags", "null");
3115
+ }
3116
+ metadata.tags = updates.tags;
3117
+ }
3118
+ if (fields.length === 0) {
3119
+ log(`No fields to update for job ${jobId}`);
3120
+ return;
3121
+ }
3122
+ fields.push("updatedAt", now.toString());
3123
+ await this.client.hmset(jk, ...fields);
3124
+ await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
3125
+ log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
3126
+ }
3127
+ async editAllPendingJobs(filters, updates) {
3128
+ let ids = await this.client.smembers(`${this.prefix}status:pending`);
3129
+ if (ids.length === 0) return 0;
3130
+ if (filters) {
3131
+ ids = await this.applyFilters(ids, filters);
3132
+ }
3133
+ let count = 0;
3134
+ for (const id of ids) {
3135
+ await this.editJob(Number(id), updates);
3136
+ count++;
3137
+ }
3138
+ log(`Edited ${count} pending jobs`);
3139
+ return count;
3140
+ }
3141
+ async cleanupOldJobs(daysToKeep = 30) {
3142
+ const cutoffMs = this.nowMs() - daysToKeep * 24 * 60 * 60 * 1e3;
3143
+ const result = await this.client.eval(
3144
+ CLEANUP_OLD_JOBS_SCRIPT,
3145
+ 1,
3146
+ this.prefix,
3147
+ cutoffMs
3148
+ );
3149
+ log(`Deleted ${result} old jobs`);
3150
+ return Number(result);
3151
+ }
3152
+ async cleanupOldJobEvents(daysToKeep = 30) {
3153
+ log(
3154
+ `cleanupOldJobEvents is a no-op for Redis backend (events are cleaned up with their jobs)`
3155
+ );
3156
+ return 0;
3157
+ }
3158
+ async reclaimStuckJobs(maxProcessingTimeMinutes = 10) {
3159
+ const maxAgeMs = maxProcessingTimeMinutes * 60 * 1e3;
3160
+ const now = this.nowMs();
3161
+ const result = await this.client.eval(
3162
+ RECLAIM_STUCK_JOBS_SCRIPT,
3163
+ 1,
3164
+ this.prefix,
3165
+ maxAgeMs,
3166
+ now
3167
+ );
3168
+ log(`Reclaimed ${result} stuck jobs`);
3169
+ return Number(result);
3170
+ }
3171
+ // ── Internal helpers ──────────────────────────────────────────────────
3172
+ async setPendingReasonForUnpickedJobs(reason, jobType) {
3173
+ let ids = await this.client.smembers(`${this.prefix}status:pending`);
3174
+ if (ids.length === 0) return;
3175
+ if (jobType) {
3176
+ const types = Array.isArray(jobType) ? jobType : [jobType];
3177
+ const typeSet = /* @__PURE__ */ new Set();
3178
+ for (const t of types) {
3179
+ const typeIds = await this.client.smembers(`${this.prefix}type:${t}`);
3180
+ for (const id of typeIds) typeSet.add(id);
3181
+ }
3182
+ ids = ids.filter((id) => typeSet.has(id));
3183
+ }
3184
+ for (const id of ids) {
3185
+ await this.client.hset(
3186
+ `${this.prefix}job:${id}`,
3187
+ "pendingReason",
3188
+ reason
3189
+ );
3190
+ }
3191
+ }
3192
+ // ── Private helpers ───────────────────────────────────────────────────
3193
+ async loadJobsByIds(ids) {
3194
+ const pipeline = this.client.pipeline();
3195
+ for (const id of ids) {
3196
+ pipeline.hgetall(`${this.prefix}job:${id}`);
3197
+ }
3198
+ const results = await pipeline.exec();
3199
+ const jobs = [];
3200
+ if (results) {
3201
+ for (const [err, data] of results) {
3202
+ if (!err && data && typeof data === "object" && Object.keys(data).length > 0) {
3203
+ jobs.push(
3204
+ deserializeJob(data)
3205
+ );
3206
+ }
3207
+ }
3208
+ }
3209
+ return jobs;
3210
+ }
3211
+ async filterByTags(candidateIds, tags, mode) {
3212
+ const candidateSet = new Set(candidateIds.map(String));
3213
+ if (mode === "exact") {
3214
+ const tagSet = new Set(tags);
3215
+ const result = [];
3216
+ for (const id of candidateIds) {
3217
+ const jobTags = await this.client.smembers(
3218
+ `${this.prefix}job:${id}:tags`
3219
+ );
3220
+ if (jobTags.length === tagSet.size && jobTags.every((t) => tagSet.has(t))) {
3221
+ result.push(id);
3222
+ }
3223
+ }
3224
+ return result;
3225
+ }
3226
+ if (mode === "all") {
3227
+ let intersection = new Set(candidateIds.map(String));
3228
+ for (const tag of tags) {
3229
+ const tagMembers = await this.client.smembers(
3230
+ `${this.prefix}tag:${tag}`
3231
+ );
3232
+ const tagSet = new Set(tagMembers.map(String));
3233
+ intersection = new Set(
3234
+ [...intersection].filter((id) => tagSet.has(id))
3235
+ );
3236
+ }
3237
+ return [...intersection].filter((id) => candidateSet.has(id));
3238
+ }
3239
+ if (mode === "any") {
3240
+ const union = /* @__PURE__ */ new Set();
3241
+ for (const tag of tags) {
3242
+ const tagMembers = await this.client.smembers(
3243
+ `${this.prefix}tag:${tag}`
3244
+ );
3245
+ for (const id of tagMembers) union.add(String(id));
3246
+ }
3247
+ return [...union].filter((id) => candidateSet.has(id));
3248
+ }
3249
+ if (mode === "none") {
3250
+ const exclude = /* @__PURE__ */ new Set();
3251
+ for (const tag of tags) {
3252
+ const tagMembers = await this.client.smembers(
3253
+ `${this.prefix}tag:${tag}`
3254
+ );
3255
+ for (const id of tagMembers) exclude.add(String(id));
3256
+ }
3257
+ return candidateIds.filter((id) => !exclude.has(String(id)));
3258
+ }
3259
+ return this.filterByTags(candidateIds, tags, "all");
3260
+ }
3261
+ filterByRunAt(jobs, runAt) {
3262
+ if (runAt instanceof Date) {
3263
+ return jobs.filter((j) => j.runAt.getTime() === runAt.getTime());
3264
+ }
3265
+ return jobs.filter((j) => {
3266
+ const t = j.runAt.getTime();
3267
+ if (runAt.gt && !(t > runAt.gt.getTime())) return false;
3268
+ if (runAt.gte && !(t >= runAt.gte.getTime())) return false;
3269
+ if (runAt.lt && !(t < runAt.lt.getTime())) return false;
3270
+ if (runAt.lte && !(t <= runAt.lte.getTime())) return false;
3271
+ if (runAt.eq && t !== runAt.eq.getTime()) return false;
3272
+ return true;
3273
+ });
3274
+ }
3275
+ // ── Cron schedules ──────────────────────────────────────────────────
3276
+ /** Create a cron schedule and return its ID. */
3277
+ async addCronSchedule(input) {
3278
+ const existingId = await this.client.get(
3279
+ `${this.prefix}cron_name:${input.scheduleName}`
3280
+ );
3281
+ if (existingId !== null) {
3282
+ throw new Error(
3283
+ `Cron schedule with name "${input.scheduleName}" already exists`
3284
+ );
3285
+ }
3286
+ const id = await this.client.incr(`${this.prefix}cron_id_seq`);
3287
+ const now = this.nowMs();
3288
+ const key = `${this.prefix}cron:${id}`;
3289
+ const fields = [
3290
+ "id",
3291
+ id.toString(),
3292
+ "scheduleName",
3293
+ input.scheduleName,
3294
+ "cronExpression",
3295
+ input.cronExpression,
3296
+ "jobType",
3297
+ input.jobType,
3298
+ "payload",
3299
+ JSON.stringify(input.payload),
3300
+ "maxAttempts",
3301
+ input.maxAttempts.toString(),
3302
+ "priority",
3303
+ input.priority.toString(),
3304
+ "timeoutMs",
3305
+ input.timeoutMs !== null ? input.timeoutMs.toString() : "null",
3306
+ "forceKillOnTimeout",
3307
+ input.forceKillOnTimeout ? "true" : "false",
3308
+ "tags",
3309
+ input.tags ? JSON.stringify(input.tags) : "null",
3310
+ "timezone",
3311
+ input.timezone,
3312
+ "allowOverlap",
3313
+ input.allowOverlap ? "true" : "false",
3314
+ "status",
3315
+ "active",
3316
+ "lastEnqueuedAt",
3317
+ "null",
3318
+ "lastJobId",
3319
+ "null",
3320
+ "nextRunAt",
3321
+ input.nextRunAt ? input.nextRunAt.getTime().toString() : "null",
3322
+ "createdAt",
3323
+ now.toString(),
3324
+ "updatedAt",
3325
+ now.toString()
3326
+ ];
3327
+ await this.client.hmset(key, ...fields);
3328
+ await this.client.set(
3329
+ `${this.prefix}cron_name:${input.scheduleName}`,
3330
+ id.toString()
3331
+ );
3332
+ await this.client.sadd(`${this.prefix}crons`, id.toString());
3333
+ await this.client.sadd(`${this.prefix}cron_status:active`, id.toString());
3334
+ if (input.nextRunAt) {
3335
+ await this.client.zadd(
3336
+ `${this.prefix}cron_due`,
3337
+ input.nextRunAt.getTime(),
3338
+ id.toString()
3339
+ );
3340
+ }
3341
+ log(`Added cron schedule ${id}: "${input.scheduleName}"`);
3342
+ return id;
3343
+ }
3344
+ /** Get a cron schedule by ID. */
3345
+ async getCronSchedule(id) {
3346
+ const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
3347
+ if (!data || Object.keys(data).length === 0) return null;
3348
+ return this.deserializeCronSchedule(data);
3349
+ }
3350
+ /** Get a cron schedule by its unique name. */
3351
+ async getCronScheduleByName(name) {
3352
+ const id = await this.client.get(`${this.prefix}cron_name:${name}`);
3353
+ if (id === null) return null;
3354
+ return this.getCronSchedule(Number(id));
3355
+ }
3356
+ /** List cron schedules, optionally filtered by status. */
3357
+ async listCronSchedules(status) {
3358
+ let ids;
3359
+ if (status) {
3360
+ ids = await this.client.smembers(`${this.prefix}cron_status:${status}`);
3361
+ } else {
3362
+ ids = await this.client.smembers(`${this.prefix}crons`);
3363
+ }
3364
+ if (ids.length === 0) return [];
3365
+ const pipeline = this.client.pipeline();
3366
+ for (const id of ids) {
3367
+ pipeline.hgetall(`${this.prefix}cron:${id}`);
3368
+ }
3369
+ const results = await pipeline.exec();
3370
+ const schedules = [];
3371
+ if (results) {
3372
+ for (const [err, data] of results) {
3373
+ if (!err && data && typeof data === "object" && Object.keys(data).length > 0) {
3374
+ schedules.push(
3375
+ this.deserializeCronSchedule(data)
3376
+ );
3377
+ }
3378
+ }
3379
+ }
3380
+ schedules.sort((a, b) => a.createdAt.getTime() - b.createdAt.getTime());
3381
+ return schedules;
3382
+ }
3383
+ /** Delete a cron schedule by ID. */
3384
+ async removeCronSchedule(id) {
3385
+ const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
3386
+ if (!data || Object.keys(data).length === 0) return;
3387
+ const name = data.scheduleName;
3388
+ const status = data.status;
3389
+ await this.client.del(`${this.prefix}cron:${id}`);
3390
+ await this.client.del(`${this.prefix}cron_name:${name}`);
3391
+ await this.client.srem(`${this.prefix}crons`, id.toString());
3392
+ await this.client.srem(
3393
+ `${this.prefix}cron_status:${status}`,
3394
+ id.toString()
3395
+ );
3396
+ await this.client.zrem(`${this.prefix}cron_due`, id.toString());
3397
+ log(`Removed cron schedule ${id}`);
3398
+ }
3399
+ /** Pause a cron schedule. */
3400
+ async pauseCronSchedule(id) {
3401
+ const now = this.nowMs();
3402
+ await this.client.hset(
3403
+ `${this.prefix}cron:${id}`,
3404
+ "status",
3405
+ "paused",
3406
+ "updatedAt",
3407
+ now.toString()
3408
+ );
3409
+ await this.client.srem(`${this.prefix}cron_status:active`, id.toString());
3410
+ await this.client.sadd(`${this.prefix}cron_status:paused`, id.toString());
3411
+ await this.client.zrem(`${this.prefix}cron_due`, id.toString());
3412
+ log(`Paused cron schedule ${id}`);
3413
+ }
3414
+ /** Resume a paused cron schedule. */
3415
+ async resumeCronSchedule(id) {
3416
+ const now = this.nowMs();
3417
+ await this.client.hset(
3418
+ `${this.prefix}cron:${id}`,
3419
+ "status",
3420
+ "active",
3421
+ "updatedAt",
3422
+ now.toString()
3423
+ );
3424
+ await this.client.srem(`${this.prefix}cron_status:paused`, id.toString());
3425
+ await this.client.sadd(`${this.prefix}cron_status:active`, id.toString());
3426
+ const nextRunAt = await this.client.hget(
3427
+ `${this.prefix}cron:${id}`,
3428
+ "nextRunAt"
3429
+ );
3430
+ if (nextRunAt && nextRunAt !== "null") {
3431
+ await this.client.zadd(
3432
+ `${this.prefix}cron_due`,
3433
+ Number(nextRunAt),
3434
+ id.toString()
3435
+ );
3436
+ }
3437
+ log(`Resumed cron schedule ${id}`);
3438
+ }
3439
+ /** Edit a cron schedule. */
3440
+ async editCronSchedule(id, updates, nextRunAt) {
3441
+ const now = this.nowMs();
3442
+ const fields = [];
3443
+ if (updates.cronExpression !== void 0) {
3444
+ fields.push("cronExpression", updates.cronExpression);
3445
+ }
3446
+ if (updates.payload !== void 0) {
3447
+ fields.push("payload", JSON.stringify(updates.payload));
3448
+ }
3449
+ if (updates.maxAttempts !== void 0) {
3450
+ fields.push("maxAttempts", updates.maxAttempts.toString());
3451
+ }
3452
+ if (updates.priority !== void 0) {
3453
+ fields.push("priority", updates.priority.toString());
3454
+ }
3455
+ if (updates.timeoutMs !== void 0) {
3456
+ fields.push(
3457
+ "timeoutMs",
3458
+ updates.timeoutMs !== null ? updates.timeoutMs.toString() : "null"
3459
+ );
3460
+ }
3461
+ if (updates.forceKillOnTimeout !== void 0) {
3462
+ fields.push(
3463
+ "forceKillOnTimeout",
3464
+ updates.forceKillOnTimeout ? "true" : "false"
3465
+ );
3466
+ }
3467
+ if (updates.tags !== void 0) {
3468
+ fields.push(
3469
+ "tags",
3470
+ updates.tags !== null ? JSON.stringify(updates.tags) : "null"
3471
+ );
3472
+ }
3473
+ if (updates.timezone !== void 0) {
3474
+ fields.push("timezone", updates.timezone);
3475
+ }
3476
+ if (updates.allowOverlap !== void 0) {
3477
+ fields.push("allowOverlap", updates.allowOverlap ? "true" : "false");
3478
+ }
3479
+ if (nextRunAt !== void 0) {
3480
+ const val = nextRunAt !== null ? nextRunAt.getTime().toString() : "null";
3481
+ fields.push("nextRunAt", val);
3482
+ if (nextRunAt !== null) {
3483
+ await this.client.zadd(
3484
+ `${this.prefix}cron_due`,
3485
+ nextRunAt.getTime(),
3486
+ id.toString()
3487
+ );
3488
+ } else {
3489
+ await this.client.zrem(`${this.prefix}cron_due`, id.toString());
3490
+ }
3491
+ }
3492
+ if (fields.length === 0) {
3493
+ log(`No fields to update for cron schedule ${id}`);
3494
+ return;
3495
+ }
3496
+ fields.push("updatedAt", now.toString());
3497
+ await this.client.hmset(`${this.prefix}cron:${id}`, ...fields);
3498
+ log(`Edited cron schedule ${id}`);
3499
+ }
3500
+ /**
3501
+ * Fetch all active cron schedules whose nextRunAt <= now.
3502
+ * Uses a sorted set (cron_due) for efficient range query.
3503
+ */
3504
+ async getDueCronSchedules() {
3505
+ const now = this.nowMs();
3506
+ const ids = await this.client.zrangebyscore(
3507
+ `${this.prefix}cron_due`,
3508
+ 0,
3509
+ now
3510
+ );
3511
+ if (ids.length === 0) {
3512
+ log("Found 0 due cron schedules");
3513
+ return [];
3514
+ }
3515
+ const schedules = [];
3516
+ for (const id of ids) {
3517
+ const data = await this.client.hgetall(`${this.prefix}cron:${id}`);
3518
+ if (data && Object.keys(data).length > 0 && data.status === "active") {
3519
+ schedules.push(this.deserializeCronSchedule(data));
3520
+ }
3521
+ }
3522
+ log(`Found ${schedules.length} due cron schedules`);
3523
+ return schedules;
3524
+ }
3525
+ /**
3526
+ * Update a cron schedule after a job has been enqueued.
3527
+ * Sets lastEnqueuedAt, lastJobId, and advances nextRunAt.
3528
+ */
3529
+ async updateCronScheduleAfterEnqueue(id, lastEnqueuedAt, lastJobId, nextRunAt) {
3530
+ const fields = [
3531
+ "lastEnqueuedAt",
3532
+ lastEnqueuedAt.getTime().toString(),
3533
+ "lastJobId",
3534
+ lastJobId.toString(),
3535
+ "nextRunAt",
3536
+ nextRunAt ? nextRunAt.getTime().toString() : "null",
3537
+ "updatedAt",
3538
+ this.nowMs().toString()
3539
+ ];
3540
+ await this.client.hmset(`${this.prefix}cron:${id}`, ...fields);
3541
+ if (nextRunAt) {
3542
+ await this.client.zadd(
3543
+ `${this.prefix}cron_due`,
3544
+ nextRunAt.getTime(),
3545
+ id.toString()
3546
+ );
3547
+ } else {
3548
+ await this.client.zrem(`${this.prefix}cron_due`, id.toString());
3549
+ }
3550
+ log(
3551
+ `Updated cron schedule ${id}: lastJobId=${lastJobId}, nextRunAt=${nextRunAt?.toISOString() ?? "null"}`
3552
+ );
3553
+ }
3554
+ /** Deserialize a Redis hash into a CronScheduleRecord. */
3555
+ deserializeCronSchedule(h) {
3556
+ const nullish = (v) => v === void 0 || v === "null" || v === "" ? null : v;
3557
+ const numOrNull = (v) => {
3558
+ const n = nullish(v);
3559
+ return n === null ? null : Number(n);
3560
+ };
3561
+ const dateOrNull = (v) => {
3562
+ const n = numOrNull(v);
3563
+ return n === null ? null : new Date(n);
3564
+ };
3565
+ let payload;
3566
+ try {
3567
+ payload = JSON.parse(h.payload);
3568
+ } catch {
3569
+ payload = h.payload;
3570
+ }
3571
+ let tags;
3572
+ try {
3573
+ const raw = h.tags;
3574
+ if (raw && raw !== "null") {
3575
+ tags = JSON.parse(raw);
3576
+ }
3577
+ } catch {
3578
+ }
3579
+ return {
3580
+ id: Number(h.id),
3581
+ scheduleName: h.scheduleName,
3582
+ cronExpression: h.cronExpression,
3583
+ jobType: h.jobType,
3584
+ payload,
3585
+ maxAttempts: Number(h.maxAttempts),
3586
+ priority: Number(h.priority),
3587
+ timeoutMs: numOrNull(h.timeoutMs),
3588
+ forceKillOnTimeout: h.forceKillOnTimeout === "true",
3589
+ tags,
3590
+ timezone: h.timezone,
3591
+ allowOverlap: h.allowOverlap === "true",
3592
+ status: h.status,
3593
+ lastEnqueuedAt: dateOrNull(h.lastEnqueuedAt),
3594
+ lastJobId: numOrNull(h.lastJobId),
3595
+ nextRunAt: dateOrNull(h.nextRunAt),
3596
+ createdAt: new Date(Number(h.createdAt)),
3597
+ updatedAt: new Date(Number(h.updatedAt))
3598
+ };
3599
+ }
3600
+ // ── Private helpers (filters) ─────────────────────────────────────────
3601
+ async applyFilters(ids, filters) {
3602
+ let result = ids;
3603
+ if (filters.jobType) {
3604
+ const typeIds = new Set(
3605
+ await this.client.smembers(`${this.prefix}type:${filters.jobType}`)
3606
+ );
3607
+ result = result.filter((id) => typeIds.has(id));
3608
+ }
3609
+ if (filters.tags && filters.tags.values.length > 0) {
3610
+ result = await this.filterByTags(
3611
+ result,
3612
+ filters.tags.values,
3613
+ filters.tags.mode || "all"
3614
+ );
3615
+ }
3616
+ if (filters.priority !== void 0 || filters.runAt) {
3617
+ const jobs = await this.loadJobsByIds(result);
3618
+ let filtered = jobs;
3619
+ if (filters.priority !== void 0) {
3620
+ filtered = filtered.filter((j) => j.priority === filters.priority);
3621
+ }
3622
+ if (filters.runAt) {
3623
+ filtered = this.filterByRunAt(filtered, filters.runAt);
3624
+ }
3625
+ result = filtered.map((j) => j.id.toString());
3626
+ }
3627
+ return result;
3628
+ }
3629
+ };
3630
+ function getNextCronOccurrence(cronExpression, timezone = "UTC", after, CronImpl = croner.Cron) {
3631
+ const cron = new CronImpl(cronExpression, { timezone });
3632
+ const next = cron.nextRun(after ?? /* @__PURE__ */ new Date());
3633
+ return next ?? null;
3634
+ }
3635
+ function validateCronExpression(cronExpression, CronImpl = croner.Cron) {
3636
+ try {
3637
+ new CronImpl(cronExpression);
3638
+ return true;
3639
+ } catch {
3640
+ return false;
3641
+ }
3642
+ }
3643
+
3644
// src/handler-validation.ts
// Static (string-based) screening of a job handler for serializability.
// Handlers run with forceKillOnTimeout are shipped to a worker thread as
// source text and rebuilt via the Function constructor, so this checks the
// handler's own source for patterns that break that round trip.
// Returns { isSerializable, error? }; a handler can be serializable yet
// still carry a warning message in `error`.
function validateHandlerSerializable2(handler, jobType) {
  try {
    const source = handler.toString();
    const label = jobType ? `job type "${jobType}"` : "handler";
    // `this.` references cannot survive re-creation from source (unless
    // "this" merely appears inside a parenthesized parameter list).
    const usesThisContext = source.includes("this.") && !/\([^)]*this[^)]*\)/.test(source);
    if (usesThisContext) {
      return {
        isSerializable: false,
        error: `Handler for ${label} uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
      };
    }
    // Built-ins and bound functions stringify to "[native code]".
    if (source.includes("[native code]")) {
      return {
        isSerializable: false,
        error: `Handler for ${label} contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
      };
    }
    // Round-trip parse check: the worker rebuilds the handler the same
    // way, so a parse failure here means it would fail there too.
    try {
      new Function("return " + source);
    } catch (parseError) {
      return {
        isSerializable: false,
        error: `Handler for ${label} cannot be serialized: ${parseError instanceof Error ? parseError.message : String(parseError)}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
      };
    }
    // Heuristic: "const/let x = ...; async (" in the source suggests the
    // handler closes over locals that will not exist in the worker.
    if (/(?:const|let)\s+\w+\s*=\s*[^;]+;\s*async\s*\(/.test(source)) {
      return {
        isSerializable: true,
        // Still serializable, but might have issues at execution time.
        error: `Warning: Handler for ${label} may have closures over external variables. Test thoroughly with forceKillOnTimeout enabled. If the handler fails to execute in a worker thread, ensure all dependencies are imported within the handler function.`
      };
    }
    return { isSerializable: true };
  } catch (error) {
    return {
      isSerializable: false,
      error: `Failed to validate handler serialization${jobType ? ` for job type "${jobType}"` : ""}: ${error instanceof Error ? error.message : String(error)}`
    };
  }
}
3685
// Deep-validates that a handler survives the worker-thread serialization
// round trip used by forceKillOnTimeout: runs the static checks first,
// then rebuilds the handler from its source via the Function constructor
// and briefly invokes it with a dummy payload and a fresh abort signal.
// Returns { isSerializable, error? }.
async function testHandlerSerialization(handler, jobType) {
  const basicValidation = validateHandlerSerializable2(handler, jobType);
  if (!basicValidation.isSerializable) {
    return basicValidation;
  }
  try {
    const handlerString = handler.toString();
    // Rebuild the handler from source exactly as the worker thread would.
    const handlerFn = new Function("return " + handlerString)();
    const testPromise = handlerFn({}, new AbortController().signal);
    let timer;
    const timeoutPromise = new Promise((_, reject) => {
      timer = setTimeout(() => reject(new Error("Handler test timeout")), 100);
    });
    try {
      await Promise.race([testPromise, timeoutPromise]);
    } catch (execError) {
      // Hitting the probe timeout means the rebuilt handler actually
      // started running, so serialization itself succeeded.
      if (execError instanceof Error && execError.message === "Handler test timeout") {
        return { isSerializable: true };
      }
      // Other execution errors are deliberately ignored: this probe only
      // verifies the handler can be rebuilt and invoked, not that it
      // succeeds against an empty payload.
    } finally {
      // Fix: the original never cleared the 100ms timer, leaving it
      // pending and keeping the event loop alive after the probe settled.
      clearTimeout(timer);
    }
    return { isSerializable: true };
  } catch (error) {
    // Reaching here means rebuilding or synchronously invoking the
    // handler failed — i.e. the handler is not serializable.
    return {
      isSerializable: false,
      error: `Handler failed serialization test: ${error instanceof Error ? error.message : String(error)}`
    };
  }
}
3712
+
3713
// src/index.ts
// Factory for a job-queue instance. Selects the storage backend
// (PostgreSQL by default, or Redis) and returns an API object exposing
// job CRUD, batch processing, wait/token support, and cron-schedule
// management, all bound to that backend.
var initJobQueue = (config) => {
  const backendType = config.backend ?? "postgres";
  const verbose = config.verbose ?? false;
  setLogContext(verbose);
  let backend;
  let pool;
  if (backendType === "postgres") {
    const pgConfig = config;
    pool = createPool(pgConfig.databaseConfig);
    backend = new PostgresBackend(pool);
  } else if (backendType === "redis") {
    const redisConfig = config.redisConfig;
    backend = new RedisBackend(redisConfig);
  } else {
    throw new Error(`Unknown backend: ${backendType}`);
  }
  // Shorthand: wrap a method so the log context is re-applied per call.
  const log = (fn) => withLogContext(fn, verbose);
  // Wait/token APIs need direct pool access, which only Postgres has.
  const requirePool = () => {
    if (!pool) {
      throw new Error(
        'Wait/Token features require the PostgreSQL backend. Configure with backend: "postgres" to use these features.'
      );
    }
    return pool;
  };
  // Enqueue a job for every cron schedule whose next run time has passed;
  // returns the number of jobs enqueued. When allowOverlap is false and
  // the schedule's previous job is still live, the occurrence is skipped
  // and only the schedule's clock is advanced.
  const enqueueDueCronJobsImpl = async () => {
    const dueSchedules = await backend.getDueCronSchedules();
    let enqueued = 0;
    for (const schedule of dueSchedules) {
      if (!schedule.allowOverlap && schedule.lastJobId !== null) {
        const previous = await backend.getJob(schedule.lastJobId);
        const stillLive = previous && (previous.status === "pending" || previous.status === "processing" || previous.status === "waiting");
        if (stillLive) {
          const skippedNextRun = getNextCronOccurrence(
            schedule.cronExpression,
            schedule.timezone
          );
          await backend.updateCronScheduleAfterEnqueue(
            schedule.id,
            new Date(),
            schedule.lastJobId,
            skippedNextRun
          );
          continue;
        }
      }
      const jobId = await backend.addJob({
        jobType: schedule.jobType,
        payload: schedule.payload,
        maxAttempts: schedule.maxAttempts,
        priority: schedule.priority,
        timeoutMs: schedule.timeoutMs ?? undefined,
        forceKillOnTimeout: schedule.forceKillOnTimeout,
        tags: schedule.tags
      });
      const nextRun = getNextCronOccurrence(
        schedule.cronExpression,
        schedule.timezone
      );
      await backend.updateCronScheduleAfterEnqueue(
        schedule.id,
        new Date(),
        jobId,
        nextRun
      );
      enqueued++;
    }
    return enqueued;
  };
  return {
    // Job queue operations
    addJob: log((job) => backend.addJob(job)),
    getJob: log((id) => backend.getJob(id)),
    getJobsByStatus: log(
      (status, limit, offset) => backend.getJobsByStatus(status, limit, offset)
    ),
    getAllJobs: log((limit, offset) => backend.getAllJobs(limit, offset)),
    getJobs: log(
      (filters, limit, offset) => backend.getJobs(filters, limit, offset)
    ),
    retryJob: (jobId) => backend.retryJob(jobId),
    cleanupOldJobs: (daysToKeep) => backend.cleanupOldJobs(daysToKeep),
    cleanupOldJobEvents: (daysToKeep) => backend.cleanupOldJobEvents(daysToKeep),
    cancelJob: log((jobId) => backend.cancelJob(jobId)),
    editJob: log((jobId, updates) => backend.editJob(jobId, updates)),
    editAllPendingJobs: log(
      (filters, updates) => backend.editAllPendingJobs(filters, updates)
    ),
    cancelAllUpcomingJobs: log(
      (filters) => backend.cancelAllUpcomingJobs(filters)
    ),
    reclaimStuckJobs: log(
      (maxProcessingTimeMinutes) => backend.reclaimStuckJobs(maxProcessingTimeMinutes)
    ),
    getJobsByTags: log(
      (tags, mode = "all", limit, offset) => backend.getJobsByTags(tags, mode, limit, offset)
    ),
    // Job processing — automatically enqueues due cron jobs before each batch
    createProcessor: (handlers, options) => createProcessor(backend, handlers, options, async () => {
      await enqueueDueCronJobsImpl();
    }),
    // Job events
    getJobEvents: log((jobId) => backend.getJobEvents(jobId)),
    // Wait / Token support (PostgreSQL-only for now)
    createToken: log((options) => createWaitpoint(requirePool(), null, options)),
    completeToken: log(
      (tokenId, data) => completeWaitpoint(requirePool(), tokenId, data)
    ),
    getToken: log((tokenId) => getWaitpoint(requirePool(), tokenId)),
    expireTimedOutTokens: log(() => expireTimedOutWaitpoints(requirePool())),
    // Cron schedule operations
    addCronJob: log((options) => {
      if (!validateCronExpression(options.cronExpression)) {
        return Promise.reject(
          new Error(`Invalid cron expression: "${options.cronExpression}"`)
        );
      }
      const nextRunAt = getNextCronOccurrence(
        options.cronExpression,
        options.timezone ?? "UTC"
      );
      return backend.addCronSchedule({
        scheduleName: options.scheduleName,
        cronExpression: options.cronExpression,
        jobType: options.jobType,
        payload: options.payload,
        maxAttempts: options.maxAttempts ?? 3,
        priority: options.priority ?? 0,
        timeoutMs: options.timeoutMs ?? null,
        forceKillOnTimeout: options.forceKillOnTimeout ?? false,
        tags: options.tags,
        timezone: options.timezone ?? "UTC",
        allowOverlap: options.allowOverlap ?? false,
        nextRunAt
      });
    }),
    getCronJob: log((id) => backend.getCronSchedule(id)),
    getCronJobByName: log((name) => backend.getCronScheduleByName(name)),
    listCronJobs: log((status) => backend.listCronSchedules(status)),
    removeCronJob: log((id) => backend.removeCronSchedule(id)),
    pauseCronJob: log((id) => backend.pauseCronSchedule(id)),
    resumeCronJob: log((id) => backend.resumeCronSchedule(id)),
    editCronJob: log(async (id, updates) => {
      if (updates.cronExpression !== undefined && !validateCronExpression(updates.cronExpression)) {
        throw new Error(
          `Invalid cron expression: "${updates.cronExpression}"`
        );
      }
      let nextRunAt;
      if (updates.cronExpression !== undefined || updates.timezone !== undefined) {
        // Recompute the next occurrence from the merged (updated over
        // existing) expression and timezone.
        const existing = await backend.getCronSchedule(id);
        const expr = updates.cronExpression ?? existing?.cronExpression ?? "";
        const tz = updates.timezone ?? existing?.timezone ?? "UTC";
        nextRunAt = getNextCronOccurrence(expr, tz);
      }
      await backend.editCronSchedule(id, updates, nextRunAt);
    }),
    enqueueDueCronJobs: log(() => enqueueDueCronJobsImpl()),
    // Advanced access
    getPool: () => {
      if (backendType !== "postgres") {
        throw new Error(
          "getPool() is only available with the PostgreSQL backend."
        );
      }
      return backend.getPool();
    },
    getRedisClient: () => {
      if (backendType !== "redis") {
        throw new Error(
          "getRedisClient() is only available with the Redis backend."
        );
      }
      return backend.getClient();
    }
  };
};
3953
// Higher-order wrapper: returns a version of `fn` that re-applies the
// verbose logging context immediately before every invocation, then
// forwards all arguments and the return value unchanged.
var withLogContext = (fn, verbose) => {
  return (...args) => {
    setLogContext(verbose);
    return fn(...args);
  };
};
3957
+
3958
// Public API surface of the bundle.
exports.FailureReason = FailureReason;
exports.JobEventType = JobEventType;
exports.PostgresBackend = PostgresBackend;
exports.WaitSignal = WaitSignal;
exports.getNextCronOccurrence = getNextCronOccurrence;
exports.initJobQueue = initJobQueue;
exports.testHandlerSerialization = testHandlerSerialization;
exports.validateCronExpression = validateCronExpression;
// Exported under the public (un-suffixed) name; the `2` suffix is a
// bundler rename to avoid an internal identifier collision.
exports.validateHandlerSerializable = validateHandlerSerializable2;
// Fix: the original emitted the sourceMappingURL directive twice; tools
// honor only the last occurrence, so the duplicate was dead build noise.
//# sourceMappingURL=index.cjs.map