@nicnocquee/dataqueue 1.38.0 → 1.39.0-beta.20260322125514

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,10 @@
+ -- Up Migration
+ ALTER TABLE job_queue ADD COLUMN IF NOT EXISTS depends_on_job_ids INTEGER[];
+ ALTER TABLE job_queue ADD COLUMN IF NOT EXISTS depends_on_tags TEXT[];
+
+ CREATE INDEX IF NOT EXISTS idx_job_queue_depends_on_job_ids ON job_queue USING GIN (depends_on_job_ids);
+
+ -- Down Migration
+ DROP INDEX IF EXISTS idx_job_queue_depends_on_job_ids;
+ ALTER TABLE job_queue DROP COLUMN IF EXISTS depends_on_tags;
+ ALTER TABLE job_queue DROP COLUMN IF EXISTS depends_on_job_ids;
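
For orientation: these columns back the per-job dependency feature wired through the backend changes below. The option shape is inferred from normalizeDependsOn(), which splits a dependsOn value into { jobIds, tags }; the `backend` handle and the job types here are placeholders, not the package's documented API. A hedged sketch:

  // Sketch only: option names come from this diff, everything else is assumed.
  const importId = await backend.addJob({
    jobType: 'import_csv',
    payload: { source: 'uploads/today.csv' },
    tags: ['import'],
  });

  // Stays pending until job `importId` has completed AND no live job
  // still carries the 'import' tag (see JOB_DEPENDS_ON_PREDICATE below).
  await backend.addJob({
    jobType: 'send_report',
    payload: {},
    dependsOn: { jobIds: [importId], tags: ['import'] },
  });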
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@nicnocquee/dataqueue",
-   "version": "1.38.0",
+   "version": "1.39.0-beta.20260322125514",
    "description": "PostgreSQL or Redis-backed job queue for Node.js applications with support for serverless environments",
    "type": "module",
    "main": "dist/index.js",
@@ -24,9 +24,39 @@ import {
    CronScheduleInput,
  } from '../backend.js';
  import { log } from '../log-context.js';
+ import {
+   assertNoDependencyCycle,
+   normalizeDependsOn,
+   resolveDependsOnJobIdsForBatch,
+   validatePrerequisiteJobIdsExist,
+ } from '../job-dependencies.js';

  const MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1000;

+ /** SQL fragment: candidate job may run only if job-id and tag prerequisites are satisfied. */
+ const JOB_DEPENDS_ON_PREDICATE = `
+   AND (
+     candidate.depends_on_job_ids IS NULL
+     OR cardinality(candidate.depends_on_job_ids) = 0
+     OR NOT EXISTS (
+       SELECT 1
+       FROM unnest(candidate.depends_on_job_ids) AS dep(id)
+       LEFT JOIN job_queue prereq ON prereq.id = dep.id
+       WHERE prereq.id IS NULL OR prereq.status <> 'completed'
+     )
+   )
+   AND (
+     candidate.depends_on_tags IS NULL
+     OR cardinality(candidate.depends_on_tags) = 0
+     OR NOT EXISTS (
+       SELECT 1 FROM job_queue blocker
+       WHERE blocker.id <> candidate.id
+         AND blocker.status IN ('pending', 'processing', 'waiting')
+         AND blocker.tags IS NOT NULL
+         AND blocker.tags @> candidate.depends_on_tags
+     )
+   )`;
+
  /** Parse a timeout string like '10m', '1h', '24h', '7d' into milliseconds. */
  function parseTimeoutString(timeout: string): number {
    const match = timeout.match(/^(\d+)(s|m|h|d)$/);
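
Reading the fragment: a candidate may be claimed only if (a) every id in depends_on_job_ids resolves to an existing row with status 'completed' (a missing prerequisite also blocks), and (b) no pending/processing/waiting job's tags array contains all of the candidate's depends_on_tags. The `@>` operator is Postgres array containment, which the GIN index from the migration accelerates. Because the fragment is appended to the claim queries further down, blocked jobs simply stay 'pending' until a later poll; there is no separate blocked status. A standalone illustration of the tag half (plain node-postgres, table and column names as above):

  import { Pool } from 'pg';

  const pool = new Pool(); // connection settings via PG* env vars

  // Would a candidate that depends on these tags currently be blocked?
  async function hasTagBlockers(
    candidateId: number,
    dependsOnTags: string[],
  ): Promise<boolean> {
    const { rows } = await pool.query(
      `SELECT 1 FROM job_queue blocker
       WHERE blocker.id <> $1
         AND blocker.status IN ('pending', 'processing', 'waiting')
         AND blocker.tags IS NOT NULL
         AND blocker.tags @> $2::text[] -- blocker carries ALL required tags
       LIMIT 1`,
      [candidateId, dependsOnTags],
    );
    return rows.length > 0;
  }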
@@ -159,13 +189,34 @@ export class PostgresBackend implements QueueBackend {
      retryDelayMax = undefined,
      deadLetterJobType = undefined,
      group = undefined,
+     dependsOn,
    }: JobOptions<PayloadMap, T>,
    options?: AddJobOptions,
  ): Promise<number> {
    const externalClient = options?.db;
    const client: DatabaseClient =
      externalClient ?? (await this.pool.connect());
+   let manageTx = false;
    try {
+     const { jobIds: depJobIdsRaw, tags: depTags } =
+       normalizeDependsOn(dependsOn);
+     let resolvedDepJobIds: number[] = [];
+     if (depJobIdsRaw?.length) {
+       if (depJobIdsRaw.some((id) => id < 0)) {
+         throw new Error(
+           'dependsOn.jobIds: batch-relative (negative) ids are only supported in addJobs()',
+         );
+       }
+       resolvedDepJobIds = depJobIdsRaw;
+       await validatePrerequisiteJobIdsExist(client, resolvedDepJobIds);
+     }
+     const dependsOnJobIdsParam =
+       resolvedDepJobIds.length > 0 ? resolvedDepJobIds : null;
+     const dependsOnTagsParam = depTags?.length ? depTags : null;
+
+     manageTx = resolvedDepJobIds.length > 0 && !externalClient;
+     if (manageTx) await client.query('BEGIN');
+
      let result;
      const onConflict = idempotencyKey
        ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING`
@@ -174,8 +225,8 @@ export class PostgresBackend implements QueueBackend {
      if (runAt) {
        result = await client.query(
          `INSERT INTO job_queue
-          (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier)
-          VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
+          (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier, depends_on_job_ids, depends_on_tags)
+          VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
           ${onConflict}
           RETURNING id`,
          [
@@ -194,13 +245,15 @@ export class PostgresBackend implements QueueBackend {
            deadLetterJobType ?? null,
            group?.id ?? null,
            group?.tier ?? null,
+           dependsOnJobIdsParam,
+           dependsOnTagsParam,
          ],
        );
      } else {
        result = await client.query(
          `INSERT INTO job_queue
-          (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier)
-          VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
+          (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier, depends_on_job_ids, depends_on_tags)
+          VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
           ${onConflict}
           RETURNING id`,
          [
@@ -218,11 +271,14 @@ export class PostgresBackend implements QueueBackend {
            deadLetterJobType ?? null,
            group?.id ?? null,
            group?.tier ?? null,
+           dependsOnJobIdsParam,
+           dependsOnTagsParam,
          ],
        );
      }

      if (result.rows.length === 0 && idempotencyKey) {
+       if (manageTx) await client.query('ROLLBACK');
        const existing = await client.query(
          `SELECT id FROM job_queue WHERE idempotency_key = $1`,
          [idempotencyKey],
@@ -233,39 +289,53 @@ export class PostgresBackend implements QueueBackend {
          );
          return existing.rows[0].id;
        }
+       if (manageTx) await client.query('ROLLBACK');
        throw new Error(
          `Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`,
        );
      }

      const jobId = result.rows[0].id;
+
+     if (resolvedDepJobIds.length > 0) {
+       await assertNoDependencyCycle(client, jobId, resolvedDepJobIds);
+     }
+
      log(
        `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ''}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ''}`,
      );

+     const addedMeta = {
+       jobType,
+       payload,
+       tags,
+       idempotencyKey,
+       dependsOn:
+         dependsOnJobIdsParam || dependsOnTagsParam ? dependsOn : undefined,
+     };
+
      if (externalClient) {
        try {
          await client.query(
            `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
-           [
-             jobId,
-             JobEventType.Added,
-             JSON.stringify({ jobType, payload, tags, idempotencyKey }),
-           ],
+           [jobId, JobEventType.Added, JSON.stringify(addedMeta)],
          );
        } catch (error) {
          log(`Error recording job event for job ${jobId}: ${error}`);
        }
      } else {
-       await this.recordJobEvent(jobId, JobEventType.Added, {
-         jobType,
-         payload,
-         tags,
-         idempotencyKey,
-       });
+       await this.recordJobEvent(jobId, JobEventType.Added, addedMeta);
      }
+     if (manageTx) await client.query('COMMIT');
      return jobId;
    } catch (error) {
+     if (manageTx) {
+       try {
+         await client.query('ROLLBACK');
+       } catch {
+         /* ignore */
+       }
+     }
      log(`Error adding job: ${error}`);
      throw error;
    } finally {
@@ -289,7 +359,53 @@ export class PostgresBackend implements QueueBackend {
    const client: DatabaseClient =
      externalClient ?? (await this.pool.connect());
    try {
-     const COLS_PER_JOB = 15;
+     const needsSequential = jobs.some((j) => {
+       const n = normalizeDependsOn(j.dependsOn);
+       return Boolean(n.jobIds?.length || n.tags?.length);
+     });
+
+     if (needsSequential) {
+       const useOuterTx = !externalClient;
+       if (useOuterTx) await client.query('BEGIN');
+       try {
+         const ids: number[] = [];
+         for (let i = 0; i < jobs.length; i++) {
+           let job = jobs[i]!;
+           const nd = normalizeDependsOn(job.dependsOn);
+           if (nd.jobIds?.some((id) => id < 0)) {
+             const resolvedJobIds = resolveDependsOnJobIdsForBatch(
+               nd.jobIds,
+               ids,
+             );
+             job = {
+               ...job,
+               dependsOn: {
+                 jobIds: resolvedJobIds,
+                 tags: job.dependsOn?.tags,
+               },
+             };
+           }
+           const id = await this.addJob(job, { db: client });
+           ids.push(id);
+         }
+         if (useOuterTx) await client.query('COMMIT');
+         log(
+           `Batch-inserted ${jobs.length} jobs (sequential), IDs: [${ids.join(', ')}]`,
+         );
+         return ids;
+       } catch (e) {
+         if (!externalClient) {
+           try {
+             await client.query('ROLLBACK');
+           } catch {
+             /* ignore */
+           }
+         }
+         throw e;
+       }
+     }
+
+     const COLS_PER_JOB = 17;
      const valueClauses: string[] = [];
      const params: any[] = [];
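
This sequential path exists so a batch entry can depend on jobs inserted earlier in the same addJobs() call: negative ids are rewritten by resolveDependsOnJobIdsForBatch() against the ids accumulated so far. The resolver itself is not in this diff, so the convention below (-1 meaning "the immediately preceding entry") is an assumption:

  // Hedged sketch of a dependent batch; only the negative-id mechanism
  // (rejected by addJob, resolved here) is confirmed by the diff.
  await backend.addJobs([
    { jobType: 'extract', payload: { source: 's3://bucket/data.csv' } },
    { jobType: 'transform', payload: {}, dependsOn: { jobIds: [-1] } }, // after 'extract'
    { jobType: 'load', payload: {}, dependsOn: { jobIds: [-1] } }, // after 'transform'
  ]);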
@@ -318,7 +434,7 @@ export class PostgresBackend implements QueueBackend {
        `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, ` +
          `COALESCE($${base + 5}::timestamptz, CURRENT_TIMESTAMP), ` +
          `$${base + 6}, $${base + 7}, $${base + 8}, $${base + 9}, ` +
-         `$${base + 10}, $${base + 11}, $${base + 12}, $${base + 13}, $${base + 14}, $${base + 15})`,
+         `$${base + 10}, $${base + 11}, $${base + 12}, $${base + 13}, $${base + 14}, $${base + 15}, $${base + 16}, $${base + 17})`,
      );
      params.push(
        jobType,
@@ -336,6 +452,8 @@ export class PostgresBackend implements QueueBackend {
        deadLetterJobType ?? null,
        group?.id ?? null,
        group?.tier ?? null,
+       null,
+       null,
      );
    }

@@ -345,7 +463,7 @@ export class PostgresBackend implements QueueBackend {

      const result = await client.query(
        `INSERT INTO job_queue
-        (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier)
+        (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier, depends_on_job_ids, depends_on_tags)
         VALUES ${valueClauses.join(', ')}
         ${onConflict}
         RETURNING id, idempotency_key`,
@@ -412,6 +530,7 @@ export class PostgresBackend implements QueueBackend {
        const wasInserted =
          !job.idempotencyKey || !missingKeys.includes(job.idempotencyKey);
        if (wasInserted) {
+         const nd = normalizeDependsOn(job.dependsOn);
          newJobEvents.push({
            jobId: ids[i],
            eventType: JobEventType.Added,
@@ -420,6 +539,9 @@ export class PostgresBackend implements QueueBackend {
              payload: job.payload,
              tags: job.tags,
              idempotencyKey: job.idempotencyKey,
+             ...(nd.jobIds?.length || nd.tags?.length
+               ? { dependsOn: job.dependsOn }
+               : {}),
            },
          });
        }
@@ -467,7 +589,7 @@ export class PostgresBackend implements QueueBackend {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
-       `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue WHERE id = $1`,
+       `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output FROM job_queue WHERE id = $1`,
        [id],
      );

@@ -501,7 +623,7 @@ export class PostgresBackend implements QueueBackend {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
-       `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
+       `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
        [status, limit, offset],
      );
      log(`Found ${result.rows.length} jobs by status ${status}`);
@@ -527,7 +649,7 @@ export class PostgresBackend implements QueueBackend {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
-       `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
+       `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
        [limit, offset],
      );
      log(`Found ${result.rows.length} jobs (all)`);
@@ -552,7 +674,7 @@ export class PostgresBackend implements QueueBackend {
  ): Promise<JobRecord<PayloadMap, T>[]> {
    const client = await this.pool.connect();
    try {
-     let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue`;
+     let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output FROM job_queue`;
      const params: any[] = [];
      const where: string[] = [];
      let paramIdx = 1;
@@ -679,7 +801,7 @@ export class PostgresBackend implements QueueBackend {
  ): Promise<JobRecord<PayloadMap, T>[]> {
    const client = await this.pool.connect();
    try {
-     let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output
+     let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output
        FROM job_queue`;
      let params: any[] = [];
      switch (mode) {
@@ -780,11 +902,12 @@ export class PostgresBackend implements QueueBackend {
          )
        )
        ${jobTypeFilter}
+       ${JOB_DEPENDS_ON_PREDICATE}
        ORDER BY candidate.priority DESC, candidate.created_at ASC
        LIMIT $2
        FOR UPDATE SKIP LOCKED
      )
-     RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output
+     RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output
      `,
      params,
    );
@@ -810,6 +933,7 @@ export class PostgresBackend implements QueueBackend {
          )
        )
        ${jobTypeFilter}
+       ${JOB_DEPENDS_ON_PREDICATE}
        FOR UPDATE SKIP LOCKED
      ),
      ranked AS (
@@ -852,7 +976,7 @@ export class PostgresBackend implements QueueBackend {
          last_retried_at = CASE WHEN status != 'waiting' AND attempts > 0 THEN NOW() ELSE last_retried_at END,
          wait_until = NULL
      WHERE id IN (SELECT id FROM selected)
-     RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output
+     RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output
      `,
      constrainedParams,
    );
@@ -915,6 +1039,87 @@ export class PostgresBackend implements QueueBackend {
      }
    }

+ /**
+  * Cancel pending/waiting jobs that depend on any seed job (by job id or tag superset), transitively.
+  *
+  * @param client - Database client (must be inside an open transaction when used from fail/cancel).
+  * @param initialSeeds - Job ids that just failed or were cancelled.
+  * @param rootJobId - Original job id for event metadata.
+  */
+ private async propagateDependencyCancellations(
+   client: DatabaseClient,
+   initialSeeds: number[],
+   rootJobId: number,
+ ): Promise<void> {
+   const seeds = [...new Set(initialSeeds.filter((id) => id > 0))];
+   if (seeds.length === 0) return;
+
+   const cancelled = new Set<number>();
+   const reasonJson = JSON.stringify({
+     rootJobId,
+     dependencyCascade: true,
+   });
+
+   let frontier = seeds;
+   while (frontier.length > 0) {
+     const res = await client.query(
+       `
+       SELECT DISTINCT j.id
+       FROM job_queue j
+       CROSS JOIN unnest($1::int[]) AS s(id)
+       INNER JOIN job_queue sx ON sx.id = s.id
+       WHERE j.status IN ('pending', 'waiting')
+         AND j.id <> sx.id
+         AND (
+           j.depends_on_job_ids @> ARRAY[s.id]::integer[]
+           OR (
+             j.depends_on_tags IS NOT NULL
+             AND cardinality(j.depends_on_tags) > 0
+             AND sx.tags IS NOT NULL
+             AND sx.tags @> j.depends_on_tags
+           )
+         )
+       `,
+       [frontier],
+     );
+
+     const toCancel: number[] = [];
+     for (const row of res.rows) {
+       const pid = row.id as number;
+       if (cancelled.has(pid)) continue;
+       cancelled.add(pid);
+       toCancel.push(pid);
+     }
+
+     if (toCancel.length === 0) break;
+
+     await client.query(
+       `
+       UPDATE job_queue
+       SET status = 'cancelled',
+           updated_at = NOW(),
+           last_cancelled_at = NOW(),
+           wait_until = NULL,
+           wait_token_id = NULL,
+           pending_reason = $2
+       WHERE id = ANY($1::int[])
+         AND status IN ('pending', 'waiting')
+       `,
+       [toCancel, reasonJson],
+     );
+
+     const meta = JSON.stringify({ rootJobId, dependencyCascade: true });
+     for (const jid of toCancel) {
+       await client.query(
+         `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
+         [jid, JobEventType.Cancelled, meta],
+       );
+     }
+
+     frontier = toCancel;
+   }
+ }
+
  async failJob(
    jobId: number,
    error: Error,
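
The loop above cancels dependents frontier by frontier: direct dependents of the seeds first, then jobs that depend on those, so a failure or cancellation propagates through whole chains. Both failJob and cancelJob (below) call it inside their transactions. A hedged sketch of the observable behavior, with placeholder job types:

  const a = await backend.addJob({ jobType: 'step_a', payload: {} });
  const b = await backend.addJob({
    jobType: 'step_b',
    payload: {},
    dependsOn: { jobIds: [a] },
  });
  const c = await backend.addJob({
    jobType: 'step_c',
    payload: {},
    dependsOn: { jobIds: [b] },
  });

  await backend.cancelJob(a);
  // b is cancelled in the first frontier (it depends on a), and c in the
  // second (it depends on b). Each cancelled job gets a Cancelled job_events
  // row whose metadata records { rootJobId: a, dependencyCascade: true }.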
@@ -1000,8 +1205,8 @@ export class PostgresBackend implements QueueBackend {

      const deadLetterInsert = await client.query(
        `INSERT INTO job_queue
-        (job_type, payload, max_attempts, priority, run_at)
-        VALUES ($1, $2, $3, $4, NOW())
+        (job_type, payload, max_attempts, priority, run_at, depends_on_job_ids, depends_on_tags)
+        VALUES ($1, $2, $3, $4, NOW(), NULL, NULL)
         RETURNING id`,
        [failedJob.deadLetterJobType, deadLetterPayload, 1, 0],
      );
@@ -1042,6 +1247,8 @@ export class PostgresBackend implements QueueBackend {
        ],
      );

+     await this.propagateDependencyCancellations(client, [jobId], jobId);
+
      await client.query('COMMIT');
      log(
        `Failed job ${jobId}${deadLetterJobId ? ` and routed to dead-letter job ${deadLetterJobId}` : ''}`,
@@ -1147,7 +1354,8 @@ export class PostgresBackend implements QueueBackend {
  async cancelJob(jobId: number): Promise<void> {
    const client = await this.pool.connect();
    try {
-     await client.query(
+     await client.query('BEGIN');
+     const upd = await client.query(
        `
        UPDATE job_queue
        SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW(),
@@ -1156,9 +1364,26 @@ export class PostgresBackend implements QueueBackend {
        `,
        [jobId],
      );
-     await this.recordJobEvent(jobId, JobEventType.Cancelled);
+     if (upd.rowCount === 0) {
+       await client.query('ROLLBACK');
+       log(
+         `Job ${jobId} could not be cancelled (not in pending/waiting state or does not exist)`,
+       );
+       return;
+     }
+     await client.query(
+       `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
+       [jobId, JobEventType.Cancelled, null],
+     );
+     await this.propagateDependencyCancellations(client, [jobId], jobId);
+     await client.query('COMMIT');
      log(`Cancelled job ${jobId}`);
    } catch (error) {
+     try {
+       await client.query('ROLLBACK');
+     } catch {
+       /* ignore */
+     }
      log(`Error cancelling job ${jobId}: ${error}`);
      throw error;
    } finally {