@nicnocquee/dataqueue 1.38.0 → 1.39.0-beta.20260322125514

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -915,7 +915,109 @@ Recommended: Remove sslmode from the connection string when using a custom CA.
915
915
  }
916
916
  return pool;
917
917
  };
918
+
919
+ // src/job-dependencies.ts
920
/**
 * Encode a batch-relative index as a sentinel job id.
 * Index 0 maps to -1, index 1 to -2, and so on, so the encoded value can
 * never collide with a real (non-negative) job id and can be decoded later
 * by resolveDependsOnJobIdsForBatch.
 *
 * Throws when the index is not a non-negative integer.
 */
function batchDepRef(batchIndex) {
  const isValidIndex = Number.isInteger(batchIndex) && batchIndex >= 0;
  if (!isValidIndex) {
    throw new Error(
      `batchDepRef: expected non-negative integer index, got ${batchIndex}`
    );
  }
  return -batchIndex - 1;
}
928
/**
 * Normalize a dependsOn spec into `{ jobIds, tags }`.
 * Each list is deduplicated (first-occurrence order preserved); absent or
 * empty lists collapse to `undefined`, as does a missing spec entirely.
 */
function normalizeDependsOn(dep) {
  if (!dep) return { jobIds: void 0, tags: void 0 };
  const dedupe = (list) =>
    list && list.length > 0 ? Array.from(new Set(list)) : void 0;
  return { jobIds: dedupe(dep.jobIds), tags: dedupe(dep.tags) };
}
934
/**
 * Replace batch-relative (negative) job ids with the concrete ids of jobs
 * already inserted in this batch. Non-negative ids pass through unchanged.
 * A negative id -k refers to insertedIds[k - 1] (see batchDepRef).
 *
 * Throws when a negative id decodes to an index outside `insertedIds`.
 */
function resolveDependsOnJobIdsForBatch(jobIds, insertedIds) {
  const resolved = [];
  for (const id of jobIds) {
    if (id >= 0) {
      resolved.push(id);
      continue;
    }
    const idx = -id - 1;
    if (idx < 0 || idx >= insertedIds.length) {
      throw new Error(
        `Invalid batch-relative job id ${id}: index ${idx} out of range for ${insertedIds.length} inserted job(s)`
      );
    }
    resolved.push(insertedIds[idx]);
  }
  return resolved;
}
946
/**
 * True when `holderTags` contains every tag in `requiredTags`.
 *
 * Deliberately returns false when either list is missing or empty: an empty
 * requirement means "no tag dependency" here, not "trivially satisfied".
 */
function tagsAreSuperset(holderTags, requiredTags) {
  if (!requiredTags?.length) return false;
  if (!holderTags?.length) return false;
  const holder = new Set(holderTags);
  return requiredTags.every((tag) => holder.has(tag));
}
955
/**
 * Ensure every id in `jobIds` exists in job_queue.
 *
 * Resolves immediately for an empty list without touching the database.
 * Counts matching rows with a single parameterized query and throws
 * (listing the requested ids) when any of them is missing.
 */
async function validatePrerequisiteJobIdsExist(client, jobIds) {
  if (jobIds.length === 0) return;
  const result = await client.query(
    `SELECT COUNT(*)::int AS c FROM job_queue WHERE id = ANY($1::int[])`,
    [jobIds]
  );
  const found = result.rows[0]?.c ?? 0;
  if (found === jobIds.length) return;
  throw new Error(
    `dependsOn.jobIds: one or more job ids do not exist (${jobIds.join(", ")})`
  );
}
968
/**
 * Reject `dependsOnJobIds` that would introduce a dependency cycle.
 *
 * Fast paths: an empty prerequisite list resolves without querying, and a
 * self-dependency throws immediately. Otherwise a recursive CTE collects
 * every job that (transitively) depends on `newJobId`; if any requested
 * prerequisite appears in that downstream set, adding the edge would close
 * a cycle, so we throw.
 *
 * NOTE(review): callers appear to invoke this after inserting the new job
 * (inside a transaction) — confirm against addJob before reuse elsewhere.
 */
async function assertNoDependencyCycle(client, newJobId, dependsOnJobIds) {
  if (dependsOnJobIds.length === 0) return;
  if (dependsOnJobIds.includes(newJobId)) {
    throw new Error(
      `Job ${newJobId} cannot depend on itself (dependsOn.jobIds)`
    );
  }
  const result = await client.query(
    `
WITH RECURSIVE downstream AS (
SELECT j.id
FROM job_queue j
WHERE j.depends_on_job_ids @> ARRAY[$1::integer]::integer[]
UNION
SELECT j.id
FROM job_queue j
INNER JOIN downstream d ON j.depends_on_job_ids @> ARRAY[d.id]::integer[]
)
SELECT 1 FROM downstream WHERE id = ANY($2::integer[]) LIMIT 1
`,
    [newJobId, dependsOnJobIds]
  );
  const cycleFound = result.rows.length > 0;
  if (cycleFound) {
    throw new Error(
      `Adding job ${newJobId} would create a dependency cycle (dependsOn.jobIds)`
    );
  }
}
996
+
997
+ // src/backends/postgres.ts
918
998
  var MAX_TIMEOUT_MS = 365 * 24 * 60 * 60 * 1e3;
999
// SQL fragment appended (as `candidate` alias) to job-claim queries. A
// candidate job is eligible only when BOTH hold:
//  (1) depends_on_job_ids is NULL/empty, or every listed prerequisite row
//      exists AND has status 'completed' — a missing prerequisite row also
//      blocks the candidate (the LEFT JOIN makes prereq.id NULL);
//  (2) depends_on_tags is NULL/empty, or no OTHER job whose tags are a
//      superset of the required tags is still pending/processing/waiting.
// NOTE(review): this is a raw string interpolated into larger queries —
// keep it free of placeholders so parameter numbering stays intact.
var JOB_DEPENDS_ON_PREDICATE = `
AND (
candidate.depends_on_job_ids IS NULL
OR cardinality(candidate.depends_on_job_ids) = 0
OR NOT EXISTS (
SELECT 1
FROM unnest(candidate.depends_on_job_ids) AS dep(id)
LEFT JOIN job_queue prereq ON prereq.id = dep.id
WHERE prereq.id IS NULL OR prereq.status <> 'completed'
)
)
AND (
candidate.depends_on_tags IS NULL
OR cardinality(candidate.depends_on_tags) = 0
OR NOT EXISTS (
SELECT 1 FROM job_queue blocker
WHERE blocker.id <> candidate.id
AND blocker.status IN ('pending', 'processing', 'waiting')
AND blocker.tags IS NOT NULL
AND blocker.tags @> candidate.depends_on_tags
)
)`;
919
1021
  function parseTimeoutString(timeout) {
920
1022
  const match = timeout.match(/^(\d+)(s|m|h|d)$/);
921
1023
  if (!match) {
@@ -1022,18 +1124,35 @@ var PostgresBackend = class {
1022
1124
  retryBackoff = void 0,
1023
1125
  retryDelayMax = void 0,
1024
1126
  deadLetterJobType = void 0,
1025
- group = void 0
1127
+ group = void 0,
1128
+ dependsOn
1026
1129
  }, options) {
1027
1130
  const externalClient = options?.db;
1028
1131
  const client = externalClient ?? await this.pool.connect();
1132
+ let manageTx = false;
1029
1133
  try {
1134
+ const { jobIds: depJobIdsRaw, tags: depTags } = normalizeDependsOn(dependsOn);
1135
+ let resolvedDepJobIds = [];
1136
+ if (depJobIdsRaw?.length) {
1137
+ if (depJobIdsRaw.some((id) => id < 0)) {
1138
+ throw new Error(
1139
+ "dependsOn.jobIds: batch-relative (negative) ids are only supported in addJobs()"
1140
+ );
1141
+ }
1142
+ resolvedDepJobIds = depJobIdsRaw;
1143
+ await validatePrerequisiteJobIdsExist(client, resolvedDepJobIds);
1144
+ }
1145
+ const dependsOnJobIdsParam = resolvedDepJobIds.length > 0 ? resolvedDepJobIds : null;
1146
+ const dependsOnTagsParam = depTags?.length ? depTags : null;
1147
+ manageTx = resolvedDepJobIds.length > 0 && !externalClient;
1148
+ if (manageTx) await client.query("BEGIN");
1030
1149
  let result;
1031
1150
  const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
1032
1151
  if (runAt) {
1033
1152
  result = await client.query(
1034
1153
  `INSERT INTO job_queue
1035
- (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier)
1036
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
1154
+ (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier, depends_on_job_ids, depends_on_tags)
1155
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
1037
1156
  ${onConflict}
1038
1157
  RETURNING id`,
1039
1158
  [
@@ -1051,14 +1170,16 @@ var PostgresBackend = class {
1051
1170
  retryDelayMax ?? null,
1052
1171
  deadLetterJobType ?? null,
1053
1172
  group?.id ?? null,
1054
- group?.tier ?? null
1173
+ group?.tier ?? null,
1174
+ dependsOnJobIdsParam,
1175
+ dependsOnTagsParam
1055
1176
  ]
1056
1177
  );
1057
1178
  } else {
1058
1179
  result = await client.query(
1059
1180
  `INSERT INTO job_queue
1060
- (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier)
1061
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
1181
+ (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier, depends_on_job_ids, depends_on_tags)
1182
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
1062
1183
  ${onConflict}
1063
1184
  RETURNING id`,
1064
1185
  [
@@ -1075,11 +1196,14 @@ var PostgresBackend = class {
1075
1196
  retryDelayMax ?? null,
1076
1197
  deadLetterJobType ?? null,
1077
1198
  group?.id ?? null,
1078
- group?.tier ?? null
1199
+ group?.tier ?? null,
1200
+ dependsOnJobIdsParam,
1201
+ dependsOnTagsParam
1079
1202
  ]
1080
1203
  );
1081
1204
  }
1082
1205
  if (result.rows.length === 0 && idempotencyKey) {
1206
+ if (manageTx) await client.query("ROLLBACK");
1083
1207
  const existing = await client.query(
1084
1208
  `SELECT id FROM job_queue WHERE idempotency_key = $1`,
1085
1209
  [idempotencyKey]
@@ -1090,37 +1214,46 @@ var PostgresBackend = class {
1090
1214
  );
1091
1215
  return existing.rows[0].id;
1092
1216
  }
1217
+ if (manageTx) await client.query("ROLLBACK");
1093
1218
  throw new Error(
1094
1219
  `Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`
1095
1220
  );
1096
1221
  }
1097
1222
  const jobId = result.rows[0].id;
1223
+ if (resolvedDepJobIds.length > 0) {
1224
+ await assertNoDependencyCycle(client, jobId, resolvedDepJobIds);
1225
+ }
1098
1226
  log(
1099
1227
  `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
1100
1228
  );
1229
+ const addedMeta = {
1230
+ jobType,
1231
+ payload,
1232
+ tags,
1233
+ idempotencyKey,
1234
+ dependsOn: dependsOnJobIdsParam || dependsOnTagsParam ? dependsOn : void 0
1235
+ };
1101
1236
  if (externalClient) {
1102
1237
  try {
1103
1238
  await client.query(
1104
1239
  `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
1105
- [
1106
- jobId,
1107
- "added" /* Added */,
1108
- JSON.stringify({ jobType, payload, tags, idempotencyKey })
1109
- ]
1240
+ [jobId, "added" /* Added */, JSON.stringify(addedMeta)]
1110
1241
  );
1111
1242
  } catch (error) {
1112
1243
  log(`Error recording job event for job ${jobId}: ${error}`);
1113
1244
  }
1114
1245
  } else {
1115
- await this.recordJobEvent(jobId, "added" /* Added */, {
1116
- jobType,
1117
- payload,
1118
- tags,
1119
- idempotencyKey
1120
- });
1246
+ await this.recordJobEvent(jobId, "added" /* Added */, addedMeta);
1121
1247
  }
1248
+ if (manageTx) await client.query("COMMIT");
1122
1249
  return jobId;
1123
1250
  } catch (error) {
1251
+ if (manageTx) {
1252
+ try {
1253
+ await client.query("ROLLBACK");
1254
+ } catch {
1255
+ }
1256
+ }
1124
1257
  log(`Error adding job: ${error}`);
1125
1258
  throw error;
1126
1259
  } finally {
@@ -1138,7 +1271,50 @@ var PostgresBackend = class {
1138
1271
  const externalClient = options?.db;
1139
1272
  const client = externalClient ?? await this.pool.connect();
1140
1273
  try {
1141
- const COLS_PER_JOB = 15;
1274
+ const needsSequential = jobs.some((j) => {
1275
+ const n = normalizeDependsOn(j.dependsOn);
1276
+ return Boolean(n.jobIds?.length || n.tags?.length);
1277
+ });
1278
+ if (needsSequential) {
1279
+ const useOuterTx = !externalClient;
1280
+ if (useOuterTx) await client.query("BEGIN");
1281
+ try {
1282
+ const ids2 = [];
1283
+ for (let i = 0; i < jobs.length; i++) {
1284
+ let job = jobs[i];
1285
+ const nd = normalizeDependsOn(job.dependsOn);
1286
+ if (nd.jobIds?.some((id2) => id2 < 0)) {
1287
+ const resolvedJobIds = resolveDependsOnJobIdsForBatch(
1288
+ nd.jobIds,
1289
+ ids2
1290
+ );
1291
+ job = {
1292
+ ...job,
1293
+ dependsOn: {
1294
+ jobIds: resolvedJobIds,
1295
+ tags: job.dependsOn?.tags
1296
+ }
1297
+ };
1298
+ }
1299
+ const id = await this.addJob(job, { db: client });
1300
+ ids2.push(id);
1301
+ }
1302
+ if (useOuterTx) await client.query("COMMIT");
1303
+ log(
1304
+ `Batch-inserted ${jobs.length} jobs (sequential), IDs: [${ids2.join(", ")}]`
1305
+ );
1306
+ return ids2;
1307
+ } catch (e) {
1308
+ if (!externalClient) {
1309
+ try {
1310
+ await client.query("ROLLBACK");
1311
+ } catch {
1312
+ }
1313
+ }
1314
+ throw e;
1315
+ }
1316
+ }
1317
+ const COLS_PER_JOB = 17;
1142
1318
  const valueClauses = [];
1143
1319
  const params = [];
1144
1320
  const hasAnyIdempotencyKey = jobs.some((j) => j.idempotencyKey);
@@ -1161,7 +1337,7 @@ var PostgresBackend = class {
1161
1337
  } = jobs[i];
1162
1338
  const base = i * COLS_PER_JOB;
1163
1339
  valueClauses.push(
1164
- `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, COALESCE($${base + 5}::timestamptz, CURRENT_TIMESTAMP), $${base + 6}, $${base + 7}, $${base + 8}, $${base + 9}, $${base + 10}, $${base + 11}, $${base + 12}, $${base + 13}, $${base + 14}, $${base + 15})`
1340
+ `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, COALESCE($${base + 5}::timestamptz, CURRENT_TIMESTAMP), $${base + 6}, $${base + 7}, $${base + 8}, $${base + 9}, $${base + 10}, $${base + 11}, $${base + 12}, $${base + 13}, $${base + 14}, $${base + 15}, $${base + 16}, $${base + 17})`
1165
1341
  );
1166
1342
  params.push(
1167
1343
  jobType,
@@ -1178,13 +1354,15 @@ var PostgresBackend = class {
1178
1354
  retryDelayMax ?? null,
1179
1355
  deadLetterJobType ?? null,
1180
1356
  group?.id ?? null,
1181
- group?.tier ?? null
1357
+ group?.tier ?? null,
1358
+ null,
1359
+ null
1182
1360
  );
1183
1361
  }
1184
1362
  const onConflict = hasAnyIdempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
1185
1363
  const result = await client.query(
1186
1364
  `INSERT INTO job_queue
1187
- (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier)
1365
+ (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max, dead_letter_job_type, group_id, group_tier, depends_on_job_ids, depends_on_tags)
1188
1366
  VALUES ${valueClauses.join(", ")}
1189
1367
  ${onConflict}
1190
1368
  RETURNING id, idempotency_key`,
@@ -1235,6 +1413,7 @@ var PostgresBackend = class {
1235
1413
  const job = jobs[i];
1236
1414
  const wasInserted = !job.idempotencyKey || !missingKeys.includes(job.idempotencyKey);
1237
1415
  if (wasInserted) {
1416
+ const nd = normalizeDependsOn(job.dependsOn);
1238
1417
  newJobEvents.push({
1239
1418
  jobId: ids[i],
1240
1419
  eventType: "added" /* Added */,
@@ -1242,7 +1421,8 @@ var PostgresBackend = class {
1242
1421
  jobType: job.jobType,
1243
1422
  payload: job.payload,
1244
1423
  tags: job.tags,
1245
- idempotencyKey: job.idempotencyKey
1424
+ idempotencyKey: job.idempotencyKey,
1425
+ ...nd.jobIds?.length || nd.tags?.length ? { dependsOn: job.dependsOn } : {}
1246
1426
  }
1247
1427
  });
1248
1428
  }
@@ -1284,7 +1464,7 @@ var PostgresBackend = class {
1284
1464
  const client = await this.pool.connect();
1285
1465
  try {
1286
1466
  const result = await client.query(
1287
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue WHERE id = $1`,
1467
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output FROM job_queue WHERE id = $1`,
1288
1468
  [id]
1289
1469
  );
1290
1470
  if (result.rows.length === 0) {
@@ -1311,7 +1491,7 @@ var PostgresBackend = class {
1311
1491
  const client = await this.pool.connect();
1312
1492
  try {
1313
1493
  const result = await client.query(
1314
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
1494
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
1315
1495
  [status, limit, offset]
1316
1496
  );
1317
1497
  log(`Found ${result.rows.length} jobs by status ${status}`);
@@ -1333,7 +1513,7 @@ var PostgresBackend = class {
1333
1513
  const client = await this.pool.connect();
1334
1514
  try {
1335
1515
  const result = await client.query(
1336
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
1516
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
1337
1517
  [limit, offset]
1338
1518
  );
1339
1519
  log(`Found ${result.rows.length} jobs (all)`);
@@ -1353,7 +1533,7 @@ var PostgresBackend = class {
1353
1533
  async getJobs(filters, limit = 100, offset = 0) {
1354
1534
  const client = await this.pool.connect();
1355
1535
  try {
1356
- let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output FROM job_queue`;
1536
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output FROM job_queue`;
1357
1537
  const params = [];
1358
1538
  const where = [];
1359
1539
  let paramIdx = 1;
@@ -1454,7 +1634,7 @@ var PostgresBackend = class {
1454
1634
  async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
1455
1635
  const client = await this.pool.connect();
1456
1636
  try {
1457
- let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output
1637
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output
1458
1638
  FROM job_queue`;
1459
1639
  let params = [];
1460
1640
  switch (mode) {
@@ -1546,11 +1726,12 @@ var PostgresBackend = class {
1546
1726
  )
1547
1727
  )
1548
1728
  ${jobTypeFilter}
1729
+ ${JOB_DEPENDS_ON_PREDICATE}
1549
1730
  ORDER BY candidate.priority DESC, candidate.created_at ASC
1550
1731
  LIMIT $2
1551
1732
  FOR UPDATE SKIP LOCKED
1552
1733
  )
1553
- RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output
1734
+ RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output
1554
1735
  `,
1555
1736
  params
1556
1737
  );
@@ -1576,6 +1757,7 @@ var PostgresBackend = class {
1576
1757
  )
1577
1758
  )
1578
1759
  ${jobTypeFilter}
1760
+ ${JOB_DEPENDS_ON_PREDICATE}
1579
1761
  FOR UPDATE SKIP LOCKED
1580
1762
  ),
1581
1763
  ranked AS (
@@ -1618,7 +1800,7 @@ var PostgresBackend = class {
1618
1800
  last_retried_at = CASE WHEN status != 'waiting' AND attempts > 0 THEN NOW() ELSE last_retried_at END,
1619
1801
  wait_until = NULL
1620
1802
  WHERE id IN (SELECT id FROM selected)
1621
- RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", output
1803
+ RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", dead_letter_job_type AS "deadLetterJobType", dead_lettered_at AS "deadLetteredAt", dead_letter_job_id AS "deadLetterJobId", group_id AS "groupId", group_tier AS "groupTier", depends_on_job_ids AS "dependsOnJobIds", depends_on_tags AS "dependsOnTags", output
1622
1804
  `,
1623
1805
  constrainedParams
1624
1806
  );
@@ -1675,6 +1857,75 @@ var PostgresBackend = class {
1675
1857
  client.release();
1676
1858
  }
1677
1859
  }
1860
+ /**
1861
+ * Cancel pending/waiting jobs that depend on any seed job (by job id or tag superset), transitively.
1862
+ *
1863
+ * @param client - Database client (must be inside an open transaction when used from fail/cancel).
1864
+ * @param initialSeeds - Job ids that just failed or were cancelled.
1865
+ * @param rootJobId - Original job id for event metadata.
1866
+ */
1867
+ async propagateDependencyCancellations(client, initialSeeds, rootJobId) {
1868
+ const seeds = [...new Set(initialSeeds.filter((id) => id > 0))];
1869
+ if (seeds.length === 0) return;
1870
+ const cancelled = /* @__PURE__ */ new Set();
1871
+ const reasonJson = JSON.stringify({
1872
+ rootJobId,
1873
+ dependencyCascade: true
1874
+ });
1875
+ let frontier = seeds;
1876
+ while (frontier.length > 0) {
1877
+ const res = await client.query(
1878
+ `
1879
+ SELECT DISTINCT j.id
1880
+ FROM job_queue j
1881
+ CROSS JOIN unnest($1::int[]) AS s(id)
1882
+ INNER JOIN job_queue sx ON sx.id = s.id
1883
+ WHERE j.status IN ('pending', 'waiting')
1884
+ AND j.id <> sx.id
1885
+ AND (
1886
+ j.depends_on_job_ids @> ARRAY[s.id]::integer[]
1887
+ OR (
1888
+ j.depends_on_tags IS NOT NULL
1889
+ AND cardinality(j.depends_on_tags) > 0
1890
+ AND sx.tags IS NOT NULL
1891
+ AND sx.tags @> j.depends_on_tags
1892
+ )
1893
+ )
1894
+ `,
1895
+ [frontier]
1896
+ );
1897
+ const toCancel = [];
1898
+ for (const row of res.rows) {
1899
+ const pid = row.id;
1900
+ if (cancelled.has(pid)) continue;
1901
+ cancelled.add(pid);
1902
+ toCancel.push(pid);
1903
+ }
1904
+ if (toCancel.length === 0) break;
1905
+ await client.query(
1906
+ `
1907
+ UPDATE job_queue
1908
+ SET status = 'cancelled',
1909
+ updated_at = NOW(),
1910
+ last_cancelled_at = NOW(),
1911
+ wait_until = NULL,
1912
+ wait_token_id = NULL,
1913
+ pending_reason = $2
1914
+ WHERE id = ANY($1::int[])
1915
+ AND status IN ('pending', 'waiting')
1916
+ `,
1917
+ [toCancel, reasonJson]
1918
+ );
1919
+ const meta = JSON.stringify({ rootJobId, dependencyCascade: true });
1920
+ for (const jid of toCancel) {
1921
+ await client.query(
1922
+ `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
1923
+ [jid, "cancelled" /* Cancelled */, meta]
1924
+ );
1925
+ }
1926
+ frontier = toCancel;
1927
+ }
1928
+ }
1678
1929
  async failJob(jobId, error, failureReason) {
1679
1930
  const client = await this.pool.connect();
1680
1931
  try {
@@ -1744,8 +1995,8 @@ var PostgresBackend = class {
1744
1995
  });
1745
1996
  const deadLetterInsert = await client.query(
1746
1997
  `INSERT INTO job_queue
1747
- (job_type, payload, max_attempts, priority, run_at)
1748
- VALUES ($1, $2, $3, $4, NOW())
1998
+ (job_type, payload, max_attempts, priority, run_at, depends_on_job_ids, depends_on_tags)
1999
+ VALUES ($1, $2, $3, $4, NOW(), NULL, NULL)
1749
2000
  RETURNING id`,
1750
2001
  [failedJob.deadLetterJobType, deadLetterPayload, 1, 0]
1751
2002
  );
@@ -1782,6 +2033,7 @@ var PostgresBackend = class {
1782
2033
  })
1783
2034
  ]
1784
2035
  );
2036
+ await this.propagateDependencyCancellations(client, [jobId], jobId);
1785
2037
  await client.query("COMMIT");
1786
2038
  log(
1787
2039
  `Failed job ${jobId}${deadLetterJobId ? ` and routed to dead-letter job ${deadLetterJobId}` : ""}`
@@ -1877,7 +2129,8 @@ var PostgresBackend = class {
1877
2129
  async cancelJob(jobId) {
1878
2130
  const client = await this.pool.connect();
1879
2131
  try {
1880
- await client.query(
2132
+ await client.query("BEGIN");
2133
+ const upd = await client.query(
1881
2134
  `
1882
2135
  UPDATE job_queue
1883
2136
  SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW(),
@@ -1886,9 +2139,25 @@ var PostgresBackend = class {
1886
2139
  `,
1887
2140
  [jobId]
1888
2141
  );
1889
- await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
2142
+ if (upd.rowCount === 0) {
2143
+ await client.query("ROLLBACK");
2144
+ log(
2145
+ `Job ${jobId} could not be cancelled (not in pending/waiting state or does not exist)`
2146
+ );
2147
+ return;
2148
+ }
2149
+ await client.query(
2150
+ `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
2151
+ [jobId, "cancelled" /* Cancelled */, null]
2152
+ );
2153
+ await this.propagateDependencyCancellations(client, [jobId], jobId);
2154
+ await client.query("COMMIT");
1890
2155
  log(`Cancelled job ${jobId}`);
1891
2156
  } catch (error) {
2157
+ try {
2158
+ await client.query("ROLLBACK");
2159
+ } catch {
2160
+ }
1892
2161
  log(`Error cancelling job ${jobId}: ${error}`);
1893
2162
  throw error;
1894
2163
  } finally {
@@ -2921,6 +3190,8 @@ local retryDelayMax = ARGV[13] -- "null" or seconds string
2921
3190
  local deadLetterJobType = ARGV[14] -- "null" or jobType string
2922
3191
  local groupId = ARGV[15] -- "null" or group ID
2923
3192
  local groupTier = ARGV[16] -- "null" or group tier
3193
+ local dependsOnJobIdsJson = ARGV[17] -- "null" or JSON array of job ids
3194
+ local dependsOnTagsJson = ARGV[18] -- "null" or JSON array of tags
2924
3195
 
2925
3196
  -- Idempotency check
2926
3197
  if idempotencyKey ~= "null" then
@@ -2972,9 +3243,18 @@ redis.call('HMSET', jobKey,
2972
3243
  'deadLetteredAt', 'null',
2973
3244
  'deadLetterJobId', 'null',
2974
3245
  'groupId', groupId,
2975
- 'groupTier', groupTier
3246
+ 'groupTier', groupTier,
3247
+ 'dependsOnJobIds', dependsOnJobIdsJson,
3248
+ 'dependsOnTags', dependsOnTagsJson
2976
3249
  )
2977
3250
 
3251
+ if dependsOnJobIdsJson ~= "null" then
3252
+ local depIds = cjson.decode(dependsOnJobIdsJson)
3253
+ for _, parentId in ipairs(depIds) do
3254
+ redis.call('SADD', prefix .. 'dep:' .. tostring(parentId), tostring(id))
3255
+ end
3256
+ end
3257
+
2978
3258
  -- Status index
2979
3259
  redis.call('SADD', prefix .. 'status:pending', id)
2980
3260
 
@@ -3037,6 +3317,8 @@ for i, job in ipairs(jobs) do
3037
3317
  local deadLetterJobType = tostring(job.deadLetterJobType)
3038
3318
  local groupId = tostring(job.groupId)
3039
3319
  local groupTier = tostring(job.groupTier)
3320
+ local dependsOnJobIdsJson = (job.dependsOnJobIds ~= nil and job.dependsOnJobIds ~= cjson.null) and tostring(job.dependsOnJobIds) or "null"
3321
+ local dependsOnTagsJson = (job.dependsOnTags ~= nil and job.dependsOnTags ~= cjson.null) and tostring(job.dependsOnTags) or "null"
3040
3322
 
3041
3323
  -- Idempotency check
3042
3324
  local skip = false
@@ -3091,9 +3373,18 @@ for i, job in ipairs(jobs) do
3091
3373
  'deadLetteredAt', 'null',
3092
3374
  'deadLetterJobId', 'null',
3093
3375
  'groupId', groupId,
3094
- 'groupTier', groupTier
3376
+ 'groupTier', groupTier,
3377
+ 'dependsOnJobIds', dependsOnJobIdsJson,
3378
+ 'dependsOnTags', dependsOnTagsJson
3095
3379
  )
3096
3380
 
3381
+ if dependsOnJobIdsJson ~= "null" then
3382
+ local depIds = cjson.decode(dependsOnJobIdsJson)
3383
+ for _, parentId in ipairs(depIds) do
3384
+ redis.call('SADD', prefix .. 'dep:' .. tostring(parentId), tostring(id))
3385
+ end
3386
+ end
3387
+
3097
3388
  -- Status index
3098
3389
  redis.call('SADD', prefix .. 'status:pending', id)
3099
3390
 
@@ -3257,6 +3548,48 @@ for i = 1, #candidates, 2 do
3257
3548
  end
3258
3549
  end
3259
3550
 
3551
+ if canClaim then
3552
+ local depIdsJson = redis.call('HGET', jk, 'dependsOnJobIds')
3553
+ local depTagsJson = redis.call('HGET', jk, 'dependsOnTags')
3554
+ local depsOk = true
3555
+ if depIdsJson and depIdsJson ~= 'null' then
3556
+ local dids = cjson.decode(depIdsJson)
3557
+ for _, pid in ipairs(dids) do
3558
+ local pst = redis.call('HGET', prefix .. 'job:' .. pid, 'status')
3559
+ if pst ~= 'completed' then depsOk = false break end
3560
+ end
3561
+ end
3562
+ if depsOk and depTagsJson and depTagsJson ~= 'null' then
3563
+ local req = cjson.decode(depTagsJson)
3564
+ if #req > 0 then
3565
+ for _, stname in ipairs({'pending','processing','waiting'}) do
3566
+ local members = redis.call('SMEMBERS', prefix .. 'status:' .. stname)
3567
+ for _, oid in ipairs(members) do
3568
+ if oid ~= jobId then
3569
+ local otags = redis.call('HGET', prefix .. 'job:' .. oid, 'tags')
3570
+ if otags and otags ~= 'null' then
3571
+ local oarr = cjson.decode(otags)
3572
+ local tagset = {}
3573
+ for _, t in ipairs(oarr) do tagset[t] = true end
3574
+ local all = true
3575
+ for _, rt in ipairs(req) do
3576
+ if not tagset[rt] then all = false break end
3577
+ end
3578
+ if all then depsOk = false break end
3579
+ end
3580
+ end
3581
+ end
3582
+ if not depsOk then break end
3583
+ end
3584
+ end
3585
+ end
3586
+ if not depsOk then
3587
+ table.insert(putBack, score)
3588
+ table.insert(putBack, jobId)
3589
+ canClaim = false
3590
+ end
3591
+ end
3592
+
3260
3593
  if canClaim then
3261
3594
  -- Claim this job
3262
3595
  local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
@@ -3337,6 +3670,14 @@ if groupId and groupId ~= 'null' then
3337
3670
  end
3338
3671
  end
3339
3672
 
3673
+ local depIdsJson = redis.call('HGET', jk, 'dependsOnJobIds')
3674
+ if depIdsJson and depIdsJson ~= 'null' then
3675
+ local dids = cjson.decode(depIdsJson)
3676
+ for _, pid in ipairs(dids) do
3677
+ redis.call('SREM', prefix .. 'dep:' .. tostring(pid), jobId)
3678
+ end
3679
+ end
3680
+
3340
3681
  return 1
3341
3682
  `;
3342
3683
  var FAIL_JOB_SCRIPT = `
@@ -3484,7 +3825,11 @@ if nextAttemptAt == 'null' and deadLetterJobType and deadLetterJobType ~= 'null'
3484
3825
  'retryDelayMax', 'null',
3485
3826
  'deadLetterJobType', 'null',
3486
3827
  'deadLetteredAt', 'null',
3487
- 'deadLetterJobId', 'null'
3828
+ 'deadLetterJobId', 'null',
3829
+ 'dependsOnJobIds', 'null',
3830
+ 'dependsOnTags', 'null',
3831
+ 'groupId', 'null',
3832
+ 'groupTier', 'null'
3488
3833
  )
3489
3834
 
3490
3835
  redis.call('SADD', prefix .. 'status:pending', deadLetterJobId)
@@ -3499,6 +3844,14 @@ if nextAttemptAt == 'null' and deadLetterJobType and deadLetterJobType ~= 'null'
3499
3844
  )
3500
3845
  end
3501
3846
 
3847
+ local depIdsJsonFail = redis.call('HGET', jk, 'dependsOnJobIds')
3848
+ if depIdsJsonFail and depIdsJsonFail ~= 'null' then
3849
+ local dids = cjson.decode(depIdsJsonFail)
3850
+ for _, pid in ipairs(dids) do
3851
+ redis.call('SREM', prefix .. 'dep:' .. tostring(pid), jobId)
3852
+ end
3853
+ end
3854
+
3502
3855
  return deadLetterJobId
3503
3856
  `;
3504
3857
  var RETRY_JOB_SCRIPT = `
@@ -3565,6 +3918,14 @@ redis.call('ZREM', prefix .. 'queue', jobId)
3565
3918
  redis.call('ZREM', prefix .. 'delayed', jobId)
3566
3919
  redis.call('ZREM', prefix .. 'waiting', jobId)
3567
3920
 
3921
+ local depIdsJsonCan = redis.call('HGET', jk, 'dependsOnJobIds')
3922
+ if depIdsJsonCan and depIdsJsonCan ~= 'null' then
3923
+ local dids = cjson.decode(depIdsJsonCan)
3924
+ for _, pid in ipairs(dids) do
3925
+ redis.call('SREM', prefix .. 'dep:' .. tostring(pid), jobId)
3926
+ end
3927
+ end
3928
+
3568
3929
  return 1
3569
3930
  `;
3570
3931
  var PROLONG_JOB_SCRIPT = `
@@ -3916,9 +4277,29 @@ function deserializeJob(h) {
3916
4277
  deadLetterJobId: numOrNull(h.deadLetterJobId),
3917
4278
  groupId: nullish(h.groupId),
3918
4279
  groupTier: nullish(h.groupTier),
3919
- output: parseJsonField(h.output)
4280
+ output: parseJsonField(h.output),
4281
+ dependsOnJobIds: parseOptionalIntArray(h.dependsOnJobIds),
4282
+ dependsOnTags: parseOptionalStringArray(h.dependsOnTags)
3920
4283
  };
3921
4284
  }
4285
+ function parseOptionalIntArray(raw) {
4286
+ if (!raw || raw === "null") return null;
4287
+ try {
4288
+ const arr = JSON.parse(raw);
4289
+ return Array.isArray(arr) && arr.length > 0 ? arr : null;
4290
+ } catch {
4291
+ return null;
4292
+ }
4293
+ }
4294
+ function parseOptionalStringArray(raw) {
4295
+ if (!raw || raw === "null") return null;
4296
+ try {
4297
+ const arr = JSON.parse(raw);
4298
+ return Array.isArray(arr) && arr.length > 0 ? arr : null;
4299
+ } catch {
4300
+ return null;
4301
+ }
4302
+ }
3922
4303
  function parseJsonField(raw) {
3923
4304
  if (!raw || raw === "null") return null;
3924
4305
  try {
@@ -3985,6 +4366,61 @@ var RedisBackend = class {
3985
4366
  nowMs() {
3986
4367
  return Date.now();
3987
4368
  }
4369
+ /**
4370
+ * Cancel pending/waiting jobs that depend on seed jobs (job id or tag), transitively.
4371
+ *
4372
+ * @param initialSeeds - Job ids that failed or were cancelled.
4373
+ * @param rootJobId - Root id for event metadata.
4374
+ */
4375
+ async propagateDependencyCancellationsRedis(initialSeeds, rootJobId) {
4376
+ const cancelled = /* @__PURE__ */ new Set();
4377
+ let frontier = [...new Set(initialSeeds.filter((id) => id > 0))];
4378
+ while (frontier.length > 0) {
4379
+ const pendingRaw = await this.client.sunion(
4380
+ `${this.prefix}status:pending`,
4381
+ `${this.prefix}status:waiting`
4382
+ );
4383
+ const toCancel = [];
4384
+ for (const pidStr of pendingRaw) {
4385
+ const pid = Number(pidStr);
4386
+ if (cancelled.has(pid)) continue;
4387
+ const job = await this.getJob(pid);
4388
+ if (!job || job.status !== "pending" && job.status !== "waiting") {
4389
+ continue;
4390
+ }
4391
+ for (const seedId of frontier) {
4392
+ if (pid === seedId) continue;
4393
+ const seedJob = await this.getJob(seedId);
4394
+ if (!seedJob) continue;
4395
+ const byJobId = job.dependsOnJobIds?.includes(seedId) ?? false;
4396
+ const byTag = job.dependsOnTags && job.dependsOnTags.length > 0 && tagsAreSuperset(seedJob.tags, job.dependsOnTags);
4397
+ if (byJobId || byTag) {
4398
+ toCancel.push(pid);
4399
+ break;
4400
+ }
4401
+ }
4402
+ }
4403
+ if (toCancel.length === 0) break;
4404
+ const now = this.nowMs();
4405
+ for (const jid of toCancel) {
4406
+ const ok = await this.client.eval(
4407
+ CANCEL_JOB_SCRIPT,
4408
+ 1,
4409
+ this.prefix,
4410
+ jid,
4411
+ now
4412
+ );
4413
+ if (Number(ok) === 1) {
4414
+ cancelled.add(jid);
4415
+ await this.recordJobEvent(jid, "cancelled" /* Cancelled */, {
4416
+ rootJobId,
4417
+ dependencyCascade: true
4418
+ });
4419
+ }
4420
+ }
4421
+ frontier = toCancel;
4422
+ }
4423
+ }
3988
4424
  // ── Events ──────────────────────────────────────────────────────────
3989
4425
  async recordJobEvent(jobId, eventType, metadata) {
3990
4426
  try {
@@ -4030,13 +4466,22 @@ var RedisBackend = class {
4030
4466
  retryBackoff = void 0,
4031
4467
  retryDelayMax = void 0,
4032
4468
  deadLetterJobType = void 0,
4033
- group = void 0
4469
+ group = void 0,
4470
+ dependsOn
4034
4471
  }, options) {
4035
4472
  if (options?.db) {
4036
4473
  throw new Error(
4037
4474
  "The db option is not supported with the Redis backend. Transactional job creation is only available with PostgreSQL."
4038
4475
  );
4039
4476
  }
4477
+ const { jobIds: depJobIdsRaw, tags: depTags } = normalizeDependsOn(dependsOn);
4478
+ if (depJobIdsRaw?.some((id) => id < 0)) {
4479
+ throw new Error(
4480
+ "dependsOn.jobIds: batch-relative (negative) ids are only supported in addJobs()"
4481
+ );
4482
+ }
4483
+ const dependsOnJobIdsJson = depJobIdsRaw && depJobIdsRaw.length > 0 ? JSON.stringify(depJobIdsRaw) : "null";
4484
+ const dependsOnTagsJson = depTags && depTags.length > 0 ? JSON.stringify(depTags) : "null";
4040
4485
  const now = this.nowMs();
4041
4486
  const runAtMs = runAt ? runAt.getTime() : 0;
4042
4487
  const result = await this.client.eval(
@@ -4058,7 +4503,9 @@ var RedisBackend = class {
4058
4503
  retryDelayMax !== void 0 ? retryDelayMax.toString() : "null",
4059
4504
  deadLetterJobType ?? "null",
4060
4505
  group?.id ?? "null",
4061
- group?.tier ?? "null"
4506
+ group?.tier ?? "null",
4507
+ dependsOnJobIdsJson,
4508
+ dependsOnTagsJson
4062
4509
  );
4063
4510
  const jobId = Number(result);
4064
4511
  log(
@@ -4068,7 +4515,8 @@ var RedisBackend = class {
4068
4515
  jobType,
4069
4516
  payload,
4070
4517
  tags,
4071
- idempotencyKey
4518
+ idempotencyKey,
4519
+ dependsOn: dependsOnJobIdsJson !== "null" || dependsOnTagsJson !== "null" ? dependsOn : void 0
4072
4520
  });
4073
4521
  return jobId;
4074
4522
  }
@@ -4083,24 +4531,58 @@ var RedisBackend = class {
4083
4531
  "The db option is not supported with the Redis backend. Transactional job creation is only available with PostgreSQL."
4084
4532
  );
4085
4533
  }
4534
+ const needsSequential = jobs.some((j) => {
4535
+ const n = normalizeDependsOn(j.dependsOn);
4536
+ return Boolean(n.jobIds?.length || n.tags?.length);
4537
+ });
4538
+ if (needsSequential) {
4539
+ const ids2 = [];
4540
+ for (let i = 0; i < jobs.length; i++) {
4541
+ let job = jobs[i];
4542
+ const nd = normalizeDependsOn(job.dependsOn);
4543
+ if (nd.jobIds?.some((id) => id < 0)) {
4544
+ const resolvedJobIds = resolveDependsOnJobIdsForBatch(
4545
+ nd.jobIds,
4546
+ ids2
4547
+ );
4548
+ job = {
4549
+ ...job,
4550
+ dependsOn: {
4551
+ jobIds: resolvedJobIds,
4552
+ tags: job.dependsOn?.tags
4553
+ }
4554
+ };
4555
+ }
4556
+ ids2.push(await this.addJob(job));
4557
+ }
4558
+ log(
4559
+ `Batch-inserted ${jobs.length} jobs (sequential), IDs: [${ids2.join(", ")}]`
4560
+ );
4561
+ return ids2;
4562
+ }
4086
4563
  const now = this.nowMs();
4087
- const jobsPayload = jobs.map((job) => ({
4088
- jobType: job.jobType,
4089
- payload: JSON.stringify(job.payload),
4090
- maxAttempts: job.maxAttempts ?? 3,
4091
- priority: job.priority ?? 0,
4092
- runAtMs: job.runAt ? job.runAt.getTime() : 0,
4093
- timeoutMs: job.timeoutMs !== void 0 ? job.timeoutMs.toString() : "null",
4094
- forceKillOnTimeout: job.forceKillOnTimeout ? "true" : "false",
4095
- tags: job.tags ? JSON.stringify(job.tags) : "null",
4096
- idempotencyKey: job.idempotencyKey ?? "null",
4097
- retryDelay: job.retryDelay !== void 0 ? job.retryDelay.toString() : "null",
4098
- retryBackoff: job.retryBackoff !== void 0 ? job.retryBackoff.toString() : "null",
4099
- retryDelayMax: job.retryDelayMax !== void 0 ? job.retryDelayMax.toString() : "null",
4100
- deadLetterJobType: job.deadLetterJobType ?? "null",
4101
- groupId: job.group?.id ?? "null",
4102
- groupTier: job.group?.tier ?? "null"
4103
- }));
4564
+ const jobsPayload = jobs.map((job) => {
4565
+ const nd = normalizeDependsOn(job.dependsOn);
4566
+ return {
4567
+ jobType: job.jobType,
4568
+ payload: JSON.stringify(job.payload),
4569
+ maxAttempts: job.maxAttempts ?? 3,
4570
+ priority: job.priority ?? 0,
4571
+ runAtMs: job.runAt ? job.runAt.getTime() : 0,
4572
+ timeoutMs: job.timeoutMs !== void 0 ? job.timeoutMs.toString() : "null",
4573
+ forceKillOnTimeout: job.forceKillOnTimeout ? "true" : "false",
4574
+ tags: job.tags ? JSON.stringify(job.tags) : "null",
4575
+ idempotencyKey: job.idempotencyKey ?? "null",
4576
+ retryDelay: job.retryDelay !== void 0 ? job.retryDelay.toString() : "null",
4577
+ retryBackoff: job.retryBackoff !== void 0 ? job.retryBackoff.toString() : "null",
4578
+ retryDelayMax: job.retryDelayMax !== void 0 ? job.retryDelayMax.toString() : "null",
4579
+ deadLetterJobType: job.deadLetterJobType ?? "null",
4580
+ groupId: job.group?.id ?? "null",
4581
+ groupTier: job.group?.tier ?? "null",
4582
+ dependsOnJobIds: nd.jobIds?.length ? JSON.stringify(nd.jobIds) : null,
4583
+ dependsOnTags: nd.tags?.length ? JSON.stringify(nd.tags) : null
4584
+ };
4585
+ });
4104
4586
  const result = await this.client.eval(
4105
4587
  ADD_JOBS_SCRIPT,
4106
4588
  1,
@@ -4270,6 +4752,7 @@ var RedisBackend = class {
4270
4752
  failureReason,
4271
4753
  deadLetterJobId
4272
4754
  });
4755
+ await this.propagateDependencyCancellationsRedis([jobId], jobId);
4273
4756
  if (deadLetterJobId) {
4274
4757
  const sourceJob = await this.client.hget(
4275
4758
  `${this.prefix}job:${jobId}`,
@@ -4338,9 +4821,22 @@ var RedisBackend = class {
4338
4821
  }
4339
4822
  async cancelJob(jobId) {
4340
4823
  const now = this.nowMs();
4341
- await this.client.eval(CANCEL_JOB_SCRIPT, 1, this.prefix, jobId, now);
4342
- await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
4343
- log(`Cancelled job ${jobId}`);
4824
+ const ok = await this.client.eval(
4825
+ CANCEL_JOB_SCRIPT,
4826
+ 1,
4827
+ this.prefix,
4828
+ jobId,
4829
+ now
4830
+ );
4831
+ if (Number(ok) === 1) {
4832
+ await this.recordJobEvent(jobId, "cancelled" /* Cancelled */);
4833
+ await this.propagateDependencyCancellationsRedis([jobId], jobId);
4834
+ log(`Cancelled job ${jobId}`);
4835
+ } else {
4836
+ log(
4837
+ `Job ${jobId} could not be cancelled (not in pending/waiting state or does not exist)`
4838
+ );
4839
+ }
4344
4840
  }
4345
4841
  async cancelAllUpcomingJobs(filters) {
4346
4842
  let ids = await this.client.smembers(`${this.prefix}status:pending`);
@@ -5667,6 +6163,6 @@ var withLogContext = (fn, verbose) => (...args) => {
5667
6163
  return fn(...args);
5668
6164
  };
5669
6165
 
5670
- export { FailureReason, JobEventType, PostgresBackend, WaitSignal, getNextCronOccurrence, initJobQueue, testHandlerSerialization, validateCronExpression, validateHandlerSerializable2 as validateHandlerSerializable };
6166
+ export { FailureReason, JobEventType, PostgresBackend, WaitSignal, assertNoDependencyCycle, batchDepRef, getNextCronOccurrence, initJobQueue, normalizeDependsOn, resolveDependsOnJobIdsForBatch, tagsAreSuperset, testHandlerSerialization, validateCronExpression, validateHandlerSerializable2 as validateHandlerSerializable, validatePrerequisiteJobIdsExist };
5671
6167
  //# sourceMappingURL=index.js.map
5672
6168
  //# sourceMappingURL=index.js.map