@nicnocquee/dataqueue 1.38.0 → 1.39.0-beta.20260322125514

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.cts CHANGED
@@ -41,6 +41,29 @@ interface JobGroup {
41
41
  /** Optional tier label reserved for future tier-based concurrency controls. */
42
42
  tier?: string;
43
43
  }
44
+ /**
45
+ * Declares prerequisites for a job. Both dimensions use logical AND.
46
+ *
47
+ * - `jobIds`: The job will not run until every listed job is `completed`. If any
48
+ * prerequisite becomes `failed` or `cancelled`, pending dependents are cancelled (transitively).
49
+ * - `tags`: Active barrier — the job will not run while another job (not self) is
50
+ * `pending`, `processing`, or `waiting` whose `tags` are a superset of every tag listed here
51
+ * (Postgres `tags @> depends_on_tags`). If any such job becomes `failed` or `cancelled`,
52
+ * pending jobs that list these tags are cancelled (transitively).
53
+ *
54
+ * **`addJobs` batch references:** In a batch insert, a negative job id encodes (as `-(index + 1)`) a 0-based index
55
+ * into the same batch array: use {@link batchDepRef} (e.g. `batchDepRef(0)` for the first job).
56
+ * Single `addJob` calls must use positive database ids only.
57
+ */
58
+ interface JobDependsOn {
59
+ /** Prerequisite job ids (must all reach `completed`). */
60
+ jobIds?: number[];
61
+ /**
62
+ * Tag drain: wait until no active job (pending/processing/waiting) has all of these tags.
63
+ * Requires matching jobs to succeed (dependents are cancelled if a matching job fails or is cancelled).
64
+ */
65
+ tags?: string[];
66
+ }
44
67
  interface JobOptions<PayloadMap, T extends JobType<PayloadMap>> {
45
68
  jobType: T;
46
69
  payload: PayloadMap[T];
@@ -148,6 +171,10 @@ interface JobOptions<PayloadMap, T extends JobType<PayloadMap>> {
148
171
  * globally limited by `group.id` across all workers/instances.
149
172
  */
150
173
  group?: JobGroup;
174
+ /**
175
+ * Optional prerequisites (job ids and/or tag drain). See {@link JobDependsOn}.
176
+ */
177
+ dependsOn?: JobDependsOn;
151
178
  }
152
179
  /**
153
180
  * Options for editing a pending job.
@@ -297,6 +324,14 @@ interface JobRecord<PayloadMap, T extends JobType<PayloadMap>> {
297
324
  * Group tier for this job, if provided at enqueue time.
298
325
  */
299
326
  groupTier?: string | null;
327
+ /**
328
+ * Prerequisite job ids persisted at enqueue time, if any.
329
+ */
330
+ dependsOnJobIds?: number[] | null;
331
+ /**
332
+ * Tag drain prerequisites persisted at enqueue time, if any.
333
+ */
334
+ dependsOnTags?: string[] | null;
300
335
  }
301
336
  /**
302
337
  * Envelope payload stored in dead-letter jobs.
@@ -1211,6 +1246,54 @@ interface JobQueue<PayloadMap> {
1211
1246
  getRedisClient: () => unknown;
1212
1247
  }
1213
1248
 
1249
+ /**
1250
+ * Returns a negative placeholder id for `addJobs` batch ordering: `-(index + 1)`.
1251
+ * Resolves to the id of the job at `batchIndex` in the same batch after inserts.
1252
+ *
1253
+ * @param batchIndex - Zero-based index into the `addJobs` array.
1254
+ */
1255
+ declare function batchDepRef(batchIndex: number): number;
1256
+ /**
1257
+ * Normalizes optional `dependsOn`: empty arrays become undefined, ids de-duplicated.
1258
+ *
1259
+ * @param dep - Raw dependency options from the caller.
1260
+ */
1261
+ declare function normalizeDependsOn(dep?: JobDependsOn): {
1262
+ jobIds: number[] | undefined;
1263
+ tags: string[] | undefined;
1264
+ };
1265
+ /**
1266
+ * Resolves batch-relative negative ids to real job ids after partial batch inserts.
1267
+ *
1268
+ * @param jobIds - May contain negative placeholders from {@link batchDepRef}.
1269
+ * @param insertedIds - Ids inserted so far, index-aligned with the batch array prefix.
1270
+ */
1271
+ declare function resolveDependsOnJobIdsForBatch(jobIds: number[], insertedIds: number[]): number[];
1272
+ /**
1273
+ * Returns true if `holderTags` contains every tag in `requiredTags` (set inclusion).
1274
+ *
1275
+ * @param holderTags - Tags on job X.
1276
+ * @param requiredTags - `depends_on_tags` on dependent D.
1277
+ */
1278
+ declare function tagsAreSuperset(holderTags: string[] | null | undefined, requiredTags: string[] | null | undefined): boolean;
1279
+ /**
1280
+ * Ensures every id in `jobIds` exists in `job_queue`.
1281
+ *
1282
+ * @param client - Database client.
1283
+ * @param jobIds - Resolved positive job ids.
1284
+ */
1285
+ declare function validatePrerequisiteJobIdsExist(client: DatabaseClient, jobIds: number[]): Promise<void>;
1286
+ /**
1287
+ * Throws if inserting a job with `dependsOnJobIds` would create a cycle.
1288
+ * Uses: jobs reachable downstream from `newJobId` must not include any prerequisite id
1289
+ * (equivalently: a prerequisite must not lie in the downstream closure of `newJobId`).
1290
+ *
1291
+ * @param client - DB client (transaction).
1292
+ * @param newJobId - Id of the row just inserted.
1293
+ * @param dependsOnJobIds - Resolved positive prerequisite ids.
1294
+ */
1295
+ declare function assertNoDependencyCycle(client: DatabaseClient, newJobId: number, dependsOnJobIds: number[]): Promise<void>;
1296
+
1214
1297
  /**
1215
1298
  * Filter options used by getJobs, cancelAllUpcomingJobs, editAllPendingJobs
1216
1299
  */
@@ -1440,7 +1523,7 @@ declare class PostgresBackend implements QueueBackend {
1440
1523
  * client (e.g., inside a transaction) so the job is part of the caller's
1441
1524
  * transaction. The event INSERT also uses the same client.
1442
1525
  */
1443
- addJob<PayloadMap, T extends JobType<PayloadMap>>({ jobType, payload, maxAttempts, priority, runAt, timeoutMs, forceKillOnTimeout, tags, idempotencyKey, retryDelay, retryBackoff, retryDelayMax, deadLetterJobType, group, }: JobOptions<PayloadMap, T>, options?: AddJobOptions): Promise<number>;
1526
+ addJob<PayloadMap, T extends JobType<PayloadMap>>({ jobType, payload, maxAttempts, priority, runAt, timeoutMs, forceKillOnTimeout, tags, idempotencyKey, retryDelay, retryBackoff, retryDelayMax, deadLetterJobType, group, dependsOn, }: JobOptions<PayloadMap, T>, options?: AddJobOptions): Promise<number>;
1444
1527
  /**
1445
1528
  * Insert multiple jobs in a single database round-trip.
1446
1529
  *
@@ -1455,6 +1538,14 @@ declare class PostgresBackend implements QueueBackend {
1455
1538
  getJobsByTags<PayloadMap, T extends JobType<PayloadMap>>(tags: string[], mode?: TagQueryMode, limit?: number, offset?: number): Promise<JobRecord<PayloadMap, T>[]>;
1456
1539
  getNextBatch<PayloadMap, T extends JobType<PayloadMap>>(workerId: string, batchSize?: number, jobType?: string | string[], groupConcurrency?: number): Promise<JobRecord<PayloadMap, T>[]>;
1457
1540
  completeJob(jobId: number, output?: unknown): Promise<void>;
1541
+ /**
1542
+ * Cancel pending/waiting jobs that depend on any seed job (by job id or tag superset), transitively.
1543
+ *
1544
+ * @param client - Database client (must be inside an open transaction when used from fail/cancel).
1545
+ * @param initialSeeds - Job ids that just failed or were cancelled.
1546
+ * @param rootJobId - Original job id for event metadata.
1547
+ */
1548
+ private propagateDependencyCancellations;
1458
1549
  failJob(jobId: number, error: Error, failureReason?: FailureReason): Promise<void>;
1459
1550
  prolongJob(jobId: number): Promise<void>;
1460
1551
  updateProgress(jobId: number, progress: number): Promise<void>;
@@ -1648,4 +1739,4 @@ declare function validateCronExpression(cronExpression: string, CronImpl?: typeo
1648
1739
  */
1649
1740
  declare const initJobQueue: <PayloadMap = any>(config: JobQueueConfig) => JobQueue<PayloadMap>;
1650
1741
 
1651
- export { type AddJobOptions, type CreateTokenOptions, type CronScheduleInput, type CronScheduleOptions, type CronScheduleRecord, type CronScheduleStatus, type DatabaseClient, type DatabaseSSLConfig, type DeadLetterPayloadEnvelope, type EditCronScheduleOptions, type EditJobOptions, FailureReason, type JobContext, type JobEvent, JobEventType, type JobGroup, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobQueueConfigLegacy, type JobRecord, type JobStatus, type JobType, type OnTimeoutCallback, PostgresBackend, type PostgresJobQueueConfig, type Processor, type ProcessorOptions, type QueueBackend, type QueueEmitFn, type QueueEventMap, type QueueEventName, type RedisJobQueueConfig, type RedisTLSConfig, type Supervisor, type SupervisorOptions, type SupervisorRunResult, type TagQueryMode, type WaitDuration, WaitSignal, type WaitToken, type WaitTokenResult, type WaitpointRecord, type WaitpointStatus, getNextCronOccurrence, initJobQueue, testHandlerSerialization, validateCronExpression, validateHandlerSerializable };
1742
+ export { type AddJobOptions, type CreateTokenOptions, type CronScheduleInput, type CronScheduleOptions, type CronScheduleRecord, type CronScheduleStatus, type DatabaseClient, type DatabaseSSLConfig, type DeadLetterPayloadEnvelope, type EditCronScheduleOptions, type EditJobOptions, FailureReason, type JobContext, type JobDependsOn, type JobEvent, JobEventType, type JobGroup, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobQueueConfigLegacy, type JobRecord, type JobStatus, type JobType, type OnTimeoutCallback, PostgresBackend, type PostgresJobQueueConfig, type Processor, type ProcessorOptions, type QueueBackend, type QueueEmitFn, type QueueEventMap, type QueueEventName, type RedisJobQueueConfig, type RedisTLSConfig, type Supervisor, type SupervisorOptions, type SupervisorRunResult, type TagQueryMode, type WaitDuration, WaitSignal, type WaitToken, type WaitTokenResult, type WaitpointRecord, type WaitpointStatus, assertNoDependencyCycle, batchDepRef, getNextCronOccurrence, initJobQueue, normalizeDependsOn, resolveDependsOnJobIdsForBatch, tagsAreSuperset, testHandlerSerialization, validateCronExpression, validateHandlerSerializable, validatePrerequisiteJobIdsExist };
package/dist/index.d.ts CHANGED
@@ -41,6 +41,29 @@ interface JobGroup {
41
41
  /** Optional tier label reserved for future tier-based concurrency controls. */
42
42
  tier?: string;
43
43
  }
44
+ /**
45
+ * Declares prerequisites for a job. Both dimensions use logical AND.
46
+ *
47
+ * - `jobIds`: The job will not run until every listed job is `completed`. If any
48
+ * prerequisite becomes `failed` or `cancelled`, pending dependents are cancelled (transitively).
49
+ * - `tags`: Active barrier — the job will not run while another job (not self) is
50
+ * `pending`, `processing`, or `waiting` whose `tags` are a superset of every tag listed here
51
+ * (Postgres `tags @> depends_on_tags`). If any such job becomes `failed` or `cancelled`,
52
+ * pending jobs that list these tags are cancelled (transitively).
53
+ *
54
+ * **`addJobs` batch references:** In a batch insert, a negative job id encodes (as `-(index + 1)`) a 0-based index
55
+ * into the same batch array: use {@link batchDepRef} (e.g. `batchDepRef(0)` for the first job).
56
+ * Single `addJob` calls must use positive database ids only.
57
+ */
58
+ interface JobDependsOn {
59
+ /** Prerequisite job ids (must all reach `completed`). */
60
+ jobIds?: number[];
61
+ /**
62
+ * Tag drain: wait until no active job (pending/processing/waiting) has all of these tags.
63
+ * Requires matching jobs to succeed (dependents are cancelled if a matching job fails or is cancelled).
64
+ */
65
+ tags?: string[];
66
+ }
44
67
  interface JobOptions<PayloadMap, T extends JobType<PayloadMap>> {
45
68
  jobType: T;
46
69
  payload: PayloadMap[T];
@@ -148,6 +171,10 @@ interface JobOptions<PayloadMap, T extends JobType<PayloadMap>> {
148
171
  * globally limited by `group.id` across all workers/instances.
149
172
  */
150
173
  group?: JobGroup;
174
+ /**
175
+ * Optional prerequisites (job ids and/or tag drain). See {@link JobDependsOn}.
176
+ */
177
+ dependsOn?: JobDependsOn;
151
178
  }
152
179
  /**
153
180
  * Options for editing a pending job.
@@ -297,6 +324,14 @@ interface JobRecord<PayloadMap, T extends JobType<PayloadMap>> {
297
324
  * Group tier for this job, if provided at enqueue time.
298
325
  */
299
326
  groupTier?: string | null;
327
+ /**
328
+ * Prerequisite job ids persisted at enqueue time, if any.
329
+ */
330
+ dependsOnJobIds?: number[] | null;
331
+ /**
332
+ * Tag drain prerequisites persisted at enqueue time, if any.
333
+ */
334
+ dependsOnTags?: string[] | null;
300
335
  }
301
336
  /**
302
337
  * Envelope payload stored in dead-letter jobs.
@@ -1211,6 +1246,54 @@ interface JobQueue<PayloadMap> {
1211
1246
  getRedisClient: () => unknown;
1212
1247
  }
1213
1248
 
1249
+ /**
1250
+ * Returns a negative placeholder id for `addJobs` batch ordering: `-(index + 1)`.
1251
+ * Resolves to the id of the job at `batchIndex` in the same batch after inserts.
1252
+ *
1253
+ * @param batchIndex - Zero-based index into the `addJobs` array.
1254
+ */
1255
+ declare function batchDepRef(batchIndex: number): number;
1256
+ /**
1257
+ * Normalizes optional `dependsOn`: empty arrays become undefined, ids de-duplicated.
1258
+ *
1259
+ * @param dep - Raw dependency options from the caller.
1260
+ */
1261
+ declare function normalizeDependsOn(dep?: JobDependsOn): {
1262
+ jobIds: number[] | undefined;
1263
+ tags: string[] | undefined;
1264
+ };
1265
+ /**
1266
+ * Resolves batch-relative negative ids to real job ids after partial batch inserts.
1267
+ *
1268
+ * @param jobIds - May contain negative placeholders from {@link batchDepRef}.
1269
+ * @param insertedIds - Ids inserted so far, index-aligned with the batch array prefix.
1270
+ */
1271
+ declare function resolveDependsOnJobIdsForBatch(jobIds: number[], insertedIds: number[]): number[];
1272
+ /**
1273
+ * Returns true if `holderTags` contains every tag in `requiredTags` (set inclusion).
1274
+ *
1275
+ * @param holderTags - Tags on job X.
1276
+ * @param requiredTags - `depends_on_tags` on dependent D.
1277
+ */
1278
+ declare function tagsAreSuperset(holderTags: string[] | null | undefined, requiredTags: string[] | null | undefined): boolean;
1279
+ /**
1280
+ * Ensures every id in `jobIds` exists in `job_queue`.
1281
+ *
1282
+ * @param client - Database client.
1283
+ * @param jobIds - Resolved positive job ids.
1284
+ */
1285
+ declare function validatePrerequisiteJobIdsExist(client: DatabaseClient, jobIds: number[]): Promise<void>;
1286
+ /**
1287
+ * Throws if inserting a job with `dependsOnJobIds` would create a cycle.
1288
+ * Uses: jobs reachable downstream from `newJobId` must not include any prerequisite id
1289
+ * (equivalently: a prerequisite must not lie in the downstream closure of `newJobId`).
1290
+ *
1291
+ * @param client - DB client (transaction).
1292
+ * @param newJobId - Id of the row just inserted.
1293
+ * @param dependsOnJobIds - Resolved positive prerequisite ids.
1294
+ */
1295
+ declare function assertNoDependencyCycle(client: DatabaseClient, newJobId: number, dependsOnJobIds: number[]): Promise<void>;
1296
+
1214
1297
  /**
1215
1298
  * Filter options used by getJobs, cancelAllUpcomingJobs, editAllPendingJobs
1216
1299
  */
@@ -1440,7 +1523,7 @@ declare class PostgresBackend implements QueueBackend {
1440
1523
  * client (e.g., inside a transaction) so the job is part of the caller's
1441
1524
  * transaction. The event INSERT also uses the same client.
1442
1525
  */
1443
- addJob<PayloadMap, T extends JobType<PayloadMap>>({ jobType, payload, maxAttempts, priority, runAt, timeoutMs, forceKillOnTimeout, tags, idempotencyKey, retryDelay, retryBackoff, retryDelayMax, deadLetterJobType, group, }: JobOptions<PayloadMap, T>, options?: AddJobOptions): Promise<number>;
1526
+ addJob<PayloadMap, T extends JobType<PayloadMap>>({ jobType, payload, maxAttempts, priority, runAt, timeoutMs, forceKillOnTimeout, tags, idempotencyKey, retryDelay, retryBackoff, retryDelayMax, deadLetterJobType, group, dependsOn, }: JobOptions<PayloadMap, T>, options?: AddJobOptions): Promise<number>;
1444
1527
  /**
1445
1528
  * Insert multiple jobs in a single database round-trip.
1446
1529
  *
@@ -1455,6 +1538,14 @@ declare class PostgresBackend implements QueueBackend {
1455
1538
  getJobsByTags<PayloadMap, T extends JobType<PayloadMap>>(tags: string[], mode?: TagQueryMode, limit?: number, offset?: number): Promise<JobRecord<PayloadMap, T>[]>;
1456
1539
  getNextBatch<PayloadMap, T extends JobType<PayloadMap>>(workerId: string, batchSize?: number, jobType?: string | string[], groupConcurrency?: number): Promise<JobRecord<PayloadMap, T>[]>;
1457
1540
  completeJob(jobId: number, output?: unknown): Promise<void>;
1541
+ /**
1542
+ * Cancel pending/waiting jobs that depend on any seed job (by job id or tag superset), transitively.
1543
+ *
1544
+ * @param client - Database client (must be inside an open transaction when used from fail/cancel).
1545
+ * @param initialSeeds - Job ids that just failed or were cancelled.
1546
+ * @param rootJobId - Original job id for event metadata.
1547
+ */
1548
+ private propagateDependencyCancellations;
1458
1549
  failJob(jobId: number, error: Error, failureReason?: FailureReason): Promise<void>;
1459
1550
  prolongJob(jobId: number): Promise<void>;
1460
1551
  updateProgress(jobId: number, progress: number): Promise<void>;
@@ -1648,4 +1739,4 @@ declare function validateCronExpression(cronExpression: string, CronImpl?: typeo
1648
1739
  */
1649
1740
  declare const initJobQueue: <PayloadMap = any>(config: JobQueueConfig) => JobQueue<PayloadMap>;
1650
1741
 
1651
- export { type AddJobOptions, type CreateTokenOptions, type CronScheduleInput, type CronScheduleOptions, type CronScheduleRecord, type CronScheduleStatus, type DatabaseClient, type DatabaseSSLConfig, type DeadLetterPayloadEnvelope, type EditCronScheduleOptions, type EditJobOptions, FailureReason, type JobContext, type JobEvent, JobEventType, type JobGroup, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobQueueConfigLegacy, type JobRecord, type JobStatus, type JobType, type OnTimeoutCallback, PostgresBackend, type PostgresJobQueueConfig, type Processor, type ProcessorOptions, type QueueBackend, type QueueEmitFn, type QueueEventMap, type QueueEventName, type RedisJobQueueConfig, type RedisTLSConfig, type Supervisor, type SupervisorOptions, type SupervisorRunResult, type TagQueryMode, type WaitDuration, WaitSignal, type WaitToken, type WaitTokenResult, type WaitpointRecord, type WaitpointStatus, getNextCronOccurrence, initJobQueue, testHandlerSerialization, validateCronExpression, validateHandlerSerializable };
1742
+ export { type AddJobOptions, type CreateTokenOptions, type CronScheduleInput, type CronScheduleOptions, type CronScheduleRecord, type CronScheduleStatus, type DatabaseClient, type DatabaseSSLConfig, type DeadLetterPayloadEnvelope, type EditCronScheduleOptions, type EditJobOptions, FailureReason, type JobContext, type JobDependsOn, type JobEvent, JobEventType, type JobGroup, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobQueueConfigLegacy, type JobRecord, type JobStatus, type JobType, type OnTimeoutCallback, PostgresBackend, type PostgresJobQueueConfig, type Processor, type ProcessorOptions, type QueueBackend, type QueueEmitFn, type QueueEventMap, type QueueEventName, type RedisJobQueueConfig, type RedisTLSConfig, type Supervisor, type SupervisorOptions, type SupervisorRunResult, type TagQueryMode, type WaitDuration, WaitSignal, type WaitToken, type WaitTokenResult, type WaitpointRecord, type WaitpointStatus, assertNoDependencyCycle, batchDepRef, getNextCronOccurrence, initJobQueue, normalizeDependsOn, resolveDependsOnJobIdsForBatch, tagsAreSuperset, testHandlerSerialization, validateCronExpression, validateHandlerSerializable, validatePrerequisiteJobIdsExist };