@nicnocquee/dataqueue 1.19.3 → 1.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.cts CHANGED
@@ -11,6 +11,10 @@ interface JobOptions<PayloadMap, T extends JobType<PayloadMap>> {
11
11
  * Timeout for this job in milliseconds. If not set, uses the processor default or unlimited.
12
12
  */
13
13
  timeoutMs?: number;
14
+ /**
15
+ * Tags for this job. Used for grouping, searching, or batch operations.
16
+ */
17
+ tags?: string[];
14
18
  }
15
19
  declare enum JobEventType {
16
20
  Added = "added",
@@ -80,6 +84,10 @@ interface JobRecord<PayloadMap, T extends JobType<PayloadMap>> {
80
84
  * The time the job was last cancelled.
81
85
  */
82
86
  lastCancelledAt: Date | null;
87
+ /**
88
+ * Tags for this job. Used for grouping, searching, or batch operations.
89
+ */
90
+ tags?: string[];
83
91
  }
84
92
  type JobHandler<PayloadMap, T extends keyof PayloadMap> = (payload: PayloadMap[T], signal: AbortSignal) => Promise<void>;
85
93
  type JobHandlers<PayloadMap> = {
@@ -151,6 +159,7 @@ interface JobQueueConfig {
151
159
  };
152
160
  verbose?: boolean;
153
161
  }
162
+ type TagQueryMode = 'exact' | 'all' | 'any' | 'none';
154
163
  interface JobQueue<PayloadMap> {
155
164
  /**
156
165
  * Add a job to the job queue.
@@ -167,10 +176,40 @@ interface JobQueue<PayloadMap> {
167
176
  * - The jobs are returned in descending order of createdAt.
168
177
  */
169
178
  getJobsByStatus: <T extends JobType<PayloadMap>>(status: JobStatus, limit?: number, offset?: number) => Promise<JobRecord<PayloadMap, T>[]>;
179
+ /**
180
+ * Get jobs by tag(s).
181
+ * - Modes:
182
+ * - 'exact': Jobs with exactly the same tags (no more, no less)
183
+ * - 'all': Jobs that have all the given tags (can have more)
184
+ * - 'any': Jobs that have at least one of the given tags
185
+ * - 'none': Jobs that have none of the given tags
186
+ * - Default mode is 'all'.
187
+ */
188
+ getJobsByTags: <T extends JobType<PayloadMap>>(tags: string[], mode?: TagQueryMode, limit?: number, offset?: number) => Promise<JobRecord<PayloadMap, T>[]>;
170
189
  /**
171
190
  * Get all jobs.
172
191
  */
173
192
  getAllJobs: <T extends JobType<PayloadMap>>(limit?: number, offset?: number) => Promise<JobRecord<PayloadMap, T>[]>;
193
+ /**
194
+ * Get jobs by filters.
195
+ * - Supported filters: jobType, priority, runAt (a Date or an object with
196
+ * gt/gte/lt/lte/eq bounds), and tags ({ values, mode }).
197
+ */
198
+ getJobs: <T extends JobType<PayloadMap>>(filters?: {
199
+ jobType?: string;
200
+ priority?: number;
201
+ runAt?: Date | {
202
+ gt?: Date;
203
+ gte?: Date;
204
+ lt?: Date;
205
+ lte?: Date;
206
+ eq?: Date;
207
+ };
208
+ tags?: {
209
+ values: string[];
210
+ mode?: TagQueryMode;
211
+ };
212
+ }) => Promise<JobRecord<PayloadMap, T>[]>;
174
213
  /**
175
214
  * Retry a job given its ID.
176
215
  * - This will set the job status back to 'pending', clear the locked_at and locked_by, and allow it to be picked up by other workers.
@@ -199,12 +238,23 @@ interface JobQueue<PayloadMap> {
199
238
  * - The filters are:
200
239
  * - jobType: The job type to cancel.
201
240
  * - priority: The priority of the job to cancel.
202
- * - runAt: The time the job is scheduled to run at.
241
+ * - runAt: The time the job is scheduled to run at (now supports gt/gte/lt/lte/eq).
242
+ * - tags: An object with 'values' (string[]) and 'mode' (TagQueryMode) for tag-based cancellation.
203
243
  */
204
244
  cancelAllUpcomingJobs: (filters?: {
205
245
  jobType?: string;
206
246
  priority?: number;
207
- runAt?: Date;
247
+ runAt?: Date | {
248
+ gt?: Date;
249
+ gte?: Date;
250
+ lt?: Date;
251
+ lte?: Date;
252
+ eq?: Date;
253
+ };
254
+ tags?: {
255
+ values: string[];
256
+ mode?: TagQueryMode;
257
+ };
208
258
  }) => Promise<number>;
209
259
  /**
210
260
  * Create a job processor. Handlers must be provided per-processor.
@@ -225,4 +275,4 @@ interface JobQueue<PayloadMap> {
225
275
  */
226
276
  declare const initJobQueue: <PayloadMap = any>(config: JobQueueConfig) => JobQueue<PayloadMap>;
227
277
 
228
- export { FailureReason, type JobEvent, JobEventType, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobRecord, type JobStatus, type JobType, type Processor, type ProcessorOptions, initJobQueue };
278
+ export { FailureReason, type JobEvent, JobEventType, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobRecord, type JobStatus, type JobType, type Processor, type ProcessorOptions, type TagQueryMode, initJobQueue };
package/dist/index.d.ts CHANGED
@@ -11,6 +11,10 @@ interface JobOptions<PayloadMap, T extends JobType<PayloadMap>> {
11
11
  * Timeout for this job in milliseconds. If not set, uses the processor default or unlimited.
12
12
  */
13
13
  timeoutMs?: number;
14
+ /**
15
+ * Tags for this job. Used for grouping, searching, or batch operations.
16
+ */
17
+ tags?: string[];
14
18
  }
15
19
  declare enum JobEventType {
16
20
  Added = "added",
@@ -80,6 +84,10 @@ interface JobRecord<PayloadMap, T extends JobType<PayloadMap>> {
80
84
  * The time the job was last cancelled.
81
85
  */
82
86
  lastCancelledAt: Date | null;
87
+ /**
88
+ * Tags for this job. Used for grouping, searching, or batch operations.
89
+ */
90
+ tags?: string[];
83
91
  }
84
92
  type JobHandler<PayloadMap, T extends keyof PayloadMap> = (payload: PayloadMap[T], signal: AbortSignal) => Promise<void>;
85
93
  type JobHandlers<PayloadMap> = {
@@ -151,6 +159,7 @@ interface JobQueueConfig {
151
159
  };
152
160
  verbose?: boolean;
153
161
  }
162
+ type TagQueryMode = 'exact' | 'all' | 'any' | 'none';
154
163
  interface JobQueue<PayloadMap> {
155
164
  /**
156
165
  * Add a job to the job queue.
@@ -167,10 +176,40 @@ interface JobQueue<PayloadMap> {
167
176
  * - The jobs are returned in descending order of createdAt.
168
177
  */
169
178
  getJobsByStatus: <T extends JobType<PayloadMap>>(status: JobStatus, limit?: number, offset?: number) => Promise<JobRecord<PayloadMap, T>[]>;
179
+ /**
180
+ * Get jobs by tag(s).
181
+ * - Modes:
182
+ * - 'exact': Jobs with exactly the same tags (no more, no less)
183
+ * - 'all': Jobs that have all the given tags (can have more)
184
+ * - 'any': Jobs that have at least one of the given tags
185
+ * - 'none': Jobs that have none of the given tags
186
+ * - Default mode is 'all'.
187
+ */
188
+ getJobsByTags: <T extends JobType<PayloadMap>>(tags: string[], mode?: TagQueryMode, limit?: number, offset?: number) => Promise<JobRecord<PayloadMap, T>[]>;
170
189
  /**
171
190
  * Get all jobs.
172
191
  */
173
192
  getAllJobs: <T extends JobType<PayloadMap>>(limit?: number, offset?: number) => Promise<JobRecord<PayloadMap, T>[]>;
193
+ /**
194
+ * Get jobs by filters.
195
+ * - Supported filters: jobType, priority, runAt (a Date or an object with
196
+ * gt/gte/lt/lte/eq bounds), and tags ({ values, mode }).
197
+ */
198
+ getJobs: <T extends JobType<PayloadMap>>(filters?: {
199
+ jobType?: string;
200
+ priority?: number;
201
+ runAt?: Date | {
202
+ gt?: Date;
203
+ gte?: Date;
204
+ lt?: Date;
205
+ lte?: Date;
206
+ eq?: Date;
207
+ };
208
+ tags?: {
209
+ values: string[];
210
+ mode?: TagQueryMode;
211
+ };
212
+ }) => Promise<JobRecord<PayloadMap, T>[]>;
174
213
  /**
175
214
  * Retry a job given its ID.
176
215
  * - This will set the job status back to 'pending', clear the locked_at and locked_by, and allow it to be picked up by other workers.
@@ -199,12 +238,23 @@ interface JobQueue<PayloadMap> {
199
238
  * - The filters are:
200
239
  * - jobType: The job type to cancel.
201
240
  * - priority: The priority of the job to cancel.
202
- * - runAt: The time the job is scheduled to run at.
241
+ * - runAt: The time the job is scheduled to run at (now supports gt/gte/lt/lte/eq).
242
+ * - tags: An object with 'values' (string[]) and 'mode' (TagQueryMode) for tag-based cancellation.
203
243
  */
204
244
  cancelAllUpcomingJobs: (filters?: {
205
245
  jobType?: string;
206
246
  priority?: number;
207
- runAt?: Date;
247
+ runAt?: Date | {
248
+ gt?: Date;
249
+ gte?: Date;
250
+ lt?: Date;
251
+ lte?: Date;
252
+ eq?: Date;
253
+ };
254
+ tags?: {
255
+ values: string[];
256
+ mode?: TagQueryMode;
257
+ };
208
258
  }) => Promise<number>;
209
259
  /**
210
260
  * Create a job processor. Handlers must be provided per-processor.
@@ -225,4 +275,4 @@ interface JobQueue<PayloadMap> {
225
275
  */
226
276
  declare const initJobQueue: <PayloadMap = any>(config: JobQueueConfig) => JobQueue<PayloadMap>;
227
277
 
228
- export { FailureReason, type JobEvent, JobEventType, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobRecord, type JobStatus, type JobType, type Processor, type ProcessorOptions, initJobQueue };
278
+ export { FailureReason, type JobEvent, JobEventType, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobRecord, type JobStatus, type JobType, type Processor, type ProcessorOptions, type TagQueryMode, initJobQueue };
package/dist/index.js CHANGED
@@ -52,7 +52,8 @@ var addJob = async (pool, {
52
52
  maxAttempts = 3,
53
53
  priority = 0,
54
54
  runAt = null,
55
- timeoutMs = void 0
55
+ timeoutMs = void 0,
56
+ tags = void 0
56
57
  }) => {
57
58
  const client = await pool.connect();
58
59
  try {
@@ -60,29 +61,45 @@ var addJob = async (pool, {
60
61
  if (runAt) {
61
62
  result = await client.query(
62
63
  `INSERT INTO job_queue
63
- (job_type, payload, max_attempts, priority, run_at, timeout_ms)
64
- VALUES ($1, $2, $3, $4, $5, $6)
64
+ (job_type, payload, max_attempts, priority, run_at, timeout_ms, tags)
65
+ VALUES ($1, $2, $3, $4, $5, $6, $7)
65
66
  RETURNING id`,
66
- [jobType, payload, maxAttempts, priority, runAt, timeoutMs ?? null]
67
+ [
68
+ jobType,
69
+ payload,
70
+ maxAttempts,
71
+ priority,
72
+ runAt,
73
+ timeoutMs ?? null,
74
+ tags ?? null
75
+ ]
67
76
  );
68
77
  log(
69
- `Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, runAt ${runAt.toISOString()}, priority ${priority}, maxAttempts ${maxAttempts} jobType ${jobType}`
78
+ `Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, runAt ${runAt.toISOString()}, priority ${priority}, maxAttempts ${maxAttempts} jobType ${jobType}, tags ${JSON.stringify(tags)}`
70
79
  );
71
80
  } else {
72
81
  result = await client.query(
73
82
  `INSERT INTO job_queue
74
- (job_type, payload, max_attempts, priority, timeout_ms)
75
- VALUES ($1, $2, $3, $4, $5)
83
+ (job_type, payload, max_attempts, priority, timeout_ms, tags)
84
+ VALUES ($1, $2, $3, $4, $5, $6)
76
85
  RETURNING id`,
77
- [jobType, payload, maxAttempts, priority, timeoutMs ?? null]
86
+ [
87
+ jobType,
88
+ payload,
89
+ maxAttempts,
90
+ priority,
91
+ timeoutMs ?? null,
92
+ tags ?? null
93
+ ]
78
94
  );
79
95
  log(
80
- `Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, priority ${priority}, maxAttempts ${maxAttempts} jobType ${jobType}`
96
+ `Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, priority ${priority}, maxAttempts ${maxAttempts} jobType ${jobType}, tags ${JSON.stringify(tags)}`
81
97
  );
82
98
  }
83
99
  await recordJobEvent(pool, result.rows[0].id, "added" /* Added */, {
84
100
  jobType,
85
- payload
101
+ payload,
102
+ tags
86
103
  });
87
104
  return result.rows[0].id;
88
105
  } catch (error) {
@@ -339,8 +356,57 @@ var cancelAllUpcomingJobs = async (pool, filters) => {
339
356
  params.push(filters.priority);
340
357
  }
341
358
  if (filters.runAt) {
342
- query += ` AND run_at = $${paramIdx++}`;
343
- params.push(filters.runAt);
359
+ if (filters.runAt instanceof Date) {
360
+ query += ` AND run_at = $${paramIdx++}`;
361
+ params.push(filters.runAt);
362
+ } else if (typeof filters.runAt === "object") {
363
+ const ops = filters.runAt;
364
+ if (ops.gt) {
365
+ query += ` AND run_at > $${paramIdx++}`;
366
+ params.push(ops.gt);
367
+ }
368
+ if (ops.gte) {
369
+ query += ` AND run_at >= $${paramIdx++}`;
370
+ params.push(ops.gte);
371
+ }
372
+ if (ops.lt) {
373
+ query += ` AND run_at < $${paramIdx++}`;
374
+ params.push(ops.lt);
375
+ }
376
+ if (ops.lte) {
377
+ query += ` AND run_at <= $${paramIdx++}`;
378
+ params.push(ops.lte);
379
+ }
380
+ if (ops.eq) {
381
+ query += ` AND run_at = $${paramIdx++}`;
382
+ params.push(ops.eq);
383
+ }
384
+ }
385
+ }
386
+ if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
387
+ const mode = filters.tags.mode || "all";
388
+ const tagValues = filters.tags.values;
389
+ switch (mode) {
390
+ case "exact":
391
+ query += ` AND tags = $${paramIdx++}`;
392
+ params.push(tagValues);
393
+ break;
394
+ case "all":
395
+ query += ` AND tags @> $${paramIdx++}`;
396
+ params.push(tagValues);
397
+ break;
398
+ case "any":
399
+ query += ` AND tags && $${paramIdx++}`;
400
+ params.push(tagValues);
401
+ break;
402
+ case "none":
403
+ query += ` AND NOT (tags && $${paramIdx++})`;
404
+ params.push(tagValues);
405
+ break;
406
+ default:
407
+ query += ` AND tags @> $${paramIdx++}`;
408
+ params.push(tagValues);
409
+ }
344
410
  }
345
411
  }
346
412
  query += "\nRETURNING id";
@@ -429,6 +495,145 @@ var getJobEvents = async (pool, jobId) => {
429
495
  client.release();
430
496
  }
431
497
  };
498
+ var getJobsByTags = async (pool, tags, mode = "all", limit = 100, offset = 0) => {
499
+ const client = await pool.connect();
500
+ try {
501
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags
502
+ FROM job_queue`;
503
+ let params = [];
504
+ switch (mode) {
505
+ case "exact":
506
+ query += " WHERE tags = $1";
507
+ params = [tags];
508
+ break;
509
+ case "all":
510
+ query += " WHERE tags @> $1";
511
+ params = [tags];
512
+ break;
513
+ case "any":
514
+ query += " WHERE tags && $1";
515
+ params = [tags];
516
+ break;
517
+ case "none":
518
+ query += " WHERE NOT (tags && $1)";
519
+ params = [tags];
520
+ break;
521
+ default:
522
+ query += " WHERE tags @> $1";
523
+ params = [tags];
524
+ }
525
+ query += " ORDER BY created_at DESC LIMIT $2 OFFSET $3";
526
+ params.push(limit, offset);
527
+ const result = await client.query(query, params);
528
+ log(
529
+ `Found ${result.rows.length} jobs by tags ${JSON.stringify(tags)} (mode: ${mode})`
530
+ );
531
+ return result.rows.map((job) => ({
532
+ ...job,
533
+ payload: job.payload,
534
+ timeoutMs: job.timeoutMs,
535
+ failureReason: job.failureReason
536
+ }));
537
+ } catch (error) {
538
+ log(
539
+ `Error getting jobs by tags ${JSON.stringify(tags)} (mode: ${mode}): ${error}`
540
+ );
541
+ throw error;
542
+ } finally {
543
+ client.release();
544
+ }
545
+ };
546
+ var getJobs = async (pool, filters, limit = 100, offset = 0) => {
547
+ const client = await pool.connect();
548
+ try {
549
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags FROM job_queue`;
550
+ const params = [];
551
+ let where = [];
552
+ let paramIdx = 1;
553
+ if (filters) {
554
+ if (filters.jobType) {
555
+ where.push(`job_type = $${paramIdx++}`);
556
+ params.push(filters.jobType);
557
+ }
558
+ if (filters.priority !== void 0) {
559
+ where.push(`priority = $${paramIdx++}`);
560
+ params.push(filters.priority);
561
+ }
562
+ if (filters.runAt) {
563
+ if (filters.runAt instanceof Date) {
564
+ where.push(`run_at = $${paramIdx++}`);
565
+ params.push(filters.runAt);
566
+ } else if (typeof filters.runAt === "object" && (filters.runAt.gt !== void 0 || filters.runAt.gte !== void 0 || filters.runAt.lt !== void 0 || filters.runAt.lte !== void 0 || filters.runAt.eq !== void 0)) {
567
+ const ops = filters.runAt;
568
+ if (ops.gt) {
569
+ where.push(`run_at > $${paramIdx++}`);
570
+ params.push(ops.gt);
571
+ }
572
+ if (ops.gte) {
573
+ where.push(`run_at >= $${paramIdx++}`);
574
+ params.push(ops.gte);
575
+ }
576
+ if (ops.lt) {
577
+ where.push(`run_at < $${paramIdx++}`);
578
+ params.push(ops.lt);
579
+ }
580
+ if (ops.lte) {
581
+ where.push(`run_at <= $${paramIdx++}`);
582
+ params.push(ops.lte);
583
+ }
584
+ if (ops.eq) {
585
+ where.push(`run_at = $${paramIdx++}`);
586
+ params.push(ops.eq);
587
+ }
588
+ }
589
+ }
590
+ if (filters.tags && filters.tags.values && filters.tags.values.length > 0) {
591
+ const mode = filters.tags.mode || "all";
592
+ const tagValues = filters.tags.values;
593
+ switch (mode) {
594
+ case "exact":
595
+ where.push(`tags = $${paramIdx++}`);
596
+ params.push(tagValues);
597
+ break;
598
+ case "all":
599
+ where.push(`tags @> $${paramIdx++}`);
600
+ params.push(tagValues);
601
+ break;
602
+ case "any":
603
+ where.push(`tags && $${paramIdx++}`);
604
+ params.push(tagValues);
605
+ break;
606
+ case "none":
607
+ where.push(`NOT (tags && $${paramIdx++})`);
608
+ params.push(tagValues);
609
+ break;
610
+ default:
611
+ where.push(`tags @> $${paramIdx++}`);
612
+ params.push(tagValues);
613
+ }
614
+ }
615
+ }
616
+ if (where.length > 0) {
617
+ query += ` WHERE ${where.join(" AND ")}`;
618
+ }
619
+ paramIdx = params.length + 1;
620
+ query += ` ORDER BY created_at DESC LIMIT $${paramIdx++} OFFSET $${paramIdx}`;
621
+ params.push(limit, offset);
622
+ const result = await client.query(query, params);
623
+ log(`Found ${result.rows.length} jobs`);
624
+ return result.rows.map((job) => ({
625
+ ...job,
626
+ payload: job.payload,
627
+ timeoutMs: job.timeoutMs,
628
+ failureReason: job.failureReason
629
+ }));
630
+ } catch (error) {
631
+ log(`Error getting jobs: ${error}`);
632
+ throw error;
633
+ } finally {
634
+ client.release();
635
+ }
636
+ };
432
637
 
433
638
  // src/processor.ts
434
639
  async function processJobWithHandlers(pool, job, jobHandlers) {
@@ -648,6 +853,10 @@ var initJobQueue = (config) => {
648
853
  (limit, offset) => getAllJobs(pool, limit, offset),
649
854
  config.verbose ?? false
650
855
  ),
856
+ getJobs: withLogContext(
857
+ (filters, limit, offset) => getJobs(pool, filters, limit, offset),
858
+ config.verbose ?? false
859
+ ),
651
860
  retryJob: (jobId) => retryJob(pool, jobId),
652
861
  cleanupOldJobs: (daysToKeep) => cleanupOldJobs(pool, daysToKeep),
653
862
  cancelJob: withLogContext(
@@ -662,6 +871,10 @@ var initJobQueue = (config) => {
662
871
  (maxProcessingTimeMinutes) => reclaimStuckJobs(pool, maxProcessingTimeMinutes),
663
872
  config.verbose ?? false
664
873
  ),
874
+ getJobsByTags: withLogContext(
875
+ (tags, mode = "all", limit, offset) => getJobsByTags(pool, tags, mode, limit, offset),
876
+ config.verbose ?? false
877
+ ),
665
878
  // Job processing
666
879
  createProcessor: (handlers, options) => createProcessor(pool, handlers, options),
667
880
  // Advanced access (for custom operations)