@nicnocquee/dataqueue 1.22.0 → 1.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/README.md +44 -0
  2. package/dist/index.cjs +2822 -583
  3. package/dist/index.cjs.map +1 -1
  4. package/dist/index.d.cts +589 -12
  5. package/dist/index.d.ts +589 -12
  6. package/dist/index.js +2818 -584
  7. package/dist/index.js.map +1 -1
  8. package/migrations/1751131910825_add_timeout_seconds_to_job_queue.sql +2 -2
  9. package/migrations/1751186053000_add_job_events_table.sql +12 -8
  10. package/migrations/1751984773000_add_tags_to_job_queue.sql +1 -1
  11. package/migrations/1765809419000_add_force_kill_on_timeout_to_job_queue.sql +6 -0
  12. package/migrations/1771100000000_add_idempotency_key_to_job_queue.sql +7 -0
  13. package/migrations/1781200000000_add_wait_support.sql +12 -0
  14. package/migrations/1781200000001_create_waitpoints_table.sql +18 -0
  15. package/migrations/1781200000002_add_performance_indexes.sql +34 -0
  16. package/migrations/1781200000003_add_progress_to_job_queue.sql +7 -0
  17. package/package.json +20 -6
  18. package/src/backend.ts +163 -0
  19. package/src/backends/postgres.ts +1111 -0
  20. package/src/backends/redis-scripts.ts +533 -0
  21. package/src/backends/redis.test.ts +543 -0
  22. package/src/backends/redis.ts +834 -0
  23. package/src/db-util.ts +4 -2
  24. package/src/handler-validation.test.ts +414 -0
  25. package/src/handler-validation.ts +168 -0
  26. package/src/index.test.ts +230 -1
  27. package/src/index.ts +128 -32
  28. package/src/processor.test.ts +612 -16
  29. package/src/processor.ts +759 -47
  30. package/src/queue.test.ts +736 -3
  31. package/src/queue.ts +346 -660
  32. package/src/test-util.ts +32 -0
  33. package/src/types.ts +451 -16
  34. package/src/wait.test.ts +698 -0
@@ -0,0 +1,1111 @@
1
+ import { Pool } from 'pg';
2
+ import {
3
+ JobOptions,
4
+ JobRecord,
5
+ FailureReason,
6
+ JobEvent,
7
+ JobEventType,
8
+ TagQueryMode,
9
+ JobType,
10
+ } from '../types.js';
11
+ import { QueueBackend, JobFilters, JobUpdates } from '../backend.js';
12
+ import { log } from '../log-context.js';
13
+
14
+ export class PostgresBackend implements QueueBackend {
15
  // Holds the pg connection pool used by every query in this backend.
  constructor(private pool: Pool) {}

  /** Expose the raw pool for advanced usage. */
  getPool(): Pool {
    return this.pool;
  }
21
+
22
+ // ── Events ──────────────────────────────────────────────────────────
23
+
24
+ async recordJobEvent(
25
+ jobId: number,
26
+ eventType: JobEventType,
27
+ metadata?: any,
28
+ ): Promise<void> {
29
+ const client = await this.pool.connect();
30
+ try {
31
+ await client.query(
32
+ `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
33
+ [jobId, eventType, metadata ? JSON.stringify(metadata) : null],
34
+ );
35
+ } catch (error) {
36
+ log(`Error recording job event for job ${jobId}: ${error}`);
37
+ // Do not throw, to avoid interfering with main job logic
38
+ } finally {
39
+ client.release();
40
+ }
41
+ }
42
+
43
+ async getJobEvents(jobId: number): Promise<JobEvent[]> {
44
+ const client = await this.pool.connect();
45
+ try {
46
+ const res = await client.query(
47
+ `SELECT id, job_id AS "jobId", event_type AS "eventType", metadata, created_at AS "createdAt" FROM job_events WHERE job_id = $1 ORDER BY created_at ASC`,
48
+ [jobId],
49
+ );
50
+ return res.rows as JobEvent[];
51
+ } finally {
52
+ client.release();
53
+ }
54
+ }
55
+
56
+ // ── Job CRUD ──────────────────────────────────────────────────────────
57
+
58
  /**
   * Insert a new job row and record an `Added` event.
   *
   * When `idempotencyKey` is set, the INSERT carries
   * `ON CONFLICT (idempotency_key) ... DO NOTHING` against a partial unique
   * index, and on conflict the id of the already-existing job is returned
   * instead of creating a duplicate.
   *
   * @returns id of the inserted (or pre-existing) job row.
   * @throws database errors, or when the conflict path can neither insert
   *   nor find an existing row for the key (e.g. the existing row was
   *   deleted between the INSERT and the lookup).
   */
  async addJob<PayloadMap, T extends JobType<PayloadMap>>({
    jobType,
    payload,
    maxAttempts = 3,
    priority = 0,
    runAt = null,
    timeoutMs = undefined,
    forceKillOnTimeout = false,
    tags = undefined,
    idempotencyKey = undefined,
  }: JobOptions<PayloadMap, T>): Promise<number> {
    const client = await this.pool.connect();
    try {
      let result;
      // Only add the conflict clause when a key is supplied; the partial
      // index predicate must match the index definition exactly.
      const onConflict = idempotencyKey
        ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING`
        : '';

      if (runAt) {
        // Scheduled job: run_at is provided explicitly.
        result = await client.query(
          `INSERT INTO job_queue
          (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
          VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
          ${onConflict}
          RETURNING id`,
          [
            jobType,
            payload,
            maxAttempts,
            priority,
            runAt,
            timeoutMs ?? null,
            forceKillOnTimeout ?? false,
            tags ?? null,
            idempotencyKey ?? null,
          ],
        );
      } else {
        // Immediate job: run_at is omitted so the column default applies
        // (presumably NOW() — defined in the migration, not visible here).
        result = await client.query(
          `INSERT INTO job_queue
          (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
          VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
          ${onConflict}
          RETURNING id`,
          [
            jobType,
            payload,
            maxAttempts,
            priority,
            timeoutMs ?? null,
            forceKillOnTimeout ?? false,
            tags ?? null,
            idempotencyKey ?? null,
          ],
        );
      }

      // If ON CONFLICT DO NOTHING was triggered, no rows are returned.
      if (result.rows.length === 0 && idempotencyKey) {
        const existing = await client.query(
          `SELECT id FROM job_queue WHERE idempotency_key = $1`,
          [idempotencyKey],
        );
        if (existing.rows.length > 0) {
          log(
            `Job with idempotency key "${idempotencyKey}" already exists (id: ${existing.rows[0].id}), returning existing job`,
          );
          // Note: no `Added` event is recorded for the deduplicated job.
          return existing.rows[0].id;
        }
        throw new Error(
          `Failed to insert job and could not find existing job with idempotency key "${idempotencyKey}"`,
        );
      }

      const jobId = result.rows[0].id;
      log(
        `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ''}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ''}`,
      );
      await this.recordJobEvent(jobId, JobEventType.Added, {
        jobType,
        payload,
        tags,
        idempotencyKey,
      });
      return jobId;
    } catch (error) {
      log(`Error adding job: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
150
+
151
  /**
   * Fetch a single job by id with snake_case columns aliased to the
   * camelCase fields of `JobRecord`.
   *
   * @returns the job record, or null when no row matches.
   * @throws database errors (logged, then rethrown).
   */
  async getJob<PayloadMap, T extends JobType<PayloadMap>>(
    id: number,
  ): Promise<JobRecord<PayloadMap, T> | null> {
    const client = await this.pool.connect();
    try {
      // NOTE(review): `last_failed_at AS "lastFailedAt"` appears twice in
      // this SELECT — harmless (same alias, same value) but redundant.
      const result = await client.query(
        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
        [id],
      );

      if (result.rows.length === 0) {
        log(`Job ${id} not found`);
        return null;
      }

      log(`Found job ${id}`);
      const job = result.rows[0] as JobRecord<PayloadMap, T>;
      // The spread produces a shallow copy; the re-assigned fields below are
      // already present on `job`, so they are effectively no-ops.
      return {
        ...job,
        payload: job.payload,
        timeoutMs: job.timeoutMs,
        forceKillOnTimeout: job.forceKillOnTimeout,
        failureReason: job.failureReason,
      };
    } catch (error) {
      log(`Error getting job ${id}: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
182
+
183
  /**
   * Page through jobs in a given status, newest first (created_at DESC),
   * using classic LIMIT/OFFSET pagination.
   *
   * NOTE(review): unlike getJob/getJobs, this SELECT does not include the
   * `tags` column, so `tags` is absent from the returned records — confirm
   * whether that is intentional.
   */
  async getJobsByStatus<PayloadMap, T extends JobType<PayloadMap>>(
    status: string,
    limit = 100,
    offset = 0,
  ): Promise<JobRecord<PayloadMap, T>[]> {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
        [status, limit, offset],
      );
      log(`Found ${result.rows.length} jobs by status ${status}`);
      // Shallow-copy each row; the explicit fields are no-op re-assignments.
      return result.rows.map((job) => ({
        ...job,
        payload: job.payload,
        timeoutMs: job.timeoutMs,
        forceKillOnTimeout: job.forceKillOnTimeout,
        failureReason: job.failureReason,
      }));
    } catch (error) {
      log(`Error getting jobs by status ${status}: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
209
+
210
  /**
   * Page through all jobs regardless of status, newest first
   * (created_at DESC), with LIMIT/OFFSET pagination.
   *
   * NOTE(review): like getJobsByStatus, this SELECT omits the `tags`
   * column — confirm whether that is intentional.
   */
  async getAllJobs<PayloadMap, T extends JobType<PayloadMap>>(
    limit = 100,
    offset = 0,
  ): Promise<JobRecord<PayloadMap, T>[]> {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
        `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
        [limit, offset],
      );
      log(`Found ${result.rows.length} jobs (all)`);
      // Shallow-copy each row; explicit fields are no-op re-assignments.
      return result.rows.map((job) => ({
        ...job,
        payload: job.payload,
        timeoutMs: job.timeoutMs,
        forceKillOnTimeout: job.forceKillOnTimeout,
      }));
    } catch (error) {
      log(`Error getting all jobs: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
234
+
235
  /**
   * General job listing with optional filters, ordered by id DESC.
   *
   * Filters supported: jobType, priority, runAt (exact Date or
   * gt/gte/lt/lte/eq range object), tags (with exact/all/any/none match
   * modes mapped to Postgres array operators), and `cursor` for keyset
   * pagination (`id < cursor`).
   *
   * Pagination: when `filters.cursor` is set, keyset pagination is used
   * and the `offset` argument is silently ignored; otherwise classic
   * LIMIT/OFFSET applies.
   */
  async getJobs<PayloadMap, T extends JobType<PayloadMap>>(
    filters?: JobFilters,
    limit = 100,
    offset = 0,
  ): Promise<JobRecord<PayloadMap, T>[]> {
    const client = await this.pool.connect();
    try {
      let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue`;
      const params: any[] = [];
      const where: string[] = [];
      let paramIdx = 1;
      if (filters) {
        if (filters.jobType) {
          where.push(`job_type = $${paramIdx++}`);
          params.push(filters.jobType);
        }
        if (filters.priority !== undefined) {
          where.push(`priority = $${paramIdx++}`);
          params.push(filters.priority);
        }
        if (filters.runAt) {
          if (filters.runAt instanceof Date) {
            // Exact timestamp match.
            where.push(`run_at = $${paramIdx++}`);
            params.push(filters.runAt);
          } else if (
            typeof filters.runAt === 'object' &&
            (filters.runAt.gt !== undefined ||
              filters.runAt.gte !== undefined ||
              filters.runAt.lt !== undefined ||
              filters.runAt.lte !== undefined ||
              filters.runAt.eq !== undefined)
          ) {
            // Range object: each present operator adds one predicate, so
            // e.g. {gte, lt} yields a half-open interval.
            const ops = filters.runAt as {
              gt?: Date;
              gte?: Date;
              lt?: Date;
              lte?: Date;
              eq?: Date;
            };
            if (ops.gt) {
              where.push(`run_at > $${paramIdx++}`);
              params.push(ops.gt);
            }
            if (ops.gte) {
              where.push(`run_at >= $${paramIdx++}`);
              params.push(ops.gte);
            }
            if (ops.lt) {
              where.push(`run_at < $${paramIdx++}`);
              params.push(ops.lt);
            }
            if (ops.lte) {
              where.push(`run_at <= $${paramIdx++}`);
              params.push(ops.lte);
            }
            if (ops.eq) {
              where.push(`run_at = $${paramIdx++}`);
              params.push(ops.eq);
            }
          }
        }
        if (
          filters.tags &&
          filters.tags.values &&
          filters.tags.values.length > 0
        ) {
          // Array operators: @> contains-all, && overlaps-any, = exact.
          const mode = filters.tags.mode || 'all';
          const tagValues = filters.tags.values;
          switch (mode) {
            case 'exact':
              where.push(`tags = $${paramIdx++}`);
              params.push(tagValues);
              break;
            case 'all':
              where.push(`tags @> $${paramIdx++}`);
              params.push(tagValues);
              break;
            case 'any':
              where.push(`tags && $${paramIdx++}`);
              params.push(tagValues);
              break;
            case 'none':
              where.push(`NOT (tags && $${paramIdx++})`);
              params.push(tagValues);
              break;
            default:
              // Unknown mode falls back to 'all' semantics.
              where.push(`tags @> $${paramIdx++}`);
              params.push(tagValues);
          }
        }
        // Keyset pagination: use cursor (id < cursor) instead of OFFSET
        if (filters.cursor !== undefined) {
          where.push(`id < $${paramIdx++}`);
          params.push(filters.cursor);
        }
      }
      if (where.length > 0) {
        query += ` WHERE ${where.join(' AND ')}`;
      }
      // Re-sync the placeholder index with the params actually pushed.
      paramIdx = params.length + 1;
      // Use ORDER BY id DESC for consistent keyset pagination
      query += ` ORDER BY id DESC LIMIT $${paramIdx++}`;
      // Only apply OFFSET when cursor is not used
      if (!filters?.cursor) {
        query += ` OFFSET $${paramIdx}`;
        params.push(limit, offset);
      } else {
        params.push(limit);
      }
      const result = await client.query(query, params);
      log(`Found ${result.rows.length} jobs`);
      // Shallow-copy each row; explicit fields are no-op re-assignments.
      return result.rows.map((job) => ({
        ...job,
        payload: job.payload,
        timeoutMs: job.timeoutMs,
        forceKillOnTimeout: job.forceKillOnTimeout,
        failureReason: job.failureReason,
      }));
    } catch (error) {
      log(`Error getting jobs: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
360
+
361
+ async getJobsByTags<PayloadMap, T extends JobType<PayloadMap>>(
362
+ tags: string[],
363
+ mode: TagQueryMode = 'all',
364
+ limit = 100,
365
+ offset = 0,
366
+ ): Promise<JobRecord<PayloadMap, T>[]> {
367
+ const client = await this.pool.connect();
368
+ try {
369
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
370
+ FROM job_queue`;
371
+ let params: any[] = [];
372
+ switch (mode) {
373
+ case 'exact':
374
+ query += ' WHERE tags = $1';
375
+ params = [tags];
376
+ break;
377
+ case 'all':
378
+ query += ' WHERE tags @> $1';
379
+ params = [tags];
380
+ break;
381
+ case 'any':
382
+ query += ' WHERE tags && $1';
383
+ params = [tags];
384
+ break;
385
+ case 'none':
386
+ query += ' WHERE NOT (tags && $1)';
387
+ params = [tags];
388
+ break;
389
+ default:
390
+ query += ' WHERE tags @> $1';
391
+ params = [tags];
392
+ }
393
+ query += ' ORDER BY created_at DESC LIMIT $2 OFFSET $3';
394
+ params.push(limit, offset);
395
+ const result = await client.query(query, params);
396
+ log(
397
+ `Found ${result.rows.length} jobs by tags ${JSON.stringify(tags)} (mode: ${mode})`,
398
+ );
399
+ return result.rows.map((job) => ({
400
+ ...job,
401
+ payload: job.payload,
402
+ timeoutMs: job.timeoutMs,
403
+ forceKillOnTimeout: job.forceKillOnTimeout,
404
+ failureReason: job.failureReason,
405
+ }));
406
+ } catch (error) {
407
+ log(
408
+ `Error getting jobs by tags ${JSON.stringify(tags)} (mode: ${mode}): ${error}`,
409
+ );
410
+ throw error;
411
+ } finally {
412
+ client.release();
413
+ }
414
+ }
415
+
416
+ // ── Processing lifecycle ──────────────────────────────────────────────
417
+
418
  /**
   * Atomically claim up to `batchSize` runnable jobs for `workerId`.
   *
   * Eligible rows are: pending jobs whose run_at is due; failed jobs whose
   * backoff (next_attempt_at) has elapsed and attempts < max_attempts; and
   * 'waiting' jobs whose wait_until deadline has passed (duration waits
   * only — rows waiting on a token, wait_token_id IS NOT NULL, are skipped).
   *
   * Claiming runs in a single transaction; the inner SELECT uses
   * FOR UPDATE SKIP LOCKED so concurrent workers never grab the same rows.
   * `attempts` is incremented except when resuming a 'waiting' job (the
   * CASE expressions read the row's pre-update status/attempts values).
   *
   * After COMMIT, one 'processing' event per claimed job is inserted via
   * this.recordJobEventsBatch (defined elsewhere in this class — not
   * visible in this chunk; presumably a single multi-row INSERT).
   *
   * @param jobType optional filter — a single type or a list of types.
   */
  async getNextBatch<PayloadMap, T extends JobType<PayloadMap>>(
    workerId: string,
    batchSize = 10,
    jobType?: string | string[],
  ): Promise<JobRecord<PayloadMap, T>[]> {
    const client = await this.pool.connect();
    try {
      await client.query('BEGIN');

      let jobTypeFilter = '';
      const params: any[] = [workerId, batchSize];
      if (jobType) {
        if (Array.isArray(jobType)) {
          jobTypeFilter = ` AND job_type = ANY($3)`;
          params.push(jobType);
        } else {
          jobTypeFilter = ` AND job_type = $3`;
          params.push(jobType);
        }
      }

      const result = await client.query(
        `
        UPDATE job_queue
        SET status = 'processing',
            locked_at = NOW(),
            locked_by = $1,
            attempts = CASE WHEN status = 'waiting' THEN attempts ELSE attempts + 1 END,
            updated_at = NOW(),
            pending_reason = NULL,
            started_at = COALESCE(started_at, NOW()),
            last_retried_at = CASE WHEN status != 'waiting' AND attempts > 0 THEN NOW() ELSE last_retried_at END,
            wait_until = NULL
        WHERE id IN (
          SELECT id FROM job_queue
          WHERE (
            (
              (status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
              AND (attempts < max_attempts)
              AND run_at <= NOW()
            )
            OR (
              status = 'waiting'
              AND wait_until IS NOT NULL
              AND wait_until <= NOW()
              AND wait_token_id IS NULL
            )
          )
          ${jobTypeFilter}
          ORDER BY priority DESC, created_at ASC
          LIMIT $2
          FOR UPDATE SKIP LOCKED
        )
        RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
        `,
        params,
      );

      log(`Found ${result.rows.length} jobs to process`);
      await client.query('COMMIT');

      // Batch-insert processing events in a single query
      if (result.rows.length > 0) {
        await this.recordJobEventsBatch(
          result.rows.map((row) => ({
            jobId: row.id,
            eventType: JobEventType.Processing,
          })),
        );
      }

      return result.rows.map((job) => ({
        ...job,
        payload: job.payload,
        timeoutMs: job.timeoutMs,
        forceKillOnTimeout: job.forceKillOnTimeout,
      }));
    } catch (error) {
      log(`Error getting next batch: ${error}`);
      await client.query('ROLLBACK');
      throw error;
    } finally {
      client.release();
    }
  }
503
+
504
+ async completeJob(jobId: number): Promise<void> {
505
+ const client = await this.pool.connect();
506
+ try {
507
+ const result = await client.query(
508
+ `
509
+ UPDATE job_queue
510
+ SET status = 'completed', updated_at = NOW(), completed_at = NOW(),
511
+ step_data = NULL, wait_until = NULL, wait_token_id = NULL
512
+ WHERE id = $1 AND status = 'processing'
513
+ `,
514
+ [jobId],
515
+ );
516
+ if (result.rowCount === 0) {
517
+ log(
518
+ `Job ${jobId} could not be completed (not in processing state or does not exist)`,
519
+ );
520
+ }
521
+ await this.recordJobEvent(jobId, JobEventType.Completed);
522
+ log(`Completed job ${jobId}`);
523
+ } catch (error) {
524
+ log(`Error completing job ${jobId}: ${error}`);
525
+ throw error;
526
+ } finally {
527
+ client.release();
528
+ }
529
+ }
530
+
531
  /**
   * Mark a processing/pending job as failed, append the error to
   * error_history, and schedule a retry with exponential backoff:
   * next_attempt_at = NOW() + 2^attempts minutes while attempts <
   * max_attempts, otherwise NULL (no further retries).
   *
   * NOTE(review): the `Failed` event is recorded even when the UPDATE
   * matched no row (job not in processing/pending) — confirm whether the
   * audit trail should only reflect actual transitions.
   */
  async failJob(
    jobId: number,
    error: Error,
    failureReason?: FailureReason,
  ): Promise<void> {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
        `
        UPDATE job_queue
        SET status = 'failed',
            updated_at = NOW(),
            next_attempt_at = CASE
              WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
              ELSE NULL
            END,
            error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
            failure_reason = $3,
            last_failed_at = NOW()
        WHERE id = $1 AND status IN ('processing', 'pending')
        `,
        [
          jobId,
          // One-element JSON array so the jsonb || concat appends it.
          JSON.stringify([
            {
              message: error.message || String(error),
              timestamp: new Date().toISOString(),
            },
          ]),
          failureReason ?? null,
        ],
      );
      if (result.rowCount === 0) {
        log(
          `Job ${jobId} could not be failed (not in processing/pending state or does not exist)`,
        );
      }
      await this.recordJobEvent(jobId, JobEventType.Failed, {
        message: error.message || String(error),
        failureReason,
      });
      log(`Failed job ${jobId}`);
    } catch (err) {
      log(`Error failing job ${jobId}: ${err}`);
      throw err;
    } finally {
      client.release();
    }
  }
580
+
581
  /**
   * Refresh a processing job's lock timestamp (heartbeat) so it is not
   * treated as stale. Best-effort: errors are logged, never thrown.
   */
  async prolongJob(jobId: number): Promise<void> {
    const client = await this.pool.connect();
    try {
      await client.query(
        `
        UPDATE job_queue
        SET locked_at = NOW(), updated_at = NOW()
        WHERE id = $1 AND status = 'processing'
        `,
        [jobId],
      );
      await this.recordJobEvent(jobId, JobEventType.Prolonged);
      log(`Prolonged job ${jobId}`);
    } catch (error) {
      log(`Error prolonging job ${jobId}: ${error}`);
      // Do not throw -- prolong is best-effort
    } finally {
      client.release();
    }
  }
601
+
602
+ // ── Progress ──────────────────────────────────────────────────────────
603
+
604
+ async updateProgress(jobId: number, progress: number): Promise<void> {
605
+ const client = await this.pool.connect();
606
+ try {
607
+ await client.query(
608
+ `UPDATE job_queue SET progress = $2, updated_at = NOW() WHERE id = $1`,
609
+ [jobId, progress],
610
+ );
611
+ log(`Updated progress for job ${jobId}: ${progress}%`);
612
+ } catch (error) {
613
+ log(`Error updating progress for job ${jobId}: ${error}`);
614
+ // Best-effort: do not throw to avoid killing the running handler
615
+ } finally {
616
+ client.release();
617
+ }
618
+ }
619
+
620
+ // ── Job management ────────────────────────────────────────────────────
621
+
622
  /**
   * Manually requeue a failed or stuck-processing job: reset it to
   * 'pending', clear its lock, and make it eligible immediately
   * (next_attempt_at = NOW()). `attempts` is NOT reset here.
   *
   * NOTE(review): the `Retried` event is recorded even when no row was
   * updated — confirm whether that is intended.
   */
  async retryJob(jobId: number): Promise<void> {
    const client = await this.pool.connect();
    try {
      const result = await client.query(
        `
        UPDATE job_queue
        SET status = 'pending',
            updated_at = NOW(),
            locked_at = NULL,
            locked_by = NULL,
            next_attempt_at = NOW(),
            last_retried_at = NOW()
        WHERE id = $1 AND status IN ('failed', 'processing')
        `,
        [jobId],
      );
      if (result.rowCount === 0) {
        log(
          `Job ${jobId} could not be retried (not in failed/processing state or does not exist)`,
        );
      }
      await this.recordJobEvent(jobId, JobEventType.Retried);
      log(`Retried job ${jobId}`);
    } catch (error) {
      log(`Error retrying job ${jobId}: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
652
+
653
  /**
   * Cancel a job that has not started: only rows in 'pending' or
   * 'waiting' transition to 'cancelled'; wait state is cleared.
   *
   * NOTE(review): the `Cancelled` event is recorded unconditionally, even
   * when the UPDATE matched no row — confirm whether that is intended.
   */
  async cancelJob(jobId: number): Promise<void> {
    const client = await this.pool.connect();
    try {
      await client.query(
        `
        UPDATE job_queue
        SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW(),
            wait_until = NULL, wait_token_id = NULL
        WHERE id = $1 AND status IN ('pending', 'waiting')
        `,
        [jobId],
      );
      await this.recordJobEvent(jobId, JobEventType.Cancelled);
      log(`Cancelled job ${jobId}`);
    } catch (error) {
      log(`Error cancelling job ${jobId}: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
674
+
675
  /**
   * Bulk-cancel pending jobs, optionally narrowed by jobType, priority,
   * runAt (exact or gt/gte/lt/lte/eq range), and tags (exact/all/any/none
   * array-operator modes, defaulting to 'all').
   *
   * @returns the number of rows transitioned to 'cancelled'.
   *
   * NOTE(review): unlike cancelJob this only targets status = 'pending'
   * (not 'waiting'), sets no last_cancelled_at, and records no Cancelled
   * events for the affected jobs — confirm these differences are intended.
   */
  async cancelAllUpcomingJobs(filters?: JobFilters): Promise<number> {
    const client = await this.pool.connect();
    try {
      let query = `
      UPDATE job_queue
      SET status = 'cancelled', updated_at = NOW()
      WHERE status = 'pending'`;
      const params: any[] = [];
      let paramIdx = 1;
      if (filters) {
        if (filters.jobType) {
          query += ` AND job_type = $${paramIdx++}`;
          params.push(filters.jobType);
        }
        if (filters.priority !== undefined) {
          query += ` AND priority = $${paramIdx++}`;
          params.push(filters.priority);
        }
        if (filters.runAt) {
          if (filters.runAt instanceof Date) {
            query += ` AND run_at = $${paramIdx++}`;
            params.push(filters.runAt);
          } else if (typeof filters.runAt === 'object') {
            // Each present operator adds one predicate (ANDed together).
            const ops = filters.runAt;
            if (ops.gt) {
              query += ` AND run_at > $${paramIdx++}`;
              params.push(ops.gt);
            }
            if (ops.gte) {
              query += ` AND run_at >= $${paramIdx++}`;
              params.push(ops.gte);
            }
            if (ops.lt) {
              query += ` AND run_at < $${paramIdx++}`;
              params.push(ops.lt);
            }
            if (ops.lte) {
              query += ` AND run_at <= $${paramIdx++}`;
              params.push(ops.lte);
            }
            if (ops.eq) {
              query += ` AND run_at = $${paramIdx++}`;
              params.push(ops.eq);
            }
          }
        }
        if (
          filters.tags &&
          filters.tags.values &&
          filters.tags.values.length > 0
        ) {
          const mode = filters.tags.mode || 'all';
          const tagValues = filters.tags.values;
          switch (mode) {
            case 'exact':
              query += ` AND tags = $${paramIdx++}`;
              params.push(tagValues);
              break;
            case 'all':
              query += ` AND tags @> $${paramIdx++}`;
              params.push(tagValues);
              break;
            case 'any':
              query += ` AND tags && $${paramIdx++}`;
              params.push(tagValues);
              break;
            case 'none':
              query += ` AND NOT (tags && $${paramIdx++})`;
              params.push(tagValues);
              break;
            default:
              // Unknown mode: behave like 'all'.
              query += ` AND tags @> $${paramIdx++}`;
              params.push(tagValues);
          }
        }
      }
      query += '\nRETURNING id';
      const result = await client.query(query, params);
      log(`Cancelled ${result.rowCount} jobs`);
      return result.rowCount || 0;
    } catch (error) {
      log(`Error cancelling upcoming jobs: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
762
+
763
  /**
   * Edit a pending job in place. Only fields present in `updates` are
   * touched (payload, maxAttempts, priority, runAt, timeoutMs, tags);
   * `runAt: null` means "run as soon as possible" (run_at = NOW()).
   *
   * The UPDATE is restricted to status = 'pending'; jobs in any other
   * state are left untouched.
   *
   * NOTE(review): the `Edited` event is recorded even when the UPDATE
   * matched no row (job not pending / missing) — confirm intent.
   */
  async editJob(jobId: number, updates: JobUpdates): Promise<void> {
    const client = await this.pool.connect();
    try {
      const updateFields: string[] = [];
      const params: any[] = [];
      let paramIdx = 1;

      if (updates.payload !== undefined) {
        updateFields.push(`payload = $${paramIdx++}`);
        params.push(updates.payload);
      }
      if (updates.maxAttempts !== undefined) {
        updateFields.push(`max_attempts = $${paramIdx++}`);
        params.push(updates.maxAttempts);
      }
      if (updates.priority !== undefined) {
        updateFields.push(`priority = $${paramIdx++}`);
        params.push(updates.priority);
      }
      if (updates.runAt !== undefined) {
        if (updates.runAt === null) {
          // null is an explicit "run now" request.
          updateFields.push(`run_at = NOW()`);
        } else {
          updateFields.push(`run_at = $${paramIdx++}`);
          params.push(updates.runAt);
        }
      }
      if (updates.timeoutMs !== undefined) {
        updateFields.push(`timeout_ms = $${paramIdx++}`);
        params.push(updates.timeoutMs ?? null);
      }
      if (updates.tags !== undefined) {
        updateFields.push(`tags = $${paramIdx++}`);
        params.push(updates.tags ?? null);
      }

      if (updateFields.length === 0) {
        log(`No fields to update for job ${jobId}`);
        return;
      }

      updateFields.push(`updated_at = NOW()`);
      params.push(jobId);

      const query = `
      UPDATE job_queue
      SET ${updateFields.join(', ')}
      WHERE id = $${paramIdx} AND status = 'pending'
      `;

      await client.query(query, params);

      // Mirror the applied changes into the Edited event's metadata.
      const metadata: any = {};
      if (updates.payload !== undefined) metadata.payload = updates.payload;
      if (updates.maxAttempts !== undefined)
        metadata.maxAttempts = updates.maxAttempts;
      if (updates.priority !== undefined) metadata.priority = updates.priority;
      if (updates.runAt !== undefined) metadata.runAt = updates.runAt;
      if (updates.timeoutMs !== undefined)
        metadata.timeoutMs = updates.timeoutMs;
      if (updates.tags !== undefined) metadata.tags = updates.tags;

      await this.recordJobEvent(jobId, JobEventType.Edited, metadata);
      log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
    } catch (error) {
      log(`Error editing job ${jobId}: ${error}`);
      throw error;
    } finally {
      client.release();
    }
  }
834
+
835
+ async editAllPendingJobs(
836
+ filters: JobFilters | undefined = undefined,
837
+ updates: JobUpdates,
838
+ ): Promise<number> {
839
+ const client = await this.pool.connect();
840
+ try {
841
+ const updateFields: string[] = [];
842
+ const params: any[] = [];
843
+ let paramIdx = 1;
844
+
845
+ if (updates.payload !== undefined) {
846
+ updateFields.push(`payload = $${paramIdx++}`);
847
+ params.push(updates.payload);
848
+ }
849
+ if (updates.maxAttempts !== undefined) {
850
+ updateFields.push(`max_attempts = $${paramIdx++}`);
851
+ params.push(updates.maxAttempts);
852
+ }
853
+ if (updates.priority !== undefined) {
854
+ updateFields.push(`priority = $${paramIdx++}`);
855
+ params.push(updates.priority);
856
+ }
857
+ if (updates.runAt !== undefined) {
858
+ if (updates.runAt === null) {
859
+ updateFields.push(`run_at = NOW()`);
860
+ } else {
861
+ updateFields.push(`run_at = $${paramIdx++}`);
862
+ params.push(updates.runAt);
863
+ }
864
+ }
865
+ if (updates.timeoutMs !== undefined) {
866
+ updateFields.push(`timeout_ms = $${paramIdx++}`);
867
+ params.push(updates.timeoutMs ?? null);
868
+ }
869
+ if (updates.tags !== undefined) {
870
+ updateFields.push(`tags = $${paramIdx++}`);
871
+ params.push(updates.tags ?? null);
872
+ }
873
+
874
+ if (updateFields.length === 0) {
875
+ log(`No fields to update for batch edit`);
876
+ return 0;
877
+ }
878
+
879
+ updateFields.push(`updated_at = NOW()`);
880
+
881
+ let query = `
882
+ UPDATE job_queue
883
+ SET ${updateFields.join(', ')}
884
+ WHERE status = 'pending'`;
885
+
886
+ if (filters) {
887
+ if (filters.jobType) {
888
+ query += ` AND job_type = $${paramIdx++}`;
889
+ params.push(filters.jobType);
890
+ }
891
+ if (filters.priority !== undefined) {
892
+ query += ` AND priority = $${paramIdx++}`;
893
+ params.push(filters.priority);
894
+ }
895
+ if (filters.runAt) {
896
+ if (filters.runAt instanceof Date) {
897
+ query += ` AND run_at = $${paramIdx++}`;
898
+ params.push(filters.runAt);
899
+ } else if (typeof filters.runAt === 'object') {
900
+ const ops = filters.runAt;
901
+ if (ops.gt) {
902
+ query += ` AND run_at > $${paramIdx++}`;
903
+ params.push(ops.gt);
904
+ }
905
+ if (ops.gte) {
906
+ query += ` AND run_at >= $${paramIdx++}`;
907
+ params.push(ops.gte);
908
+ }
909
+ if (ops.lt) {
910
+ query += ` AND run_at < $${paramIdx++}`;
911
+ params.push(ops.lt);
912
+ }
913
+ if (ops.lte) {
914
+ query += ` AND run_at <= $${paramIdx++}`;
915
+ params.push(ops.lte);
916
+ }
917
+ if (ops.eq) {
918
+ query += ` AND run_at = $${paramIdx++}`;
919
+ params.push(ops.eq);
920
+ }
921
+ }
922
+ }
923
+ if (
924
+ filters.tags &&
925
+ filters.tags.values &&
926
+ filters.tags.values.length > 0
927
+ ) {
928
+ const mode = filters.tags.mode || 'all';
929
+ const tagValues = filters.tags.values;
930
+ switch (mode) {
931
+ case 'exact':
932
+ query += ` AND tags = $${paramIdx++}`;
933
+ params.push(tagValues);
934
+ break;
935
+ case 'all':
936
+ query += ` AND tags @> $${paramIdx++}`;
937
+ params.push(tagValues);
938
+ break;
939
+ case 'any':
940
+ query += ` AND tags && $${paramIdx++}`;
941
+ params.push(tagValues);
942
+ break;
943
+ case 'none':
944
+ query += ` AND NOT (tags && $${paramIdx++})`;
945
+ params.push(tagValues);
946
+ break;
947
+ default:
948
+ query += ` AND tags @> $${paramIdx++}`;
949
+ params.push(tagValues);
950
+ }
951
+ }
952
+ }
953
+ query += '\nRETURNING id';
954
+
955
+ const result = await client.query(query, params);
956
+ const editedCount = result.rowCount || 0;
957
+
958
+ const metadata: any = {};
959
+ if (updates.payload !== undefined) metadata.payload = updates.payload;
960
+ if (updates.maxAttempts !== undefined)
961
+ metadata.maxAttempts = updates.maxAttempts;
962
+ if (updates.priority !== undefined) metadata.priority = updates.priority;
963
+ if (updates.runAt !== undefined) metadata.runAt = updates.runAt;
964
+ if (updates.timeoutMs !== undefined)
965
+ metadata.timeoutMs = updates.timeoutMs;
966
+ if (updates.tags !== undefined) metadata.tags = updates.tags;
967
+
968
+ for (const row of result.rows) {
969
+ await this.recordJobEvent(row.id, JobEventType.Edited, metadata);
970
+ }
971
+
972
+ log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
973
+ return editedCount;
974
+ } catch (error) {
975
+ log(`Error editing pending jobs: ${error}`);
976
+ throw error;
977
+ } finally {
978
+ client.release();
979
+ }
980
+ }
981
+
982
+ async cleanupOldJobs(daysToKeep = 30): Promise<number> {
983
+ const client = await this.pool.connect();
984
+ try {
985
+ const result = await client.query(
986
+ `
987
+ DELETE FROM job_queue
988
+ WHERE status = 'completed'
989
+ AND updated_at < NOW() - INTERVAL '1 day' * $1::int
990
+ RETURNING id
991
+ `,
992
+ [daysToKeep],
993
+ );
994
+ log(`Deleted ${result.rowCount} old jobs`);
995
+ return result.rowCount || 0;
996
+ } catch (error) {
997
+ log(`Error cleaning up old jobs: ${error}`);
998
+ throw error;
999
+ } finally {
1000
+ client.release();
1001
+ }
1002
+ }
1003
+
1004
+ async cleanupOldJobEvents(daysToKeep = 30): Promise<number> {
1005
+ const client = await this.pool.connect();
1006
+ try {
1007
+ const result = await client.query(
1008
+ `
1009
+ DELETE FROM job_events
1010
+ WHERE created_at < NOW() - INTERVAL '1 day' * $1::int
1011
+ RETURNING id
1012
+ `,
1013
+ [daysToKeep],
1014
+ );
1015
+ log(`Deleted ${result.rowCount} old job events`);
1016
+ return result.rowCount || 0;
1017
+ } catch (error) {
1018
+ log(`Error cleaning up old job events: ${error}`);
1019
+ throw error;
1020
+ } finally {
1021
+ client.release();
1022
+ }
1023
+ }
1024
+
1025
+ async reclaimStuckJobs(maxProcessingTimeMinutes = 10): Promise<number> {
1026
+ const client = await this.pool.connect();
1027
+ try {
1028
+ const result = await client.query(
1029
+ `
1030
+ UPDATE job_queue
1031
+ SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
1032
+ WHERE status = 'processing'
1033
+ AND locked_at < NOW() - GREATEST(
1034
+ INTERVAL '1 minute' * $1::int,
1035
+ INTERVAL '1 millisecond' * COALESCE(timeout_ms, 0)
1036
+ )
1037
+ RETURNING id
1038
+ `,
1039
+ [maxProcessingTimeMinutes],
1040
+ );
1041
+ log(`Reclaimed ${result.rowCount} stuck jobs`);
1042
+ return result.rowCount || 0;
1043
+ } catch (error) {
1044
+ log(`Error reclaiming stuck jobs: ${error}`);
1045
+ throw error;
1046
+ } finally {
1047
+ client.release();
1048
+ }
1049
+ }
1050
+
1051
+ // ── Internal helpers ──────────────────────────────────────────────────
1052
+
1053
+ /**
1054
+ * Batch-insert multiple job events in a single query.
1055
+ * More efficient than individual recordJobEvent calls.
1056
+ */
1057
+ private async recordJobEventsBatch(
1058
+ events: { jobId: number; eventType: JobEventType; metadata?: any }[],
1059
+ ): Promise<void> {
1060
+ if (events.length === 0) return;
1061
+ const client = await this.pool.connect();
1062
+ try {
1063
+ const values: string[] = [];
1064
+ const params: any[] = [];
1065
+ let paramIdx = 1;
1066
+ for (const event of events) {
1067
+ values.push(`($${paramIdx++}, $${paramIdx++}, $${paramIdx++})`);
1068
+ params.push(
1069
+ event.jobId,
1070
+ event.eventType,
1071
+ event.metadata ? JSON.stringify(event.metadata) : null,
1072
+ );
1073
+ }
1074
+ await client.query(
1075
+ `INSERT INTO job_events (job_id, event_type, metadata) VALUES ${values.join(', ')}`,
1076
+ params,
1077
+ );
1078
+ } catch (error) {
1079
+ log(`Error recording batch job events: ${error}`);
1080
+ // Do not throw, to avoid interfering with main job logic
1081
+ } finally {
1082
+ client.release();
1083
+ }
1084
+ }
1085
+
1086
+ async setPendingReasonForUnpickedJobs(
1087
+ reason: string,
1088
+ jobType?: string | string[],
1089
+ ): Promise<void> {
1090
+ const client = await this.pool.connect();
1091
+ try {
1092
+ let jobTypeFilter = '';
1093
+ const params: any[] = [reason];
1094
+ if (jobType) {
1095
+ if (Array.isArray(jobType)) {
1096
+ jobTypeFilter = ` AND job_type = ANY($2)`;
1097
+ params.push(jobType);
1098
+ } else {
1099
+ jobTypeFilter = ` AND job_type = $2`;
1100
+ params.push(jobType);
1101
+ }
1102
+ }
1103
+ await client.query(
1104
+ `UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
1105
+ params,
1106
+ );
1107
+ } finally {
1108
+ client.release();
1109
+ }
1110
+ }
1111
+ }