@nicnocquee/dataqueue 1.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,659 @@
1
+ import { AsyncLocalStorage } from 'async_hooks';
2
+ import { Pool } from 'pg';
3
+
4
+ // src/types.ts
5
// Lifecycle event names written to the job_events audit table.
// Built with the same IIFE shape the bundler emits for TS enums.
var JobEventType = /* @__PURE__ */ ((values) => {
  const entries = [
    ["Added", "added"],
    ["Processing", "processing"],
    ["Completed", "completed"],
    ["Failed", "failed"],
    ["Cancelled", "cancelled"],
    ["Retried", "retried"]
  ];
  for (const [key, value] of entries) {
    values[key] = value;
  }
  return values;
})(JobEventType || {});
14
// Why a job ended up in the 'failed' state (stored in failure_reason).
var FailureReason = /* @__PURE__ */ ((values) => {
  const entries = [
    ["Timeout", "timeout"],
    ["HandlerError", "handler_error"],
    ["NoHandler", "no_handler"]
  ];
  for (const [key, value] of entries) {
    values[key] = value;
  }
  return values;
})(FailureReason || {});
20
// Per-async-context logging state: each API entry point stamps a { verbose }
// flag into AsyncLocalStorage so nested helpers can decide whether to log
// without threading the flag through every call.
var logStorage = new AsyncLocalStorage();
var setLogContext = (verbose) => logStorage.enterWith({ verbose });
var getLogContext = () => logStorage.getStore();
// Log only when the current async context was created with verbose = true.
var log = (message) => {
  if (getLogContext()?.verbose) console.log(message);
};
33
+
34
+ // src/queue.ts
35
// Best-effort insert of one audit row into job_events. Errors are logged
// (when verbose) and swallowed so event bookkeeping can never break the
// main job flow; the client is always released.
var recordJobEvent = async (pool, jobId, eventType, metadata) => {
  const client = await pool.connect();
  const serialized = metadata ? JSON.stringify(metadata) : null;
  try {
    await client.query(
      `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
      [jobId, eventType, serialized]
    );
  } catch (error) {
    log(`Error recording job event for job ${jobId}: ${error}`);
  } finally {
    client.release();
  }
};
48
// Insert a new job row and record an "added" audit event.
// run_at chooses between two inserts: a scheduled job (run_at column set
// explicitly) or an immediate one (run_at left to the table default).
// Resolves with the new job id; logs and rethrows on insert failure.
var addJob = async (pool, {
  job_type,
  payload,
  max_attempts = 3,
  priority = 0,
  run_at = null,
  timeoutMs = void 0
}) => {
  const client = await pool.connect();
  try {
    const timeout = timeoutMs ?? null;
    let result;
    if (run_at) {
      result = await client.query(
        `INSERT INTO job_queue
       (job_type, payload, max_attempts, priority, run_at, timeout_ms)
       VALUES ($1, $2, $3, $4, $5, $6)
       RETURNING id`,
        [job_type, payload, max_attempts, priority, run_at, timeout]
      );
      log(
        `Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, run_at ${run_at.toISOString()}, priority ${priority}, max_attempts ${max_attempts} job_type ${job_type}`
      );
    } else {
      result = await client.query(
        `INSERT INTO job_queue
       (job_type, payload, max_attempts, priority, timeout_ms)
       VALUES ($1, $2, $3, $4, $5)
       RETURNING id`,
        [job_type, payload, max_attempts, priority, timeout]
      );
      log(
        `Added job ${result.rows[0].id}: payload ${JSON.stringify(payload)}, priority ${priority}, max_attempts ${max_attempts} job_type ${job_type}`
      );
    }
    const newJobId = result.rows[0].id;
    await recordJobEvent(pool, newJobId, "added" /* Added */, {
      job_type,
      payload
    });
    return newJobId;
  } catch (error) {
    log(`Error adding job: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
94
// Look up a single job by id. Returns the stored row (payload, timeout_ms
// and failure_reason passed through as stored) or null when no row matches.
var getJob = async (pool, id) => {
  const client = await pool.connect();
  try {
    const { rows } = await client.query("SELECT * FROM job_queue WHERE id = $1", [id]);
    if (!rows.length) {
      log(`Job ${id} not found`);
      return null;
    }
    log(`Found job ${id}`);
    const [row] = rows;
    return {
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms,
      failure_reason: row.failure_reason
    };
  } catch (error) {
    log(`Error getting job ${id}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
118
// Page through jobs in a given status, newest first.
var getJobsByStatus = async (pool, status, limit = 100, offset = 0) => {
  const client = await pool.connect();
  try {
    const { rows } = await client.query(
      "SELECT * FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3",
      [status, limit, offset]
    );
    log(`Found ${rows.length} jobs by status ${status}`);
    return rows.map((row) => ({
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms,
      failure_reason: row.failure_reason
    }));
  } catch (error) {
    log(`Error getting jobs by status ${status}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
139
// Atomically claim up to `batchSize` runnable jobs for one worker.
//
// A single UPDATE ... WHERE id IN (SELECT ... FOR UPDATE SKIP LOCKED)
// inside an explicit transaction both picks and locks the rows, so
// concurrent workers polling the same table cannot claim the same job
// (locked rows are skipped, not waited on).
//
// Runnable means: status 'pending', or 'failed' with next_attempt_at due;
// attempts below max_attempts; run_at in the past. Highest priority first,
// then oldest created_at. `jobType` (string or array) optionally narrows
// the claim to specific job types.
var getNextBatch = async (pool, workerId, batchSize = 10, jobType) => {
  const client = await pool.connect();
  try {
    await client.query("BEGIN");
    // Build the optional job-type predicate; the value is always bound as
    // parameter $3 (ANY(...) for an array, plain equality for a string).
    let jobTypeFilter = "";
    let params = [workerId, batchSize];
    if (jobType) {
      if (Array.isArray(jobType)) {
        jobTypeFilter = ` AND job_type = ANY($3)`;
        params.push(jobType);
      } else {
        jobTypeFilter = ` AND job_type = $3`;
        params.push(jobType);
      }
    }
    // The UPDATE marks claimed rows 'processing', records the worker and
    // lock time, bumps attempts, clears any pending_reason, stamps
    // started_at once (COALESCE), and notes a retry timestamp when the row
    // had already been attempted (attempts refers to the pre-update value).
    const result = await client.query(
      `
    UPDATE job_queue
    SET status = 'processing',
        locked_at = NOW(),
        locked_by = $1,
        attempts = attempts + 1,
        updated_at = NOW(),
        pending_reason = NULL,
        started_at = COALESCE(started_at, NOW()),
        last_retried_at = CASE WHEN attempts > 0 THEN NOW() ELSE last_retried_at END
    WHERE id IN (
      SELECT id FROM job_queue
      WHERE (status = 'pending' OR (status = 'failed' AND next_attempt_at <= NOW()))
        AND (attempts < max_attempts)
        AND run_at <= NOW()
        ${jobTypeFilter}
      ORDER BY priority DESC, created_at ASC
      LIMIT $2
      FOR UPDATE SKIP LOCKED
    )
    RETURNING *
    `,
      params
    );
    log(`Found ${result.rows.length} jobs to process`);
    await client.query("COMMIT");
    // Audit events are recorded after COMMIT, best-effort (recordJobEvent
    // swallows its own errors), so they cannot roll back the claim.
    for (const row of result.rows) {
      await recordJobEvent(pool, row.id, "processing" /* Processing */);
    }
    return result.rows.map((row) => ({
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms
    }));
  } catch (error) {
    log(`Error getting next batch: ${error}`);
    await client.query("ROLLBACK");
    throw error;
  } finally {
    client.release();
  }
};
197
// Mark a job as completed (sets completed_at) and record a "completed"
// audit event. Logs and rethrows on failure.
//
// Fix: the success message used to be emitted from `finally`, so
// "Completed job N" was logged even when the UPDATE threw. It now runs
// only after the update and event recording succeed.
var completeJob = async (pool, jobId) => {
  const client = await pool.connect();
  try {
    await client.query(
      `
    UPDATE job_queue
    SET status = 'completed', updated_at = NOW(), completed_at = NOW()
    WHERE id = $1
  `,
      [jobId]
    );
    await recordJobEvent(pool, jobId, "completed" /* Completed */);
    log(`Completed job ${jobId}`);
  } catch (error) {
    log(`Error completing job ${jobId}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
217
// Mark a job as failed: schedule the next retry with exponential backoff
// (2^attempts minutes, only while attempts remain), append the error to the
// error_history jsonb array, and store the failure reason. A "failed" audit
// event is recorded afterwards.
//
// Fix: the "Failed job N" message used to be emitted from `finally`, so it
// appeared even when the UPDATE itself threw; it now runs only on success.
var failJob = async (pool, jobId, error, failureReason) => {
  const client = await pool.connect();
  try {
    await client.query(
      `
    UPDATE job_queue
    SET status = 'failed',
        updated_at = NOW(),
        next_attempt_at = CASE
          WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
          ELSE NULL
        END,
        error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
        failure_reason = $3,
        last_failed_at = NOW()
    WHERE id = $1
  `,
      [
        jobId,
        JSON.stringify([
          {
            message: error.message || String(error),
            timestamp: (/* @__PURE__ */ new Date()).toISOString()
          }
        ]),
        failureReason ?? null
      ]
    );
    await recordJobEvent(pool, jobId, "failed" /* Failed */, {
      message: error.message || String(error),
      failureReason
    });
    log(`Failed job ${jobId}`);
  } catch (error2) {
    log(`Error failing job ${jobId}: ${error2}`);
    throw error2;
  } finally {
    client.release();
  }
};
257
// Manually reset a job to 'pending' (clearing its lock and making it
// immediately eligible) and record a "retried" audit event.
//
// Fix: the "Retried job N" message used to be emitted from `finally`, so it
// was logged even when the UPDATE threw; it now runs only on success.
var retryJob = async (pool, jobId) => {
  const client = await pool.connect();
  try {
    await client.query(
      `
    UPDATE job_queue
    SET status = 'pending',
        updated_at = NOW(),
        locked_at = NULL,
        locked_by = NULL,
        next_attempt_at = NOW(),
        last_retried_at = NOW()
    WHERE id = $1
  `,
      [jobId]
    );
    await recordJobEvent(pool, jobId, "retried" /* Retried */);
    log(`Retried job ${jobId}`);
  } catch (error) {
    log(`Error retrying job ${jobId}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
282
// Delete completed jobs whose updated_at is older than `daysToKeep` days.
// Returns the number of rows deleted.
//
// Fix: `daysToKeep` was interpolated directly into the SQL string
// (`INTERVAL '${daysToKeep} days'`), which breaks on non-numeric input and
// is an injection hazard if a caller ever passes an untrusted value. It is
// now bound as a parameter and multiplied by a one-day interval.
var cleanupOldJobs = async (pool, daysToKeep = 30) => {
  const client = await pool.connect();
  try {
    const result = await client.query(
      `
    DELETE FROM job_queue
    WHERE status = 'completed'
    AND updated_at < NOW() - ($1 * INTERVAL '1 day')
    RETURNING id
  `,
      [daysToKeep]
    );
    log(`Deleted ${result.rowCount} old jobs`);
    return result.rowCount || 0;
  } catch (error) {
    log(`Error cleaning up old jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
300
// Cancel a job, but only while it is still 'pending' (processing/completed
// jobs are left untouched by the WHERE clause).
//
// Fixes: (1) the "Cancelled job N" message used to be emitted from
// `finally`, even when the UPDATE threw; (2) a "cancelled" audit event was
// recorded even when no pending row matched, i.e. when nothing was actually
// cancelled. Both now happen only when a row was updated.
var cancelJob = async (pool, jobId) => {
  const client = await pool.connect();
  try {
    const result = await client.query(
      `
    UPDATE job_queue
    SET status = 'cancelled', updated_at = NOW(), last_cancelled_at = NOW()
    WHERE id = $1 AND status = 'pending'
  `,
      [jobId]
    );
    if (result.rowCount) {
      await recordJobEvent(pool, jobId, "cancelled" /* Cancelled */);
      log(`Cancelled job ${jobId}`);
    }
  } catch (error) {
    log(`Error cancelling job ${jobId}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
320
// Cancel every pending job, optionally narrowed by job_type, exact priority
// and/or exact run_at. Returns how many rows were cancelled.
var cancelAllUpcomingJobs = async (pool, filters) => {
  const client = await pool.connect();
  try {
    // Collect AND-clauses and their bound values; parameter numbers follow
    // insertion order (job_type, then priority, then run_at).
    const conditions = [];
    const params = [];
    if (filters) {
      if (filters.job_type) {
        params.push(filters.job_type);
        conditions.push(` AND job_type = $${params.length}`);
      }
      if (filters.priority !== void 0) {
        params.push(filters.priority);
        conditions.push(` AND priority = $${params.length}`);
      }
      if (filters.run_at) {
        params.push(filters.run_at);
        conditions.push(` AND run_at = $${params.length}`);
      }
    }
    const query = `
    UPDATE job_queue
    SET status = 'cancelled', updated_at = NOW()
    WHERE status = 'pending'` + conditions.join("") + "\nRETURNING id";
    const result = await client.query(query, params);
    log(`Cancelled ${result.rowCount} jobs`);
    return result.rowCount || 0;
  } catch (error) {
    log(`Error cancelling upcoming jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
354
// Page through every job regardless of status, newest first.
var getAllJobs = async (pool, limit = 100, offset = 0) => {
  const client = await pool.connect();
  try {
    const { rows } = await client.query(
      "SELECT * FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2",
      [limit, offset]
    );
    log(`Found ${rows.length} jobs (all)`);
    return rows.map((row) => ({
      ...row,
      payload: row.payload,
      timeout_ms: row.timeout_ms
    }));
  } catch (error) {
    log(`Error getting all jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
374
// Stamp a human-readable pending_reason onto pending jobs — optionally only
// those of one job type (string) or several (array) — so operators can see
// why work is not being picked up.
var setPendingReasonForUnpickedJobs = async (pool, reason, jobType) => {
  const client = await pool.connect();
  try {
    const params = [reason];
    let jobTypeFilter = "";
    if (jobType) {
      params.push(jobType);
      jobTypeFilter = Array.isArray(jobType) ? ` AND job_type = ANY($2)` : ` AND job_type = $2`;
    }
    await client.query(
      `UPDATE job_queue SET pending_reason = $1 WHERE status = 'pending'${jobTypeFilter}`,
      params
    );
  } finally {
    client.release();
  }
};
396
// Return jobs stuck in 'processing' (e.g. after a worker crash) back to
// 'pending' once they have held their lock longer than the cutoff.
// Returns the number of reclaimed rows.
//
// Fix: `maxProcessingTimeMinutes` was interpolated directly into the SQL
// (`INTERVAL '${...} minutes'`) — an injection hazard and fragile for
// non-numeric input. It is now bound as a parameter multiplied by a
// one-minute interval.
var reclaimStuckJobs = async (pool, maxProcessingTimeMinutes = 10) => {
  const client = await pool.connect();
  try {
    const result = await client.query(
      `
    UPDATE job_queue
    SET status = 'pending', locked_at = NULL, locked_by = NULL, updated_at = NOW()
    WHERE status = 'processing'
    AND locked_at < NOW() - ($1 * INTERVAL '1 minute')
    RETURNING id
  `,
      [maxProcessingTimeMinutes]
    );
    log(`Reclaimed ${result.rowCount} stuck jobs`);
    return result.rowCount || 0;
  } catch (error) {
    log(`Error reclaiming stuck jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
417
// Return the full audit trail for one job, oldest event first.
var getJobEvents = async (pool, jobId) => {
  const client = await pool.connect();
  try {
    const result = await client.query(
      "SELECT * FROM job_events WHERE job_id = $1 ORDER BY created_at ASC",
      [jobId]
    );
    return result.rows;
  } finally {
    client.release();
  }
};
429
+
430
+ // src/processor.ts
431
// Run one claimed job through its registered handler, enforcing the job's
// optional timeout_ms, then mark the job completed or failed accordingly.
// A job with no registered handler is failed with reason "no_handler", and
// other still-pending jobs of that type get a pending_reason annotation.
// The handler receives (payload, AbortSignal); the signal is aborted when
// the timeout fires so cooperative handlers can stop early.
async function processJobWithHandlers(pool, job, jobHandlers) {
  const handler = jobHandlers[job.job_type];
  if (!handler) {
    await setPendingReasonForUnpickedJobs(
      pool,
      `No handler registered for job type: ${job.job_type}`,
      job.job_type
    );
    await failJob(
      pool,
      job.id,
      new Error(`No handler registered for job type: ${job.job_type}`),
      "no_handler" /* NoHandler */
    );
    return;
  }
  const timeoutMs = job.timeout_ms ?? void 0;
  let timeoutId;
  const controller = new AbortController();
  try {
    const jobPromise = handler(job.payload, controller.signal);
    if (timeoutMs && timeoutMs > 0) {
      // Fix: if the timeout wins the race, `jobPromise` may still reject
      // later; that rejection was previously unobserved and could crash the
      // process as an unhandled promise rejection. Attach a no-op catch so
      // it is always observed — Promise.race subscribes independently, so
      // this does not hide the error from the race below.
      Promise.resolve(jobPromise).catch(() => {});
      await Promise.race([
        jobPromise,
        new Promise((_, reject) => {
          timeoutId = setTimeout(() => {
            controller.abort();
            const timeoutError = new Error(
              `Job timed out after ${timeoutMs} ms`
            );
            timeoutError.failureReason = "timeout" /* Timeout */;
            reject(timeoutError);
          }, timeoutMs);
        })
      ]);
    } else {
      await jobPromise;
    }
    if (timeoutId) clearTimeout(timeoutId);
    await completeJob(pool, job.id);
  } catch (error) {
    if (timeoutId) clearTimeout(timeoutId);
    console.error(`Error processing job ${job.id}:`, error);
    // Default to handler_error; the timeout error carries a marker property
    // set above so it can be classified precisely.
    let failureReason = "handler_error" /* HandlerError */;
    if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
      failureReason = "timeout" /* Timeout */;
    }
    await failJob(
      pool,
      job.id,
      error instanceof Error ? error : new Error(String(error)),
      failureReason
    );
  }
}
486
// Claim one batch of jobs and process it, optionally capping how many
// handlers run at once. Resolves with the size of the claimed batch.
async function processBatchWithHandlers(pool, workerId, batchSize, jobType, jobHandlers, concurrency) {
  const jobs = await getNextBatch(pool, workerId, batchSize, jobType);
  // No cap, or a cap large enough for the whole batch: run all in parallel.
  if (!concurrency || concurrency >= jobs.length) {
    await Promise.all(
      jobs.map((job) => processJobWithHandlers(pool, job, jobHandlers))
    );
    return jobs.length;
  }
  // Bounded worker pool: keep at most `concurrency` jobs in flight, starting
  // a new one each time a slot frees up, until the whole batch is done.
  let nextIndex = 0;
  let inFlight = 0;
  let done = 0;
  return new Promise((resolve) => {
    const pump = () => {
      if (done === jobs.length) {
        resolve(jobs.length);
        return;
      }
      while (inFlight < concurrency && nextIndex < jobs.length) {
        const job = jobs[nextIndex];
        nextIndex += 1;
        inFlight += 1;
        // Success and failure are treated the same here: free the slot and
        // keep pumping (per-job errors are already handled downstream).
        const settle = () => {
          inFlight -= 1;
          done += 1;
          pump();
        };
        processJobWithHandlers(pool, job, jobHandlers).then(settle, settle);
      }
    };
    pump();
  });
}
522
// Build a polling job processor bound to a pool and a handler map.
// Options: workerId (random default), batchSize, pollInterval (ms),
// onError callback, jobType filter, concurrency cap, verbose logging.
var createProcessor = (pool, handlers, options = {}) => {
  const {
    workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
    batchSize = 10,
    pollInterval = 5e3,
    onError = (error) => console.error("Job processor error:", error),
    jobType,
    concurrency = 3
  } = options;
  // Closure state shared by the returned methods: whether the processor is
  // active, and the polling timer handle (null when not polling).
  let running = false;
  let intervalId = null;
  setLogContext(options.verbose ?? false);
  // Process one batch. Errors are routed to onError; always resolves with
  // the number of jobs processed (0 when stopped or on error).
  const processJobs = async () => {
    if (!running) return 0;
    log(
      `Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(",") : jobType}` : ""}`
    );
    try {
      const processed = await processBatchWithHandlers(
        pool,
        workerId,
        batchSize,
        jobType,
        handlers,
        concurrency
      );
      return processed;
    } catch (error) {
      onError(error instanceof Error ? error : new Error(String(error)));
    }
    return 0;
  };
  return {
    /**
     * Start the job processor in the background.
     * - This will run periodically (every pollInterval milliseconds or 5 seconds if not provided) and process jobs as they become available.
     * - You have to call the stop method to stop the processor.
     */
    startInBackground: () => {
      if (running) return;
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      // Drain-fast loop: a full batch suggests more work is queued, so chain
      // another run via setImmediate instead of waiting for the next poll.
      const processBatches = async () => {
        if (!running) return;
        const processed = await processJobs();
        if (processed === batchSize && running) {
          setImmediate(processBatches);
        }
      };
      processBatches();
      // NOTE(review): interval ticks and the drain loop are not mutually
      // exclusive, so two processJobs calls can overlap; presumably safe
      // because batch claiming uses SKIP LOCKED — confirm intended.
      intervalId = setInterval(processJobs, pollInterval);
    },
    /**
     * Stop the job processor that runs in the background
     */
    stop: () => {
      log(`Stopping job processor with workerId: ${workerId}`);
      running = false;
      if (intervalId) {
        clearInterval(intervalId);
        intervalId = null;
      }
    },
    /**
     * Start the job processor synchronously.
     * - This will process all jobs immediately and then stop.
     * - The pollInterval is ignored.
     */
    start: async () => {
      log(`Starting job processor with workerId: ${workerId}`);
      running = true;
      const processed = await processJobs();
      running = false;
      return processed;
    },
    // True while the processor is active (background or synchronous run).
    isRunning: () => running
  };
};
600
// Thin wrapper around the pg connection-pool constructor.
var createPool = (config) => new Pool(config);
603
+
604
+ // src/index.ts
605
// Build the public job-queue API bound to a fresh connection pool.
// Every operation is wrapped with withLogContext so its log() calls honor
// config.verbose regardless of which async context invokes it.
//
// Fix (consistency): retryJob and cleanupOldJobs were the only operations
// not wrapped in withLogContext, so their log() output ignored the
// configured verbosity; they are now wrapped like the rest.
var initJobQueue = async (config) => {
  const { databaseConfig } = config;
  const pool = createPool(databaseConfig);
  const verbose = config.verbose ?? false;
  setLogContext(verbose);
  return {
    // Job queue operations
    addJob: withLogContext((job) => addJob(pool, job), verbose),
    getJob: withLogContext((id) => getJob(pool, id), verbose),
    getJobsByStatus: withLogContext(
      (status, limit, offset) => getJobsByStatus(pool, status, limit, offset),
      verbose
    ),
    getAllJobs: withLogContext(
      (limit, offset) => getAllJobs(pool, limit, offset),
      verbose
    ),
    retryJob: withLogContext((jobId) => retryJob(pool, jobId), verbose),
    cleanupOldJobs: withLogContext(
      (daysToKeep) => cleanupOldJobs(pool, daysToKeep),
      verbose
    ),
    cancelJob: withLogContext((jobId) => cancelJob(pool, jobId), verbose),
    cancelAllUpcomingJobs: withLogContext(
      (filters) => cancelAllUpcomingJobs(pool, filters),
      verbose
    ),
    reclaimStuckJobs: withLogContext(
      (maxProcessingTimeMinutes) => reclaimStuckJobs(pool, maxProcessingTimeMinutes),
      verbose
    ),
    // Job processing
    createProcessor: (handlers, options) => createProcessor(pool, handlers, options),
    // Advanced access (for custom operations)
    getPool: () => pool,
    // Job events
    getJobEvents: withLogContext((jobId) => getJobEvents(pool, jobId), verbose)
  };
};
652
// Wrap an API function so the verbose logging context is (re)established on
// every invocation before delegating to the wrapped function.
var withLogContext = (fn, verbose) => {
  return (...args) => {
    setLogContext(verbose);
    return fn(...args);
  };
};
656
+
657
+ export { FailureReason, JobEventType, initJobQueue };
658
+ //# sourceMappingURL=index.js.map
659
+ //# sourceMappingURL=index.js.map