@nicnocquee/dataqueue 1.21.0 → 1.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -1,8 +1,14 @@
1
1
  'use strict';
2
2
 
3
3
  var async_hooks = require('async_hooks');
4
+ var worker_threads = require('worker_threads');
4
5
  var pg = require('pg');
5
6
  var pgConnectionString = require('pg-connection-string');
7
+ var fs = require('fs');
8
+
9
+ function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
10
+
11
+ var fs__default = /*#__PURE__*/_interopDefault(fs);
6
12
 
7
13
  // src/types.ts
8
14
  var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
@@ -12,6 +18,7 @@ var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
12
18
  JobEventType2["Failed"] = "failed";
13
19
  JobEventType2["Cancelled"] = "cancelled";
14
20
  JobEventType2["Retried"] = "retried";
21
+ JobEventType2["Edited"] = "edited";
15
22
  return JobEventType2;
16
23
  })(JobEventType || {});
17
24
  var FailureReason = /* @__PURE__ */ ((FailureReason3) => {
@@ -55,6 +62,7 @@ var addJob = async (pool, {
55
62
  priority = 0,
56
63
  runAt = null,
57
64
  timeoutMs = void 0,
65
+ forceKillOnTimeout = false,
58
66
  tags = void 0
59
67
  }) => {
60
68
  const client = await pool.connect();
@@ -63,8 +71,8 @@ var addJob = async (pool, {
63
71
  if (runAt) {
64
72
  result = await client.query(
65
73
  `INSERT INTO job_queue
66
- (job_type, payload, max_attempts, priority, run_at, timeout_ms, tags)
67
- VALUES ($1, $2, $3, $4, $5, $6, $7)
74
+ (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags)
75
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
68
76
  RETURNING id`,
69
77
  [
70
78
  jobType,
@@ -73,6 +81,7 @@ var addJob = async (pool, {
73
81
  priority,
74
82
  runAt,
75
83
  timeoutMs ?? null,
84
+ forceKillOnTimeout ?? false,
76
85
  tags ?? null
77
86
  ]
78
87
  );
@@ -82,8 +91,8 @@ var addJob = async (pool, {
82
91
  } else {
83
92
  result = await client.query(
84
93
  `INSERT INTO job_queue
85
- (job_type, payload, max_attempts, priority, timeout_ms, tags)
86
- VALUES ($1, $2, $3, $4, $5, $6)
94
+ (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags)
95
+ VALUES ($1, $2, $3, $4, $5, $6, $7)
87
96
  RETURNING id`,
88
97
  [
89
98
  jobType,
@@ -91,6 +100,7 @@ var addJob = async (pool, {
91
100
  maxAttempts,
92
101
  priority,
93
102
  timeoutMs ?? null,
103
+ forceKillOnTimeout ?? false,
94
104
  tags ?? null
95
105
  ]
96
106
  );
@@ -115,7 +125,7 @@ var getJob = async (pool, id) => {
115
125
  const client = await pool.connect();
116
126
  try {
117
127
  const result = await client.query(
118
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason" FROM job_queue WHERE id = $1`,
128
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags FROM job_queue WHERE id = $1`,
119
129
  [id]
120
130
  );
121
131
  if (result.rows.length === 0) {
@@ -128,6 +138,7 @@ var getJob = async (pool, id) => {
128
138
  ...job,
129
139
  payload: job.payload,
130
140
  timeoutMs: job.timeoutMs,
141
+ forceKillOnTimeout: job.forceKillOnTimeout,
131
142
  failureReason: job.failureReason
132
143
  };
133
144
  } catch (error) {
@@ -141,7 +152,7 @@ var getJobsByStatus = async (pool, status, limit = 100, offset = 0) => {
141
152
  const client = await pool.connect();
142
153
  try {
143
154
  const result = await client.query(
144
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason" FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
155
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason" FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
145
156
  [status, limit, offset]
146
157
  );
147
158
  log(`Found ${result.rows.length} jobs by status ${status}`);
@@ -149,6 +160,7 @@ var getJobsByStatus = async (pool, status, limit = 100, offset = 0) => {
149
160
  ...job,
150
161
  payload: job.payload,
151
162
  timeoutMs: job.timeoutMs,
163
+ forceKillOnTimeout: job.forceKillOnTimeout,
152
164
  failureReason: job.failureReason
153
165
  }));
154
166
  } catch (error) {
@@ -194,7 +206,7 @@ var getNextBatch = async (pool, workerId, batchSize = 10, jobType) => {
194
206
  LIMIT $2
195
207
  FOR UPDATE SKIP LOCKED
196
208
  )
197
- RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason"
209
+ RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason"
198
210
  `,
199
211
  params
200
212
  );
@@ -206,7 +218,8 @@ var getNextBatch = async (pool, workerId, batchSize = 10, jobType) => {
206
218
  return result.rows.map((job) => ({
207
219
  ...job,
208
220
  payload: job.payload,
209
- timeoutMs: job.timeoutMs
221
+ timeoutMs: job.timeoutMs,
222
+ forceKillOnTimeout: job.forceKillOnTimeout
210
223
  }));
211
224
  } catch (error) {
212
225
  log(`Error getting next batch: ${error}`);
@@ -339,6 +352,198 @@ var cancelJob = async (pool, jobId) => {
339
352
  client.release();
340
353
  }
341
354
  };
355
/**
 * Edit a single PENDING job's mutable fields and record an "edited"
 * job event.
 *
 * Only jobs still in status 'pending' are touched. Passing
 * `runAt: null` reschedules the job to run immediately (run_at = NOW()).
 * Keys left undefined in `updates` are not modified.
 *
 * @param pool    pg connection pool.
 * @param jobId   id of the job to edit.
 * @param updates partial fields: payload, maxAttempts, priority, runAt,
 *                timeoutMs, tags.
 * @returns {Promise<void>}
 * @throws rethrows any database error after logging it.
 */
var editJob = async (pool, jobId, updates) => {
  const client = await pool.connect();
  try {
    const updateFields = [];
    const params = [];
    let paramIdx = 1;
    if (updates.payload !== void 0) {
      updateFields.push(`payload = $${paramIdx++}`);
      params.push(updates.payload);
    }
    if (updates.maxAttempts !== void 0) {
      updateFields.push(`max_attempts = $${paramIdx++}`);
      params.push(updates.maxAttempts);
    }
    if (updates.priority !== void 0) {
      updateFields.push(`priority = $${paramIdx++}`);
      params.push(updates.priority);
    }
    if (updates.runAt !== void 0) {
      if (updates.runAt === null) {
        // null means "run as soon as possible"
        updateFields.push(`run_at = NOW()`);
      } else {
        updateFields.push(`run_at = $${paramIdx++}`);
        params.push(updates.runAt);
      }
    }
    if (updates.timeoutMs !== void 0) {
      updateFields.push(`timeout_ms = $${paramIdx++}`);
      params.push(updates.timeoutMs ?? null);
    }
    if (updates.tags !== void 0) {
      updateFields.push(`tags = $${paramIdx++}`);
      params.push(updates.tags ?? null);
    }
    if (updateFields.length === 0) {
      log(`No fields to update for job ${jobId}`);
      return;
    }
    updateFields.push(`updated_at = NOW()`);
    params.push(jobId);
    const query = `
      UPDATE job_queue
      SET ${updateFields.join(", ")}
      WHERE id = $${paramIdx} AND status = 'pending'
    `;
    const result = await client.query(query, params);
    // Fix: only record an "edited" event when a pending row was actually
    // updated. Previously the event was recorded and "Edited job" logged
    // even when the job did not exist or was no longer pending, producing
    // phantom audit events.
    if ((result.rowCount ?? 0) === 0) {
      log(`Job ${jobId} not edited (missing or not pending)`);
      return;
    }
    const metadata = {};
    if (updates.payload !== void 0) metadata.payload = updates.payload;
    if (updates.maxAttempts !== void 0)
      metadata.maxAttempts = updates.maxAttempts;
    if (updates.priority !== void 0) metadata.priority = updates.priority;
    if (updates.runAt !== void 0) metadata.runAt = updates.runAt;
    if (updates.timeoutMs !== void 0) metadata.timeoutMs = updates.timeoutMs;
    if (updates.tags !== void 0) metadata.tags = updates.tags;
    await recordJobEvent(pool, jobId, "edited" /* Edited */, metadata);
    log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
  } catch (error) {
    log(`Error editing job ${jobId}: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
418
/**
 * Apply one set of field updates to every job still in status 'pending',
 * optionally narrowed by jobType / priority / runAt / tag filters, and
 * record an "edited" job event per affected job.
 *
 * @param pool    pg connection pool.
 * @param filters optional narrowing criteria: jobType, priority, runAt
 *                (a Date for equality, or {gt,gte,lt,lte,eq} bounds) and
 *                tags ({values, mode: exact|all|any|none}, default "all").
 * @param updates fields to change; undefined keys are left untouched.
 *                `runAt: null` reschedules matches to run immediately.
 * @returns {Promise<number>} number of jobs updated.
 */
var editAllPendingJobs = async (pool, filters = void 0, updates) => {
  const client = await pool.connect();
  try {
    const setClauses = [];
    const params = [];
    let paramIdx = 1;
    // SET clauses are built in a fixed order so parameter indexes stay
    // deterministic: payload, max_attempts, priority, run_at, timeout_ms, tags.
    const columnFor = {
      payload: "payload",
      maxAttempts: "max_attempts",
      priority: "priority",
      runAt: "run_at",
      timeoutMs: "timeout_ms",
      tags: "tags"
    };
    for (const field of Object.keys(columnFor)) {
      const value = updates[field];
      if (value === void 0) continue;
      if (field === "runAt" && value === null) {
        // runAt: null means "as soon as possible".
        setClauses.push(`run_at = NOW()`);
        continue;
      }
      setClauses.push(`${columnFor[field]} = $${paramIdx++}`);
      params.push(value ?? null);
    }
    if (setClauses.length === 0) {
      log(`No fields to update for batch edit`);
      return 0;
    }
    setClauses.push(`updated_at = NOW()`);
    let query = `
    UPDATE job_queue
    SET ${setClauses.join(", ")}
    WHERE status = 'pending'`;
    if (filters) {
      if (filters.jobType) {
        query += ` AND job_type = $${paramIdx++}`;
        params.push(filters.jobType);
      }
      if (filters.priority !== void 0) {
        query += ` AND priority = $${paramIdx++}`;
        params.push(filters.priority);
      }
      if (filters.runAt) {
        if (filters.runAt instanceof Date) {
          // Bare Date means exact run_at equality.
          query += ` AND run_at = $${paramIdx++}`;
          params.push(filters.runAt);
        } else if (typeof filters.runAt === "object") {
          // Comparison operators, applied in a fixed order.
          const comparisons = { gt: ">", gte: ">=", lt: "<", lte: "<=", eq: "=" };
          for (const [op, sql] of Object.entries(comparisons)) {
            const bound = filters.runAt[op];
            if (bound) {
              query += ` AND run_at ${sql} $${paramIdx++}`;
              params.push(bound);
            }
          }
        }
      }
      const tagFilter = filters.tags;
      if (tagFilter && tagFilter.values && tagFilter.values.length > 0) {
        // Postgres array operators per match mode; unknown modes behave
        // like "all" (containment), matching the previous switch default.
        const clauseFor = {
          exact: (idx) => ` AND tags = $${idx}`,
          all: (idx) => ` AND tags @> $${idx}`,
          any: (idx) => ` AND tags && $${idx}`,
          none: (idx) => ` AND NOT (tags && $${idx})`
        };
        const mode = tagFilter.mode || "all";
        const buildClause = clauseFor[mode] ?? clauseFor.all;
        query += buildClause(paramIdx++);
        params.push(tagFilter.values);
      }
    }
    query += "\nRETURNING id";
    const result = await client.query(query, params);
    const editedCount = result.rowCount || 0;
    const metadata = {};
    for (const field of ["payload", "maxAttempts", "priority", "runAt", "timeoutMs", "tags"]) {
      if (updates[field] !== void 0) metadata[field] = updates[field];
    }
    for (const row of result.rows) {
      await recordJobEvent(pool, row.id, "edited" /* Edited */, metadata);
    }
    log(`Edited ${editedCount} pending jobs: ${JSON.stringify(metadata)}`);
    return editedCount;
  } catch (error) {
    log(`Error editing pending jobs: ${error}`);
    throw error;
  } finally {
    client.release();
  }
};
342
547
  var cancelAllUpcomingJobs = async (pool, filters) => {
343
548
  const client = await pool.connect();
344
549
  try {
@@ -426,14 +631,15 @@ var getAllJobs = async (pool, limit = 100, offset = 0) => {
426
631
  const client = await pool.connect();
427
632
  try {
428
633
  const result = await client.query(
429
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason" FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
634
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason" FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
430
635
  [limit, offset]
431
636
  );
432
637
  log(`Found ${result.rows.length} jobs (all)`);
433
638
  return result.rows.map((job) => ({
434
639
  ...job,
435
640
  payload: job.payload,
436
- timeoutMs: job.timeoutMs
641
+ timeoutMs: job.timeoutMs,
642
+ forceKillOnTimeout: job.forceKillOnTimeout
437
643
  }));
438
644
  } catch (error) {
439
645
  log(`Error getting all jobs: ${error}`);
@@ -534,6 +740,7 @@ var getJobsByTags = async (pool, tags, mode = "all", limit = 100, offset = 0) =>
534
740
  ...job,
535
741
  payload: job.payload,
536
742
  timeoutMs: job.timeoutMs,
743
+ forceKillOnTimeout: job.forceKillOnTimeout,
537
744
  failureReason: job.failureReason
538
745
  }));
539
746
  } catch (error) {
@@ -548,7 +755,7 @@ var getJobsByTags = async (pool, tags, mode = "all", limit = 100, offset = 0) =>
548
755
  var getJobs = async (pool, filters, limit = 100, offset = 0) => {
549
756
  const client = await pool.connect();
550
757
  try {
551
- let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags FROM job_queue`;
758
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags FROM job_queue`;
552
759
  const params = [];
553
760
  let where = [];
554
761
  let paramIdx = 1;
@@ -627,6 +834,7 @@ var getJobs = async (pool, filters, limit = 100, offset = 0) => {
627
834
  ...job,
628
835
  payload: job.payload,
629
836
  timeoutMs: job.timeoutMs,
837
+ forceKillOnTimeout: job.forceKillOnTimeout,
630
838
  failureReason: job.failureReason
631
839
  }));
632
840
  } catch (error) {
@@ -636,8 +844,178 @@ var getJobs = async (pool, filters, limit = 100, offset = 0) => {
636
844
  client.release();
637
845
  }
638
846
  };
639
-
640
- // src/processor.ts
847
/**
 * Throw if a job handler cannot survive the toString()/new Function
 * round trip used to ship it into a worker thread (forceKillOnTimeout).
 *
 * Checks, in order: apparent use of `this` (heuristic), native code in
 * the source, and whether the source parses back into an expression.
 *
 * @param handler job handler function.
 * @param jobType job type name, used in error messages.
 * @throws {Error} describing why the handler is not serializable.
 */
function validateHandlerSerializable(handler, jobType) {
  try {
    const source = handler.toString();
    // Heuristic: `this.` anywhere outside a parameter list suggests the
    // handler depends on a bound context that toString() cannot carry.
    const usesThisContext = source.includes("this.") && !source.match(/\([^)]*this[^)]*\)/);
    if (usesThisContext) {
      throw new Error(
        `Handler for job type "${jobType}" uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
      );
    }
    if (source.includes("[native code]")) {
      throw new Error(
        `Handler for job type "${jobType}" contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
      );
    }
    // Dry-run the same deserialization the worker thread will perform.
    try {
      new Function("return " + source);
    } catch (parseError) {
      const reason = parseError instanceof Error ? parseError.message : String(parseError);
      throw new Error(
        `Handler for job type "${jobType}" cannot be serialized: ${reason}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
      );
    }
  } catch (error) {
    // Propagate our own Errors untouched; wrap anything else.
    if (error instanceof Error) {
      throw error;
    }
    throw new Error(
      `Failed to validate handler serialization for job type "${jobType}": ${String(error)}`
    );
  }
}
876
/**
 * Run a job handler inside a dedicated worker thread so that a handler
 * which ignores its AbortSignal can still be forcefully terminated when
 * it exceeds timeoutMs (the forceKillOnTimeout behavior).
 *
 * The handler is serialized via Function.prototype.toString and
 * re-created inside the worker, so it must not close over external
 * variables; validateHandlerSerializable enforces that up front.
 *
 * @param handler   job handler `(payload, signal) => Promise<void>`.
 * @param payload   structured-cloneable payload passed to the worker.
 * @param timeoutMs hard time budget for the handler.
 * @param jobType   job type name, used in validation error messages.
 * @returns {Promise<void>} resolves on success; rejects with the handler
 *   error, or with an Error carrying failureReason "timeout" when the
 *   budget is exceeded (the worker is terminated in that case).
 */
async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
  validateHandlerSerializable(handler, jobType);
  return new Promise((resolve, reject) => {
    const workerCode = `
      (function() {
        const { parentPort, workerData } = require('worker_threads');
        const { handlerCode, payload, timeoutMs } = workerData;

        // Create an AbortController for the handler
        const controller = new AbortController();
        const signal = controller.signal;

        // Set up timeout
        const timeoutId = setTimeout(() => {
          controller.abort();
          parentPort.postMessage({ type: 'timeout' });
        }, timeoutMs);

        try {
          // Execute the handler
          // Note: This uses Function constructor which requires the handler to be serializable.
          // The handler should be validated before reaching this point.
          let handlerFn;
          try {
            // Wrap handlerCode in parentheses to ensure it's treated as an expression
            // This handles both arrow functions and regular functions
            const wrappedCode = handlerCode.trim().startsWith('async') || handlerCode.trim().startsWith('function')
              ? handlerCode
              : '(' + handlerCode + ')';
            handlerFn = new Function('return ' + wrappedCode)();
          } catch (parseError) {
            clearTimeout(timeoutId);
            parentPort.postMessage({
              type: 'error',
              error: {
                message: 'Handler cannot be deserialized in worker thread. ' +
                  'Ensure your handler is a standalone function without closures over external variables. ' +
                  'Original error: ' + (parseError instanceof Error ? parseError.message : String(parseError)),
                stack: parseError instanceof Error ? parseError.stack : undefined,
                name: 'SerializationError',
              },
            });
            return;
          }

          // Ensure handlerFn is actually a function
          if (typeof handlerFn !== 'function') {
            clearTimeout(timeoutId);
            parentPort.postMessage({
              type: 'error',
              error: {
                message: 'Handler deserialization did not produce a function. ' +
                  'Ensure your handler is a valid function when forceKillOnTimeout is enabled.',
                name: 'SerializationError',
              },
            });
            return;
          }

          handlerFn(payload, signal)
            .then(() => {
              clearTimeout(timeoutId);
              parentPort.postMessage({ type: 'success' });
            })
            .catch((error) => {
              clearTimeout(timeoutId);
              parentPort.postMessage({
                type: 'error',
                error: {
                  message: error.message,
                  stack: error.stack,
                  name: error.name,
                },
              });
            });
        } catch (error) {
          clearTimeout(timeoutId);
          parentPort.postMessage({
            type: 'error',
            error: {
              message: error.message,
              stack: error.stack,
              name: error.name,
            },
          });
        }
      })();
    `;
    const worker = new worker_threads.Worker(workerCode, {
      eval: true,
      workerData: {
        handlerCode: handler.toString(),
        payload,
        timeoutMs
      }
    });
    let resolved = false;
    let fallbackTimer;
    const makeTimeoutError = () => {
      const timeoutError = new Error(
        `Job timed out after ${timeoutMs} ms and was forcefully terminated`
      );
      timeoutError.failureReason = "timeout" /* Timeout */;
      return timeoutError;
    };
    worker.on("message", (message) => {
      if (resolved) return;
      resolved = true;
      // Fix: cancel the fallback timer as soon as the worker reports, so a
      // finished job no longer holds the event loop open for timeoutMs+100ms.
      clearTimeout(fallbackTimer);
      if (message.type === "success") {
        resolve();
      } else if (message.type === "timeout") {
        // Fix: actually kill the worker thread. Previously only the
        // in-worker AbortController was signalled, so a handler that
        // ignored its signal kept running in the worker forever.
        worker.terminate().then(
          () => reject(makeTimeoutError()),
          () => reject(makeTimeoutError())
        );
      } else if (message.type === "error") {
        const error = new Error(message.error.message);
        error.stack = message.error.stack;
        error.name = message.error.name;
        reject(error);
      }
    });
    worker.on("error", (error) => {
      if (resolved) return;
      resolved = true;
      clearTimeout(fallbackTimer);
      reject(error);
    });
    worker.on("exit", (code) => {
      if (resolved) return;
      if (code !== 0) {
        resolved = true;
        clearTimeout(fallbackTimer);
        reject(new Error(`Worker stopped with exit code ${code}`));
      }
      // code === 0 without a message: leave the fallback timer to settle
      // the promise (matches previous behavior).
    });
    // Fallback in case the worker never reports at all: terminate it
    // shortly after the deadline and reject with a timeout error.
    fallbackTimer = setTimeout(() => {
      if (!resolved) {
        resolved = true;
        worker.terminate().then(() => {
          reject(makeTimeoutError());
        }).catch((err) => {
          reject(err);
        });
      }
    }, timeoutMs + 100);
  });
}
641
1019
  async function processJobWithHandlers(pool, job, jobHandlers) {
642
1020
  const handler = jobHandlers[job.jobType];
643
1021
  if (!handler) {
@@ -655,26 +1033,31 @@ async function processJobWithHandlers(pool, job, jobHandlers) {
655
1033
  return;
656
1034
  }
657
1035
  const timeoutMs = job.timeoutMs ?? void 0;
1036
+ const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
658
1037
  let timeoutId;
659
1038
  const controller = new AbortController();
660
1039
  try {
661
- const jobPromise = handler(job.payload, controller.signal);
662
- if (timeoutMs && timeoutMs > 0) {
663
- await Promise.race([
664
- jobPromise,
665
- new Promise((_, reject) => {
666
- timeoutId = setTimeout(() => {
667
- controller.abort();
668
- const timeoutError = new Error(
669
- `Job timed out after ${timeoutMs} ms`
670
- );
671
- timeoutError.failureReason = "timeout" /* Timeout */;
672
- reject(timeoutError);
673
- }, timeoutMs);
674
- })
675
- ]);
1040
+ if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
1041
+ await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
676
1042
  } else {
677
- await jobPromise;
1043
+ const jobPromise = handler(job.payload, controller.signal);
1044
+ if (timeoutMs && timeoutMs > 0) {
1045
+ await Promise.race([
1046
+ jobPromise,
1047
+ new Promise((_, reject) => {
1048
+ timeoutId = setTimeout(() => {
1049
+ controller.abort();
1050
+ const timeoutError = new Error(
1051
+ `Job timed out after ${timeoutMs} ms`
1052
+ );
1053
+ timeoutError.failureReason = "timeout" /* Timeout */;
1054
+ reject(timeoutError);
1055
+ }, timeoutMs);
1056
+ })
1057
+ ]);
1058
+ } else {
1059
+ await jobPromise;
1060
+ }
678
1061
  }
679
1062
  if (timeoutId) clearTimeout(timeoutId);
680
1063
  await completeJob(pool, job.id);
@@ -807,12 +1190,27 @@ var createProcessor = (pool, handlers, options = {}) => {
807
1190
  isRunning: () => running
808
1191
  };
809
1192
  };
1193
/**
 * Resolve a PEM value that may be given inline or as a "file://" path.
 *
 * @param value inline PEM text, a "file://..." path, or a falsy value.
 * @returns undefined for falsy input; the file contents (utf8) when the
 *   value carries the file:// prefix; otherwise the value unchanged.
 */
function loadPemOrFile(value) {
  if (!value) return void 0;
  const FILE_PREFIX = "file://";
  if (!value.startsWith(FILE_PREFIX)) {
    // Already inline PEM text — pass through untouched.
    return value;
  }
  return fs__default.default.readFileSync(value.slice(FILE_PREFIX.length), "utf8");
}
810
1201
  var createPool = (config) => {
811
1202
  let searchPath;
1203
+ let ssl = void 0;
1204
+ let customCA;
1205
+ let sslmode;
812
1206
  if (config.connectionString) {
813
1207
  try {
814
1208
  const url = new URL(config.connectionString);
815
1209
  searchPath = url.searchParams.get("search_path") || void 0;
1210
+ sslmode = url.searchParams.get("sslmode") || void 0;
1211
+ if (sslmode === "no-verify") {
1212
+ ssl = { rejectUnauthorized: false };
1213
+ }
816
1214
  } catch (e) {
817
1215
  const parsed = pgConnectionString.parse(config.connectionString);
818
1216
  if (parsed.options) {
@@ -821,9 +1219,52 @@ var createPool = (config) => {
821
1219
  searchPath = match[1];
822
1220
  }
823
1221
  }
1222
+ sslmode = typeof parsed.sslmode === "string" ? parsed.sslmode : void 0;
1223
+ if (sslmode === "no-verify") {
1224
+ ssl = { rejectUnauthorized: false };
1225
+ }
1226
+ }
1227
+ }
1228
+ if (config.ssl) {
1229
+ if (typeof config.ssl.ca === "string") {
1230
+ customCA = config.ssl.ca;
1231
+ } else if (typeof process.env.PGSSLROOTCERT === "string") {
1232
+ customCA = process.env.PGSSLROOTCERT;
1233
+ } else {
1234
+ customCA = void 0;
824
1235
  }
1236
+ const caValue = typeof customCA === "string" ? loadPemOrFile(customCA) : void 0;
1237
+ ssl = {
1238
+ ...ssl,
1239
+ ...caValue ? { ca: caValue } : {},
1240
+ cert: loadPemOrFile(
1241
+ typeof config.ssl.cert === "string" ? config.ssl.cert : process.env.PGSSLCERT
1242
+ ),
1243
+ key: loadPemOrFile(
1244
+ typeof config.ssl.key === "string" ? config.ssl.key : process.env.PGSSLKEY
1245
+ ),
1246
+ rejectUnauthorized: config.ssl.rejectUnauthorized !== void 0 ? config.ssl.rejectUnauthorized : true
1247
+ };
825
1248
  }
826
- const pool = new pg.Pool(config);
1249
+ if (sslmode && customCA) {
1250
+ const warning = `
1251
+
1252
+ \x1B[33m**************************************************
1253
+ \u26A0\uFE0F WARNING: SSL CONFIGURATION ISSUE
1254
+ **************************************************
1255
+ Both sslmode ('${sslmode}') is set in the connection string
1256
+ and a custom CA is provided (via config.ssl.ca or PGSSLROOTCERT).
1257
+ This combination may cause connection failures or unexpected behavior.
1258
+
1259
+ Recommended: Remove sslmode from the connection string when using a custom CA.
1260
+ **************************************************\x1B[0m
1261
+ `;
1262
+ console.warn(warning);
1263
+ }
1264
+ const pool = new pg.Pool({
1265
+ ...config,
1266
+ ...ssl ? { ssl } : {}
1267
+ });
827
1268
  if (searchPath) {
828
1269
  pool.on("connect", (client) => {
829
1270
  client.query(`SET search_path TO ${searchPath}`);
@@ -832,6 +1273,75 @@ var createPool = (config) => {
832
1273
  return pool;
833
1274
  };
834
1275
 
1276
+ // src/handler-validation.ts
1277
/**
 * Non-throwing variant of the handler serialization check: reports
 * whether a handler can survive the toString()/new Function round trip
 * required when forceKillOnTimeout runs it in a worker thread.
 *
 * @param handler job handler function.
 * @param jobType optional job type name used in messages.
 * @returns {{isSerializable: boolean, error?: string}} — note that a
 *   serializable handler may still carry a warning in `error`.
 */
function validateHandlerSerializable2(handler, jobType) {
  try {
    const source = handler.toString();
    const typeLabel = jobType ? `job type "${jobType}"` : "handler";
    const fail = (error) => ({ isSerializable: false, error });
    // Heuristic: `this.` outside a parameter list implies a bound context
    // that toString() cannot carry across the thread boundary.
    if (source.includes("this.") && !source.match(/\([^)]*this[^)]*\)/)) {
      return fail(
        `Handler for ${typeLabel} uses 'this' context which cannot be serialized. Use a regular function or avoid 'this' references when forceKillOnTimeout is enabled.`
      );
    }
    if (source.includes("[native code]")) {
      return fail(
        `Handler for ${typeLabel} contains native code which cannot be serialized. Ensure your handler is a plain function when forceKillOnTimeout is enabled.`
      );
    }
    // Dry-run the deserialization the worker thread will perform.
    try {
      new Function("return " + source);
    } catch (parseError) {
      const reason = parseError instanceof Error ? parseError.message : String(parseError);
      return fail(
        `Handler for ${typeLabel} cannot be serialized: ${reason}. When using forceKillOnTimeout, handlers must be serializable functions without closures over external variables.`
      );
    }
    // Heuristic for closures over preceding const/let declarations:
    // serializable, but flagged with a warning so callers can test it.
    const closurePattern = /(?:const|let)\s+\w+\s*=\s*[^;]+;\s*async\s*\(/;
    if (closurePattern.test(source)) {
      return {
        isSerializable: true,
        error: `Warning: Handler for ${typeLabel} may have closures over external variables. Test thoroughly with forceKillOnTimeout enabled. If the handler fails to execute in a worker thread, ensure all dependencies are imported within the handler function.`
      };
    }
    return { isSerializable: true };
  } catch (error) {
    return {
      isSerializable: false,
      error: `Failed to validate handler serialization${jobType ? ` for job type "${jobType}"` : ""}: ${error instanceof Error ? error.message : String(error)}`
    };
  }
}
1317
/**
 * Best-effort runtime check that a handler survives the serialize →
 * deserialize round trip (Function.prototype.toString + new Function)
 * used when forceKillOnTimeout runs handlers in a worker thread.
 *
 * NOTE(review): this actually INVOKES the deserialized handler with an
 * empty payload ({}) for up to 100 ms, so it should only be used with
 * handlers whose side effects are safe to trigger during validation.
 *
 * @param handler job handler to test.
 * @param jobType optional job type name used in messages.
 * @returns {Promise<{isSerializable: boolean, error?: string}>}
 */
async function testHandlerSerialization(handler, jobType) {
  const basicValidation = validateHandlerSerializable2(handler, jobType);
  if (!basicValidation.isSerializable) {
    return basicValidation;
  }
  try {
    const handlerString = handler.toString();
    // Round-trip the handler exactly the way the worker thread will.
    const handlerFn = new Function("return " + handlerString)();
    const testPromise = handlerFn({}, new AbortController().signal);
    let probeTimer;
    const timeoutPromise = new Promise((_, reject) => {
      probeTimer = setTimeout(() => reject(new Error("Handler test timeout")), 100);
    });
    try {
      await Promise.race([testPromise, timeoutPromise]);
    } catch (execError) {
      // A probe timeout just means the handler is still running — the
      // round trip itself succeeded. Other execution errors are also
      // treated as success: only deserialization is validated here.
      if (execError instanceof Error && execError.message === "Handler test timeout") {
        return { isSerializable: true };
      }
    } finally {
      // Fix: always clear the probe timer so a fast handler does not
      // leave a pending timeout holding the event loop open for 100 ms.
      clearTimeout(probeTimer);
    }
    return { isSerializable: true };
  } catch (error) {
    return {
      isSerializable: false,
      error: `Handler failed serialization test: ${error instanceof Error ? error.message : String(error)}`
    };
  }
}
1344
+
835
1345
  // src/index.ts
836
1346
  var initJobQueue = (config) => {
837
1347
  const { databaseConfig } = config;
@@ -865,6 +1375,14 @@ var initJobQueue = (config) => {
865
1375
  (jobId) => cancelJob(pool, jobId),
866
1376
  config.verbose ?? false
867
1377
  ),
1378
+ editJob: withLogContext(
1379
+ (jobId, updates) => editJob(pool, jobId, updates),
1380
+ config.verbose ?? false
1381
+ ),
1382
+ editAllPendingJobs: withLogContext(
1383
+ (filters, updates) => editAllPendingJobs(pool, filters, updates),
1384
+ config.verbose ?? false
1385
+ ),
868
1386
  cancelAllUpcomingJobs: withLogContext(
869
1387
  (filters) => cancelAllUpcomingJobs(pool, filters),
870
1388
  config.verbose ?? false
@@ -896,5 +1414,7 @@ var withLogContext = (fn, verbose) => (...args) => {
896
1414
  exports.FailureReason = FailureReason;
897
1415
  exports.JobEventType = JobEventType;
898
1416
  exports.initJobQueue = initJobQueue;
1417
+ exports.testHandlerSerialization = testHandlerSerialization;
1418
+ exports.validateHandlerSerializable = validateHandlerSerializable2;
899
1419
  //# sourceMappingURL=index.cjs.map
900
1420
  //# sourceMappingURL=index.cjs.map