@nicnocquee/dataqueue 1.34.0 → 1.35.0-beta.20260224075710

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,3 +1,4 @@
+ import { EventEmitter } from 'events';
  import { Worker } from 'worker_threads';
  import { AsyncLocalStorage } from 'async_hooks';
  import { Pool } from 'pg';
@@ -7,7 +8,7 @@ import { randomUUID } from 'crypto';
  import { createRequire } from 'module';
  import { Cron } from 'croner';

- // src/processor.ts
+ // src/index.ts

  // src/types.ts
  var JobEventType = /* @__PURE__ */ ((JobEventType2) => {
@@ -143,9 +144,9 @@ async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
  }

  handlerFn(payload, signal)
- .then(() => {
+ .then((result) => {
  clearTimeout(timeoutId);
- parentPort.postMessage({ type: 'success' });
+ parentPort.postMessage({ type: 'success', output: result });
  })
  .catch((error) => {
  clearTimeout(timeoutId);
@@ -180,24 +181,27 @@ async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
  }
  });
  let resolved = false;
- worker.on("message", (message) => {
- if (resolved) return;
- resolved = true;
- if (message.type === "success") {
- resolve();
- } else if (message.type === "timeout") {
- const timeoutError = new Error(
- `Job timed out after ${timeoutMs} ms and was forcefully terminated`
- );
- timeoutError.failureReason = "timeout" /* Timeout */;
- reject(timeoutError);
- } else if (message.type === "error") {
- const error = new Error(message.error.message);
- error.stack = message.error.stack;
- error.name = message.error.name;
- reject(error);
+ worker.on(
+ "message",
+ (message) => {
+ if (resolved) return;
+ resolved = true;
+ if (message.type === "success") {
+ resolve(message.output);
+ } else if (message.type === "timeout") {
+ const timeoutError = new Error(
+ `Job timed out after ${timeoutMs} ms and was forcefully terminated`
+ );
+ timeoutError.failureReason = "timeout" /* Timeout */;
+ reject(timeoutError);
+ } else if (message.type === "error") {
+ const error = new Error(message.error.message);
+ error.stack = message.error.stack;
+ error.name = message.error.name;
+ reject(error);
+ }
  }
- });
+ );
  worker.on("error", (error) => {
  if (resolved) return;
  resolved = true;
@@ -354,22 +358,30 @@ function buildWaitContext(backend, jobId, stepData, baseCtx) {
  if (percent < 0 || percent > 100)
  throw new Error("Progress must be between 0 and 100");
  await backend.updateProgress(jobId, Math.round(percent));
+ },
+ setOutput: async (data) => {
+ await backend.updateOutput(jobId, data);
  }
  };
  return ctx;
  }
- async function processJobWithHandlers(backend, job, jobHandlers) {
+ async function processJobWithHandlers(backend, job, jobHandlers, emit) {
  const handler = jobHandlers[job.jobType];
  if (!handler) {
  await backend.setPendingReasonForUnpickedJobs(
  `No handler registered for job type: ${job.jobType}`,
  job.jobType
  );
- await backend.failJob(
- job.id,
- new Error(`No handler registered for job type: ${job.jobType}`),
- "no_handler" /* NoHandler */
+ const noHandlerError = new Error(
+ `No handler registered for job type: ${job.jobType}`
  );
+ await backend.failJob(job.id, noHandlerError, "no_handler" /* NoHandler */);
+ emit?.("job:failed", {
+ jobId: job.id,
+ jobType: job.jobType,
+ error: noHandlerError,
+ willRetry: false
+ });
  return;
  }
  const stepData = { ...job.stepData || {} };
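Together with the worker-thread change above, this hunk gives handlers two ways to produce output: return a value, or call the new `setOutput` context method. A minimal sketch of a handler using it — the handler-map shape is inferred from the `jobHandlers` lookup above, and the `resizeImage` job type and payload are made-up examples:

```js
// Handlers receive (payload, signal, ctx) per the call site shown later in this diff.
const handlers = {
  resizeImage: async (payload, signal, ctx) => {
    await ctx.setProgress(50);
    // Option A: return a value; processJobWithHandlers forwards it to
    // backend.completeJob(job.id, completionOutput).
    // Option B: set it explicitly. Once setOutput has been called,
    // setOutputCalled suppresses the return value, so the explicit output wins.
    await ctx.setOutput({ thumbnailUrl: `thumbs/${payload.id}.png` });
  },
};
```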
@@ -384,9 +396,16 @@ async function processJobWithHandlers(backend, job, jobHandlers) {
  const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
  let timeoutId;
  const controller = new AbortController();
+ let setOutputCalled = false;
+ let handlerReturnValue;
  try {
  if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
- await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
+ handlerReturnValue = await runHandlerInWorker(
+ handler,
+ job.payload,
+ timeoutMs,
+ job.jobType
+ );
  } else {
  let onTimeoutCallback;
  let timeoutReject;
@@ -438,6 +457,22 @@ async function processJobWithHandlers(backend, job, jobHandlers) {
  }
  };
  const ctx = buildWaitContext(backend, job.id, stepData, baseCtx);
+ if (emit) {
+ const originalSetProgress = ctx.setProgress;
+ ctx.setProgress = async (percent) => {
+ await originalSetProgress(percent);
+ emit("job:progress", {
+ jobId: job.id,
+ progress: Math.round(percent)
+ });
+ };
+ }
+ const originalSetOutput = ctx.setOutput;
+ ctx.setOutput = async (data) => {
+ setOutputCalled = true;
+ await originalSetOutput(data);
+ emit?.("job:output", { jobId: job.id, output: data });
+ };
  if (forceKillOnTimeout && !hasTimeout) {
  log(
  `forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
@@ -445,7 +480,7 @@ async function processJobWithHandlers(backend, job, jobHandlers) {
  }
  const jobPromise = handler(job.payload, controller.signal, ctx);
  if (hasTimeout) {
- await Promise.race([
+ handlerReturnValue = await Promise.race([
  jobPromise,
  new Promise((_, reject) => {
  timeoutReject = reject;
@@ -453,11 +488,13 @@ async function processJobWithHandlers(backend, job, jobHandlers) {
  })
  ]);
  } else {
- await jobPromise;
+ handlerReturnValue = await jobPromise;
  }
  }
  if (timeoutId) clearTimeout(timeoutId);
- await backend.completeJob(job.id);
+ const completionOutput = setOutputCalled || handlerReturnValue === void 0 ? void 0 : handlerReturnValue;
+ await backend.completeJob(job.id, completionOutput);
+ emit?.("job:completed", { jobId: job.id, jobType: job.jobType });
  } catch (error) {
  if (timeoutId) clearTimeout(timeoutId);
  if (error instanceof WaitSignal) {
@@ -469,6 +506,7 @@ async function processJobWithHandlers(backend, job, jobHandlers) {
  waitTokenId: error.tokenId,
  stepData: error.stepData
  });
+ emit?.("job:waiting", { jobId: job.id, jobType: job.jobType });
  return;
  }
  console.error(`Error processing job ${job.id}:`, error);
@@ -476,22 +514,32 @@ async function processJobWithHandlers(backend, job, jobHandlers) {
  if (error && typeof error === "object" && "failureReason" in error && error.failureReason === "timeout" /* Timeout */) {
  failureReason = "timeout" /* Timeout */;
  }
- await backend.failJob(
- job.id,
- error instanceof Error ? error : new Error(String(error)),
- failureReason
- );
+ const failError = error instanceof Error ? error : new Error(String(error));
+ await backend.failJob(job.id, failError, failureReason);
+ emit?.("job:failed", {
+ jobId: job.id,
+ jobType: job.jobType,
+ error: failError,
+ willRetry: job.attempts + 1 < job.maxAttempts
+ });
  }
  }
- async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError) {
+ async function processBatchWithHandlers(backend, workerId, batchSize, jobType, jobHandlers, concurrency, onError, emit) {
  const jobs = await backend.getNextBatch(
  workerId,
  batchSize,
  jobType
  );
+ if (emit) {
+ for (const job of jobs) {
+ emit("job:processing", { jobId: job.id, jobType: job.jobType });
+ }
+ }
  if (!concurrency || concurrency >= jobs.length) {
  await Promise.all(
- jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers))
+ jobs.map(
+ (job) => processJobWithHandlers(backend, job, jobHandlers, emit)
+ )
  );
  return jobs.length;
  }
@@ -504,7 +552,7 @@ async function processBatchWithHandlers(backend, workerId, batchSize, jobType, j
  while (running < concurrency && idx < jobs.length) {
  const job = jobs[idx++];
  running++;
- processJobWithHandlers(backend, job, jobHandlers).then(() => {
+ processJobWithHandlers(backend, job, jobHandlers, emit).then(() => {
  running--;
  finished++;
  next();
@@ -521,7 +569,7 @@ async function processBatchWithHandlers(backend, workerId, batchSize, jobType, j
  next();
  });
  }
- var createProcessor = (backend, handlers, options = {}, onBeforeBatch) => {
+ var createProcessor = (backend, handlers, options = {}, onBeforeBatch, emit) => {
  const {
  workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
  batchSize = 10,
@@ -541,11 +589,11 @@ var createProcessor = (backend, handlers, options = {}, onBeforeBatch) => {
  await onBeforeBatch();
  } catch (hookError) {
  log(`onBeforeBatch hook error: ${hookError}`);
+ const err = hookError instanceof Error ? hookError : new Error(String(hookError));
  if (onError) {
- onError(
- hookError instanceof Error ? hookError : new Error(String(hookError))
- );
+ onError(err);
  }
+ emit?.("error", err);
  }
  }
  log(
@@ -559,11 +607,14 @@ var createProcessor = (backend, handlers, options = {}, onBeforeBatch) => {
  jobType,
  handlers,
  concurrency,
- onError
+ onError,
+ emit
  );
  return processed;
  } catch (error) {
- onError(error instanceof Error ? error : new Error(String(error)));
+ const err = error instanceof Error ? error : new Error(String(error));
+ onError(err);
+ emit?.("error", err);
  }
  return 0;
  };
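The optional `emit` parameter threaded through `createProcessor` is a plain `(eventName, payload)` callback; how `src/index.ts` wires it to the `EventEmitter` imported at the top is not visible in this diff. A hedged sketch of one plausible wiring, assuming `backend` and `handlers` are constructed as elsewhere in this file:

```js
import { EventEmitter } from 'events';

const events = new EventEmitter();
events.on('job:processing', ({ jobId, jobType }) => console.log('started', jobId, jobType));
events.on('job:progress', ({ jobId, progress }) => console.log(jobId, `${progress}%`));
events.on('job:failed', ({ jobId, error, willRetry }) =>
  console.error(`job ${jobId} failed (retry: ${willRetry})`, error)
);

// createProcessor's five-argument signature matches the code above.
const processor = createProcessor(
  backend,
  handlers,
  { batchSize: 10 },
  undefined,
  (event, payload) => events.emit(event, payload)
);
```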
@@ -642,6 +693,138 @@ var createProcessor = (backend, handlers, options = {}, onBeforeBatch) => {
  isRunning: () => running
  };
  };
+
+ // src/supervisor.ts
+ var createSupervisor = (backend, options = {}, emit) => {
+ const {
+ intervalMs = 6e4,
+ stuckJobsTimeoutMinutes = 10,
+ cleanupJobsDaysToKeep = 30,
+ cleanupEventsDaysToKeep = 30,
+ cleanupBatchSize = 1e3,
+ reclaimStuckJobs = true,
+ expireTimedOutTokens = true,
+ onError = (error) => console.error("Supervisor maintenance error:", error),
+ verbose = false
+ } = options;
+ let running = false;
+ let timeoutId = null;
+ let currentRunPromise = null;
+ setLogContext(verbose);
+ const runOnce = async () => {
+ setLogContext(verbose);
+ const result = {
+ reclaimedJobs: 0,
+ cleanedUpJobs: 0,
+ cleanedUpEvents: 0,
+ expiredTokens: 0
+ };
+ if (reclaimStuckJobs) {
+ try {
+ result.reclaimedJobs = await backend.reclaimStuckJobs(
+ stuckJobsTimeoutMinutes
+ );
+ if (result.reclaimedJobs > 0) {
+ log(`Supervisor: reclaimed ${result.reclaimedJobs} stuck jobs`);
+ }
+ } catch (e) {
+ const err = e instanceof Error ? e : new Error(String(e));
+ onError(err);
+ emit?.("error", err);
+ }
+ }
+ if (cleanupJobsDaysToKeep > 0) {
+ try {
+ result.cleanedUpJobs = await backend.cleanupOldJobs(
+ cleanupJobsDaysToKeep,
+ cleanupBatchSize
+ );
+ if (result.cleanedUpJobs > 0) {
+ log(`Supervisor: cleaned up ${result.cleanedUpJobs} old jobs`);
+ }
+ } catch (e) {
+ const err = e instanceof Error ? e : new Error(String(e));
+ onError(err);
+ emit?.("error", err);
+ }
+ }
+ if (cleanupEventsDaysToKeep > 0) {
+ try {
+ result.cleanedUpEvents = await backend.cleanupOldJobEvents(
+ cleanupEventsDaysToKeep,
+ cleanupBatchSize
+ );
+ if (result.cleanedUpEvents > 0) {
+ log(
+ `Supervisor: cleaned up ${result.cleanedUpEvents} old job events`
+ );
+ }
+ } catch (e) {
+ const err = e instanceof Error ? e : new Error(String(e));
+ onError(err);
+ emit?.("error", err);
+ }
+ }
+ if (expireTimedOutTokens) {
+ try {
+ result.expiredTokens = await backend.expireTimedOutWaitpoints();
+ if (result.expiredTokens > 0) {
+ log(`Supervisor: expired ${result.expiredTokens} timed-out tokens`);
+ }
+ } catch (e) {
+ const err = e instanceof Error ? e : new Error(String(e));
+ onError(err);
+ emit?.("error", err);
+ }
+ }
+ return result;
+ };
+ return {
+ start: async () => {
+ return runOnce();
+ },
+ startInBackground: () => {
+ if (running) return;
+ log("Supervisor: starting background maintenance loop");
+ running = true;
+ const loop = async () => {
+ if (!running) return;
+ currentRunPromise = runOnce();
+ await currentRunPromise;
+ currentRunPromise = null;
+ if (running) {
+ timeoutId = setTimeout(loop, intervalMs);
+ }
+ };
+ loop();
+ },
+ stop: () => {
+ running = false;
+ if (timeoutId !== null) {
+ clearTimeout(timeoutId);
+ timeoutId = null;
+ }
+ log("Supervisor: stopped");
+ },
+ stopAndDrain: async (timeoutMs = 3e4) => {
+ running = false;
+ if (timeoutId !== null) {
+ clearTimeout(timeoutId);
+ timeoutId = null;
+ }
+ if (currentRunPromise) {
+ log("Supervisor: draining current maintenance run\u2026");
+ await Promise.race([
+ currentRunPromise,
+ new Promise((resolve) => setTimeout(resolve, timeoutMs))
+ ]);
+ currentRunPromise = null;
+ }
+ log("Supervisor: drained and stopped");
+ },
+ isRunning: () => running
+ };
+ };
  function loadPemOrFile(value) {
  if (!value) return void 0;
  if (value.startsWith("file://")) {
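A sketch of the supervisor lifecycle implied by the object returned above; option values are the defaults from the destructuring, and `backend` is assumed to be an already-constructed PostgresBackend or RedisBackend:

```js
const supervisor = createSupervisor(backend, {
  intervalMs: 60_000,          // one maintenance pass per minute (default)
  stuckJobsTimeoutMinutes: 10, // reclaim jobs locked longer than this
  cleanupJobsDaysToKeep: 30,
  cleanupEventsDaysToKeep: 30,
});

supervisor.startInBackground(); // setTimeout loop, see loop() above

// On shutdown: stop the loop, then wait up to 30 s for an in-flight run.
process.on('SIGTERM', async () => {
  await supervisor.stopAndDrain(30_000);
});
```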
@@ -793,6 +976,14 @@ var PostgresBackend = class {
  }
  }
  // ── Job CRUD ──────────────────────────────────────────────────────────
+ /**
+ * Add a job and return its numeric ID.
+ *
+ * @param job - Job configuration.
+ * @param options - Optional. Pass `{ db }` to run the INSERT on an external
+ * client (e.g., inside a transaction) so the job is part of the caller's
+ * transaction. The event INSERT also uses the same client.
+ */
  async addJob({
  jobType,
  payload,
@@ -802,17 +993,21 @@ var PostgresBackend = class {
  timeoutMs = void 0,
  forceKillOnTimeout = false,
  tags = void 0,
- idempotencyKey = void 0
- }) {
- const client = await this.pool.connect();
+ idempotencyKey = void 0,
+ retryDelay = void 0,
+ retryBackoff = void 0,
+ retryDelayMax = void 0
+ }, options) {
+ const externalClient = options?.db;
+ const client = externalClient ?? await this.pool.connect();
  try {
  let result;
  const onConflict = idempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
  if (runAt) {
  result = await client.query(
  `INSERT INTO job_queue
- (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
+ (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
  ${onConflict}
  RETURNING id`,
  [
@@ -824,14 +1019,17 @@ var PostgresBackend = class {
  timeoutMs ?? null,
  forceKillOnTimeout ?? false,
  tags ?? null,
- idempotencyKey ?? null
+ idempotencyKey ?? null,
+ retryDelay ?? null,
+ retryBackoff ?? null,
+ retryDelayMax ?? null
  ]
  );
  } else {
  result = await client.query(
  `INSERT INTO job_queue
- (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key)
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ (job_type, payload, max_attempts, priority, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
  ${onConflict}
  RETURNING id`,
  [
@@ -842,7 +1040,10 @@ var PostgresBackend = class {
  timeoutMs ?? null,
  forceKillOnTimeout ?? false,
  tags ?? null,
- idempotencyKey ?? null
+ idempotencyKey ?? null,
+ retryDelay ?? null,
+ retryBackoff ?? null,
+ retryDelayMax ?? null
  ]
  );
  }
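A sketch of transactional enqueueing via the new `{ db }` option documented above; the BEGIN/COMMIT framing is ordinary node-postgres usage and not part of this diff, and `orderId` is illustrative:

```js
const client = await pool.connect();
try {
  await client.query('BEGIN');
  await client.query('INSERT INTO orders (id) VALUES ($1)', [orderId]);
  // The job INSERT (and its job_events row) runs on the same client,
  // so it commits or rolls back together with the order row.
  await backend.addJob(
    { jobType: 'sendReceipt', payload: { orderId }, retryDelay: 30, retryBackoff: true },
    { db: client }
  );
  await client.query('COMMIT');
} catch (err) {
  await client.query('ROLLBACK');
  throw err;
} finally {
  client.release();
}
```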
@@ -865,25 +1066,188 @@ var PostgresBackend = class {
  log(
  `Added job ${jobId}: payload ${JSON.stringify(payload)}, ${runAt ? `runAt ${runAt.toISOString()}, ` : ""}priority ${priority}, maxAttempts ${maxAttempts}, jobType ${jobType}, tags ${JSON.stringify(tags)}${idempotencyKey ? `, idempotencyKey "${idempotencyKey}"` : ""}`
  );
- await this.recordJobEvent(jobId, "added" /* Added */, {
- jobType,
- payload,
- tags,
- idempotencyKey
- });
+ if (externalClient) {
+ try {
+ await client.query(
+ `INSERT INTO job_events (job_id, event_type, metadata) VALUES ($1, $2, $3)`,
+ [
+ jobId,
+ "added" /* Added */,
+ JSON.stringify({ jobType, payload, tags, idempotencyKey })
+ ]
+ );
+ } catch (error) {
+ log(`Error recording job event for job ${jobId}: ${error}`);
+ }
+ } else {
+ await this.recordJobEvent(jobId, "added" /* Added */, {
+ jobType,
+ payload,
+ tags,
+ idempotencyKey
+ });
+ }
  return jobId;
  } catch (error) {
  log(`Error adding job: ${error}`);
  throw error;
  } finally {
- client.release();
+ if (!externalClient) client.release();
+ }
+ }
+ /**
+ * Insert multiple jobs in a single database round-trip.
+ *
+ * Uses a multi-row INSERT with ON CONFLICT handling for idempotency keys.
+ * Returns IDs in the same order as the input array.
+ */
+ async addJobs(jobs, options) {
+ if (jobs.length === 0) return [];
+ const externalClient = options?.db;
+ const client = externalClient ?? await this.pool.connect();
+ try {
+ const COLS_PER_JOB = 12;
+ const valueClauses = [];
+ const params = [];
+ const hasAnyIdempotencyKey = jobs.some((j) => j.idempotencyKey);
+ for (let i = 0; i < jobs.length; i++) {
+ const {
+ jobType,
+ payload,
+ maxAttempts = 3,
+ priority = 0,
+ runAt = null,
+ timeoutMs = void 0,
+ forceKillOnTimeout = false,
+ tags = void 0,
+ idempotencyKey = void 0,
+ retryDelay = void 0,
+ retryBackoff = void 0,
+ retryDelayMax = void 0
+ } = jobs[i];
+ const base = i * COLS_PER_JOB;
+ valueClauses.push(
+ `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, COALESCE($${base + 5}::timestamptz, CURRENT_TIMESTAMP), $${base + 6}, $${base + 7}, $${base + 8}, $${base + 9}, $${base + 10}, $${base + 11}, $${base + 12})`
+ );
+ params.push(
+ jobType,
+ payload,
+ maxAttempts,
+ priority,
+ runAt,
+ timeoutMs ?? null,
+ forceKillOnTimeout ?? false,
+ tags ?? null,
+ idempotencyKey ?? null,
+ retryDelay ?? null,
+ retryBackoff ?? null,
+ retryDelayMax ?? null
+ );
+ }
+ const onConflict = hasAnyIdempotencyKey ? `ON CONFLICT (idempotency_key) WHERE idempotency_key IS NOT NULL DO NOTHING` : "";
+ const result = await client.query(
+ `INSERT INTO job_queue
+ (job_type, payload, max_attempts, priority, run_at, timeout_ms, force_kill_on_timeout, tags, idempotency_key, retry_delay, retry_backoff, retry_delay_max)
+ VALUES ${valueClauses.join(", ")}
+ ${onConflict}
+ RETURNING id, idempotency_key`,
+ params
+ );
+ const returnedKeyToId = /* @__PURE__ */ new Map();
+ const returnedNullKeyIds = [];
+ for (const row of result.rows) {
+ if (row.idempotency_key != null) {
+ returnedKeyToId.set(row.idempotency_key, row.id);
+ } else {
+ returnedNullKeyIds.push(row.id);
+ }
+ }
+ const missingKeys = [];
+ for (const job of jobs) {
+ if (job.idempotencyKey && !returnedKeyToId.has(job.idempotencyKey)) {
+ missingKeys.push(job.idempotencyKey);
+ }
+ }
+ if (missingKeys.length > 0) {
+ const existing = await client.query(
+ `SELECT id, idempotency_key FROM job_queue WHERE idempotency_key = ANY($1)`,
+ [missingKeys]
+ );
+ for (const row of existing.rows) {
+ returnedKeyToId.set(row.idempotency_key, row.id);
+ }
+ }
+ let nullKeyIdx = 0;
+ const ids = [];
+ for (const job of jobs) {
+ if (job.idempotencyKey) {
+ const id = returnedKeyToId.get(job.idempotencyKey);
+ if (id === void 0) {
+ throw new Error(
+ `Failed to resolve job ID for idempotency key "${job.idempotencyKey}"`
+ );
+ }
+ ids.push(id);
+ } else {
+ ids.push(returnedNullKeyIds[nullKeyIdx++]);
+ }
+ }
+ log(`Batch-inserted ${jobs.length} jobs, IDs: [${ids.join(", ")}]`);
+ const newJobEvents = [];
+ for (let i = 0; i < jobs.length; i++) {
+ const job = jobs[i];
+ const wasInserted = !job.idempotencyKey || !missingKeys.includes(job.idempotencyKey);
+ if (wasInserted) {
+ newJobEvents.push({
+ jobId: ids[i],
+ eventType: "added" /* Added */,
+ metadata: {
+ jobType: job.jobType,
+ payload: job.payload,
+ tags: job.tags,
+ idempotencyKey: job.idempotencyKey
+ }
+ });
+ }
+ }
+ if (newJobEvents.length > 0) {
+ if (externalClient) {
+ const evtValues = [];
+ const evtParams = [];
+ let evtIdx = 1;
+ for (const evt of newJobEvents) {
+ evtValues.push(`($${evtIdx++}, $${evtIdx++}, $${evtIdx++})`);
+ evtParams.push(
+ evt.jobId,
+ evt.eventType,
+ evt.metadata ? JSON.stringify(evt.metadata) : null
+ );
+ }
+ try {
+ await client.query(
+ `INSERT INTO job_events (job_id, event_type, metadata) VALUES ${evtValues.join(", ")}`,
+ evtParams
+ );
+ } catch (error) {
+ log(`Error recording batch job events: ${error}`);
+ }
+ } else {
+ await this.recordJobEventsBatch(newJobEvents);
+ }
+ }
+ return ids;
+ } catch (error) {
+ log(`Error batch-inserting jobs: ${error}`);
+ throw error;
+ } finally {
+ if (!externalClient) client.release();
  }
  }
  async getJob(id) {
  const client = await this.pool.connect();
  try {
  const result = await client.query(
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE id = $1`,
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output FROM job_queue WHERE id = $1`,
  [id]
  );
  if (result.rows.length === 0) {
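A sketch of the batch API per the JSDoc above: one round-trip, IDs aligned index-for-index with the input, and rows that collide on an existing idempotency_key resolving to the pre-existing job's ID. The job types and payloads are illustrative:

```js
const ids = await backend.addJobs([
  { jobType: 'sendEmail', payload: { to: 'a@example.com' } },
  { jobType: 'sendEmail', payload: { to: 'b@example.com' }, idempotencyKey: 'email-b' },
  { jobType: 'sendEmail', payload: { to: 'c@example.com' }, retryDelay: 120, retryBackoff: false },
]);
// ids.length === 3; if 'email-b' was enqueued earlier, ids[1] is that
// existing job's ID and no duplicate 'added' event is recorded for it.
```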
@@ -910,7 +1274,7 @@ var PostgresBackend = class {
  const client = await this.pool.connect();
  try {
  const result = await client.query(
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
  [status, limit, offset]
  );
  log(`Found ${result.rows.length} jobs by status ${status}`);
@@ -932,7 +1296,7 @@ var PostgresBackend = class {
  const client = await this.pool.connect();
  try {
  const result = await client.query(
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
  [limit, offset]
  );
  log(`Found ${result.rows.length} jobs (all)`);
@@ -952,7 +1316,7 @@ var PostgresBackend = class {
  async getJobs(filters, limit = 100, offset = 0) {
  const client = await this.pool.connect();
  try {
- let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress FROM job_queue`;
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output FROM job_queue`;
  const params = [];
  const where = [];
  let paramIdx = 1;
@@ -1053,7 +1417,7 @@ var PostgresBackend = class {
  async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
  const client = await this.pool.connect();
  try {
- let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output
  FROM job_queue`;
  let params = [];
  switch (mode) {
@@ -1147,7 +1511,7 @@ var PostgresBackend = class {
  LIMIT $2
  FOR UPDATE SKIP LOCKED
  )
- RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress
+ RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output
  `,
  params
  );
@@ -1175,17 +1539,19 @@ var PostgresBackend = class {
  client.release();
  }
  }
- async completeJob(jobId) {
+ async completeJob(jobId, output) {
  const client = await this.pool.connect();
  try {
+ const outputJson = output !== void 0 ? JSON.stringify(output) : null;
  const result = await client.query(
  `
  UPDATE job_queue
  SET status = 'completed', updated_at = NOW(), completed_at = NOW(),
- step_data = NULL, wait_until = NULL, wait_token_id = NULL
+ step_data = NULL, wait_until = NULL, wait_token_id = NULL,
+ output = COALESCE($2::jsonb, output)
  WHERE id = $1 AND status = 'processing'
  `,
- [jobId]
+ [jobId, outputJson]
  );
  if (result.rowCount === 0) {
  log(
@@ -1209,9 +1575,17 @@ var PostgresBackend = class {
  UPDATE job_queue
  SET status = 'failed',
  updated_at = NOW(),
- next_attempt_at = CASE
- WHEN attempts < max_attempts THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
- ELSE NULL
+ next_attempt_at = CASE
+ WHEN attempts >= max_attempts THEN NULL
+ WHEN retry_delay IS NULL AND retry_backoff IS NULL AND retry_delay_max IS NULL
+ THEN NOW() + (POWER(2, attempts) * INTERVAL '1 minute')
+ WHEN COALESCE(retry_backoff, true) = true
+ THEN NOW() + (LEAST(
+ COALESCE(retry_delay_max, 2147483647),
+ COALESCE(retry_delay, 60) * POWER(2, attempts)
+ ) * (0.5 + 0.5 * random()) * INTERVAL '1 second')
+ ELSE
+ NOW() + (COALESCE(retry_delay, 60) * INTERVAL '1 second')
  END,
  error_history = COALESCE(error_history, '[]'::jsonb) || $2::jsonb,
  failure_reason = $3,
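The new CASE keeps the legacy 2^attempts-minutes schedule when no per-job retry settings exist; otherwise it uses retry_delay seconds as the base, doubles it per attempt when retry_backoff is true (the default), caps it at retry_delay_max, and applies a jitter factor between 0.5 and 1.0. A JS mirror of the arithmetic, with illustrative values:

```js
// Mirrors the SQL above. For retryDelay = 30, retryDelayMax = 600:
// attempts = 3 -> min(600, 30 * 2**3) = 240 s, jittered into [120, 240) s.
const nextDelaySeconds = (attempts, retryDelay = 60, retryDelayMax = 2147483647) =>
  Math.min(retryDelayMax, retryDelay * 2 ** attempts) * (0.5 + 0.5 * Math.random());
```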
@@ -1280,6 +1654,21 @@ var PostgresBackend = class {
  client.release();
  }
  }
+ // ── Output ────────────────────────────────────────────────────────────
+ async updateOutput(jobId, output) {
+ const client = await this.pool.connect();
+ try {
+ await client.query(
+ `UPDATE job_queue SET output = $2::jsonb, updated_at = NOW() WHERE id = $1`,
+ [jobId, JSON.stringify(output)]
+ );
+ log(`Updated output for job ${jobId}`);
+ } catch (error) {
+ log(`Error updating output for job ${jobId}: ${error}`);
+ } finally {
+ client.release();
+ }
+ }
  // ── Job management ────────────────────────────────────────────────────
  async retryJob(jobId) {
  const client = await this.pool.connect();
@@ -1449,6 +1838,18 @@ var PostgresBackend = class {
  updateFields.push(`tags = $${paramIdx++}`);
  params.push(updates.tags ?? null);
  }
+ if (updates.retryDelay !== void 0) {
+ updateFields.push(`retry_delay = $${paramIdx++}`);
+ params.push(updates.retryDelay ?? null);
+ }
+ if (updates.retryBackoff !== void 0) {
+ updateFields.push(`retry_backoff = $${paramIdx++}`);
+ params.push(updates.retryBackoff ?? null);
+ }
+ if (updates.retryDelayMax !== void 0) {
+ updateFields.push(`retry_delay_max = $${paramIdx++}`);
+ params.push(updates.retryDelayMax ?? null);
+ }
  if (updateFields.length === 0) {
  log(`No fields to update for job ${jobId}`);
  return;
@@ -1470,6 +1871,12 @@ var PostgresBackend = class {
  if (updates.timeoutMs !== void 0)
  metadata.timeoutMs = updates.timeoutMs;
  if (updates.tags !== void 0) metadata.tags = updates.tags;
+ if (updates.retryDelay !== void 0)
+ metadata.retryDelay = updates.retryDelay;
+ if (updates.retryBackoff !== void 0)
+ metadata.retryBackoff = updates.retryBackoff;
+ if (updates.retryDelayMax !== void 0)
+ metadata.retryDelayMax = updates.retryDelayMax;
  await this.recordJobEvent(jobId, "edited" /* Edited */, metadata);
  log(`Edited job ${jobId}: ${JSON.stringify(metadata)}`);
  } catch (error) {
@@ -1513,6 +1920,18 @@ var PostgresBackend = class {
  updateFields.push(`tags = $${paramIdx++}`);
  params.push(updates.tags ?? null);
  }
+ if (updates.retryDelay !== void 0) {
+ updateFields.push(`retry_delay = $${paramIdx++}`);
+ params.push(updates.retryDelay ?? null);
+ }
+ if (updates.retryBackoff !== void 0) {
+ updateFields.push(`retry_backoff = $${paramIdx++}`);
+ params.push(updates.retryBackoff ?? null);
+ }
+ if (updates.retryDelayMax !== void 0) {
+ updateFields.push(`retry_delay_max = $${paramIdx++}`);
+ params.push(updates.retryDelayMax ?? null);
+ }
  if (updateFields.length === 0) {
  log(`No fields to update for batch edit`);
  return 0;
@@ -1754,8 +2173,8 @@ var PostgresBackend = class {
  `INSERT INTO cron_schedules
  (schedule_name, cron_expression, job_type, payload, max_attempts,
  priority, timeout_ms, force_kill_on_timeout, tags, timezone,
- allow_overlap, next_run_at)
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
+ allow_overlap, next_run_at, retry_delay, retry_backoff, retry_delay_max)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
  RETURNING id`,
  [
  input.scheduleName,
@@ -1769,7 +2188,10 @@ var PostgresBackend = class {
  input.tags ?? null,
  input.timezone,
  input.allowOverlap,
- input.nextRunAt
+ input.nextRunAt,
+ input.retryDelay,
+ input.retryBackoff,
+ input.retryDelayMax
  ]
  );
  const id = result.rows[0].id;
@@ -1799,7 +2221,9 @@ var PostgresBackend = class {
  timezone, allow_overlap AS "allowOverlap", status,
  last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
  next_run_at AS "nextRunAt",
- created_at AS "createdAt", updated_at AS "updatedAt"
+ created_at AS "createdAt", updated_at AS "updatedAt",
+ retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
+ retry_delay_max AS "retryDelayMax"
  FROM cron_schedules WHERE id = $1`,
  [id]
  );
@@ -1824,7 +2248,9 @@ var PostgresBackend = class {
  timezone, allow_overlap AS "allowOverlap", status,
  last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
  next_run_at AS "nextRunAt",
- created_at AS "createdAt", updated_at AS "updatedAt"
+ created_at AS "createdAt", updated_at AS "updatedAt",
+ retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
+ retry_delay_max AS "retryDelayMax"
  FROM cron_schedules WHERE schedule_name = $1`,
  [name]
  );
@@ -1848,7 +2274,9 @@ var PostgresBackend = class {
  timezone, allow_overlap AS "allowOverlap", status,
  last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
  next_run_at AS "nextRunAt",
- created_at AS "createdAt", updated_at AS "updatedAt"
+ created_at AS "createdAt", updated_at AS "updatedAt",
+ retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
+ retry_delay_max AS "retryDelayMax"
  FROM cron_schedules`;
  const params = [];
  if (status) {
@@ -1953,6 +2381,18 @@ var PostgresBackend = class {
  updateFields.push(`allow_overlap = $${paramIdx++}`);
  params.push(updates.allowOverlap);
  }
+ if (updates.retryDelay !== void 0) {
+ updateFields.push(`retry_delay = $${paramIdx++}`);
+ params.push(updates.retryDelay);
+ }
+ if (updates.retryBackoff !== void 0) {
+ updateFields.push(`retry_backoff = $${paramIdx++}`);
+ params.push(updates.retryBackoff);
+ }
+ if (updates.retryDelayMax !== void 0) {
+ updateFields.push(`retry_delay_max = $${paramIdx++}`);
+ params.push(updates.retryDelayMax);
+ }
  if (nextRunAt !== void 0) {
  updateFields.push(`next_run_at = $${paramIdx++}`);
  params.push(nextRunAt);
@@ -1988,7 +2428,9 @@ var PostgresBackend = class {
  timezone, allow_overlap AS "allowOverlap", status,
  last_enqueued_at AS "lastEnqueuedAt", last_job_id AS "lastJobId",
  next_run_at AS "nextRunAt",
- created_at AS "createdAt", updated_at AS "updatedAt"
+ created_at AS "createdAt", updated_at AS "updatedAt",
+ retry_delay AS "retryDelay", retry_backoff AS "retryBackoff",
+ retry_delay_max AS "retryDelayMax"
  FROM cron_schedules
  WHERE status = 'active'
  AND next_run_at IS NOT NULL
@@ -2272,6 +2714,9 @@ local forceKillOnTimeout = ARGV[7]
  local tagsJson = ARGV[8] -- "null" or JSON array string
  local idempotencyKey = ARGV[9] -- "null" string if not set
  local nowMs = tonumber(ARGV[10])
+ local retryDelay = ARGV[11] -- "null" or seconds string
+ local retryBackoff = ARGV[12] -- "null" or "true"/"false"
+ local retryDelayMax = ARGV[13] -- "null" or seconds string

  -- Idempotency check
  if idempotencyKey ~= "null" then
@@ -2315,7 +2760,10 @@ redis.call('HMSET', jobKey,
  'idempotencyKey', idempotencyKey,
  'waitUntil', 'null',
  'waitTokenId', 'null',
- 'stepData', 'null'
+ 'stepData', 'null',
+ 'retryDelay', retryDelay,
+ 'retryBackoff', retryBackoff,
+ 'retryDelayMax', retryDelayMax
  )

  -- Status index
@@ -2356,6 +2804,118 @@ end

  return id
  `;
+ var ADD_JOBS_SCRIPT = `
+ local prefix = KEYS[1]
+ local jobsJson = ARGV[1]
+ local nowMs = tonumber(ARGV[2])
+
+ local jobs = cjson.decode(jobsJson)
+ local results = {}
+
+ for i, job in ipairs(jobs) do
+ local jobType = job.jobType
+ local payloadJson = job.payload
+ local maxAttempts = tonumber(job.maxAttempts)
+ local priority = tonumber(job.priority)
+ local runAtMs = tostring(job.runAtMs)
+ local timeoutMs = tostring(job.timeoutMs)
+ local forceKillOnTimeout = tostring(job.forceKillOnTimeout)
+ local tagsJson = tostring(job.tags)
+ local idempotencyKey = tostring(job.idempotencyKey)
+ local retryDelay = tostring(job.retryDelay)
+ local retryBackoff = tostring(job.retryBackoff)
+ local retryDelayMax = tostring(job.retryDelayMax)
+
+ -- Idempotency check
+ local skip = false
+ if idempotencyKey ~= "null" then
+ local existing = redis.call('GET', prefix .. 'idempotency:' .. idempotencyKey)
+ if existing then
+ results[i] = tonumber(existing)
+ skip = true
+ end
+ end
+
+ if not skip then
+ -- Generate ID
+ local id = redis.call('INCR', prefix .. 'id_seq')
+ local jobKey = prefix .. 'job:' .. id
+ local runAt = runAtMs ~= "0" and tonumber(runAtMs) or nowMs
+
+ -- Store the job hash
+ redis.call('HMSET', jobKey,
+ 'id', id,
+ 'jobType', jobType,
+ 'payload', payloadJson,
+ 'status', 'pending',
+ 'maxAttempts', maxAttempts,
+ 'attempts', 0,
+ 'priority', priority,
+ 'runAt', runAt,
+ 'timeoutMs', timeoutMs,
+ 'forceKillOnTimeout', forceKillOnTimeout,
+ 'createdAt', nowMs,
+ 'updatedAt', nowMs,
+ 'lockedAt', 'null',
+ 'lockedBy', 'null',
+ 'nextAttemptAt', 'null',
+ 'pendingReason', 'null',
+ 'errorHistory', '[]',
+ 'failureReason', 'null',
+ 'completedAt', 'null',
+ 'startedAt', 'null',
+ 'lastRetriedAt', 'null',
+ 'lastFailedAt', 'null',
+ 'lastCancelledAt', 'null',
+ 'tags', tagsJson,
+ 'idempotencyKey', idempotencyKey,
+ 'waitUntil', 'null',
+ 'waitTokenId', 'null',
+ 'stepData', 'null',
+ 'retryDelay', retryDelay,
+ 'retryBackoff', retryBackoff,
+ 'retryDelayMax', retryDelayMax
+ )
+
+ -- Status index
+ redis.call('SADD', prefix .. 'status:pending', id)
+
+ -- Type index
+ redis.call('SADD', prefix .. 'type:' .. jobType, id)
+
+ -- Tag indexes
+ if tagsJson ~= "null" then
+ local tags = cjson.decode(tagsJson)
+ for _, tag in ipairs(tags) do
+ redis.call('SADD', prefix .. 'tag:' .. tag, id)
+ end
+ for _, tag in ipairs(tags) do
+ redis.call('SADD', prefix .. 'job:' .. id .. ':tags', tag)
+ end
+ end
+
+ -- Idempotency mapping
+ if idempotencyKey ~= "null" then
+ redis.call('SET', prefix .. 'idempotency:' .. idempotencyKey, id)
+ end
+
+ -- All-jobs sorted set
+ redis.call('ZADD', prefix .. 'all', nowMs, id)
+
+ -- Queue or delayed
+ if runAt <= nowMs then
+ local score = priority * ${SCORE_RANGE} + (${SCORE_RANGE} - nowMs)
+ redis.call('ZADD', prefix .. 'queue', score, id)
+ else
+ redis.call('ZADD', prefix .. 'delayed', runAt, id)
+ end
+
+ results[i] = id
+ end
+ end
+
+ return results
+ `;
  var GET_NEXT_BATCH_SCRIPT = `
  local prefix = KEYS[1]
  local workerId = ARGV[1]
@@ -2508,16 +3068,24 @@ var COMPLETE_JOB_SCRIPT = `
  local prefix = KEYS[1]
  local jobId = ARGV[1]
  local nowMs = ARGV[2]
+ local outputJson = ARGV[3]
  local jk = prefix .. 'job:' .. jobId

- redis.call('HMSET', jk,
+ local fields = {
  'status', 'completed',
  'updatedAt', nowMs,
  'completedAt', nowMs,
  'stepData', 'null',
  'waitUntil', 'null',
  'waitTokenId', 'null'
- )
+ }
+
+ if outputJson ~= '__NONE__' then
+ fields[#fields + 1] = 'output'
+ fields[#fields + 1] = outputJson
+ end
+
+ redis.call('HMSET', jk, unpack(fields))
  redis.call('SREM', prefix .. 'status:processing', jobId)
  redis.call('SADD', prefix .. 'status:completed', jobId)

@@ -2534,11 +3102,38 @@ local jk = prefix .. 'job:' .. jobId
  local attempts = tonumber(redis.call('HGET', jk, 'attempts'))
  local maxAttempts = tonumber(redis.call('HGET', jk, 'maxAttempts'))

- -- Compute next_attempt_at: 2^attempts minutes from now
+ -- Read per-job retry config (may be "null")
+ local rdRaw = redis.call('HGET', jk, 'retryDelay')
+ local rbRaw = redis.call('HGET', jk, 'retryBackoff')
+ local rmRaw = redis.call('HGET', jk, 'retryDelayMax')
+
  local nextAttemptAt = 'null'
  if attempts < maxAttempts then
- local delayMs = math.pow(2, attempts) * 60000
- nextAttemptAt = nowMs + delayMs
+ local allNull = (rdRaw == 'null' or rdRaw == false)
+ and (rbRaw == 'null' or rbRaw == false)
+ and (rmRaw == 'null' or rmRaw == false)
+ if allNull then
+ -- Legacy formula: 2^attempts minutes
+ local delayMs = math.pow(2, attempts) * 60000
+ nextAttemptAt = nowMs + delayMs
+ else
+ local retryDelaySec = 60
+ if rdRaw and rdRaw ~= 'null' then retryDelaySec = tonumber(rdRaw) end
+ local useBackoff = true
+ if rbRaw and rbRaw ~= 'null' then useBackoff = (rbRaw == 'true') end
+ local maxDelaySec = nil
+ if rmRaw and rmRaw ~= 'null' then maxDelaySec = tonumber(rmRaw) end
+
+ local delaySec
+ if useBackoff then
+ delaySec = retryDelaySec * math.pow(2, attempts)
+ if maxDelaySec then delaySec = math.min(delaySec, maxDelaySec) end
+ delaySec = delaySec * (0.5 + 0.5 * math.random())
+ else
+ delaySec = retryDelaySec
+ end
+ nextAttemptAt = nowMs + math.floor(delaySec * 1000)
+ end
  end

  -- Append to error_history
@@ -2950,9 +3545,21 @@ function deserializeJob(h) {
  progress: numOrNull(h.progress),
  waitUntil: dateOrNull(h.waitUntil),
  waitTokenId: nullish(h.waitTokenId),
- stepData: parseStepData(h.stepData)
+ stepData: parseStepData(h.stepData),
+ retryDelay: numOrNull(h.retryDelay),
+ retryBackoff: h.retryBackoff === "true" ? true : h.retryBackoff === "false" ? false : null,
+ retryDelayMax: numOrNull(h.retryDelayMax),
+ output: parseJsonField(h.output)
  };
  }
+ function parseJsonField(raw) {
+ if (!raw || raw === "null") return null;
+ try {
+ return JSON.parse(raw);
+ } catch {
+ return null;
+ }
+ }
  function parseStepData(raw) {
  if (!raw || raw === "null") return void 0;
  try {
@@ -2962,7 +3569,23 @@ function parseStepData(raw) {
  }
  }
  var RedisBackend = class {
- constructor(redisConfig) {
+ /**
+ * Create a RedisBackend.
+ *
+ * @param configOrClient - Either `redisConfig` from the config file (the
+ * library creates a new ioredis client) or an existing ioredis client
+ * instance (bring your own).
+ * @param keyPrefix - Key prefix, only used when `configOrClient` is an
+ * external client. Ignored when `redisConfig` is passed (uses
+ * `redisConfig.keyPrefix` instead). Default: `'dq:'`.
+ */
+ constructor(configOrClient, keyPrefix) {
+ if (configOrClient && typeof configOrClient.eval === "function") {
+ this.client = configOrClient;
+ this.prefix = keyPrefix ?? "dq:";
+ return;
+ }
+ const redisConfig = configOrClient;
  let IORedis;
  try {
  const _require = createRequire(import.meta.url);
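A sketch of the new bring-your-own-client path: the duck-type check above accepts anything exposing an `eval` function. Whether `RedisBackend` is re-exported from the package's public entry point is not visible in this diff, so treat the construction below as illustrative:

```js
import Redis from 'ioredis';

const redis = new Redis(process.env.REDIS_URL);
// keyPrefix (second argument) only applies on this external-client path.
const backend = new RedisBackend(redis, 'myapp:');
```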
@@ -3035,8 +3658,16 @@ var RedisBackend = class {
  timeoutMs = void 0,
  forceKillOnTimeout = false,
  tags = void 0,
- idempotencyKey = void 0
- }) {
+ idempotencyKey = void 0,
+ retryDelay = void 0,
+ retryBackoff = void 0,
+ retryDelayMax = void 0
+ }, options) {
+ if (options?.db) {
+ throw new Error(
+ "The db option is not supported with the Redis backend. Transactional job creation is only available with PostgreSQL."
+ );
+ }
  const now = this.nowMs();
  const runAtMs = runAt ? runAt.getTime() : 0;
  const result = await this.client.eval(
@@ -3052,7 +3683,10 @@ var RedisBackend = class {
  forceKillOnTimeout ? "true" : "false",
  tags ? JSON.stringify(tags) : "null",
  idempotencyKey ?? "null",
- now
+ now,
+ retryDelay !== void 0 ? retryDelay.toString() : "null",
+ retryBackoff !== void 0 ? retryBackoff.toString() : "null",
+ retryDelayMax !== void 0 ? retryDelayMax.toString() : "null"
  );
  const jobId = Number(result);
  log(
@@ -3066,6 +3700,58 @@ var RedisBackend = class {
  });
  return jobId;
  }
+ /**
+ * Insert multiple jobs atomically via a single Lua script.
+ * Returns IDs in the same order as the input array.
+ */
+ async addJobs(jobs, options) {
+ if (jobs.length === 0) return [];
+ if (options?.db) {
+ throw new Error(
+ "The db option is not supported with the Redis backend. Transactional job creation is only available with PostgreSQL."
+ );
+ }
+ const now = this.nowMs();
+ const jobsPayload = jobs.map((job) => ({
+ jobType: job.jobType,
+ payload: JSON.stringify(job.payload),
+ maxAttempts: job.maxAttempts ?? 3,
+ priority: job.priority ?? 0,
+ runAtMs: job.runAt ? job.runAt.getTime() : 0,
+ timeoutMs: job.timeoutMs !== void 0 ? job.timeoutMs.toString() : "null",
+ forceKillOnTimeout: job.forceKillOnTimeout ? "true" : "false",
+ tags: job.tags ? JSON.stringify(job.tags) : "null",
+ idempotencyKey: job.idempotencyKey ?? "null",
+ retryDelay: job.retryDelay !== void 0 ? job.retryDelay.toString() : "null",
+ retryBackoff: job.retryBackoff !== void 0 ? job.retryBackoff.toString() : "null",
+ retryDelayMax: job.retryDelayMax !== void 0 ? job.retryDelayMax.toString() : "null"
+ }));
+ const result = await this.client.eval(
+ ADD_JOBS_SCRIPT,
+ 1,
+ this.prefix,
+ JSON.stringify(jobsPayload),
+ now
+ );
+ const ids = result.map(Number);
+ log(`Batch-inserted ${jobs.length} jobs, IDs: [${ids.join(", ")}]`);
+ const existingIdempotencyIds = /* @__PURE__ */ new Set();
+ for (let i = 0; i < jobs.length; i++) {
+ if (jobs[i].idempotencyKey) {
+ if (existingIdempotencyIds.has(ids[i])) {
+ continue;
+ }
+ existingIdempotencyIds.add(ids[i]);
+ }
+ await this.recordJobEvent(ids[i], "added" /* Added */, {
+ jobType: jobs[i].jobType,
+ payload: jobs[i].payload,
+ tags: jobs[i].tags,
+ idempotencyKey: jobs[i].idempotencyKey
+ });
+ }
+ return ids;
+ }
  async getJob(id) {
  const data = await this.client.hgetall(`${this.prefix}job:${id}`);
  if (!data || Object.keys(data).length === 0) {
@@ -3171,9 +3857,17 @@ var RedisBackend = class {
  }
  return jobs;
  }
- async completeJob(jobId) {
+ async completeJob(jobId, output) {
  const now = this.nowMs();
- await this.client.eval(COMPLETE_JOB_SCRIPT, 1, this.prefix, jobId, now);
+ const outputArg = output !== void 0 ? JSON.stringify(output) : "__NONE__";
+ await this.client.eval(
+ COMPLETE_JOB_SCRIPT,
+ 1,
+ this.prefix,
+ jobId,
+ now,
+ outputArg
+ );
  await this.recordJobEvent(jobId, "completed" /* Completed */);
  log(`Completed job ${jobId}`);
  }
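
completeJob now accepts an optional output that is JSON-serialized into the job hash ("__NONE__" marks its absence). A hedged sketch of the intended flow, assuming a handler's return value is what reaches completeJob and that the stored value surfaces as job.output:

// Sketch: persisting a handler result as job output. The handler
// signature and the `output` field on the fetched job are assumptions
// consistent with the serialization above.
const processor = jobQueue.createProcessor({
  send_email: async (payload) => {
    // ... deliver the email ...
    return { messageId: 'abc123' }; // forwarded to completeJob as output
  },
});

// Given a jobId from an earlier addJob:
const job = await jobQueue.getJob(jobId);
console.log(job?.output); // => { messageId: 'abc123' } once completed
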
@@ -3226,6 +3920,22 @@ var RedisBackend = class {
  log(`Error updating progress for job ${jobId}: ${error}`);
  }
  }
+ // ── Output ────────────────────────────────────────────────────────────
+ async updateOutput(jobId, output) {
+ try {
+ const now = this.nowMs();
+ await this.client.hset(
+ `${this.prefix}job:${jobId}`,
+ "output",
+ JSON.stringify(output),
+ "updatedAt",
+ now.toString()
+ );
+ log(`Updated output for job ${jobId}`);
+ } catch (error) {
+ log(`Error updating output for job ${jobId}: ${error}`);
+ }
+ }
  // ── Job management ────────────────────────────────────────────────────
  async retryJob(jobId) {
  const now = this.nowMs();
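
updateOutput lets a running job persist output before completion; it backs the context-level setOutput helper wired up earlier in this diff. A sketch, assuming a (payload, ctx) handler signature:

// Sketch: streaming intermediate output from a long-running handler.
// The (payload, ctx) signature is an assumption; the hash write via
// updateOutput is what this diff shows.
const processor = jobQueue.createProcessor({
  import_csv: async (payload, ctx) => {
    await ctx.setOutput({ rowsProcessed: 0 }); // visible while still running
    // ... process rows, calling ctx.setOutput periodically ...
    return { rowsProcessed: 5000 };            // final output on completion
  },
});
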
@@ -3332,6 +4042,27 @@ var RedisBackend = class {
  }
  metadata.tags = updates.tags;
  }
+ if (updates.retryDelay !== void 0) {
+ fields.push(
+ "retryDelay",
+ updates.retryDelay !== null ? updates.retryDelay.toString() : "null"
+ );
+ metadata.retryDelay = updates.retryDelay;
+ }
+ if (updates.retryBackoff !== void 0) {
+ fields.push(
+ "retryBackoff",
+ updates.retryBackoff !== null ? updates.retryBackoff.toString() : "null"
+ );
+ metadata.retryBackoff = updates.retryBackoff;
+ }
+ if (updates.retryDelayMax !== void 0) {
+ fields.push(
+ "retryDelayMax",
+ updates.retryDelayMax !== null ? updates.retryDelayMax.toString() : "null"
+ );
+ metadata.retryDelayMax = updates.retryDelayMax;
+ }
  if (fields.length === 0) {
  log(`No fields to update for job ${jobId}`);
  return;
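
editJob gains the same retry trio, and passing null clears a previously set field. A sketch, given a jobId from an earlier addJob:

// Sketch: retuning retry behavior on an already-queued job.
await jobQueue.editJob(jobId, {
  retryDelay: 5000,    // new base delay
  retryBackoff: true,  // switch to exponential backoff
  retryDelayMax: null, // null clears the cap (per the !== null checks above)
});
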
@@ -3806,7 +4537,13 @@ var RedisBackend = class {
  "createdAt",
  now.toString(),
  "updatedAt",
- now.toString()
+ now.toString(),
+ "retryDelay",
+ input.retryDelay !== null && input.retryDelay !== void 0 ? input.retryDelay.toString() : "null",
+ "retryBackoff",
+ input.retryBackoff !== null && input.retryBackoff !== void 0 ? input.retryBackoff.toString() : "null",
+ "retryDelayMax",
+ input.retryDelayMax !== null && input.retryDelayMax !== void 0 ? input.retryDelayMax.toString() : "null"
  ];
  await this.client.hmset(key, ...fields);
  await this.client.set(
@@ -3960,6 +4697,24 @@ var RedisBackend = class {
  if (updates.allowOverlap !== void 0) {
  fields.push("allowOverlap", updates.allowOverlap ? "true" : "false");
  }
+ if (updates.retryDelay !== void 0) {
+ fields.push(
+ "retryDelay",
+ updates.retryDelay !== null ? updates.retryDelay.toString() : "null"
+ );
+ }
+ if (updates.retryBackoff !== void 0) {
+ fields.push(
+ "retryBackoff",
+ updates.retryBackoff !== null ? updates.retryBackoff.toString() : "null"
+ );
+ }
+ if (updates.retryDelayMax !== void 0) {
+ fields.push(
+ "retryDelayMax",
+ updates.retryDelayMax !== null ? updates.retryDelayMax.toString() : "null"
+ );
+ }
  if (nextRunAt !== void 0) {
  const val = nextRunAt !== null ? nextRunAt.getTime().toString() : "null";
  fields.push("nextRunAt", val);
@@ -4078,7 +4833,10 @@ var RedisBackend = class {
  lastJobId: numOrNull(h.lastJobId),
  nextRunAt: dateOrNull(h.nextRunAt),
  createdAt: new Date(Number(h.createdAt)),
- updatedAt: new Date(Number(h.updatedAt))
+ updatedAt: new Date(Number(h.updatedAt)),
+ retryDelay: numOrNull(h.retryDelay),
+ retryBackoff: h.retryBackoff === "true" ? true : h.retryBackoff === "false" ? false : null,
+ retryDelayMax: numOrNull(h.retryDelayMax)
  };
  }
  // ── Private helpers (filters) ─────────────────────────────────────────
@@ -4201,14 +4959,37 @@ var initJobQueue = (config) => {
  let backend;
  if (backendType === "postgres") {
  const pgConfig = config;
- const pool = createPool(pgConfig.databaseConfig);
- backend = new PostgresBackend(pool);
+ if (pgConfig.pool) {
+ backend = new PostgresBackend(pgConfig.pool);
+ } else if (pgConfig.databaseConfig) {
+ const pool = createPool(pgConfig.databaseConfig);
+ backend = new PostgresBackend(pool);
+ } else {
+ throw new Error(
+ 'PostgreSQL backend requires either "databaseConfig" or "pool" to be provided.'
+ );
+ }
  } else if (backendType === "redis") {
- const redisConfig = config.redisConfig;
- backend = new RedisBackend(redisConfig);
+ const redisConfig = config;
+ if (redisConfig.client) {
+ backend = new RedisBackend(
+ redisConfig.client,
+ redisConfig.keyPrefix
+ );
+ } else if (redisConfig.redisConfig) {
+ backend = new RedisBackend(redisConfig.redisConfig);
+ } else {
+ throw new Error(
+ 'Redis backend requires either "redisConfig" or "client" to be provided.'
+ );
+ }
  } else {
  throw new Error(`Unknown backend: ${backendType}`);
  }
+ const emitter = new EventEmitter();
+ const emit = (event, data) => {
+ emitter.emit(event, data);
+ };
  const enqueueDueCronJobsImpl = async () => {
  const dueSchedules = await backend.getDueCronSchedules();
  let count = 0;
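
initJobQueue can now receive a pre-built pg Pool or ioredis client instead of connection config. A sketch; the pool, client, and keyPrefix keys come from this diff, while backendType as the top-level config key is an assumption (the hunk only shows the derived variable):

// Sketch: injecting existing connections. `backendType` as the config
// key is an assumption; `pool`, `client`, and `keyPrefix` are from this diff.
import { Pool } from 'pg';
import IORedis from 'ioredis';
import { initJobQueue } from '@nicnocquee/dataqueue';

const pgQueue = initJobQueue({
  backendType: 'postgres',
  pool: new Pool({ connectionString: process.env.DATABASE_URL }),
});

const redisQueue = initJobQueue({
  backendType: 'redis',
  client: new IORedis(process.env.REDIS_URL),
  keyPrefix: 'myapp:',
});
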
@@ -4236,7 +5017,10 @@ var initJobQueue = (config) => {
  priority: schedule.priority,
  timeoutMs: schedule.timeoutMs ?? void 0,
  forceKillOnTimeout: schedule.forceKillOnTimeout,
- tags: schedule.tags
+ tags: schedule.tags,
+ retryDelay: schedule.retryDelay ?? void 0,
+ retryBackoff: schedule.retryBackoff ?? void 0,
+ retryDelayMax: schedule.retryDelayMax ?? void 0
  });
  const nextRunAt = getNextCronOccurrence(
  schedule.cronExpression,
@@ -4255,7 +5039,21 @@ var initJobQueue = (config) => {
  return {
  // Job queue operations
  addJob: withLogContext(
- (job) => backend.addJob(job),
+ async (job, options) => {
+ const jobId = await backend.addJob(job, options);
+ emit("job:added", { jobId, jobType: job.jobType });
+ return jobId;
+ },
+ config.verbose ?? false
+ ),
+ addJobs: withLogContext(
+ async (jobs, options) => {
+ const jobIds = await backend.addJobs(jobs, options);
+ for (let i = 0; i < jobIds.length; i++) {
+ emit("job:added", { jobId: jobIds[i], jobType: jobs[i].jobType });
+ }
+ return jobIds;
+ },
  config.verbose ?? false
  ),
  getJob: withLogContext(
@@ -4274,13 +5072,16 @@ var initJobQueue = (config) => {
  (filters, limit, offset) => backend.getJobs(filters, limit, offset),
  config.verbose ?? false
  ),
- retryJob: (jobId) => backend.retryJob(jobId),
+ retryJob: async (jobId) => {
+ await backend.retryJob(jobId);
+ emit("job:retried", { jobId });
+ },
  cleanupOldJobs: (daysToKeep, batchSize) => backend.cleanupOldJobs(daysToKeep, batchSize),
  cleanupOldJobEvents: (daysToKeep, batchSize) => backend.cleanupOldJobEvents(daysToKeep, batchSize),
- cancelJob: withLogContext(
- (jobId) => backend.cancelJob(jobId),
- config.verbose ?? false
- ),
+ cancelJob: withLogContext(async (jobId) => {
+ await backend.cancelJob(jobId);
+ emit("job:cancelled", { jobId });
+ }, config.verbose ?? false),
  editJob: withLogContext(
  (jobId, updates) => backend.editJob(jobId, updates),
  config.verbose ?? false
@@ -4305,9 +5106,17 @@ var initJobQueue = (config) => {
  config.verbose ?? false
  ),
  // Job processing — automatically enqueues due cron jobs before each batch
- createProcessor: (handlers, options) => createProcessor(backend, handlers, options, async () => {
- await enqueueDueCronJobsImpl();
- }),
+ createProcessor: (handlers, options) => createProcessor(
+ backend,
+ handlers,
+ options,
+ async () => {
+ await enqueueDueCronJobsImpl();
+ },
+ emit
+ ),
+ // Background supervisor — automated maintenance
+ createSupervisor: (options) => createSupervisor(backend, options, emit),
  // Job events
  getJobEvents: withLogContext(
  (jobId) => backend.getJobEvents(jobId),
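
createSupervisor is new in this release; only its wiring (backend, options, emit) is visible here, so its option shape and lifecycle are not confirmed by this diff. A heavily hedged sketch:

// Sketch only: the supervisor factory mirrors createProcessor's wiring.
// The empty options object is an assumption; this diff shows just
// `createSupervisor(backend, options, emit)` and the "automated
// maintenance" comment above.
const supervisor = jobQueue.createSupervisor({});
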
@@ -4354,7 +5163,10 @@ var initJobQueue = (config) => {
  tags: options.tags,
  timezone: options.timezone ?? "UTC",
  allowOverlap: options.allowOverlap ?? false,
- nextRunAt
+ nextRunAt,
+ retryDelay: options.retryDelay ?? null,
+ retryBackoff: options.retryBackoff ?? null,
+ retryDelayMax: options.retryDelayMax ?? null
  };
  return backend.addCronSchedule(input);
  },
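
Cron schedules now persist the retry trio and copy it onto every job they enqueue (see the schedule.retryDelay forwarding above). A sketch; the queue-level method name is assumed to mirror the backend's addCronSchedule:

// Sketch: retry settings on a schedule propagate to each enqueued job.
// The method name is an assumption; cronExpression, timezone, and the
// retry options appear in this diff.
await jobQueue.addCronSchedule({
  jobType: 'nightly_report',
  payload: {},
  cronExpression: '0 2 * * *',
  timezone: 'UTC',
  retryDelay: 30000,
  retryBackoff: true,
  retryDelayMax: 300000,
});
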
@@ -4406,6 +5218,23 @@ var initJobQueue = (config) => {
  () => enqueueDueCronJobsImpl(),
  config.verbose ?? false
  ),
+ // Event hooks
+ on: (event, listener) => {
+ emitter.on(event, listener);
+ },
+ once: (event, listener) => {
+ emitter.once(event, listener);
+ },
+ off: (event, listener) => {
+ emitter.off(event, listener);
+ },
+ removeAllListeners: (event) => {
+ if (event) {
+ emitter.removeAllListeners(event);
+ } else {
+ emitter.removeAllListeners();
+ }
+ },
  // Advanced access
  getPool: () => {
  if (!(backend instanceof PostgresBackend)) {
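
Finally, the queue exposes EventEmitter-style hooks. The event names below ("job:added", "job:retried", "job:cancelled") and their payload shapes are exactly the emit calls in this diff:

// Sketch: subscribing to queue lifecycle events on an initialized jobQueue.
jobQueue.on('job:added', ({ jobId, jobType }) => {
  console.log(`queued ${jobType} as job #${jobId}`);
});
jobQueue.once('job:retried', ({ jobId }) => {
  console.log(`job #${jobId} manually retried`);
});
// Detach listeners on shutdown:
jobQueue.removeAllListeners();
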