@nicnocquee/dataqueue 1.34.0 → 1.35.0-beta.20260224110011

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.cts CHANGED
@@ -3,6 +3,44 @@ import { Pool } from 'pg';
3
3
  import { Cron } from 'croner';
4
4
 
5
5
  type JobType<PayloadMap> = keyof PayloadMap & string;
6
+ /**
7
+ * Abstract database client interface for transactional job creation.
8
+ * Compatible with `pg.Pool`, `pg.PoolClient`, `pg.Client`, or any object
9
+ * that exposes a `.query()` method matching the `pg` signature.
10
+ */
11
+ interface DatabaseClient {
12
+ query(text: string, values?: any[]): Promise<{
13
+ rows: any[];
14
+ rowCount: number | null;
15
+ }>;
16
+ }
17
+ /**
18
+ * Options for `addJob()` beyond the job itself.
19
+ * Use `db` to insert the job within an existing database transaction.
20
+ */
21
+ interface AddJobOptions {
22
+ /**
23
+ * An external database client (e.g., a `pg.PoolClient` inside a transaction).
24
+ * When provided, the INSERT runs on this client instead of the internal pool,
25
+ * so the job is part of the caller's transaction.
26
+ *
27
+ * **PostgreSQL only.** Throws if used with the Redis backend.
28
+ */
29
+ db?: DatabaseClient;
30
+ }
31
+ /**
32
+ * Optional grouping metadata for a job.
33
+ * Use `id` to enforce global per-group concurrency limits when
34
+ * `ProcessorOptions.groupConcurrency` is set.
35
+ *
36
+ * `tier` is reserved for future tier-based policies.
37
+ */
38
+ interface JobGroup {
39
+ /** Stable group identifier (for example: tenant ID, user ID, organization ID). */
40
+ id: string;
41
+ /** Optional tier label reserved for future tier-based concurrency controls. */
42
+ tier?: string;
43
+ }
6
44
  interface JobOptions<PayloadMap, T extends JobType<PayloadMap>> {
7
45
  jobType: T;
8
46
  payload: PayloadMap[T];
@@ -75,6 +113,32 @@ interface JobOptions<PayloadMap, T extends JobType<PayloadMap>> {
75
113
  * Once a key exists, it cannot be reused until the job is cleaned up (via `cleanupOldJobs`).
76
114
  */
77
115
  idempotencyKey?: string;
116
+ /**
117
+ * Base delay between retries in seconds. When `retryBackoff` is true (the default),
118
+ * this is the base for exponential backoff: `retryDelay * 2^attempts`.
119
+ * When `retryBackoff` is false, retries use this fixed delay.
120
+ * @default 60
121
+ */
122
+ retryDelay?: number;
123
+ /**
124
+ * Whether to use exponential backoff for retries. When true, delay doubles
125
+ * with each attempt and includes jitter to prevent thundering herd.
126
+ * When false, a fixed `retryDelay` is used between every retry.
127
+ * @default true
128
+ */
129
+ retryBackoff?: boolean;
130
+ /**
131
+ * Maximum delay between retries in seconds. Caps the exponential backoff
132
+ * so retries never wait longer than this value. Only meaningful when
133
+ * `retryBackoff` is true. No limit when omitted.
134
+ */
135
+ retryDelayMax?: number;
136
+ /**
137
+ * Optional group metadata for this job.
138
+ * When `ProcessorOptions.groupConcurrency` is configured, grouped jobs are
139
+ * globally limited by `group.id` across all workers/instances.
140
+ */
141
+ group?: JobGroup;
78
142
  }
79
143
  /**
80
144
  * Options for editing a pending job.
@@ -187,6 +251,31 @@ interface JobRecord<PayloadMap, T extends JobType<PayloadMap>> {
187
251
  * Updated by the handler via `ctx.setProgress(percent)`.
188
252
  */
189
253
  progress?: number | null;
254
+ /**
255
+ * Handler output stored via `ctx.setOutput(data)` or by returning a value
256
+ * from the handler. `null` if no output has been stored.
257
+ */
258
+ output?: unknown;
259
+ /**
260
+ * Base delay between retries in seconds, or null if using legacy default.
261
+ */
262
+ retryDelay?: number | null;
263
+ /**
264
+ * Whether exponential backoff is enabled for retries, or null if using legacy default.
265
+ */
266
+ retryBackoff?: boolean | null;
267
+ /**
268
+ * Maximum delay cap for retries in seconds, or null if no cap.
269
+ */
270
+ retryDelayMax?: number | null;
271
+ /**
272
+ * Group identifier for this job, if provided at enqueue time.
273
+ */
274
+ groupId?: string | null;
275
+ /**
276
+ * Group tier for this job, if provided at enqueue time.
277
+ */
278
+ groupTier?: string | null;
190
279
  }
191
280
  /**
192
281
  * Callback registered via `onTimeout`. Invoked when the timeout fires, before the AbortSignal is triggered.
@@ -274,6 +363,16 @@ interface JobContext {
274
363
  * @throws If percent is outside the 0-100 range.
275
364
  */
276
365
  setProgress: (percent: number) => Promise<void>;
366
+ /**
367
+ * Store an output/result for this job. The value is persisted to the database
368
+ * as JSONB and can be read by clients via `getJob()` or the React SDK's `useJob()` hook.
369
+ *
370
+ * Can be called multiple times — each call overwrites the previous value.
371
+ * If `setOutput()` is called, the handler's return value is ignored.
372
+ *
373
+ * @param data - Any JSON-serializable value to store as the job's output.
374
+ */
375
+ setOutput: (data: unknown) => Promise<void>;
277
376
  }
278
377
  /**
279
378
  * Duration specification for `ctx.waitFor()`.
@@ -349,7 +448,7 @@ interface WaitpointRecord {
349
448
  completedAt: Date | null;
350
449
  tags: string[] | null;
351
450
  }
352
- type JobHandler<PayloadMap, T extends keyof PayloadMap> = (payload: PayloadMap[T], signal: AbortSignal, ctx: JobContext) => Promise<void>;
451
+ type JobHandler<PayloadMap, T extends keyof PayloadMap> = (payload: PayloadMap[T], signal: AbortSignal, ctx: JobContext) => Promise<unknown>;
353
452
  type JobHandlers<PayloadMap> = {
354
453
  [K in keyof PayloadMap]: JobHandler<PayloadMap, K>;
355
454
  };
@@ -368,6 +467,13 @@ interface ProcessorOptions {
368
467
  * - Set to a lower value to avoid resource exhaustion.
369
468
  */
370
469
  concurrency?: number;
470
+ /**
471
+ * Global per-group concurrency limit across all workers/instances.
472
+ * - Applies only to jobs with `group.id` set.
473
+ * - Jobs without a group are unaffected.
474
+ * - Disabled when omitted.
475
+ */
476
+ groupConcurrency?: number;
371
477
  /**
372
478
  * The interval in milliseconds to poll for new jobs.
373
479
  * - If not provided, the processor will process jobs every 5 seconds when startInBackground is called.
@@ -417,6 +523,88 @@ interface Processor {
417
523
  */
418
524
  start: () => Promise<number>;
419
525
  }
526
+ interface SupervisorOptions {
527
+ /**
528
+ * How often the maintenance loop runs, in milliseconds.
529
+ * @default 60000 (1 minute)
530
+ */
531
+ intervalMs?: number;
532
+ /**
533
+ * Reclaim jobs stuck in `processing` longer than this many minutes.
534
+ * @default 10
535
+ */
536
+ stuckJobsTimeoutMinutes?: number;
537
+ /**
538
+ * Auto-delete completed jobs older than this many days. Set to 0 to disable.
539
+ * @default 30
540
+ */
541
+ cleanupJobsDaysToKeep?: number;
542
+ /**
543
+ * Auto-delete job events older than this many days. Set to 0 to disable.
544
+ * @default 30
545
+ */
546
+ cleanupEventsDaysToKeep?: number;
547
+ /**
548
+ * Batch size for cleanup deletions.
549
+ * @default 1000
550
+ */
551
+ cleanupBatchSize?: number;
552
+ /**
553
+ * Whether to reclaim stuck jobs each cycle.
554
+ * @default true
555
+ */
556
+ reclaimStuckJobs?: boolean;
557
+ /**
558
+ * Whether to expire timed-out waitpoint tokens each cycle.
559
+ * @default true
560
+ */
561
+ expireTimedOutTokens?: boolean;
562
+ /**
563
+ * Called when a maintenance task throws. One failure does not block other tasks.
564
+ * @default console.error
565
+ */
566
+ onError?: (error: Error) => void;
567
+ /** Enable verbose logging. */
568
+ verbose?: boolean;
569
+ }
570
+ interface SupervisorRunResult {
571
+ /** Number of stuck jobs reclaimed back to pending. */
572
+ reclaimedJobs: number;
573
+ /** Number of old completed jobs deleted. */
574
+ cleanedUpJobs: number;
575
+ /** Number of old job events deleted. */
576
+ cleanedUpEvents: number;
577
+ /** Number of timed-out waitpoint tokens expired. */
578
+ expiredTokens: number;
579
+ }
580
+ interface Supervisor {
581
+ /**
582
+ * Run all maintenance tasks once and return the results.
583
+ * Ideal for serverless or cron-triggered invocations.
584
+ */
585
+ start: () => Promise<SupervisorRunResult>;
586
+ /**
587
+ * Start the maintenance loop in the background.
588
+ * Runs every `intervalMs` milliseconds (default: 60000, i.e. 1 minute).
589
+ * Call `stop()` or `stopAndDrain()` to halt the loop.
590
+ */
591
+ startInBackground: () => void;
592
+ /**
593
+ * Stop the background maintenance loop immediately.
594
+ * Does not wait for an in-flight maintenance run to complete.
595
+ */
596
+ stop: () => void;
597
+ /**
598
+ * Stop the background loop and wait for the current maintenance run
599
+ * (if any) to finish before resolving.
600
+ *
601
+ * @param timeoutMs - Maximum time to wait (default: 30000 ms).
601
+ * If the run does not finish within this time the promise resolves anyway.
603
+ */
604
+ stopAndDrain: (timeoutMs?: number) => Promise<void>;
605
+ /** Whether the background maintenance loop is currently running. */
606
+ isRunning: () => boolean;
607
+ }
420
608
  interface DatabaseSSLConfig {
421
609
  /**
422
610
  * CA certificate as PEM string or file path. If the value starts with 'file://', it will be loaded from file, otherwise treated as PEM string.
@@ -438,10 +626,13 @@ interface DatabaseSSLConfig {
438
626
  /**
439
627
  * Configuration for PostgreSQL backend (default).
440
628
  * Backward-compatible: omitting `backend` defaults to 'postgres'.
629
+ *
630
+ * Provide either `databaseConfig` (the library creates a pool) or `pool`
631
+ * (bring your own `pg.Pool`). At least one must be set.
441
632
  */
442
633
  interface PostgresJobQueueConfig {
443
634
  backend?: 'postgres';
444
- databaseConfig: {
635
+ databaseConfig?: {
445
636
  connectionString?: string;
446
637
  host?: string;
447
638
  port?: number;
@@ -467,6 +658,11 @@ interface PostgresJobQueueConfig {
467
658
  */
468
659
  connectionTimeoutMillis?: number;
469
660
  };
661
+ /**
662
+ * Bring your own `pg.Pool` instance. When provided, `databaseConfig` is
663
+ * ignored and the library will not close the pool on shutdown.
664
+ */
665
+ pool?: pg.Pool;
470
666
  verbose?: boolean;
471
667
  }
472
668
  /**
@@ -480,10 +676,13 @@ interface RedisTLSConfig {
480
676
  }
481
677
  /**
482
678
  * Configuration for Redis backend.
679
+ *
680
+ * Provide either `redisConfig` (the library creates an ioredis client) or
681
+ * `client` (bring your own ioredis instance). At least one must be set.
483
682
  */
484
683
  interface RedisJobQueueConfig {
485
684
  backend: 'redis';
486
- redisConfig: {
685
+ redisConfig?: {
487
686
  /** Redis URL (e.g. redis://localhost:6379) */
488
687
  url?: string;
489
688
  host?: string;
@@ -498,6 +697,17 @@ interface RedisJobQueueConfig {
498
697
  */
499
698
  keyPrefix?: string;
500
699
  };
700
+ /**
701
+ * Bring your own ioredis client instance. When provided, `redisConfig` is
702
+ * ignored and the library will not close the client on shutdown.
703
+ * Use `keyPrefix` to set the key namespace (default: 'dq:').
704
+ */
705
+ client?: unknown;
706
+ /**
707
+ * Key prefix when using an external `client`. Ignored when `redisConfig` is used
708
+ * (set `redisConfig.keyPrefix` instead). Default: 'dq:'.
709
+ */
710
+ keyPrefix?: string;
501
711
  verbose?: boolean;
502
712
  }
503
713
  /**
@@ -544,6 +754,12 @@ interface CronScheduleOptions<PayloadMap, T extends JobType<PayloadMap>> {
544
754
  * is still pending, processing, or waiting.
545
755
  */
546
756
  allowOverlap?: boolean;
757
+ /** Base delay between retries in seconds for each job instance (default: 60). */
758
+ retryDelay?: number;
759
+ /** Whether to use exponential backoff for retries (default: true). */
760
+ retryBackoff?: boolean;
761
+ /** Maximum delay cap for retries in seconds. */
762
+ retryDelayMax?: number;
547
763
  }
548
764
  /**
549
765
  * A persisted cron schedule record.
@@ -567,6 +783,9 @@ interface CronScheduleRecord {
567
783
  nextRunAt: Date | null;
568
784
  createdAt: Date;
569
785
  updatedAt: Date;
786
+ retryDelay: number | null;
787
+ retryBackoff: boolean | null;
788
+ retryDelayMax: number | null;
570
789
  }
571
790
  /**
572
791
  * Options for editing an existing cron schedule.
@@ -582,12 +801,94 @@ interface EditCronScheduleOptions {
582
801
  tags?: string[] | null;
583
802
  timezone?: string;
584
803
  allowOverlap?: boolean;
804
+ retryDelay?: number | null;
805
+ retryBackoff?: boolean | null;
806
+ retryDelayMax?: number | null;
807
+ }
808
+ /**
809
+ * Payload types for each event emitted by the job queue.
810
+ */
811
+ interface QueueEventMap {
812
+ /** Fired after a job is successfully added to the queue. */
813
+ 'job:added': {
814
+ jobId: number;
815
+ jobType: string;
816
+ };
817
+ /** Fired when a processor claims a job and begins executing its handler. */
818
+ 'job:processing': {
819
+ jobId: number;
820
+ jobType: string;
821
+ };
822
+ /** Fired when a job handler completes successfully. */
823
+ 'job:completed': {
824
+ jobId: number;
825
+ jobType: string;
826
+ };
827
+ /** Fired when a job handler fails. `willRetry` indicates whether the job will be retried. */
828
+ 'job:failed': {
829
+ jobId: number;
830
+ jobType: string;
831
+ error: Error;
832
+ willRetry: boolean;
833
+ };
834
+ /** Fired after a job is cancelled via `cancelJob()`. */
835
+ 'job:cancelled': {
836
+ jobId: number;
837
+ };
838
+ /** Fired after a failed job is manually retried via `retryJob()`. */
839
+ 'job:retried': {
840
+ jobId: number;
841
+ };
842
+ /** Fired when a job enters the `waiting` state (via `ctx.waitFor`, `ctx.waitUntil`, or `ctx.waitForToken`). */
843
+ 'job:waiting': {
844
+ jobId: number;
845
+ jobType: string;
846
+ };
847
+ /** Fired when a job reports progress via `ctx.setProgress()`. */
848
+ 'job:progress': {
849
+ jobId: number;
850
+ progress: number;
851
+ };
852
+ /** Fired when a job stores output via `ctx.setOutput()`. */
853
+ 'job:output': {
854
+ jobId: number;
855
+ output: unknown;
856
+ };
857
+ /** Fired on internal errors from the processor or supervisor. */
858
+ error: Error;
585
859
  }
860
+ /** Union of all event names supported by the job queue. */
861
+ type QueueEventName = keyof QueueEventMap;
862
+ /**
863
+ * Callback type for `emit`. Used internally to pass the emitter
864
+ * from `initJobQueue` into the processor and supervisor.
865
+ */
866
+ type QueueEmitFn = <K extends QueueEventName>(event: K, data: QueueEventMap[K]) => void;
586
867
  interface JobQueue<PayloadMap> {
587
868
  /**
588
869
  * Add a job to the job queue.
870
+ *
871
+ * @param job - The job to enqueue.
872
+ * @param options - Optional. Pass `{ db }` with an external database client
873
+ * to insert the job within an existing transaction (PostgreSQL only).
589
874
  */
590
- addJob: <T extends JobType<PayloadMap>>(job: JobOptions<PayloadMap, T>) => Promise<number>;
875
+ addJob: <T extends JobType<PayloadMap>>(job: JobOptions<PayloadMap, T>, options?: AddJobOptions) => Promise<number>;
876
+ /**
877
+ * Add multiple jobs to the queue in a single operation.
878
+ *
879
+ * More efficient than calling `addJob` in a loop because it batches the
880
+ * INSERT into a single database round-trip (PostgreSQL) or a single
881
+ * atomic Lua script (Redis).
882
+ *
883
+ * Returns an array of job IDs in the same order as the input array.
884
+ * Each job may independently have an `idempotencyKey`; duplicates
885
+ * resolve to the existing job's ID without creating a new row.
886
+ *
887
+ * @param jobs - Array of jobs to enqueue.
888
+ * @param options - Optional. Pass `{ db }` with an external database client
889
+ * to insert the jobs within an existing transaction (PostgreSQL only).
890
+ */
891
+ addJobs: <T extends JobType<PayloadMap>>(jobs: JobOptions<PayloadMap, T>[], options?: AddJobOptions) => Promise<number[]>;
591
892
  /**
592
893
  * Get a job by its ID.
593
894
  */
@@ -732,6 +1033,12 @@ interface JobQueue<PayloadMap> {
732
1033
  * Create a job processor. Handlers must be provided per-processor.
733
1034
  */
734
1035
  createProcessor: (handlers: JobHandlers<PayloadMap>, options?: ProcessorOptions) => Processor;
1036
+ /**
1037
+ * Create a background supervisor that automatically reclaims stuck jobs,
1038
+ * cleans up old completed jobs/events, and expires timed-out waitpoint
1039
+ * tokens on a configurable interval.
1040
+ */
1041
+ createSupervisor: (options?: SupervisorOptions) => Supervisor;
735
1042
  /**
736
1043
  * Get the job events for a job.
737
1044
  */
@@ -817,6 +1124,36 @@ interface JobQueue<PayloadMap> {
817
1124
  * @returns The number of jobs that were enqueued.
818
1125
  */
819
1126
  enqueueDueCronJobs: () => Promise<number>;
1127
+ /**
1128
+ * Register a listener for a queue event. The listener is called every
1129
+ * time the event fires. Works identically with both PostgreSQL and Redis.
1130
+ *
1131
+ * @param event - The event name (e.g. `'job:completed'`, `'error'`).
1132
+ * @param listener - Callback receiving the event payload.
1133
+ */
1134
+ on: <K extends QueueEventName>(event: K, listener: (data: QueueEventMap[K]) => void) => void;
1135
+ /**
1136
+ * Register a one-time listener. The listener is automatically removed
1137
+ * after it fires once.
1138
+ *
1139
+ * @param event - The event name.
1140
+ * @param listener - Callback receiving the event payload.
1141
+ */
1142
+ once: <K extends QueueEventName>(event: K, listener: (data: QueueEventMap[K]) => void) => void;
1143
+ /**
1144
+ * Remove a previously registered listener.
1145
+ *
1146
+ * @param event - The event name.
1147
+ * @param listener - The exact function reference passed to `on` or `once`.
1148
+ */
1149
+ off: <K extends QueueEventName>(event: K, listener: (data: QueueEventMap[K]) => void) => void;
1150
+ /**
1151
+ * Remove all listeners for a specific event, or all listeners for
1152
+ * all events when called without arguments.
1153
+ *
1154
+ * @param event - Optional event name. If omitted, removes everything.
1155
+ */
1156
+ removeAllListeners: (event?: QueueEventName) => void;
820
1157
  /**
821
1158
  * Get the PostgreSQL database pool.
822
1159
  * Throws if the backend is not PostgreSQL.
@@ -863,6 +1200,9 @@ interface JobUpdates {
863
1200
  runAt?: Date | null;
864
1201
  timeoutMs?: number | null;
865
1202
  tags?: string[] | null;
1203
+ retryDelay?: number | null;
1204
+ retryBackoff?: boolean | null;
1205
+ retryDelayMax?: number | null;
866
1206
  }
867
1207
  /**
868
1208
  * Input shape for creating a cron schedule in the backend.
@@ -881,6 +1221,9 @@ interface CronScheduleInput {
881
1221
  timezone: string;
882
1222
  allowOverlap: boolean;
883
1223
  nextRunAt: Date | null;
1224
+ retryDelay: number | null;
1225
+ retryBackoff: boolean | null;
1226
+ retryDelayMax: number | null;
884
1227
  }
885
1228
  /**
886
1229
  * Abstract backend interface that both PostgreSQL and Redis implement.
@@ -888,8 +1231,26 @@ interface CronScheduleInput {
888
1231
  * and public API are backend-agnostic.
889
1232
  */
890
1233
  interface QueueBackend {
891
- /** Add a job and return its numeric ID. */
892
- addJob<PayloadMap, T extends JobType<PayloadMap>>(job: JobOptions<PayloadMap, T>): Promise<number>;
1234
+ /**
1235
+ * Add a job and return its numeric ID.
1236
+ *
1237
+ * @param job - Job configuration.
1238
+ * @param options - Optional. Pass `{ db }` to run the INSERT on an external
1239
+ * client (e.g., inside a transaction). PostgreSQL only.
1240
+ */
1241
+ addJob<PayloadMap, T extends JobType<PayloadMap>>(job: JobOptions<PayloadMap, T>, options?: AddJobOptions): Promise<number>;
1242
+ /**
1243
+ * Add multiple jobs in a single operation and return their IDs.
1244
+ *
1245
+ * IDs are returned in the same order as the input array.
1246
+ * Each job may independently have an `idempotencyKey`; duplicates
1247
+ * resolve to the existing job's ID without creating a new row.
1248
+ *
1249
+ * @param jobs - Array of job configurations.
1250
+ * @param options - Optional. Pass `{ db }` to run the INSERTs on an external
1251
+ * client (e.g., inside a transaction). PostgreSQL only.
1252
+ */
1253
+ addJobs<PayloadMap, T extends JobType<PayloadMap>>(jobs: JobOptions<PayloadMap, T>[], options?: AddJobOptions): Promise<number[]>;
893
1254
  /** Get a single job by ID, or null if not found. */
894
1255
  getJob<PayloadMap, T extends JobType<PayloadMap>>(id: number): Promise<JobRecord<PayloadMap, T> | null>;
895
1256
  /** Get jobs filtered by status, ordered by createdAt DESC. */
@@ -904,9 +1265,9 @@ interface QueueBackend {
904
1265
  * Atomically claim a batch of ready jobs for the given worker.
905
1266
  * Equivalent to SELECT … FOR UPDATE SKIP LOCKED in Postgres.
906
1267
  */
907
- getNextBatch<PayloadMap, T extends JobType<PayloadMap>>(workerId: string, batchSize?: number, jobType?: string | string[]): Promise<JobRecord<PayloadMap, T>[]>;
908
- /** Mark a job as completed. */
909
- completeJob(jobId: number): Promise<void>;
1268
+ getNextBatch<PayloadMap, T extends JobType<PayloadMap>>(workerId: string, batchSize?: number, jobType?: string | string[], groupConcurrency?: number): Promise<JobRecord<PayloadMap, T>[]>;
1269
+ /** Mark a job as completed, optionally storing output data. */
1270
+ completeJob(jobId: number, output?: unknown): Promise<void>;
910
1271
  /** Mark a job as failed with error info and schedule retry. */
911
1272
  failJob(jobId: number, error: Error, failureReason?: FailureReason): Promise<void>;
912
1273
  /** Update locked_at to keep the job alive (heartbeat). */
@@ -929,6 +1290,8 @@ interface QueueBackend {
929
1290
  reclaimStuckJobs(maxProcessingTimeMinutes?: number): Promise<number>;
930
1291
  /** Update the progress percentage (0-100) for a job. */
931
1292
  updateProgress(jobId: number, progress: number): Promise<void>;
1293
+ /** Update the output data for a job. Best-effort: should not throw. */
1294
+ updateOutput(jobId: number, output: unknown): Promise<void>;
932
1295
  /** Record a job event. Should not throw. */
933
1296
  recordJobEvent(jobId: number, eventType: JobEventType, metadata?: any): Promise<void>;
934
1297
  /** Get all events for a job, ordered by createdAt ASC. */
@@ -1022,17 +1385,33 @@ declare class PostgresBackend implements QueueBackend {
1022
1385
  getPool(): Pool;
1023
1386
  recordJobEvent(jobId: number, eventType: JobEventType, metadata?: any): Promise<void>;
1024
1387
  getJobEvents(jobId: number): Promise<JobEvent[]>;
1025
- addJob<PayloadMap, T extends JobType<PayloadMap>>({ jobType, payload, maxAttempts, priority, runAt, timeoutMs, forceKillOnTimeout, tags, idempotencyKey, }: JobOptions<PayloadMap, T>): Promise<number>;
1388
+ /**
1389
+ * Add a job and return its numeric ID.
1390
+ *
1391
+ * @param job - Job configuration.
1392
+ * @param options - Optional. Pass `{ db }` to run the INSERT on an external
1393
+ * client (e.g., inside a transaction) so the job is part of the caller's
1394
+ * transaction. The event INSERT also uses the same client.
1395
+ */
1396
+ addJob<PayloadMap, T extends JobType<PayloadMap>>({ jobType, payload, maxAttempts, priority, runAt, timeoutMs, forceKillOnTimeout, tags, idempotencyKey, retryDelay, retryBackoff, retryDelayMax, group, }: JobOptions<PayloadMap, T>, options?: AddJobOptions): Promise<number>;
1397
+ /**
1398
+ * Insert multiple jobs in a single database round-trip.
1399
+ *
1400
+ * Uses a multi-row INSERT with ON CONFLICT handling for idempotency keys.
1401
+ * Returns IDs in the same order as the input array.
1402
+ */
1403
+ addJobs<PayloadMap, T extends JobType<PayloadMap>>(jobs: JobOptions<PayloadMap, T>[], options?: AddJobOptions): Promise<number[]>;
1026
1404
  getJob<PayloadMap, T extends JobType<PayloadMap>>(id: number): Promise<JobRecord<PayloadMap, T> | null>;
1027
1405
  getJobsByStatus<PayloadMap, T extends JobType<PayloadMap>>(status: string, limit?: number, offset?: number): Promise<JobRecord<PayloadMap, T>[]>;
1028
1406
  getAllJobs<PayloadMap, T extends JobType<PayloadMap>>(limit?: number, offset?: number): Promise<JobRecord<PayloadMap, T>[]>;
1029
1407
  getJobs<PayloadMap, T extends JobType<PayloadMap>>(filters?: JobFilters, limit?: number, offset?: number): Promise<JobRecord<PayloadMap, T>[]>;
1030
1408
  getJobsByTags<PayloadMap, T extends JobType<PayloadMap>>(tags: string[], mode?: TagQueryMode, limit?: number, offset?: number): Promise<JobRecord<PayloadMap, T>[]>;
1031
- getNextBatch<PayloadMap, T extends JobType<PayloadMap>>(workerId: string, batchSize?: number, jobType?: string | string[]): Promise<JobRecord<PayloadMap, T>[]>;
1032
- completeJob(jobId: number): Promise<void>;
1409
+ getNextBatch<PayloadMap, T extends JobType<PayloadMap>>(workerId: string, batchSize?: number, jobType?: string | string[], groupConcurrency?: number): Promise<JobRecord<PayloadMap, T>[]>;
1410
+ completeJob(jobId: number, output?: unknown): Promise<void>;
1033
1411
  failJob(jobId: number, error: Error, failureReason?: FailureReason): Promise<void>;
1034
1412
  prolongJob(jobId: number): Promise<void>;
1035
1413
  updateProgress(jobId: number, progress: number): Promise<void>;
1414
+ updateOutput(jobId: number, output: unknown): Promise<void>;
1036
1415
  retryJob(jobId: number): Promise<void>;
1037
1416
  cancelJob(jobId: number): Promise<void>;
1038
1417
  cancelAllUpcomingJobs(filters?: JobFilters): Promise<number>;
@@ -1217,7 +1596,9 @@ declare function validateCronExpression(cronExpression: string, CronImpl?: typeo
1217
1596
  * Initialize the job queue system.
1218
1597
  *
1219
1598
  * Defaults to PostgreSQL when `backend` is omitted.
1599
+ * For PostgreSQL, provide either `databaseConfig` or `pool` (bring your own).
1600
+ * For Redis, provide either `redisConfig` or `client` (bring your own).
1220
1601
  */
1221
1602
  declare const initJobQueue: <PayloadMap = any>(config: JobQueueConfig) => JobQueue<PayloadMap>;
1222
1603
 
1223
- export { type CreateTokenOptions, type CronScheduleInput, type CronScheduleOptions, type CronScheduleRecord, type CronScheduleStatus, type DatabaseSSLConfig, type EditCronScheduleOptions, type EditJobOptions, FailureReason, type JobContext, type JobEvent, JobEventType, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobQueueConfigLegacy, type JobRecord, type JobStatus, type JobType, type OnTimeoutCallback, PostgresBackend, type PostgresJobQueueConfig, type Processor, type ProcessorOptions, type QueueBackend, type RedisJobQueueConfig, type RedisTLSConfig, type TagQueryMode, type WaitDuration, WaitSignal, type WaitToken, type WaitTokenResult, type WaitpointRecord, type WaitpointStatus, getNextCronOccurrence, initJobQueue, testHandlerSerialization, validateCronExpression, validateHandlerSerializable };
1604
+ export { type AddJobOptions, type CreateTokenOptions, type CronScheduleInput, type CronScheduleOptions, type CronScheduleRecord, type CronScheduleStatus, type DatabaseClient, type DatabaseSSLConfig, type EditCronScheduleOptions, type EditJobOptions, FailureReason, type JobContext, type JobEvent, JobEventType, type JobGroup, type JobHandler, type JobHandlers, type JobOptions, type JobQueue, type JobQueueConfig, type JobQueueConfigLegacy, type JobRecord, type JobStatus, type JobType, type OnTimeoutCallback, PostgresBackend, type PostgresJobQueueConfig, type Processor, type ProcessorOptions, type QueueBackend, type QueueEmitFn, type QueueEventMap, type QueueEventName, type RedisJobQueueConfig, type RedisTLSConfig, type Supervisor, type SupervisorOptions, type SupervisorRunResult, type TagQueryMode, type WaitDuration, WaitSignal, type WaitToken, type WaitTokenResult, type WaitpointRecord, type WaitpointStatus, getNextCronOccurrence, initJobQueue, testHandlerSerialization, validateCronExpression, validateHandlerSerializable };