@nicnocquee/dataqueue 1.34.0 → 1.35.0-beta.20260224075710

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -170,6 +170,56 @@ await queue.addJob({
  - Handler must be serializable (no closures over external variables).
  - `prolong`, `onTimeout`, `ctx.run`, waits are NOT available.
 
+ ## Event Hooks
+ 
+ Subscribe to real-time job lifecycle events. Works identically with PostgreSQL and Redis.
+ 
+ ```typescript
+ const queue = initJobQueue<MyPayloadMap>(config);
+ 
+ queue.on('job:completed', ({ jobId, jobType }) => {
+   console.log(`Job ${jobId} (${jobType}) completed`);
+ });
+ 
+ queue.on('job:failed', ({ jobId, jobType, error, willRetry }) => {
+   console.error(`Job ${jobId} failed: ${error.message}`);
+   if (!willRetry) {
+     alertOps(`Permanent failure for job ${jobId}`);
+   }
+ });
+ 
+ queue.on('error', (error) => {
+   Sentry.captureException(error);
+ });
+ ```
+ 
+ ### Available events
+ 
+ | Event            | Payload                                |
+ | ---------------- | -------------------------------------- |
+ | `job:added`      | `{ jobId, jobType }`                   |
+ | `job:processing` | `{ jobId, jobType }`                   |
+ | `job:completed`  | `{ jobId, jobType }`                   |
+ | `job:failed`     | `{ jobId, jobType, error, willRetry }` |
+ | `job:cancelled`  | `{ jobId }`                            |
+ | `job:retried`    | `{ jobId }`                            |
+ | `job:waiting`    | `{ jobId, jobType }`                   |
+ | `job:progress`   | `{ jobId, progress }`                  |
+ | `error`          | `Error`                                |
+ 
+ ### Listener management
+ 
+ ```typescript
+ const listener = ({ jobId }) => console.log(jobId);
+ queue.on('job:completed', listener);
+ queue.off('job:completed', listener);
+ queue.once('job:added', ({ jobId }) => console.log('First job:', jobId));
+ queue.removeAllListeners('job:completed');
+ queue.removeAllListeners(); // all events
+ ```
+ 
+ The `error` event fires alongside `onError` callbacks in `ProcessorOptions` and `SupervisorOptions`; both mechanisms work independently.
+ 
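+ As a minimal sketch of using both together (assuming `onError` simply receives the `Error`; check `ProcessorOptions` for the exact callback signature):
+ 
+ ```typescript
+ // Hook-based: catches errors from any activity on this queue instance.
+ queue.on('error', (error) => Sentry.captureException(error));
+ 
+ // Callback-based: scoped to this processor via its options.
+ const processor = queue.createProcessor(handlers, {
+   onError: (error) => console.error('processor error:', error),
+ });
+ processor.startInBackground();
+ ```
+ 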
  ## Tags
 
  ```typescript
@@ -201,8 +251,117 @@ const jobId = await queue.addJob({
 
  If a job with the same key exists, returns the existing job ID. Key is unique across all statuses until `cleanupOldJobs` removes it.
 
+ ## Transactional Job Creation (PostgreSQL Only)
+ 
+ Insert a job within an existing database transaction so the job is enqueued **atomically** with other writes:
+ 
+ ```typescript
+ import { Pool } from 'pg';
+ 
+ const pool = new Pool({ connectionString: process.env.DATABASE_URL });
+ 
+ async function registerUser(email: string, name: string) {
+   const client = await pool.connect();
+   try {
+     await client.query('BEGIN');
+ 
+     await client.query('INSERT INTO users (email, name) VALUES ($1, $2)', [
+       email,
+       name,
+     ]);
+ 
+     const queue = getJobQueue();
+     await queue.addJob(
+       {
+         jobType: 'send_email',
+         payload: { to: email, subject: 'Welcome!', body: `Hi ${name}!` },
+       },
+       { db: client },
+     );
+ 
+     await client.query('COMMIT');
+   } catch (error) {
+     await client.query('ROLLBACK');
+     throw error;
+   } finally {
+     client.release();
+   }
+ }
+ ```
+ 
+ The `db` option accepts any object matching `DatabaseClient { query(text, values): Promise<{ rows, rowCount }> }` — works with `pg.PoolClient`, `pg.Client`, or compatible ORM query runners.
+ 
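+ As an illustration, a hypothetical adapter for a non-pg client (the `orm.raw` call and its result shape are assumptions, not part of this library):
+ 
+ ```typescript
+ // Any object exposing a pg-style `query` method can be passed as `db`.
+ const db = {
+   query: async (text: string, values?: unknown[]) => {
+     const result = await orm.raw(text, values); // hypothetical ORM call
+     return { rows: result.rows, rowCount: result.rowCount };
+   },
+ };
+ 
+ await queue.addJob(
+   { jobType: 'send_email', payload: { to: 'a@example.com', subject: 'Hi', body: '...' } },
+   { db },
+ );
+ ```
+ 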
+ The job event (`'added'`) is also inserted within the same transaction.
+ 
+ ## Retry Strategy
+ 
+ Configure how failed jobs are retried with `retryDelay`, `retryBackoff`, and `retryDelayMax`.
+ 
+ ### Fixed delay
+ 
+ ```typescript
+ await queue.addJob({
+   jobType: 'email',
+   payload: {
+     /* ... */
+   },
+   maxAttempts: 5,
+   retryDelay: 30, // 30 seconds between each retry
+   retryBackoff: false,
+ });
+ ```
+ 
+ ### Exponential backoff with cap
+ 
+ ```typescript
+ await queue.addJob({
+   jobType: 'email',
+   payload: {
+     /* ... */
+   },
+   maxAttempts: 10,
+   retryDelay: 5, // base: 5 seconds
+   retryBackoff: true, // default — delay doubles each attempt with jitter
+   retryDelayMax: 300, // never wait more than 5 minutes
+ });
+ ```
+ 
+ ### Cron schedules with retry config
+ 
+ ```typescript
+ await queue.addCronJob({
+   scheduleName: 'daily-sync',
+   cronExpression: '0 0 * * *', // daily at midnight
+   jobType: 'sync',
+   payload: { source: 'api' },
+   retryDelay: 60,
+   retryBackoff: true,
+   retryDelayMax: 600,
+ });
+ ```
+ 
+ Every job enqueued by the schedule inherits the retry settings.
+ 
+ ### Default behavior
+ 
+ When no retry options are set, the legacy formula `2^attempts * 60 seconds` is used (`2^1 * 60 = 120` seconds after the first attempt fails, 240 after the second, and so on). This is fully backward compatible.
+ 
  ## Maintenance
 
+ Use `createSupervisor()` to automate all maintenance tasks in a long-running server:
+ 
+ ```typescript
+ const supervisor = queue.createSupervisor({
+   intervalMs: 60_000,
+   stuckJobsTimeoutMinutes: 10,
+   cleanupJobsDaysToKeep: 30,
+   cleanupEventsDaysToKeep: 30,
+ });
+ supervisor.startInBackground();
+ ```
+ 
+ For serverless or one-off scripts, call `supervisor.start()` (runs once) or use the manual methods:
+ 
  ```typescript
  await queue.reclaimStuckJobs(10); // reclaim jobs stuck > 10 min
  await queue.cleanupOldJobs(30); // delete completed jobs > 30 days
@@ -38,7 +38,8 @@ export const jobHandlers: JobHandlers<JobPayloadMap> = {
    },
    generate_report: async (payload, signal) => {
      if (signal.aborted) return;
-     await generateReport(payload.reportId, payload.userId);
+     const url = await generateReport(payload.reportId, payload.userId);
+     return { url }; // stored as job output, readable via getJob()
    },
  };
  ```
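+ 
+ Reading the stored output back later (a sketch; `jobId` comes from `addJob`, and the return value lands in the job's `output` field):
+ 
+ ```typescript
+ const job = await queue.getJob(jobId);
+ console.log(job?.output); // { url: ... } once the job has completed
+ ```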
@@ -79,6 +80,30 @@ jobQueue = initJobQueue<JobPayloadMap>({
  });
  ```
 
+ ### Bring Your Own Pool / Client
+ 
+ You can pass an existing `pg.Pool` or `ioredis` client instead of connection config:
+ 
+ ```typescript
+ import { Pool } from 'pg';
+ const pool = new Pool({ connectionString: process.env.DATABASE_URL });
+ 
+ jobQueue = initJobQueue<JobPayloadMap>({ pool });
+ ```
+ 
+ ```typescript
+ import IORedis from 'ioredis';
+ const redis = new IORedis(process.env.REDIS_URL);
+ 
+ jobQueue = initJobQueue<JobPayloadMap>({
+   backend: 'redis',
+   client: redis,
+   keyPrefix: 'myapp:',
+ });
+ ```
+ 
+ When you provide your own pool/client, the library will **not** close it on shutdown — you manage its lifecycle.
+ 
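+ A shutdown sketch for the bring-your-own case, reusing the `pool` and `redis` instances above (use whichever backend applies):
+ 
+ ```typescript
+ process.on('SIGTERM', async () => {
+   // dataqueue will not close clients it didn't create:
+   await pool.end(); // pg
+   await redis.quit(); // ioredis
+   process.exit(0);
+ });
+ ```
+ 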
  ## Step 4: Add Jobs
 
  ```typescript
@@ -92,6 +117,71 @@ const jobId = await queue.addJob({
  });
  ```
 
+ ### Batch Insert
+ 
+ Use `addJobs` to insert many jobs in a single database round-trip. Returns IDs in the same order as the input array.
+ 
+ ```typescript
+ const jobIds = await queue.addJobs([
+   {
+     jobType: 'send_email',
+     payload: { to: 'a@example.com', subject: 'Hi', body: '...' },
+   },
+   {
+     jobType: 'send_email',
+     payload: { to: 'b@example.com', subject: 'Hi', body: '...' },
+     priority: 10,
+   },
+   {
+     jobType: 'generate_report',
+     payload: { reportId: '1', userId: '2' },
+     tags: ['monthly'],
+   },
+ ]);
+ ```
+ 
+ Each job can independently have its own `idempotencyKey`, `priority`, `runAt`, `tags`, etc. The `{ db }` transactional option is also supported (PostgreSQL only), as sketched below.
+ 
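+ For example, a sketch of a transactional batch insert (PostgreSQL; `client` is an open `pg.PoolClient`, as in the next section):
+ 
+ ```typescript
+ await client.query('BEGIN');
+ const ids = await queue.addJobs(
+   [
+     { jobType: 'send_email', payload: { to: 'a@example.com', subject: 'Hi', body: '...' } },
+     { jobType: 'send_email', payload: { to: 'b@example.com', subject: 'Hi', body: '...' }, priority: 10 },
+   ],
+   { db: client },
+ );
+ await client.query('COMMIT');
+ ```
+ 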
+ ### Transactional Job Creation (PostgreSQL only)
+ 
+ Pass an external `pg.PoolClient` inside a transaction via `{ db: client }`:
+ 
+ ```typescript
+ const client = await pool.connect();
+ await client.query('BEGIN');
+ await client.query('INSERT INTO users (email) VALUES ($1)', [email]);
+ await queue.addJob(
+   {
+     jobType: 'send_email',
+     payload: { to: email, subject: 'Welcome!', body: '...' },
+   },
+   { db: client },
+ );
+ await client.query('COMMIT');
+ client.release();
+ ```
+ 
+ If the transaction rolls back, the job is never enqueued.
+ 
+ ### Retry configuration
+ 
+ Control retry behavior per-job with `retryDelay`, `retryBackoff`, and `retryDelayMax`:
+ 
+ ```typescript
+ await queue.addJob({
+   jobType: 'send_email',
+   payload: { to: 'user@example.com', subject: 'Hi', body: 'Hello' },
+   maxAttempts: 5,
+   retryDelay: 10, // base delay: 10 seconds
+   retryBackoff: true, // exponential backoff (default)
+   retryDelayMax: 300, // cap at 5 minutes
+ });
+ ```
+ 
+ - **Fixed delay**: set `retryBackoff: false` for a constant delay between retries.
+ - **Exponential backoff** (default): delay doubles each attempt, with jitter.
+ - **Default**: when no retry options are set, the legacy `2^attempts * 60s` formula is used.
+ 
  ## Step 5: Process Jobs
 
  ### Serverless (one-shot)
@@ -114,8 +204,20 @@ const processor = queue.createProcessor(handlers, {
  });
  processor.startInBackground();
 
+ // Automate maintenance (reclaim stuck jobs, cleanup old data, expire tokens)
+ const supervisor = queue.createSupervisor({
+   intervalMs: 60_000,
+   stuckJobsTimeoutMinutes: 10,
+   cleanupJobsDaysToKeep: 30,
+   cleanupEventsDaysToKeep: 30,
+ });
+ supervisor.startInBackground();
+ 
  process.on('SIGTERM', async () => {
-   await processor.stopAndDrain(30000);
+   await Promise.all([
+     processor.stopAndDrain(30000),
+     supervisor.stopAndDrain(30000),
+   ]);
    queue.getPool().end();
    process.exit(0);
  });
@@ -126,6 +228,8 @@ process.on('SIGTERM', async () => {
  1. **Creating a new queue per request** — always use a singleton (see the sketch after this list). Each `initJobQueue` creates a DB pool.
  2. **Missing handler for a job type** — the job fails with `FailureReason.NoHandler`. Let TypeScript enforce completeness by typing handlers as `JobHandlers<PayloadMap>`.
  3. **Not checking `signal.aborted`** — timed-out jobs keep running in the background. Always check the signal in long-running handlers.
- 4. **Forgetting `reclaimStuckJobs`** — crashed workers leave jobs stuck in `processing`. Call `reclaimStuckJobs()` periodically.
+ 4. **Skipping maintenance** — use `createSupervisor()` to automate reclaiming stuck jobs, cleaning up old data, and expiring tokens. Without it, crashed workers leave jobs stuck in `processing` and tables grow unbounded.
  5. **Forgetting to run migrations** — PostgreSQL requires `dataqueue-cli migrate` before use. Redis needs no migrations.
  6. **Not calling `stopAndDrain` on shutdown** — use `stopAndDrain()` (not `stop()`) for graceful shutdown to avoid stuck jobs.
+ 7. **Forgetting to commit/rollback when using `db` option** — the `addJob` INSERT sits in an open transaction. If you never `COMMIT` or `ROLLBACK`, the connection leaks and the job is invisible to other sessions.
+ 8. **Using `db` option with Redis** — transactional job creation is PostgreSQL only. The Redis backend throws if `db` is provided.
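+ 
+ A minimal singleton sketch for mistake 1 (mirrors the `getJobQueue()` helper used in the transactional example; config omitted):
+ 
+ ```typescript
+ let jobQueue: ReturnType<typeof initJobQueue<JobPayloadMap>> | undefined;
+ 
+ export function getJobQueue() {
+   // Reuse one queue (and one DB pool) per process.
+   jobQueue ??= initJobQueue<JobPayloadMap>({ /* config */ });
+   return jobQueue;
+ }
+ ```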
@@ -102,13 +102,14 @@ export async function GET(
 
  ### useJob Return Value
 
- | Field       | Type                | Description                     |
- | ----------- | ------------------- | ------------------------------- |
- | `data`      | `JobData \| null`   | Latest job data from fetcher    |
- | `status`    | `JobStatus \| null` | Current job status              |
- | `progress`  | `number \| null`    | Progress percentage (0–100)     |
- | `isLoading` | `boolean`           | True until first fetch resolves |
- | `error`     | `Error \| null`     | Last fetch error                |
+ | Field       | Type                | Description                                            |
+ | ----------- | ------------------- | ------------------------------------------------------ |
+ | `data`      | `JobData \| null`   | Latest job data from fetcher                           |
+ | `status`    | `JobStatus \| null` | Current job status                                     |
+ | `progress`  | `number \| null`    | Progress percentage (0–100)                            |
+ | `output`    | `unknown \| null`   | Handler output from `ctx.setOutput()` or return value  |
+ | `isLoading` | `boolean`           | True until first fetch resolves                        |
+ | `error`     | `Error \| null`     | Last fetch error                                       |
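+ 
+ A usage sketch (the hook's arguments depend on your fetcher setup and are elided here; the `'completed'` status string is an assumption):
+ 
+ ```typescript
+ const { status, progress, output, isLoading, error } = useJob(/* your fetcher args */);
+ 
+ if (!isLoading && status === 'completed') {
+   console.log('job output:', output); // populated from the handler's result
+ }
+ ```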
 
  ## Dashboard — @nicnocquee/dataqueue-dashboard
 
@@ -187,3 +188,14 @@ const handler = async (payload, signal, ctx) => {
    }
  };
  ```
+ 
+ ### Job Output from Handlers
+ 
+ Store results via `ctx.setOutput(data)` or by returning a value from the handler. The output is exposed via `getJob()` (`output` field) and the `useJob` hook's `output` property. If both are used, `ctx.setOutput()` takes precedence.
+ 
+ ```typescript
+ const handler = async (payload, signal, ctx) => {
+   const result = await doWork(payload);
+   return { url: result.downloadUrl }; // stored as output
+ };
+ ```
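+ 
+ The `ctx.setOutput` variant (a sketch; awaited on the assumption that it persists asynchronously):
+ 
+ ```typescript
+ const handler = async (payload, signal, ctx) => {
+   const result = await doWork(payload);
+   // Takes precedence over any value the handler returns.
+   await ctx.setOutput({ url: result.downloadUrl });
+ };
+ ```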