@nicnocquee/dataqueue 1.33.0 → 1.35.0-beta.20260224075710
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai/build-docs-content.ts +96 -0
- package/ai/build-llms-full.ts +42 -0
- package/ai/docs-content.json +290 -0
- package/ai/rules/advanced.md +170 -0
- package/ai/rules/basic.md +159 -0
- package/ai/rules/react-dashboard.md +87 -0
- package/ai/skills/dataqueue-advanced/SKILL.md +370 -0
- package/ai/skills/dataqueue-core/SKILL.md +235 -0
- package/ai/skills/dataqueue-react/SKILL.md +201 -0
- package/dist/cli.cjs +577 -32
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.d.cts +52 -2
- package/dist/cli.d.ts +52 -2
- package/dist/cli.js +575 -32
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +937 -108
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +358 -11
- package/dist/index.d.ts +358 -11
- package/dist/index.js +937 -108
- package/dist/index.js.map +1 -1
- package/dist/mcp-server.cjs +186 -0
- package/dist/mcp-server.cjs.map +1 -0
- package/dist/mcp-server.d.cts +32 -0
- package/dist/mcp-server.d.ts +32 -0
- package/dist/mcp-server.js +175 -0
- package/dist/mcp-server.js.map +1 -0
- package/migrations/1781200000005_add_retry_config_to_job_queue.sql +17 -0
- package/migrations/1781200000006_add_output_to_job_queue.sql +3 -0
- package/package.json +10 -4
- package/src/backend.ts +36 -3
- package/src/backends/postgres.ts +344 -42
- package/src/backends/redis-scripts.ts +173 -8
- package/src/backends/redis.test.ts +668 -0
- package/src/backends/redis.ts +244 -15
- package/src/cli.test.ts +65 -0
- package/src/cli.ts +56 -19
- package/src/db-util.ts +1 -1
- package/src/index.test.ts +811 -12
- package/src/index.ts +106 -14
- package/src/install-mcp-command.test.ts +216 -0
- package/src/install-mcp-command.ts +185 -0
- package/src/install-rules-command.test.ts +218 -0
- package/src/install-rules-command.ts +233 -0
- package/src/install-skills-command.test.ts +176 -0
- package/src/install-skills-command.ts +124 -0
- package/src/mcp-server.test.ts +162 -0
- package/src/mcp-server.ts +231 -0
- package/src/processor.ts +133 -49
- package/src/queue.test.ts +477 -0
- package/src/queue.ts +20 -3
- package/src/supervisor.test.ts +340 -0
- package/src/supervisor.ts +177 -0
- package/src/types.ts +318 -3
package/ai/skills/dataqueue-advanced/SKILL.md

@@ -0,0 +1,370 @@

---
name: dataqueue-advanced
description: Advanced DataQueue patterns — step memoization, waits, tokens, cron, timeouts, tags, idempotency.
---

# DataQueue Advanced Patterns

## Step Memoization with ctx.run()

Wrap side-effectful work in `ctx.run(stepName, fn)`. Results are cached in the database — when the handler re-runs after a wait, completed steps replay from cache without re-executing.

```typescript
const handler = async (payload, signal, ctx) => {
  const data = await ctx.run('fetch-data', async () => {
    return await fetchFromAPI(payload.url);
  });

  await ctx.run('send-notification', async () => {
    await notify(data.userId, data.message);
  });
};
```

**Rules:**

- Step names must be unique within a handler.
- Step names must be stable across deployments while jobs are waiting.
- Step order must not change conditionally between re-invocations.
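
An anti-pattern sketch for the third rule — here the step sequence depends on a value that can differ between invocations, so cached results no longer line up on replay (`doExtraWork` and `finish` are illustrative placeholders):

```typescript
// BAD: 'optional-step' runs on some invocations but not others, so on
// replay the cached step sequence can pair with the wrong step names.
if (Math.random() > 0.5) {
  await ctx.run('optional-step', doExtraWork);
}
await ctx.run('final-step', finish);
```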

## Time-Based Waits

### waitFor (duration)

```typescript
const handler = async (payload, signal, ctx) => {
  await ctx.run('step-1', async () => {
    /* ... */
  });
  await ctx.waitFor({ hours: 24 });
  await ctx.run('step-2', async () => {
    /* ... */
  });
};
```

Duration fields: `seconds`, `minutes`, `hours`, `days`, `weeks`, `months`, `years` (additive).
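
Since the fields are additive, combining them produces a single summed delay:

```typescript
// Waits 1 day + 12 hours = 36 hours in total.
await ctx.waitFor({ days: 1, hours: 12 });
```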

### waitUntil (date)

```typescript
await ctx.waitUntil(new Date('2025-03-01T09:00:00Z'));
```

### How waits work internally

1. Handler throws a `WaitSignal` internally.
2. Job moves to `'waiting'` status — worker lock is released.
3. After the wait expires, job becomes `'pending'` again.
4. Handler re-runs from top; `ctx.run()` replays cached steps.

Waiting jobs are idle — they hold no lock, no concurrency slot, no resources.
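
To make the re-run model concrete, here is that lifecycle annotated on a small handler (`chargeCard` and `sendReceipt` are illustrative placeholders):

```typescript
const handler = async (payload, signal, ctx) => {
  // Invocation 1: executes and caches the result.
  // Invocation 2 (after the wait): replays from cache, no re-execution.
  const order = await ctx.run('charge', async () => chargeCard(payload));

  // Invocation 1: throws WaitSignal here; the job parks as 'waiting'.
  // Invocation 2: the wait is already satisfied, execution continues past it.
  await ctx.waitFor({ hours: 1 });

  // Only invocation 2 ever reaches this step.
  await ctx.run('receipt', async () => sendReceipt(order));
};
```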

## Token-Based Waits (Human-in-the-Loop)

Create a token, send it to an external actor, and wait for them to complete it.

```typescript
const handler = async (payload, signal, ctx) => {
  const token = await ctx.run('create-token', async () => {
    return await ctx.createToken({ timeout: '48h', tags: ['approval'] });
  });

  await ctx.run('notify', async () => {
    await sendSlack(`Approve: ${token.id}`);
  });

  const result = await ctx.waitForToken<{ approved: boolean }>(token.id);
  if (result.ok) {
    await ctx.run('process', async () => {
      if (result.output.approved) await approve(payload.id);
    });
  }
};
```

Complete tokens externally:

```typescript
await queue.completeToken(tokenId, { approved: true });
```

Expire timed-out tokens periodically:

```typescript
await queue.expireTimedOutTokens();
```

## Cron Scheduling

```typescript
const cronId = await queue.addCronJob({
  scheduleName: 'daily-report',
  cronExpression: '0 9 * * *',
  jobType: 'generate_report',
  payload: { reportId: 'daily', userId: 'system' },
  timezone: 'America/New_York',
  allowOverlap: false,
});
```

The processor automatically enqueues due cron jobs before each batch — no manual triggering needed.

Manage schedules:

```typescript
await queue.pauseCronJob(cronId);
await queue.resumeCronJob(cronId);
await queue.editCronJob(cronId, { cronExpression: '0 */2 * * *' });
await queue.removeCronJob(cronId);
const schedules = await queue.listCronJobs('active');
```

## Timeout Management

### Proactive — ctx.prolong()

```typescript
const handler = async (payload, signal, ctx) => {
  ctx.prolong(60_000); // set deadline to 60s from now
  await doHeavyWork();
  ctx.prolong(); // reset to original timeoutMs
};
```

### Reactive — ctx.onTimeout()

```typescript
const handler = async (payload, signal, ctx) => {
  let step = 0;
  ctx.onTimeout(() => {
    if (step < 3) return 30_000; // extend 30s
  });
  step = 1;
  await doStep1();
  step = 2;
  await doStep2();
  step = 3;
  await doStep3();
};
```

Both update `locked_at` in the DB, preventing premature reclamation.

### Force Kill on Timeout

```typescript
await queue.addJob({
  jobType: 'task',
  payload: {
    /* ... */
  },
  timeoutMs: 5000,
  forceKillOnTimeout: true,
});
```

**Limitations of forceKillOnTimeout:**

- Requires Node.js (not Bun).
- Handler must be serializable (no closures over external variables).
- `prolong`, `onTimeout`, `ctx.run`, and waits are NOT available.

## Event Hooks

Subscribe to real-time job lifecycle events. Works identically with PostgreSQL and Redis.

```typescript
const queue = initJobQueue<MyPayloadMap>(config);

queue.on('job:completed', ({ jobId, jobType }) => {
  console.log(`Job ${jobId} (${jobType}) completed`);
});

queue.on('job:failed', ({ jobId, jobType, error, willRetry }) => {
  console.error(`Job ${jobId} failed: ${error.message}`);
  if (!willRetry) {
    alertOps(`Permanent failure for job ${jobId}`);
  }
});

queue.on('error', (error) => {
  Sentry.captureException(error);
});
```

### Available events

| Event            | Payload                                |
| ---------------- | -------------------------------------- |
| `job:added`      | `{ jobId, jobType }`                   |
| `job:processing` | `{ jobId, jobType }`                   |
| `job:completed`  | `{ jobId, jobType }`                   |
| `job:failed`     | `{ jobId, jobType, error, willRetry }` |
| `job:cancelled`  | `{ jobId }`                            |
| `job:retried`    | `{ jobId }`                            |
| `job:waiting`    | `{ jobId, jobType }`                   |
| `job:progress`   | `{ jobId, progress }`                  |
| `error`          | `Error`                                |

### Listener management

```typescript
const listener = ({ jobId }) => console.log(jobId);
queue.on('job:completed', listener);
queue.off('job:completed', listener);
queue.once('job:added', ({ jobId }) => console.log('First job:', jobId));
queue.removeAllListeners('job:completed');
queue.removeAllListeners(); // all events
```

The `error` event fires alongside the `onError` callbacks in `ProcessorOptions` and `SupervisorOptions` — both mechanisms work independently.
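
Both channels can be wired at once; a sketch (`metrics` is a placeholder, and the exact shape of the `onError` option is an assumption here, not taken from the docs above):

```typescript
const processor = queue.createProcessor(handlers, {
  // Assumed callback signature: receives the thrown error.
  onError: (error) => metrics.increment('queue.errors'),
});

queue.on('error', (error) => Sentry.captureException(error));
```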

## Tags

```typescript
await queue.addJob({
  jobType: 'email',
  payload: {
    /* ... */
  },
  tags: ['welcome', 'onboarding'],
});

const jobs = await queue.getJobsByTags(['welcome'], 'any');
await queue.cancelAllUpcomingJobs({
  tags: { values: ['onboarding'], mode: 'all' },
});
```

Tag query modes: `'exact'`, `'all'`, `'any'`, `'none'`.
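
Usage of the four modes side by side (the comments give the conventional reading of each mode, which is an assumption here):

```typescript
const any = await queue.getJobsByTags(['welcome', 'digest'], 'any'); // at least one tag matches
const all = await queue.getJobsByTags(['welcome', 'onboarding'], 'all'); // has every listed tag
const exact = await queue.getJobsByTags(['welcome'], 'exact'); // tag set matches exactly
const none = await queue.getJobsByTags(['onboarding'], 'none'); // has none of the tags
```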

## Idempotency

```typescript
const jobId = await queue.addJob({
  jobType: 'email',
  payload: { to: 'user@example.com', subject: 'Welcome', body: '...' },
  idempotencyKey: `welcome-${userId}`,
});
```

If a job with the same key already exists, `addJob` returns the existing job ID instead of creating a duplicate. The key is unique across all statuses until `cleanupOldJobs` removes it.
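
For example, enqueueing twice with the same key yields one job and one ID:

```typescript
const first = await queue.addJob({
  jobType: 'email',
  payload: { to: 'user@example.com', subject: 'Welcome', body: '...' },
  idempotencyKey: 'welcome-user-42',
});
const second = await queue.addJob({
  jobType: 'email',
  payload: { to: 'user@example.com', subject: 'Welcome', body: '...' },
  idempotencyKey: 'welcome-user-42',
});
// first === second — the second call returned the existing job's ID
```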

## Transactional Job Creation (PostgreSQL Only)

Insert a job within an existing database transaction so the job is enqueued **atomically** with other writes:

```typescript
import { Pool } from 'pg';

const pool = new Pool({ connectionString: process.env.DATABASE_URL });

async function registerUser(email: string, name: string) {
  const client = await pool.connect();
  try {
    await client.query('BEGIN');

    await client.query('INSERT INTO users (email, name) VALUES ($1, $2)', [
      email,
      name,
    ]);

    const queue = getJobQueue();
    await queue.addJob(
      {
        jobType: 'send_email',
        payload: { to: email, subject: 'Welcome!', body: `Hi ${name}!` },
      },
      { db: client },
    );

    await client.query('COMMIT');
  } catch (error) {
    await client.query('ROLLBACK');
    throw error;
  } finally {
    client.release();
  }
}
```

The `db` option accepts any object matching `DatabaseClient { query(text, values): Promise<{ rows, rowCount }> }` — works with `pg.PoolClient`, `pg.Client`, or compatible ORM query runners.

The job event (`'added'`) is also inserted within the same transaction.
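
A minimal adapter sketch for a non-pg query runner — `runRawQuery` stands in for whatever raw-SQL escape hatch your ORM exposes:

```typescript
const dbClient = {
  query: async (text: string, values?: unknown[]) => {
    const rows = await runRawQuery(text, values); // placeholder
    return { rows, rowCount: rows.length }; // rowCount approximated from returned rows
  },
};

await queue.addJob(
  {
    jobType: 'send_email',
    payload: { to: 'user@example.com', subject: 'Hi', body: '...' },
  },
  { db: dbClient },
);
```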

## Retry Strategy

Configure how failed jobs are retried with `retryDelay`, `retryBackoff`, and `retryDelayMax`.

### Fixed delay

```typescript
await queue.addJob({
  jobType: 'email',
  payload: {
    /* ... */
  },
  maxAttempts: 5,
  retryDelay: 30, // 30 seconds between each retry
  retryBackoff: false,
});
```

### Exponential backoff with cap

```typescript
await queue.addJob({
  jobType: 'email',
  payload: {
    /* ... */
  },
  maxAttempts: 10,
  retryDelay: 5, // base: 5 seconds
  retryBackoff: true, // default — delay doubles each attempt with jitter
  retryDelayMax: 300, // never wait more than 5 minutes
});
```

### Cron schedules with retry config

```typescript
await queue.addCronJob({
  scheduleName: 'daily-sync',
  cronExpression: '0 * * * *',
  jobType: 'sync',
  payload: { source: 'api' },
  retryDelay: 60,
  retryBackoff: true,
  retryDelayMax: 600,
});
```

Every job enqueued by the schedule inherits the retry settings.

### Default behavior

When no retry options are set, the legacy formula `2^attempts * 60 seconds` is used. This is fully backward compatible.
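
Spelled out (assuming `attempts` counts the failures so far):

```typescript
// delay = 2^attempts * 60 seconds
const legacyRetryDelaySeconds = (attempts: number) => 2 ** attempts * 60;

legacyRetryDelaySeconds(1); // 120 — 2 minutes after the first failure
legacyRetryDelaySeconds(2); // 240 — 4 minutes after the second
legacyRetryDelaySeconds(3); // 480 — 8 minutes after the third
```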

## Maintenance

Use `createSupervisor()` to automate all maintenance tasks in a long-running server:

```typescript
const supervisor = queue.createSupervisor({
  intervalMs: 60_000,
  stuckJobsTimeoutMinutes: 10,
  cleanupJobsDaysToKeep: 30,
  cleanupEventsDaysToKeep: 30,
});
supervisor.startInBackground();
```

For serverless or one-off scripts, call `supervisor.start()` (runs once) or use the manual methods:

```typescript
await queue.reclaimStuckJobs(10); // reclaim jobs stuck > 10 min
await queue.cleanupOldJobs(30); // delete completed jobs > 30 days
await queue.cleanupOldJobEvents(30); // delete old events > 30 days
await queue.expireTimedOutTokens(); // expire overdue tokens
```
package/ai/skills/dataqueue-core/SKILL.md

@@ -0,0 +1,235 @@

---
name: dataqueue-core
description: Core patterns for using @nicnocquee/dataqueue — typed PayloadMap, init, handlers, adding and processing jobs.
---

# DataQueue Core Patterns

## Imports

Always import from `@nicnocquee/dataqueue`. There is no v2/v3 subpath.

```typescript
import { initJobQueue, JobHandlers } from '@nicnocquee/dataqueue';
```

## Step 1: Define a PayloadMap

Define an object type mapping job type strings to their payload shapes. This is the foundation of type safety — every API method is generic over this map.

```typescript
export type JobPayloadMap = {
  send_email: { to: string; subject: string; body: string };
  generate_report: { reportId: string; userId: string };
};
```

## Step 2: Define Handlers

Create a `JobHandlers<PayloadMap>` object. TypeScript enforces that every key in the PayloadMap has a handler. Each handler receives `(payload, signal, ctx)`.

```typescript
import { JobHandlers } from '@nicnocquee/dataqueue';
import type { JobPayloadMap } from './types';

export const jobHandlers: JobHandlers<JobPayloadMap> = {
  send_email: async (payload) => {
    await sendEmail(payload.to, payload.subject, payload.body);
  },
  generate_report: async (payload, signal) => {
    if (signal.aborted) return;
    const url = await generateReport(payload.reportId, payload.userId);
    return { url }; // stored as job output, readable via getJob()
  },
};
```

## Step 3: Initialize the Queue (Singleton)

Use a module-level singleton. Each `initJobQueue` call creates a new database pool — never call it per-request.

### PostgreSQL

```typescript
import { initJobQueue } from '@nicnocquee/dataqueue';
import type { JobPayloadMap } from './types';

let jobQueue: ReturnType<typeof initJobQueue<JobPayloadMap>> | null = null;

export const getJobQueue = () => {
  if (!jobQueue) {
    jobQueue = initJobQueue<JobPayloadMap>({
      databaseConfig: {
        connectionString: process.env.PG_DATAQUEUE_DATABASE,
      },
    });
  }
  return jobQueue;
};
```

### Redis

```typescript
jobQueue = initJobQueue<JobPayloadMap>({
  backend: 'redis',
  redisConfig: {
    url: process.env.REDIS_URL,
    keyPrefix: 'myapp:',
  },
});
```

### Bring Your Own Pool / Client

You can pass an existing `pg.Pool` or `ioredis` client instead of connection config:

```typescript
import { Pool } from 'pg';
const pool = new Pool({ connectionString: process.env.DATABASE_URL });

jobQueue = initJobQueue<JobPayloadMap>({ pool });
```

```typescript
import IORedis from 'ioredis';
const redis = new IORedis(process.env.REDIS_URL);

jobQueue = initJobQueue<JobPayloadMap>({
  backend: 'redis',
  client: redis,
  keyPrefix: 'myapp:',
});
```

When you provide your own pool/client, the library will **not** close it on shutdown — you manage its lifecycle.
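
Since the pool or client is yours, close it yourself during shutdown:

```typescript
process.on('SIGTERM', async () => {
  await pool.end(); // or `await redis.quit()` for the ioredis client
  process.exit(0);
});
```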

## Step 4: Add Jobs

```typescript
const jobId = await queue.addJob({
  jobType: 'send_email',
  payload: { to: 'user@example.com', subject: 'Hi', body: 'Hello' },
  priority: 10,
  runAt: new Date(Date.now() + 5000),
  tags: ['welcome'],
  idempotencyKey: 'welcome-user-123',
});
```

### Batch Insert

Use `addJobs` to insert many jobs in a single database round-trip. Returns IDs in the same order as the input array.

```typescript
const jobIds = await queue.addJobs([
  {
    jobType: 'send_email',
    payload: { to: 'a@example.com', subject: 'Hi', body: '...' },
  },
  {
    jobType: 'send_email',
    payload: { to: 'b@example.com', subject: 'Hi', body: '...' },
    priority: 10,
  },
  {
    jobType: 'generate_report',
    payload: { reportId: '1', userId: '2' },
    tags: ['monthly'],
  },
]);
```

Each job can independently have its own `idempotencyKey`, `priority`, `runAt`, `tags`, etc. The `{ db }` transactional option is also supported (PostgreSQL only).

### Transactional Job Creation (PostgreSQL only)

Pass an external `pg.PoolClient` inside a transaction via `{ db: client }`:

```typescript
const client = await pool.connect();
await client.query('BEGIN');
await client.query('INSERT INTO users (email) VALUES ($1)', [email]);
await queue.addJob(
  {
    jobType: 'send_email',
    payload: { to: email, subject: 'Welcome!', body: '...' },
  },
  { db: client },
);
await client.query('COMMIT');
client.release();
```

If the transaction rolls back, the job is never enqueued.

### Retry configuration

Control retry behavior per-job with `retryDelay`, `retryBackoff`, and `retryDelayMax`:

```typescript
await queue.addJob({
  jobType: 'send_email',
  payload: { to: 'user@example.com', subject: 'Hi', body: 'Hello' },
  maxAttempts: 5,
  retryDelay: 10, // base delay: 10 seconds
  retryBackoff: true, // exponential backoff (default)
  retryDelayMax: 300, // cap at 5 minutes
});
```

- **Fixed delay**: set `retryBackoff: false` for a constant delay between retries.
- **Exponential backoff** (default): delay doubles each attempt, with jitter.
- **Default**: when no retry options are set, the legacy `2^attempts * 60s` formula is used.

## Step 5: Process Jobs

### Serverless (one-shot)

```typescript
const processor = queue.createProcessor(handlers, {
  batchSize: 10,
  concurrency: 3,
});
const processed = await processor.start();
```

### Long-running server

```typescript
const processor = queue.createProcessor(handlers, {
  batchSize: 10,
  concurrency: 3,
  pollInterval: 5000,
});
processor.startInBackground();

// Automate maintenance (reclaim stuck jobs, cleanup old data, expire tokens)
const supervisor = queue.createSupervisor({
  intervalMs: 60_000,
  stuckJobsTimeoutMinutes: 10,
  cleanupJobsDaysToKeep: 30,
  cleanupEventsDaysToKeep: 30,
});
supervisor.startInBackground();

process.on('SIGTERM', async () => {
  await Promise.all([
    processor.stopAndDrain(30000),
    supervisor.stopAndDrain(30000),
  ]);
  queue.getPool().end();
  process.exit(0);
});
```

## Common Mistakes

1. **Creating a new queue per request** — always use a singleton. Each `initJobQueue` call creates a DB pool.
2. **Missing handler for a job type** — the job fails with `FailureReason.NoHandler`. Let TypeScript enforce completeness by typing handlers as `JobHandlers<PayloadMap>`.
3. **Not checking `signal.aborted`** — timed-out jobs keep running in the background. Always check the signal in long-running handlers (see the sketch after this list).
4. **Skipping maintenance** — use `createSupervisor()` to automate reclaiming stuck jobs, cleaning up old data, and expiring tokens. Without it, crashed workers leave jobs stuck in `processing` and tables grow unbounded.
5. **Forgetting to run migrations** — PostgreSQL requires `dataqueue-cli migrate` before use. Redis needs no migrations.
6. **Not calling `stopAndDrain` on shutdown** — use `stopAndDrain()` (not `stop()`) for graceful shutdown to avoid stuck jobs.
7. **Forgetting to commit/rollback when using the `db` option** — the `addJob` INSERT sits in an open transaction. If you never `COMMIT` or `ROLLBACK`, the connection leaks and the job is invisible to other sessions.
8. **Using the `db` option with Redis** — transactional job creation is PostgreSQL only. The Redis backend throws if `db` is provided.
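
A sketch for mistake 3 — re-check the signal between units of work so a timed-out job stops promptly (`loadSections` and `renderSection` are illustrative placeholders):

```typescript
const generateReport = async (
  payload: { reportId: string; userId: string },
  signal: AbortSignal,
) => {
  for (const section of await loadSections(payload.reportId)) {
    if (signal.aborted) return; // bail out once the job has timed out
    await renderSection(section);
  }
};
```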