@flightdev/queue 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +372 -0
- package/dist/adapters/bullmq.d.ts +62 -0
- package/dist/adapters/bullmq.js +141 -0
- package/dist/adapters/memory.d.ts +9 -0
- package/dist/adapters/memory.js +74 -0
- package/dist/adapters/redis.d.ts +13 -0
- package/dist/adapters/redis.js +98 -0
- package/dist/index.d.ts +80 -0
- package/dist/index.js +71 -0
- package/dist/jobs.d.ts +212 -0
- package/dist/jobs.js +134 -0
- package/package.json +55 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024-2026 Flight Contributors
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
1
|
+
# @flightdev/queue
|
|
2
|
+
|
|
3
|
+
Background job processing for Flight Framework. Run tasks asynchronously with retries, scheduling, and multiple adapters.
|
|
4
|
+
|
|
5
|
+
## Table of Contents
|
|
6
|
+
|
|
7
|
+
- [Features](#features)
|
|
8
|
+
- [Installation](#installation)
|
|
9
|
+
- [Quick Start](#quick-start)
|
|
10
|
+
- [Adapters](#adapters)
|
|
11
|
+
- [Defining Jobs](#defining-jobs)
|
|
12
|
+
- [Enqueueing Jobs](#enqueueing-jobs)
|
|
13
|
+
- [Processing Jobs](#processing-jobs)
|
|
14
|
+
- [Scheduling](#scheduling)
|
|
15
|
+
- [Error Handling](#error-handling)
|
|
16
|
+
- [API Reference](#api-reference)
|
|
17
|
+
- [License](#license)
|
|
18
|
+
|
|
19
|
+
---
|
|
20
|
+
|
|
21
|
+
## Features
|
|
22
|
+
|
|
23
|
+
- Multiple storage adapters (Redis, Memory, IndexedDB)
|
|
24
|
+
- Automatic retries with exponential backoff
|
|
25
|
+
- Job scheduling (delayed and recurring)
|
|
26
|
+
- Priority queues
|
|
27
|
+
- Concurrency control
|
|
28
|
+
- Dead letter queue for failed jobs
|
|
29
|
+
- Job batching
|
|
30
|
+
- Progress tracking
|
|
31
|
+
- Type-safe job definitions
|
|
32
|
+
|
|
33
|
+
---
|
|
34
|
+
|
|
35
|
+
## Installation
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
npm install @flightdev/queue
|
|
39
|
+
|
|
40
|
+
# For Redis adapter
|
|
41
|
+
npm install ioredis
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
---
|
|
45
|
+
|
|
46
|
+
## Quick Start
|
|
47
|
+
|
|
48
|
+
```typescript
|
|
49
|
+
import { createQueue } from '@flight-framework/queue';
|
|
50
|
+
import { redis } from '@flightdev/queue/redis';
|
|
51
|
+
|
|
52
|
+
const queue = createQueue(redis({
|
|
53
|
+
url: process.env.REDIS_URL,
|
|
54
|
+
}));
|
|
55
|
+
|
|
56
|
+
// Define a job handler
|
|
57
|
+
queue.define('sendEmail', async (job) => {
|
|
58
|
+
await sendEmail(job.data.to, job.data.subject, job.data.body);
|
|
59
|
+
console.log(`Email sent to ${job.data.to}`);
|
|
60
|
+
});
|
|
61
|
+
|
|
62
|
+
// Enqueue a job
|
|
63
|
+
await queue.enqueue('sendEmail', {
|
|
64
|
+
to: 'user@example.com',
|
|
65
|
+
subject: 'Welcome!',
|
|
66
|
+
body: 'Thanks for signing up.',
|
|
67
|
+
});
|
|
68
|
+
|
|
69
|
+
// Start processing
|
|
70
|
+
queue.process();
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
---
|
|
74
|
+
|
|
75
|
+
## Adapters
|
|
76
|
+
|
|
77
|
+
### Redis (Production)
|
|
78
|
+
|
|
79
|
+
Persistent, distributed queue using Redis.
|
|
80
|
+
|
|
81
|
+
```typescript
|
|
82
|
+
import { redis } from '@flightdev/queue/redis';
|
|
83
|
+
|
|
84
|
+
const adapter = redis({
|
|
85
|
+
url: process.env.REDIS_URL,
|
|
86
|
+
prefix: 'myapp:queue:', // Key prefix
|
|
87
|
+
maxRetries: 3,
|
|
88
|
+
});
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
### Memory (Development)
|
|
92
|
+
|
|
93
|
+
In-memory queue for development and testing.
|
|
94
|
+
|
|
95
|
+
```typescript
|
|
96
|
+
import { memory } from '@flightdev/queue/memory';
|
|
97
|
+
|
|
98
|
+
const adapter = memory();
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
### IndexedDB (Client-Side)
|
|
102
|
+
|
|
103
|
+
Browser-based queue for offline-capable apps.
|
|
104
|
+
|
|
105
|
+
```typescript
|
|
106
|
+
import { indexeddb } from '@flight-framework/queue-indexeddb';
|
|
107
|
+
|
|
108
|
+
const adapter = indexeddb({
|
|
109
|
+
dbName: 'myapp-queue',
|
|
110
|
+
});
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
---
|
|
114
|
+
|
|
115
|
+
## Defining Jobs
|
|
116
|
+
|
|
117
|
+
### Basic Definition
|
|
118
|
+
|
|
119
|
+
```typescript
|
|
120
|
+
queue.define('processImage', async (job) => {
|
|
121
|
+
const { imageUrl, size } = job.data;
|
|
122
|
+
const result = await resizeImage(imageUrl, size);
|
|
123
|
+
return result;
|
|
124
|
+
});
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
### With Options
|
|
128
|
+
|
|
129
|
+
```typescript
|
|
130
|
+
queue.define('sendNotification', async (job) => {
|
|
131
|
+
await sendPushNotification(job.data);
|
|
132
|
+
}, {
|
|
133
|
+
retries: 5,
|
|
134
|
+
backoff: 'exponential',
|
|
135
|
+
timeout: 30000, // 30 seconds
|
|
136
|
+
priority: 'high',
|
|
137
|
+
});
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
### Type-Safe Jobs
|
|
141
|
+
|
|
142
|
+
```typescript
|
|
143
|
+
interface EmailJob {
|
|
144
|
+
to: string;
|
|
145
|
+
subject: string;
|
|
146
|
+
body: string;
|
|
147
|
+
attachments?: string[];
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
queue.define<EmailJob>('sendEmail', async (job) => {
|
|
151
|
+
// job.data is typed as EmailJob
|
|
152
|
+
await sendEmail(job.data.to, job.data.subject, job.data.body);
|
|
153
|
+
});
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
---
|
|
157
|
+
|
|
158
|
+
## Enqueueing Jobs
|
|
159
|
+
|
|
160
|
+
### Basic Enqueue
|
|
161
|
+
|
|
162
|
+
```typescript
|
|
163
|
+
await queue.enqueue('sendEmail', {
|
|
164
|
+
to: 'user@example.com',
|
|
165
|
+
subject: 'Hello',
|
|
166
|
+
body: 'World',
|
|
167
|
+
});
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
### With Options
|
|
171
|
+
|
|
172
|
+
```typescript
|
|
173
|
+
await queue.enqueue('sendEmail', data, {
|
|
174
|
+
delay: 60000, // Delay 1 minute
|
|
175
|
+
priority: 'high', // high, normal, low
|
|
176
|
+
attempts: 5, // Max retry attempts
|
|
177
|
+
backoff: 'exponential', // Retry strategy
|
|
178
|
+
jobId: 'unique-id', // Dedupe by ID
|
|
179
|
+
});
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
### Delayed Jobs
|
|
183
|
+
|
|
184
|
+
```typescript
|
|
185
|
+
// Run in 5 minutes
|
|
186
|
+
await queue.enqueue('reminder', data, {
|
|
187
|
+
delay: 5 * 60 * 1000,
|
|
188
|
+
});
|
|
189
|
+
|
|
190
|
+
// Run at specific time
|
|
191
|
+
await queue.enqueue('reminder', data, {
|
|
192
|
+
runAt: new Date('2026-01-15T10:00:00Z'),
|
|
193
|
+
});
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
### Batch Enqueue
|
|
197
|
+
|
|
198
|
+
```typescript
|
|
199
|
+
await queue.enqueueMany('sendEmail', [
|
|
200
|
+
{ to: 'user1@example.com', subject: 'Hello' },
|
|
201
|
+
{ to: 'user2@example.com', subject: 'Hello' },
|
|
202
|
+
{ to: 'user3@example.com', subject: 'Hello' },
|
|
203
|
+
]);
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
---
|
|
207
|
+
|
|
208
|
+
## Processing Jobs
|
|
209
|
+
|
|
210
|
+
### Start Processing
|
|
211
|
+
|
|
212
|
+
```typescript
|
|
213
|
+
// Process all queues
|
|
214
|
+
queue.process();
|
|
215
|
+
|
|
216
|
+
// Process specific queues with concurrency
|
|
217
|
+
queue.process({
|
|
218
|
+
queues: ['sendEmail', 'processImage'],
|
|
219
|
+
concurrency: 5,
|
|
220
|
+
});
|
|
221
|
+
```
|
|
222
|
+
|
|
223
|
+
### Graceful Shutdown
|
|
224
|
+
|
|
225
|
+
```typescript
|
|
226
|
+
process.on('SIGTERM', async () => {
|
|
227
|
+
await queue.shutdown({ timeout: 30000 });
|
|
228
|
+
process.exit(0);
|
|
229
|
+
});
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
### Progress Updates
|
|
233
|
+
|
|
234
|
+
```typescript
|
|
235
|
+
queue.define('processVideo', async (job) => {
|
|
236
|
+
for (let i = 0; i <= 100; i += 10) {
|
|
237
|
+
await processChunk(job.data, i);
|
|
238
|
+
await job.updateProgress(i);
|
|
239
|
+
}
|
|
240
|
+
});
|
|
241
|
+
|
|
242
|
+
// Listen to progress
|
|
243
|
+
queue.on('progress', (jobId, progress) => {
|
|
244
|
+
console.log(`Job ${jobId}: ${progress}%`);
|
|
245
|
+
});
|
|
246
|
+
```
|
|
247
|
+
|
|
248
|
+
---
|
|
249
|
+
|
|
250
|
+
## Scheduling
|
|
251
|
+
|
|
252
|
+
### Recurring Jobs
|
|
253
|
+
|
|
254
|
+
```typescript
|
|
255
|
+
import { schedule } from '@flightdev/queue';
|
|
256
|
+
|
|
257
|
+
// Run every hour
|
|
258
|
+
schedule(queue, 'cleanup', {}, {
|
|
259
|
+
pattern: '0 * * * *', // Cron pattern
|
|
260
|
+
});
|
|
261
|
+
|
|
262
|
+
// Run every 5 minutes
|
|
263
|
+
schedule(queue, 'healthCheck', {}, {
|
|
264
|
+
every: 5 * 60 * 1000,
|
|
265
|
+
});
|
|
266
|
+
```
|
|
267
|
+
|
|
268
|
+
### Cron Patterns
|
|
269
|
+
|
|
270
|
+
| Pattern | Description |
|
|
271
|
+
|---------|-------------|
|
|
272
|
+
| `0 * * * *` | Every hour |
|
|
273
|
+
| `0 0 * * *` | Every day at midnight |
|
|
274
|
+
| `0 0 * * 0` | Every Sunday |
|
|
275
|
+
| `*/5 * * * *` | Every 5 minutes |
|
|
276
|
+
| `0 9 * * 1-5` | 9am on weekdays |
|
|
277
|
+
|
|
278
|
+
---
|
|
279
|
+
|
|
280
|
+
## Error Handling
|
|
281
|
+
|
|
282
|
+
### Retry Strategy
|
|
283
|
+
|
|
284
|
+
```typescript
|
|
285
|
+
queue.define('unreliableTask', handler, {
|
|
286
|
+
retries: 3,
|
|
287
|
+
backoff: 'exponential', // 1s, 2s, 4s, 8s...
|
|
288
|
+
// or: 'linear' // 1s, 2s, 3s, 4s...
|
|
289
|
+
// or: 'fixed' // 1s, 1s, 1s, 1s...
|
|
290
|
+
});
|
|
291
|
+
```
|
|
292
|
+
|
|
293
|
+
### Dead Letter Queue
|
|
294
|
+
|
|
295
|
+
Jobs that exceed max retries go to the dead letter queue:
|
|
296
|
+
|
|
297
|
+
```typescript
|
|
298
|
+
// Get failed jobs
|
|
299
|
+
const failed = await queue.getDeadJobs();
|
|
300
|
+
|
|
301
|
+
// Retry a dead job
|
|
302
|
+
await queue.retryDead(jobId);
|
|
303
|
+
|
|
304
|
+
// Clear dead jobs
|
|
305
|
+
await queue.clearDead();
|
|
306
|
+
```
|
|
307
|
+
|
|
308
|
+
### Event Listeners
|
|
309
|
+
|
|
310
|
+
```typescript
|
|
311
|
+
queue.on('completed', (job) => {
|
|
312
|
+
console.log(`Job ${job.id} completed`);
|
|
313
|
+
});
|
|
314
|
+
|
|
315
|
+
queue.on('failed', (job, error) => {
|
|
316
|
+
console.error(`Job ${job.id} failed:`, error);
|
|
317
|
+
});
|
|
318
|
+
|
|
319
|
+
queue.on('stalled', (job) => {
|
|
320
|
+
console.warn(`Job ${job.id} stalled`);
|
|
321
|
+
});
|
|
322
|
+
```
|
|
323
|
+
|
|
324
|
+
---
|
|
325
|
+
|
|
326
|
+
## API Reference
|
|
327
|
+
|
|
328
|
+
### createQueue Options
|
|
329
|
+
|
|
330
|
+
| Option | Type | Default | Description |
|
|
331
|
+
|--------|------|---------|-------------|
|
|
332
|
+
| `concurrency` | `number` | `1` | Max concurrent jobs |
|
|
333
|
+
| `prefix` | `string` | `'queue:'` | Key prefix |
|
|
334
|
+
| `defaultRetries` | `number` | `3` | Default retry count |
|
|
335
|
+
|
|
336
|
+
### queue.define Options
|
|
337
|
+
|
|
338
|
+
| Option | Type | Default | Description |
|
|
339
|
+
|--------|------|---------|-------------|
|
|
340
|
+
| `retries` | `number` | `3` | Max retries |
|
|
341
|
+
| `backoff` | `string` | `'exponential'` | Retry strategy |
|
|
342
|
+
| `timeout` | `number` | `30000` | Job timeout (ms) |
|
|
343
|
+
| `priority` | `string` | `'normal'` | Queue priority |
|
|
344
|
+
|
|
345
|
+
### queue.enqueue Options
|
|
346
|
+
|
|
347
|
+
| Option | Type | Description |
|
|
348
|
+
|--------|------|-------------|
|
|
349
|
+
| `delay` | `number` | Delay in ms |
|
|
350
|
+
| `runAt` | `Date` | Specific run time |
|
|
351
|
+
| `priority` | `string` | Job priority |
|
|
352
|
+
| `attempts` | `number` | Max attempts |
|
|
353
|
+
| `jobId` | `string` | Unique job ID |
|
|
354
|
+
|
|
355
|
+
### Queue Methods
|
|
356
|
+
|
|
357
|
+
| Method | Description |
|
|
358
|
+
|--------|-------------|
|
|
359
|
+
| `define(name, handler, opts?)` | Define job handler |
|
|
360
|
+
| `enqueue(name, data, opts?)` | Add job to queue |
|
|
361
|
+
| `enqueueMany(name, items)` | Batch enqueue |
|
|
362
|
+
| `process(opts?)` | Start processing |
|
|
363
|
+
| `shutdown(opts?)` | Graceful shutdown |
|
|
364
|
+
| `getJob(id)` | Get job by ID |
|
|
365
|
+
| `getDeadJobs()` | Get failed jobs |
|
|
366
|
+
| `retryDead(id)` | Retry failed job |
|
|
367
|
+
|
|
368
|
+
---
|
|
369
|
+
|
|
370
|
+
## License
|
|
371
|
+
|
|
372
|
+
MIT
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
import * as _bullmq from 'bullmq';
|
|
2
|
+
import { QueueAdapter } from '../index.js';
|
|
3
|
+
|
|
4
|
+
interface BullMQConfig {
|
|
5
|
+
/** Redis connection options */
|
|
6
|
+
connection: {
|
|
7
|
+
host?: string;
|
|
8
|
+
port?: number;
|
|
9
|
+
password?: string;
|
|
10
|
+
db?: number;
|
|
11
|
+
url?: string;
|
|
12
|
+
};
|
|
13
|
+
/** Queue name (default: 'flight-jobs') */
|
|
14
|
+
queueName?: string;
|
|
15
|
+
/** Default job options */
|
|
16
|
+
defaultJobOptions?: {
|
|
17
|
+
attempts?: number;
|
|
18
|
+
backoff?: {
|
|
19
|
+
type: 'exponential' | 'fixed';
|
|
20
|
+
delay: number;
|
|
21
|
+
};
|
|
22
|
+
removeOnComplete?: boolean | number;
|
|
23
|
+
removeOnFail?: boolean | number;
|
|
24
|
+
};
|
|
25
|
+
}
|
|
26
|
+
/**
|
|
27
|
+
* Create a BullMQ adapter for the queue service.
|
|
28
|
+
*
|
|
29
|
+
* @param config - BullMQ configuration
|
|
30
|
+
* @returns Queue adapter
|
|
31
|
+
*/
|
|
32
|
+
declare function bullmq(config: BullMQConfig): QueueAdapter;
|
|
33
|
+
/**
|
|
34
|
+
* Create a BullMQ worker for processing jobs.
|
|
35
|
+
*
|
|
36
|
+
* This is an advanced API for when you need more control over the worker.
|
|
37
|
+
* For most cases, use queue.process() instead.
|
|
38
|
+
*
|
|
39
|
+
* @example
|
|
40
|
+
* ```typescript
|
|
41
|
+
* import { createBullMQWorker } from '@flightdev/queue/bullmq';
|
|
42
|
+
*
|
|
43
|
+
* const worker = await createBullMQWorker({
|
|
44
|
+
* connection: { host: 'localhost' },
|
|
45
|
+
* queueName: 'my-jobs',
|
|
46
|
+
* processor: async (job) => {
|
|
47
|
+
* console.log('Processing:', job.name);
|
|
48
|
+
* // ... job logic
|
|
49
|
+
* },
|
|
50
|
+
* concurrency: 10,
|
|
51
|
+
* });
|
|
52
|
+
*
|
|
53
|
+
* // Graceful shutdown
|
|
54
|
+
* process.on('SIGTERM', () => worker.close());
|
|
55
|
+
* ```
|
|
56
|
+
*/
|
|
57
|
+
declare function createBullMQWorker(config: BullMQConfig & {
|
|
58
|
+
processor: (job: any) => Promise<void>;
|
|
59
|
+
concurrency?: number;
|
|
60
|
+
}): Promise<_bullmq.Worker<any, void, string>>;
|
|
61
|
+
|
|
62
|
+
export { type BullMQConfig, bullmq, createBullMQWorker };
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
// src/adapters/bullmq.ts
/**
 * Create a BullMQ-backed queue adapter.
 *
 * The `bullmq` module is loaded lazily on first use, so it stays an
 * optional dependency for consumers that pick a different adapter.
 *
 * @param {object} config - BullMQ configuration: `connection`, optional
 *   `queueName` (default "flight-jobs") and `defaultJobOptions`.
 * @returns {object} QueueAdapter implementation backed by a BullMQ Queue.
 */
function bullmq(config) {
  const queueName = config.queueName ?? "flight-jobs";
  let Queue;
  let queueInstance;
  // Lazily create (and cache) the underlying BullMQ Queue instance.
  async function getQueue() {
    if (!queueInstance) {
      const bullmqModule = await import("bullmq");
      Queue = bullmqModule.Queue;
      queueInstance = new Queue(queueName, {
        connection: config.connection,
        defaultJobOptions: {
          attempts: config.defaultJobOptions?.attempts ?? 3,
          backoff: config.defaultJobOptions?.backoff ?? {
            type: "exponential",
            delay: 1000
          },
          removeOnComplete: config.defaultJobOptions?.removeOnComplete ?? 100,
          removeOnFail: config.defaultJobOptions?.removeOnFail ?? 1000
        }
      });
    }
    return queueInstance;
  }
  // Translate a BullMQ job state into the adapter's JobStatus vocabulary.
  function mapStatus(bullmqState) {
    const statusMap = {
      waiting: "pending",
      delayed: "pending",
      active: "processing",
      completed: "completed",
      failed: "failed"
    };
    return statusMap[bullmqState] ?? "pending";
  }
  // Convert a BullMQ job object into the adapter-neutral Job shape.
  function toJob(bullmqJob) {
    return {
      id: bullmqJob.id,
      name: bullmqJob.name,
      data: bullmqJob.data,
      // NOTE(review): BullMQ's Job#getState() returns a Promise, so this
      // synchronous lookup falls through to "pending" for live jobs —
      // confirm against the installed BullMQ version and consider
      // awaiting the state upstream.
      status: mapStatus(bullmqJob.getState?.() ?? "waiting"),
      attempts: bullmqJob.attemptsMade ?? 0,
      maxAttempts: bullmqJob.opts?.attempts ?? 3,
      createdAt: new Date(bullmqJob.timestamp),
      scheduledAt: bullmqJob.delay ? new Date(bullmqJob.timestamp + bullmqJob.delay) : void 0,
      startedAt: bullmqJob.processedOn ? new Date(bullmqJob.processedOn) : void 0,
      completedAt: bullmqJob.finishedOn ? new Date(bullmqJob.finishedOn) : void 0,
      error: bullmqJob.failedReason
    };
  }
  return {
    name: "bullmq",
    // Add a job. `scheduledAt` takes precedence over `delay` when both
    // are set (it is applied last).
    async enqueue(name, data, options) {
      const queue = await getQueue();
      const bullmqOptions = {};
      if (options?.delay) {
        bullmqOptions.delay = options.delay;
      }
      if (options?.maxAttempts) {
        bullmqOptions.attempts = options.maxAttempts;
      }
      if (options?.priority) {
        bullmqOptions.priority = options.priority;
      }
      if (options?.scheduledAt) {
        bullmqOptions.delay = options.scheduledAt.getTime() - Date.now();
      }
      const job = await queue.add(name, data, bullmqOptions);
      return toJob(job);
    },
    // Peek at the oldest waiting job. BullMQ normally pulls work through
    // Workers, so this does not lock or claim the job.
    async dequeue(_name) {
      const queue = await getQueue();
      const jobs = await queue.getWaiting(0, 0);
      if (jobs.length > 0) {
        return toJob(jobs[0]);
      }
      return null;
    },
    async complete(jobId) {
      const queue = await getQueue();
      const job = await queue.getJob(jobId);
      if (job) {
        // NOTE(review): moveToCompleted expects a lock token as its second
        // argument in current BullMQ versions; passing `true` may be
        // rejected at runtime — verify against the BullMQ Job API.
        await job.moveToCompleted("done", true);
      }
    },
    async fail(jobId, error) {
      const queue = await getQueue();
      const job = await queue.getJob(jobId);
      if (job) {
        // NOTE(review): same lock-token caveat as moveToCompleted above.
        await job.moveToFailed(new Error(error), true);
      }
    },
    // Re-queue a previously failed job.
    async retry(jobId) {
      const queue = await getQueue();
      const job = await queue.getJob(jobId);
      if (job) {
        await job.retry();
      }
    },
    async getJob(jobId) {
      const queue = await getQueue();
      const job = await queue.getJob(jobId);
      if (!job) return null;
      return toJob(job);
    },
    // Aggregate BullMQ's counts into the adapter's four-bucket stats shape;
    // delayed jobs count as pending.
    async getStats() {
      const queue = await getQueue();
      const counts = await queue.getJobCounts();
      return {
        pending: (counts.waiting ?? 0) + (counts.delayed ?? 0),
        processing: counts.active ?? 0,
        completed: counts.completed ?? 0,
        failed: counts.failed ?? 0
      };
    }
  };
}
|
|
119
|
+
/**
 * Create a standalone BullMQ Worker with basic completion/failure logging.
 * Advanced API; most callers should use queue.process() instead.
 *
 * @param {object} config - connection, processor, optional queueName and concurrency.
 * @returns {Promise<object>} the started BullMQ Worker.
 */
async function createBullMQWorker(config) {
  // Lazily load bullmq so it remains an optional dependency.
  const bullmqModule = await import("bullmq");
  const name = config.queueName ?? "flight-jobs";
  const worker = new bullmqModule.Worker(name, config.processor, {
    connection: config.connection,
    concurrency: config.concurrency ?? 1
  });
  const logCompleted = (job) => {
    console.log(`[BullMQ] Job ${job.id} completed`);
  };
  const logFailed = (job, err) => {
    console.error(`[BullMQ] Job ${job?.id} failed:`, err.message);
  };
  worker.on("completed", logCompleted);
  worker.on("failed", logFailed);
  return worker;
}
|
|
138
|
+
export {
|
|
139
|
+
bullmq,
|
|
140
|
+
createBullMQWorker
|
|
141
|
+
};
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
// src/adapters/memory.ts
/**
 * In-memory queue adapter for development and tests.
 *
 * Jobs live in a Map keyed by id; pending ids sit in a FIFO array.
 * Nothing is persisted — all state is lost when the process exits.
 */
var memory = () => {
  const jobs = /* @__PURE__ */ new Map();
  const queue = [];
  let idCounter = 0;
  const adapter = {
    name: "memory",
    /**
     * Create a job record and append it to the pending queue.
     * Honors `options.scheduledAt`, or derives it from `options.delay`
     * (milliseconds from now); `scheduledAt` wins when both are given.
     */
    async enqueue(name, data, options) {
      const scheduledAt = options?.scheduledAt ?? (options?.delay != null ? new Date(Date.now() + options.delay) : void 0);
      const job = {
        id: `job_${++idCounter}`,
        name,
        data,
        status: "pending",
        attempts: 0,
        maxAttempts: options?.maxAttempts ?? 3,
        createdAt: /* @__PURE__ */ new Date(),
        scheduledAt
      };
      jobs.set(job.id, job);
      queue.push(job.id);
      return job;
    },
    /** Take the next runnable pending job, marking it processing; null if none. */
    async dequeue() {
      const jobId = queue.shift();
      if (!jobId) return null;
      const job = jobs.get(jobId);
      if (!job || job.status !== "pending") return null;
      // Delayed jobs are requeued untouched until their scheduled time arrives.
      if (job.scheduledAt && job.scheduledAt.getTime() > Date.now()) {
        queue.push(jobId);
        return null;
      }
      job.status = "processing";
      job.startedAt = /* @__PURE__ */ new Date();
      job.attempts++;
      return job;
    },
    /** Mark a job completed (no-op for unknown ids). */
    async complete(jobId) {
      const job = jobs.get(jobId);
      if (job) {
        job.status = "completed";
        job.completedAt = /* @__PURE__ */ new Date();
      }
    },
    /** Mark a job failed and record the error message (no-op for unknown ids). */
    async fail(jobId, error) {
      const job = jobs.get(jobId);
      if (job) {
        job.status = "failed";
        job.error = error;
      }
    },
    /** Return a job to the back of the pending queue. */
    async retry(jobId) {
      const job = jobs.get(jobId);
      if (job) {
        job.status = "pending";
        queue.push(jobId);
      }
    },
    async getJob(jobId) {
      return jobs.get(jobId) ?? null;
    },
    /** Count jobs by status across everything ever enqueued. */
    async getStats() {
      let pending = 0, processing = 0, completed = 0, failed = 0;
      for (const job of jobs.values()) {
        if (job.status === "pending") pending++;
        else if (job.status === "processing") processing++;
        else if (job.status === "completed") completed++;
        else if (job.status === "failed") failed++;
      }
      return { pending, processing, completed, failed };
    }
  };
  return adapter;
};
var memory_default = memory;
export {
  memory_default as default,
  memory
};
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { QueueAdapterFactory } from '../index.js';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Redis Queue Adapter
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
interface RedisConfig {
|
|
8
|
+
url?: string;
|
|
9
|
+
prefix?: string;
|
|
10
|
+
}
|
|
11
|
+
declare const redis: QueueAdapterFactory<RedisConfig>;
|
|
12
|
+
|
|
13
|
+
export { type RedisConfig, redis as default, redis };
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
// src/adapters/redis.ts
/**
 * Redis-backed queue adapter built on plain ioredis commands.
 *
 * Layout: `${prefix}pending` is a list of serialized pending jobs
 * (LPUSH in, RPOP out = FIFO) and `${prefix}jobs` is a hash of
 * jobId -> serialized job.
 */
var redis = (config = {}) => {
  const { url = "redis://localhost:6379", prefix = "flight:queue:" } = config;
  let client = null;
  // Lazily connect so ioredis remains an optional dependency.
  async function getClient() {
    if (!client) {
      try {
        const Redis = (await import("ioredis")).default;
        client = new Redis(url);
      } catch {
        throw new Error("@flightdev/queue: ioredis not installed. Run: npm install ioredis");
      }
    }
    return client;
  }
  /**
   * Deserialize a stored job. JSON round-trips turn Date fields into ISO
   * strings, which would violate the Job interface — restore them.
   */
  function parseJob(raw) {
    const job = JSON.parse(raw);
    for (const key of ["createdAt", "scheduledAt", "startedAt", "completedAt"]) {
      if (job[key]) job[key] = new Date(job[key]);
    }
    return job;
  }
  const adapter = {
    name: "redis",
    // Create a job, push it onto the pending list and index it by id.
    async enqueue(name, data, options) {
      const redis2 = await getClient();
      const job = {
        id: `job_${Date.now()}_${Math.random().toString(36).slice(2)}`,
        name,
        data,
        status: "pending",
        attempts: 0,
        maxAttempts: options?.maxAttempts ?? 3,
        createdAt: /* @__PURE__ */ new Date()
      };
      await redis2.lpush(`${prefix}pending`, JSON.stringify(job));
      await redis2.hset(`${prefix}jobs`, job.id, JSON.stringify(job));
      return job;
    },
    // Pop the oldest pending job and mark it processing.
    // NOTE(review): RPOP followed by HSET is not atomic — a crash between
    // the two loses the in-flight state. Consider LMOVE to a processing
    // list for at-least-once semantics.
    async dequeue() {
      const redis2 = await getClient();
      const data = await redis2.rpop(`${prefix}pending`);
      if (!data) return null;
      const job = parseJob(data);
      job.status = "processing";
      job.startedAt = /* @__PURE__ */ new Date();
      job.attempts++;
      await redis2.hset(`${prefix}jobs`, job.id, JSON.stringify(job));
      return job;
    },
    // Mark a job completed (no-op for unknown ids).
    async complete(jobId) {
      const redis2 = await getClient();
      const data = await redis2.hget(`${prefix}jobs`, jobId);
      if (data) {
        const job = parseJob(data);
        job.status = "completed";
        job.completedAt = /* @__PURE__ */ new Date();
        await redis2.hset(`${prefix}jobs`, jobId, JSON.stringify(job));
      }
    },
    // Mark a job failed and record the error message.
    async fail(jobId, error) {
      const redis2 = await getClient();
      const data = await redis2.hget(`${prefix}jobs`, jobId);
      if (data) {
        const job = parseJob(data);
        job.status = "failed";
        job.error = error;
        await redis2.hset(`${prefix}jobs`, jobId, JSON.stringify(job));
      }
    },
    // Return a job to the back of the pending list.
    async retry(jobId) {
      const redis2 = await getClient();
      const data = await redis2.hget(`${prefix}jobs`, jobId);
      if (data) {
        const job = parseJob(data);
        job.status = "pending";
        await redis2.lpush(`${prefix}pending`, JSON.stringify(job));
        await redis2.hset(`${prefix}jobs`, jobId, JSON.stringify(job));
      }
    },
    async getJob(jobId) {
      const redis2 = await getClient();
      const data = await redis2.hget(`${prefix}jobs`, jobId);
      return data ? parseJob(data) : null;
    },
    // Count jobs by status. O(n) over all stored jobs — fine for small
    // queues; large deployments should prefer the BullMQ adapter.
    async getStats() {
      const redis2 = await getClient();
      const all = await redis2.hgetall(`${prefix}jobs`);
      let pending = 0, processing = 0, completed = 0, failed = 0;
      for (const val of Object.values(all)) {
        const job = JSON.parse(val);
        if (job.status === "pending") pending++;
        else if (job.status === "processing") processing++;
        else if (job.status === "completed") completed++;
        else if (job.status === "failed") failed++;
      }
      return { pending, processing, completed, failed };
    }
  };
  return adapter;
};
var redis_default = redis;
export {
  redis_default as default,
  redis
};
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @flightdev/queue - Agnostic Background Job Queue
|
|
3
|
+
*
|
|
4
|
+
* @example
|
|
5
|
+
* ```typescript
|
|
6
|
+
* import { createQueue } from '@flightdev/queue';
|
|
7
|
+
* import { redis } from '@flightdev/queue/redis';
|
|
8
|
+
*
|
|
9
|
+
* const queue = createQueue(redis({ url: process.env.REDIS_URL }));
|
|
10
|
+
*
|
|
11
|
+
* // Define a job
|
|
12
|
+
* queue.define('sendEmail', async (job) => {
|
|
13
|
+
* await sendEmail(job.data.to, job.data.subject, job.data.body);
|
|
14
|
+
* });
|
|
15
|
+
*
|
|
16
|
+
* // Enqueue a job
|
|
17
|
+
* await queue.enqueue('sendEmail', { to: 'user@example.com', subject: 'Welcome!' });
|
|
18
|
+
*
|
|
19
|
+
* // Start processing
|
|
20
|
+
* queue.process();
|
|
21
|
+
* ```
|
|
22
|
+
*/
|
|
23
|
+
type JobStatus = 'pending' | 'processing' | 'completed' | 'failed' | 'retrying';
|
|
24
|
+
interface Job<T = unknown> {
|
|
25
|
+
id: string;
|
|
26
|
+
name: string;
|
|
27
|
+
data: T;
|
|
28
|
+
status: JobStatus;
|
|
29
|
+
attempts: number;
|
|
30
|
+
maxAttempts: number;
|
|
31
|
+
createdAt: Date;
|
|
32
|
+
scheduledAt?: Date;
|
|
33
|
+
startedAt?: Date;
|
|
34
|
+
completedAt?: Date;
|
|
35
|
+
error?: string;
|
|
36
|
+
}
|
|
37
|
+
interface JobOptions {
|
|
38
|
+
delay?: number;
|
|
39
|
+
maxAttempts?: number;
|
|
40
|
+
priority?: number;
|
|
41
|
+
scheduledAt?: Date;
|
|
42
|
+
}
|
|
43
|
+
type JobHandler<T = unknown> = (job: Job<T>) => Promise<void>;
|
|
44
|
+
interface QueueAdapter {
|
|
45
|
+
readonly name: string;
|
|
46
|
+
enqueue<T>(name: string, data: T, options?: JobOptions): Promise<Job<T>>;
|
|
47
|
+
dequeue(name?: string): Promise<Job | null>;
|
|
48
|
+
complete(jobId: string): Promise<void>;
|
|
49
|
+
fail(jobId: string, error: string): Promise<void>;
|
|
50
|
+
retry(jobId: string): Promise<void>;
|
|
51
|
+
getJob(jobId: string): Promise<Job | null>;
|
|
52
|
+
getStats(): Promise<{
|
|
53
|
+
pending: number;
|
|
54
|
+
processing: number;
|
|
55
|
+
completed: number;
|
|
56
|
+
failed: number;
|
|
57
|
+
}>;
|
|
58
|
+
}
|
|
59
|
+
type QueueAdapterFactory<TConfig = unknown> = (config?: TConfig) => QueueAdapter;
|
|
60
|
+
interface QueueService {
|
|
61
|
+
readonly adapter: QueueAdapter;
|
|
62
|
+
define<T>(name: string, handler: JobHandler<T>): void;
|
|
63
|
+
enqueue<T>(name: string, data: T, options?: JobOptions): Promise<Job<T>>;
|
|
64
|
+
dequeue(name?: string): Promise<Job | null>;
|
|
65
|
+
complete(jobId: string): Promise<void>;
|
|
66
|
+
fail(jobId: string, error: string): Promise<void>;
|
|
67
|
+
retry(jobId: string): Promise<void>;
|
|
68
|
+
getJob(jobId: string): Promise<Job | null>;
|
|
69
|
+
process(concurrency?: number): void;
|
|
70
|
+
stop(): void;
|
|
71
|
+
getStats(): Promise<{
|
|
72
|
+
pending: number;
|
|
73
|
+
processing: number;
|
|
74
|
+
completed: number;
|
|
75
|
+
failed: number;
|
|
76
|
+
}>;
|
|
77
|
+
}
|
|
78
|
+
declare function createQueue(adapter: QueueAdapter): QueueService;
|
|
79
|
+
|
|
80
|
+
export { type Job, type JobHandler, type JobOptions, type JobStatus, type QueueAdapter, type QueueAdapterFactory, type QueueService, createQueue };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
// src/index.ts
/**
 * Create a QueueService bound to a storage adapter.
 *
 * The service registers named handlers, delegates persistence to the
 * adapter, and polls for work every 100ms once `process()` is called.
 *
 * @param {object} adapter - a QueueAdapter (memory, redis, bullmq, ...).
 * @returns {object} QueueService.
 */
function createQueue(adapter) {
  const handlers = /* @__PURE__ */ new Map();
  let processing = false;
  let intervalId = null;
  /**
   * Pull one job and run its handler. Completes the job on success;
   * retries while attempts remain, otherwise marks it failed. Jobs with
   * no registered handler are failed immediately.
   */
  async function processJob() {
    const job = await adapter.dequeue();
    if (!job) return;
    const handler = handlers.get(job.name);
    if (!handler) {
      await adapter.fail(job.id, `No handler for job: ${job.name}`);
      return;
    }
    try {
      await handler(job);
      await adapter.complete(job.id);
    } catch (error) {
      const errorMsg = error instanceof Error ? error.message : "Unknown error";
      if (job.attempts < job.maxAttempts) {
        await adapter.retry(job.id);
      } else {
        await adapter.fail(job.id, errorMsg);
      }
    }
  }
  return {
    adapter,
    /** Register (or replace) the handler for a job name. */
    define(name, handler) {
      handlers.set(name, handler);
    },
    enqueue(name, data, options) {
      return adapter.enqueue(name, data, options);
    },
    dequeue(name) {
      return adapter.dequeue(name);
    },
    complete(jobId) {
      return adapter.complete(jobId);
    },
    fail(jobId, error) {
      return adapter.fail(jobId, error);
    },
    retry(jobId) {
      return adapter.retry(jobId);
    },
    getJob(jobId) {
      return adapter.getJob(jobId);
    },
    /** Start polling for jobs; `concurrency` jobs are attempted per tick. */
    process(concurrency = 1) {
      if (processing) return;
      processing = true;
      intervalId = setInterval(async () => {
        try {
          const promises = Array.from({ length: concurrency }, () => processJob());
          await Promise.all(promises);
        } catch (error) {
          // An adapter failure inside setInterval would otherwise become an
          // unhandled promise rejection (fatal in modern Node). Log it and
          // keep polling.
          console.error("[queue] processing error:", error);
        }
      }, 100);
    },
    /** Stop polling. In-flight handlers finish on their own. */
    stop() {
      processing = false;
      if (intervalId) {
        clearInterval(intervalId);
        intervalId = null;
      }
    },
    getStats() {
      return adapter.getStats();
    }
  };
}
export {
  createQueue
};
|
package/dist/jobs.d.ts
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
import { Job, JobOptions, QueueService } from './index.js';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Declarative Job Definition API
|
|
5
|
+
*
|
|
6
|
+
* A more declarative way to define jobs with configuration,
|
|
7
|
+
* validation, and type safety.
|
|
8
|
+
*
|
|
9
|
+
* @example
|
|
10
|
+
* ```typescript
|
|
11
|
+
* import { defineJob, createJobRunner } from '@flightdev/queue/jobs';
|
|
12
|
+
* import { z } from 'zod';
|
|
13
|
+
*
|
|
14
|
+
* // Define a job with schema validation
|
|
15
|
+
* const sendEmailJob = defineJob({
|
|
16
|
+
* name: 'sendEmail',
|
|
17
|
+
* schema: z.object({
|
|
18
|
+
* to: z.string().email(),
|
|
19
|
+
* subject: z.string(),
|
|
20
|
+
* body: z.string(),
|
|
21
|
+
* }),
|
|
22
|
+
* handler: async ({ data, log }) => {
|
|
23
|
+
* log.info(`Sending email to ${data.to}`);
|
|
24
|
+
* await emailService.send(data);
|
|
25
|
+
* return { sent: true };
|
|
26
|
+
* },
|
|
27
|
+
* options: {
|
|
28
|
+
* maxAttempts: 3,
|
|
29
|
+
* backoff: 'exponential',
|
|
30
|
+
* },
|
|
31
|
+
* });
|
|
32
|
+
*
|
|
33
|
+
* // Enqueue with type safety
|
|
34
|
+
* await sendEmailJob.enqueue({ to: 'user@example.com', subject: 'Hi!' });
|
|
35
|
+
* ```
|
|
36
|
+
*/
|
|
37
|
+
|
|
38
|
+
/**
|
|
39
|
+
* Logger interface for job execution.
|
|
40
|
+
*/
|
|
41
|
+
interface JobLogger {
|
|
42
|
+
info: (message: string, data?: Record<string, unknown>) => void;
|
|
43
|
+
warn: (message: string, data?: Record<string, unknown>) => void;
|
|
44
|
+
error: (message: string, data?: Record<string, unknown>) => void;
|
|
45
|
+
debug: (message: string, data?: Record<string, unknown>) => void;
|
|
46
|
+
}
|
|
47
|
+
/**
|
|
48
|
+
* Context passed to job handlers.
|
|
49
|
+
*/
|
|
50
|
+
interface JobContext<TData> {
|
|
51
|
+
/** The job data */
|
|
52
|
+
data: TData;
|
|
53
|
+
/** Job metadata */
|
|
54
|
+
job: Job<TData>;
|
|
55
|
+
/** Logger scoped to this job */
|
|
56
|
+
log: JobLogger;
|
|
57
|
+
/** Report progress (0-100) */
|
|
58
|
+
progress: (percent: number) => Promise<void>;
|
|
59
|
+
/** Abort signal for cancellation */
|
|
60
|
+
signal: AbortSignal;
|
|
61
|
+
}
|
|
62
|
+
/**
|
|
63
|
+
* Result from a job handler.
|
|
64
|
+
*/
|
|
65
|
+
type JobResult = void | unknown;
|
|
66
|
+
/**
|
|
67
|
+
* Job handler function with context.
|
|
68
|
+
*/
|
|
69
|
+
type JobHandlerFn<TData, TResult = JobResult> = (context: JobContext<TData>) => Promise<TResult>;
|
|
70
|
+
/**
|
|
71
|
+
* Backoff strategy for retries.
|
|
72
|
+
*/
|
|
73
|
+
type BackoffStrategy = 'linear' | 'exponential' | 'fixed' | number[];
|
|
74
|
+
/**
|
|
75
|
+
* Job definition options.
|
|
76
|
+
*/
|
|
77
|
+
interface JobDefinitionOptions {
|
|
78
|
+
/** Maximum retry attempts (default: 3) */
|
|
79
|
+
maxAttempts?: number;
|
|
80
|
+
/** Backoff strategy for retries */
|
|
81
|
+
backoff?: BackoffStrategy;
|
|
82
|
+
/** Base delay for backoff in ms (default: 1000) */
|
|
83
|
+
backoffDelay?: number;
|
|
84
|
+
/** Default priority (higher = processed first) */
|
|
85
|
+
priority?: number;
|
|
86
|
+
/** Timeout in ms */
|
|
87
|
+
timeout?: number;
|
|
88
|
+
/** Only allow one instance of this job to run at a time */
|
|
89
|
+
unique?: boolean;
|
|
90
|
+
/** Rate limit: max jobs per interval */
|
|
91
|
+
rateLimit?: {
|
|
92
|
+
max: number;
|
|
93
|
+
interval: number;
|
|
94
|
+
};
|
|
95
|
+
}
|
|
96
|
+
/**
|
|
97
|
+
* Job definition configuration.
|
|
98
|
+
*/
|
|
99
|
+
interface JobDefinition<TData = unknown, TResult = JobResult> {
|
|
100
|
+
/** Unique job name */
|
|
101
|
+
name: string;
|
|
102
|
+
/** Zod-like schema for validation (optional) */
|
|
103
|
+
schema?: {
|
|
104
|
+
parse: (data: unknown) => TData;
|
|
105
|
+
};
|
|
106
|
+
/** Job handler function */
|
|
107
|
+
handler: JobHandlerFn<TData, TResult>;
|
|
108
|
+
/** Job options */
|
|
109
|
+
options?: JobDefinitionOptions;
|
|
110
|
+
/** Hook called before job runs */
|
|
111
|
+
onStart?: (job: Job<TData>) => void | Promise<void>;
|
|
112
|
+
/** Hook called after job completes */
|
|
113
|
+
onComplete?: (job: Job<TData>, result: TResult) => void | Promise<void>;
|
|
114
|
+
/** Hook called when job fails */
|
|
115
|
+
onError?: (job: Job<TData>, error: Error) => void | Promise<void>;
|
|
116
|
+
}
|
|
117
|
+
/**
|
|
118
|
+
* Defined job with enqueue methods.
|
|
119
|
+
*/
|
|
120
|
+
interface DefinedJob<TData, TResult = JobResult> {
|
|
121
|
+
/** Job name */
|
|
122
|
+
readonly name: string;
|
|
123
|
+
/** Definition config */
|
|
124
|
+
readonly definition: JobDefinition<TData, TResult>;
|
|
125
|
+
/** Enqueue a job */
|
|
126
|
+
enqueue: (data: TData, options?: Partial<JobOptions>) => Promise<Job<TData>>;
|
|
127
|
+
/** Schedule a job to run at a specific time */
|
|
128
|
+
schedule: (data: TData, at: Date) => Promise<Job<TData>>;
|
|
129
|
+
/** Schedule recurring job */
|
|
130
|
+
recurring: (data: TData, cron: string) => Promise<void>;
|
|
131
|
+
/** Register with a queue service */
|
|
132
|
+
register: (queue: QueueService) => void;
|
|
133
|
+
}
|
|
134
|
+
/**
|
|
135
|
+
* Set the global queue service for all defined jobs.
|
|
136
|
+
*/
|
|
137
|
+
declare function setGlobalQueue(queue: QueueService): void;
|
|
138
|
+
/**
|
|
139
|
+
* Get the global queue service.
|
|
140
|
+
*/
|
|
141
|
+
declare function getGlobalQueue(): QueueService | null;
|
|
142
|
+
/**
|
|
143
|
+
* Define a job with declarative configuration.
|
|
144
|
+
*
|
|
145
|
+
* @param definition - Job definition config
|
|
146
|
+
* @returns Defined job with enqueue methods
|
|
147
|
+
*
|
|
148
|
+
* @example
|
|
149
|
+
* ```typescript
|
|
150
|
+
* const processOrderJob = defineJob({
|
|
151
|
+
* name: 'processOrder',
|
|
152
|
+
* schema: OrderSchema,
|
|
153
|
+
* handler: async ({ data, log, progress }) => {
|
|
154
|
+
* log.info('Processing order', { orderId: data.id });
|
|
155
|
+
*
|
|
156
|
+
* await validateInventory(data);
|
|
157
|
+
* await progress(25);
|
|
158
|
+
*
|
|
159
|
+
* await chargePayment(data);
|
|
160
|
+
* await progress(50);
|
|
161
|
+
*
|
|
162
|
+
* await shipOrder(data);
|
|
163
|
+
* await progress(100);
|
|
164
|
+
*
|
|
165
|
+
* return { shipped: true };
|
|
166
|
+
* },
|
|
167
|
+
* options: {
|
|
168
|
+
* maxAttempts: 3,
|
|
169
|
+
* backoff: 'exponential',
|
|
170
|
+
* timeout: 60000,
|
|
171
|
+
* },
|
|
172
|
+
* onComplete: (job, result) => {
|
|
173
|
+
* console.log(`Order ${job.data.id} processed:`, result);
|
|
174
|
+
* },
|
|
175
|
+
* });
|
|
176
|
+
* ```
|
|
177
|
+
*/
|
|
178
|
+
declare function defineJob<TData = unknown, TResult = JobResult>(definition: JobDefinition<TData, TResult>): DefinedJob<TData, TResult>;
|
|
179
|
+
/**
|
|
180
|
+
* Create a job runner that processes defined jobs.
|
|
181
|
+
*
|
|
182
|
+
* @example
|
|
183
|
+
* ```typescript
|
|
184
|
+
* const runner = createJobRunner({
|
|
185
|
+
* queue,
|
|
186
|
+
* jobs: [sendEmailJob, processOrderJob],
|
|
187
|
+
* concurrency: 5,
|
|
188
|
+
* });
|
|
189
|
+
*
|
|
190
|
+
* await runner.start();
|
|
191
|
+
* ```
|
|
192
|
+
*/
|
|
193
|
+
declare function createJobRunner(options: {
|
|
194
|
+
queue: QueueService;
|
|
195
|
+
jobs: DefinedJob<any, any>[];
|
|
196
|
+
concurrency?: number;
|
|
197
|
+
}): {
|
|
198
|
+
start(): void;
|
|
199
|
+
stop(): void;
|
|
200
|
+
stats(): Promise<{
|
|
201
|
+
pending: number;
|
|
202
|
+
processing: number;
|
|
203
|
+
completed: number;
|
|
204
|
+
failed: number;
|
|
205
|
+
}>;
|
|
206
|
+
};
|
|
207
|
+
/**
|
|
208
|
+
* Calculate backoff delay based on strategy.
|
|
209
|
+
*/
|
|
210
|
+
declare function calculateBackoff(attempt: number, strategy: BackoffStrategy, baseDelay?: number): number;
|
|
211
|
+
|
|
212
|
+
export { type BackoffStrategy, type DefinedJob, type JobContext, type JobDefinition, type JobDefinitionOptions, type JobHandlerFn, type JobLogger, type JobResult, calculateBackoff, createJobRunner, defineJob, getGlobalQueue, setGlobalQueue };
|
package/dist/jobs.js
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
// src/jobs.ts

/** Fallback queue used by defined jobs that were never explicitly registered. */
let globalQueue = null;

/**
 * Set the process-wide default queue for all defined jobs.
 * @param {object} queue - QueueService instance.
 */
function setGlobalQueue(queue) {
  globalQueue = queue;
}

/**
 * Get the process-wide default queue.
 * @returns {object|null} The global QueueService, or null if unset.
 */
function getGlobalQueue() {
  return globalQueue;
}
|
|
9
|
+
/**
 * Define a job declaratively: name, optional schema validation, handler,
 * retry/priority options, and lifecycle hooks.
 *
 * @param {object} definition - Job definition (name, handler, schema?, options?, hooks?).
 * @returns {object} DefinedJob with enqueue/schedule/recurring/register.
 */
function defineJob(definition) {
  let registeredQueue = null;

  /** Build a console-backed logger whose lines are prefixed with job name and id. */
  function createLogger(jobId) {
    const prefix = `[Job:${definition.name}:${jobId}]`;
    return {
      info: (msg, data) => console.log(`${prefix} ${msg}`, data ?? ""),
      warn: (msg, data) => console.warn(`${prefix} ${msg}`, data ?? ""),
      error: (msg, data) => console.error(`${prefix} ${msg}`, data ?? ""),
      debug: (msg, data) => console.debug(`${prefix} ${msg}`, data ?? "")
    };
  }

  /**
   * Wrap the user handler into the queue's `(job) => Promise` shape:
   * schema validation, lifecycle hooks, cooperative timeout via AbortSignal.
   */
  function createHandler() {
    return async (job) => {
      const controller = new AbortController();
      const log = createLogger(job.id);
      let timeoutId;
      if (definition.options?.timeout) {
        // Cooperative timeout: only signals abort — handlers must observe
        // context.signal; the handler promise itself is not raced/rejected.
        timeoutId = setTimeout(() => {
          controller.abort();
        }, definition.options.timeout);
      }
      const context = {
        data: job.data,
        job,
        log,
        signal: controller.signal,
        // Progress reporting is a no-op until an adapter supports it.
        progress: async (_percent) => {
        }
      };
      try {
        if (definition.schema) {
          // Replace raw payload with the parsed/validated value.
          context.data = definition.schema.parse(job.data);
        }
        if (definition.onStart) {
          await definition.onStart(job);
        }
        const result = await definition.handler(context);
        if (definition.onComplete) {
          await definition.onComplete(job, result);
        }
      } catch (error) {
        if (definition.onError) {
          // The onError hook is documented to receive an Error; normalize
          // non-Error throws (e.g. strings) while still rethrowing the original.
          const err = error instanceof Error ? error : new Error(String(error));
          await definition.onError(job, err);
        }
        throw error;
      } finally {
        if (timeoutId) {
          clearTimeout(timeoutId);
        }
      }
    };
  }

  /** Resolve the queue to use: explicitly registered first, then the global one. */
  function getQueue() {
    const queue = registeredQueue ?? globalQueue;
    if (!queue) {
      throw new Error(
        `No queue registered for job "${definition.name}". Call job.register(queue) or setGlobalQueue(queue) first.`
      );
    }
    return queue;
  }

  return {
    name: definition.name,
    definition,
    /** Enqueue a job now; explicit options override the definition's defaults. */
    async enqueue(data, options) {
      const queue = getQueue();
      return queue.enqueue(definition.name, data, {
        maxAttempts: definition.options?.maxAttempts ?? 3,
        priority: definition.options?.priority,
        ...options
      });
    },
    /** Schedule a job to run at a specific time. */
    async schedule(data, at) {
      const queue = getQueue();
      return queue.enqueue(definition.name, data, {
        maxAttempts: definition.options?.maxAttempts ?? 3,
        // Consistency fix: apply the definition's priority here too,
        // matching enqueue() above.
        priority: definition.options?.priority,
        scheduledAt: at
      });
    },
    /** Recurring (cron) jobs are adapter-specific; not supported here. */
    async recurring(_data, _cron) {
      throw new Error("Recurring jobs require an adapter with cron support");
    },
    /** Bind this job to a queue and register its handler under the job name. */
    register(queue) {
      registeredQueue = queue;
      queue.define(definition.name, createHandler());
    }
  };
}
|
|
97
|
+
/**
 * Register a set of defined jobs on a queue and expose start/stop/stats
 * controls for processing them.
 *
 * @param {object} options
 * @param {object} options.queue - QueueService to run on.
 * @param {object[]} options.jobs - DefinedJob instances to register.
 * @param {number} [options.concurrency=1] - Parallel jobs per tick.
 * @returns {{start(): void, stop(): void, stats(): Promise<object>}}
 */
function createJobRunner(options) {
  const { queue, jobs, concurrency = 1 } = options;
  jobs.forEach((definedJob) => definedJob.register(queue));
  return {
    /** Begin polling the queue for work. */
    start() {
      queue.process(concurrency);
    },
    /** Stop polling; in-flight jobs finish normally. */
    stop() {
      queue.stop();
    },
    /** Current queue counters (pending/processing/completed/failed). */
    async stats() {
      return queue.getStats();
    }
  };
}
|
|
114
|
+
/**
 * Compute the retry delay in milliseconds for a given attempt.
 *
 * Array strategies are looked up by attempt index (clamped to the last
 * entry); "linear" scales with the attempt, "exponential" doubles per
 * attempt, and "fixed" (or any unknown strategy) returns the base delay.
 *
 * @param {number} attempt - Attempt number (1-based).
 * @param {'linear'|'exponential'|'fixed'|number[]} strategy - Backoff strategy.
 * @param {number} [baseDelay=1000] - Base delay in ms.
 * @returns {number} Delay in milliseconds.
 */
function calculateBackoff(attempt, strategy, baseDelay = 1e3) {
  if (Array.isArray(strategy)) {
    const idx = Math.min(attempt, strategy.length - 1);
    return strategy[idx] ?? baseDelay;
  }
  if (strategy === "linear") {
    return baseDelay * attempt;
  }
  if (strategy === "exponential") {
    return baseDelay * 2 ** (attempt - 1);
  }
  // "fixed" and any unrecognized strategy fall back to the base delay.
  return baseDelay;
}
|
|
128
|
+
export {
|
|
129
|
+
calculateBackoff,
|
|
130
|
+
createJobRunner,
|
|
131
|
+
defineJob,
|
|
132
|
+
getGlobalQueue,
|
|
133
|
+
setGlobalQueue
|
|
134
|
+
};
|
package/package.json
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@flightdev/queue",
|
|
3
|
+
"version": "0.2.0",
|
|
4
|
+
"description": "Agnostic background job queue for Flight Framework. Choose your backend: Redis, BullMQ, SQLite, Postgres, SQS.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": {
|
|
8
|
+
"types": "./dist/index.d.ts",
|
|
9
|
+
"import": "./dist/index.js"
|
|
10
|
+
},
|
|
11
|
+
"./jobs": {
|
|
12
|
+
"types": "./dist/jobs.d.ts",
|
|
13
|
+
"import": "./dist/jobs.js"
|
|
14
|
+
},
|
|
15
|
+
"./memory": {
|
|
16
|
+
"types": "./dist/adapters/memory.d.ts",
|
|
17
|
+
"import": "./dist/adapters/memory.js"
|
|
18
|
+
},
|
|
19
|
+
"./redis": {
|
|
20
|
+
"types": "./dist/adapters/redis.d.ts",
|
|
21
|
+
"import": "./dist/adapters/redis.js"
|
|
22
|
+
},
|
|
23
|
+
"./bullmq": {
|
|
24
|
+
"types": "./dist/adapters/bullmq.d.ts",
|
|
25
|
+
"import": "./dist/adapters/bullmq.js"
|
|
26
|
+
}
|
|
27
|
+
},
|
|
28
|
+
"files": [
|
|
29
|
+
"dist"
|
|
30
|
+
],
|
|
31
|
+
"peerDependencies": {
|
|
32
|
+
"ioredis": ">=5.0.0",
|
|
33
|
+
"bullmq": ">=5.0.0"
|
|
34
|
+
},
|
|
35
|
+
"peerDependenciesMeta": {
|
|
36
|
+
"ioredis": {
|
|
37
|
+
"optional": true
|
|
38
|
+
},
|
|
39
|
+
"bullmq": {
|
|
40
|
+
"optional": true
|
|
41
|
+
}
|
|
42
|
+
},
|
|
43
|
+
"devDependencies": {
|
|
44
|
+
"tsup": "^8.0.0",
|
|
45
|
+
"typescript": "^5.7.0",
|
|
46
|
+
"vitest": "^2.0.0"
|
|
47
|
+
},
|
|
48
|
+
"license": "MIT",
|
|
49
|
+
"scripts": {
|
|
50
|
+
"build": "tsup",
|
|
51
|
+
"dev": "tsup --watch",
|
|
52
|
+
"test": "vitest run",
|
|
53
|
+
"typecheck": "tsc --noEmit"
|
|
54
|
+
}
|
|
55
|
+
}
|