@workglow/job-queue 0.0.57 → 0.0.59
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +432 -306
- package/dist/browser.js +808 -509
- package/dist/browser.js.map +9 -8
- package/dist/bun.js +823 -823
- package/dist/bun.js.map +9 -10
- package/dist/common-server.d.ts +0 -2
- package/dist/common-server.d.ts.map +1 -1
- package/dist/common.d.ts +4 -3
- package/dist/common.d.ts.map +1 -1
- package/dist/job/Job.d.ts +3 -4
- package/dist/job/Job.d.ts.map +1 -1
- package/dist/job/JobQueueClient.d.ts +171 -0
- package/dist/job/JobQueueClient.d.ts.map +1 -0
- package/dist/job/JobQueueEventListeners.d.ts +1 -1
- package/dist/job/JobQueueEventListeners.d.ts.map +1 -1
- package/dist/job/JobQueueServer.d.ts +160 -0
- package/dist/job/JobQueueServer.d.ts.map +1 -0
- package/dist/job/JobQueueWorker.d.ts +157 -0
- package/dist/job/JobQueueWorker.d.ts.map +1 -0
- package/dist/limiter/RateLimiter.d.ts +49 -0
- package/dist/limiter/RateLimiter.d.ts.map +1 -0
- package/dist/node.js +823 -823
- package/dist/node.js.map +9 -10
- package/package.json +5 -13
- package/dist/job/IJobQueue.d.ts +0 -160
- package/dist/job/IJobQueue.d.ts.map +0 -1
- package/dist/job/JobQueue.d.ts +0 -272
- package/dist/job/JobQueue.d.ts.map +0 -1
- package/dist/limiter/InMemoryRateLimiter.d.ts +0 -32
- package/dist/limiter/InMemoryRateLimiter.d.ts.map +0 -1
- package/dist/limiter/PostgresRateLimiter.d.ts +0 -53
- package/dist/limiter/PostgresRateLimiter.d.ts.map +0 -1
- package/dist/limiter/SqliteRateLimiter.d.ts +0 -44
- package/dist/limiter/SqliteRateLimiter.d.ts.map +0 -1
package/README.md
CHANGED
|
@@ -1,51 +1,45 @@
|
|
|
1
1
|
# @workglow/job-queue
|
|
2
2
|
|
|
3
|
-
A TypeScript-first job queue system for managing and processing asynchronous tasks
|
|
3
|
+
A TypeScript-first job queue system with a separated client-server architecture for managing and processing asynchronous tasks. Features rate limiting, progress tracking, automatic retries, and cross-platform persistence.
|
|
4
4
|
|
|
5
5
|
- [Features](#features)
|
|
6
6
|
- [Installation](#installation)
|
|
7
|
+
- [Architecture](#architecture)
|
|
7
8
|
- [Quick Start](#quick-start)
|
|
8
9
|
- [Core Concepts](#core-concepts)
|
|
9
10
|
- [Jobs](#jobs)
|
|
10
|
-
- [
|
|
11
|
-
- [
|
|
12
|
-
- [
|
|
11
|
+
- [JobQueueClient](#jobqueueclient)
|
|
12
|
+
- [JobQueueServer](#jobqueueserver)
|
|
13
|
+
- [JobQueueWorker](#jobqueueworker)
|
|
13
14
|
- [Usage Examples](#usage-examples)
|
|
14
15
|
- [Creating Custom Jobs](#creating-custom-jobs)
|
|
15
|
-
- [
|
|
16
|
+
- [Submitting Jobs](#submitting-jobs)
|
|
16
17
|
- [Progress Tracking](#progress-tracking)
|
|
17
18
|
- [Error Handling and Retries](#error-handling-and-retries)
|
|
18
19
|
- [Event Listeners](#event-listeners)
|
|
19
|
-
- [
|
|
20
|
+
- [Aborting Jobs](#aborting-jobs)
|
|
20
21
|
- [Storage Configurations](#storage-configurations)
|
|
21
|
-
- [In-Memory Storage](#in-memory-storage)
|
|
22
|
-
- [IndexedDB Storage (Browser)](#indexeddb-storage-browser)
|
|
23
|
-
- [SQLite Storage (Node.js/Bun)](#sqlite-storage-nodejsbun)
|
|
24
|
-
- [PostgreSQL Storage (Node.js/Bun)](#postgresql-storage-nodejsbun)
|
|
25
22
|
- [Rate Limiting Strategies](#rate-limiting-strategies)
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
- [Rate Limiter](#rate-limiter)
|
|
29
|
-
- [Composite Limiter](#composite-limiter)
|
|
30
|
-
- [Queue Modes](#queue-modes)
|
|
23
|
+
- [Scaling Workers](#scaling-workers)
|
|
24
|
+
- [Cross-Process Communication](#cross-process-communication)
|
|
31
25
|
- [API Reference](#api-reference)
|
|
32
|
-
- [JobQueue Methods](#jobqueue-methods)
|
|
33
|
-
- [Job Class](#job-class)
|
|
34
26
|
- [TypeScript Types](#typescript-types)
|
|
35
27
|
- [Testing](#testing)
|
|
36
28
|
- [License](#license)
|
|
37
29
|
|
|
38
30
|
## Features
|
|
39
31
|
|
|
32
|
+
- **Separated architecture**: Client, server, and worker components for flexible deployment
|
|
40
33
|
- **Cross-platform**: Works in browsers (IndexedDB), Node.js, and Bun
|
|
41
34
|
- **Multiple storage backends**: In-Memory, IndexedDB, SQLite, PostgreSQL
|
|
42
35
|
- **Rate limiting**: Concurrency, delay, and composite rate limiting strategies
|
|
43
36
|
- **Progress tracking**: Real-time job progress with events and callbacks
|
|
44
|
-
- **Retry logic**: Configurable retry attempts with
|
|
37
|
+
- **Retry logic**: Configurable retry attempts with support for delayed retries
|
|
45
38
|
- **Event system**: Comprehensive event listeners for job lifecycle
|
|
46
39
|
- **TypeScript-first**: Full type safety with generic input/output types
|
|
47
|
-
- **
|
|
48
|
-
- **
|
|
40
|
+
- **Worker scaling**: Dynamic worker count adjustment
|
|
41
|
+
- **Same-process optimization**: Direct event forwarding when client and server run together
|
|
42
|
+
- **Cross-process support**: Storage-based subscriptions for distributed deployments
|
|
49
43
|
|
|
50
44
|
## Installation
|
|
51
45
|
|
|
@@ -66,16 +60,44 @@ bun add pg @types/pg
|
|
|
66
60
|
bun add @workglow/storage
|
|
67
61
|
```
|
|
68
62
|
|
|
63
|
+
## Architecture
|
|
64
|
+
|
|
65
|
+
The job queue system is split into three main components:
|
|
66
|
+
|
|
67
|
+
```
|
|
68
|
+
┌─────────────────┐ ┌─────────────────┐
|
|
69
|
+
│ JobQueueClient │────▶│ JobQueueServer │
|
|
70
|
+
│ (submit jobs) │ │ (coordinate) │
|
|
71
|
+
└─────────────────┘ └────────┬────────┘
|
|
72
|
+
│
|
|
73
|
+
┌────────────┼────────────┐
|
|
74
|
+
▼ ▼ ▼
|
|
75
|
+
┌──────────┐ ┌──────────┐ ┌──────────┐
|
|
76
|
+
│ Worker │ │ Worker │ │ Worker │
|
|
77
|
+
└──────────┘ └──────────┘ └──────────┘
|
|
78
|
+
│ │ │
|
|
79
|
+
└────────────┴────────────┘
|
|
80
|
+
│
|
|
81
|
+
▼
|
|
82
|
+
┌─────────────────┐
|
|
83
|
+
│ Storage │
|
|
84
|
+
└─────────────────┘
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
- **JobQueueClient**: Submits jobs and monitors their progress
|
|
88
|
+
- **JobQueueServer**: Coordinates workers, manages lifecycle, handles cleanup
|
|
89
|
+
- **JobQueueWorker**: Processes jobs from the queue
|
|
90
|
+
|
|
69
91
|
## Quick Start
|
|
70
92
|
|
|
71
93
|
```typescript
|
|
72
|
-
import { Job,
|
|
94
|
+
import { Job, JobQueueClient, JobQueueServer, IJobExecuteContext } from "@workglow/job-queue";
|
|
73
95
|
import { InMemoryQueueStorage } from "@workglow/storage";
|
|
74
96
|
|
|
75
97
|
// 1. Define your input/output types
|
|
76
98
|
interface ProcessTextInput {
|
|
77
99
|
text: string;
|
|
78
|
-
|
|
100
|
+
uppercase?: boolean;
|
|
79
101
|
}
|
|
80
102
|
|
|
81
103
|
interface ProcessTextOutput {
|
|
@@ -86,91 +108,134 @@ interface ProcessTextOutput {
|
|
|
86
108
|
// 2. Create a custom job class
|
|
87
109
|
class ProcessTextJob extends Job<ProcessTextInput, ProcessTextOutput> {
|
|
88
110
|
async execute(input: ProcessTextInput, context: IJobExecuteContext): Promise<ProcessTextOutput> {
|
|
89
|
-
const { text, options = {} } = input;
|
|
90
|
-
|
|
91
|
-
// Simulate work with progress updates
|
|
92
111
|
await context.updateProgress(25, "Starting text processing");
|
|
93
112
|
|
|
94
|
-
|
|
113
|
+
const processedText = input.uppercase ? input.text.toUpperCase() : input.text.toLowerCase();
|
|
95
114
|
await context.updateProgress(50, "Processing text");
|
|
96
115
|
|
|
97
|
-
const
|
|
98
|
-
await context.updateProgress(75, "Counting words");
|
|
99
|
-
|
|
100
|
-
const wordCount = text.split(/\s+/).filter((word) => word.length > 0).length;
|
|
116
|
+
const wordCount = input.text.split(/\s+/).filter((word) => word.length > 0).length;
|
|
101
117
|
await context.updateProgress(100, "Complete");
|
|
102
118
|
|
|
103
119
|
return { processedText, wordCount };
|
|
104
120
|
}
|
|
105
121
|
}
|
|
106
122
|
|
|
107
|
-
// 3.
|
|
108
|
-
const
|
|
109
|
-
|
|
123
|
+
// 3. Set up storage, server, and client
|
|
124
|
+
const queueName = "text-processor";
|
|
125
|
+
const storage = new InMemoryQueueStorage<ProcessTextInput, ProcessTextOutput>(queueName);
|
|
126
|
+
await storage.setupDatabase();
|
|
127
|
+
|
|
128
|
+
const server = new JobQueueServer(ProcessTextJob, {
|
|
129
|
+
storage,
|
|
130
|
+
queueName,
|
|
131
|
+
workerCount: 2,
|
|
110
132
|
deleteAfterCompletionMs: 60_000, // Clean up after 1 minute
|
|
111
|
-
deleteAfterFailureMs: 300_000, // Keep failed jobs for 5 minutes
|
|
112
133
|
});
|
|
113
134
|
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
const job = new ProcessTextJob({
|
|
118
|
-
input: { text: "Hello World", options: { uppercase: true } },
|
|
119
|
-
maxRetries: 3,
|
|
135
|
+
const client = new JobQueueClient<ProcessTextInput, ProcessTextOutput>({
|
|
136
|
+
storage,
|
|
137
|
+
queueName,
|
|
120
138
|
});
|
|
121
139
|
|
|
122
|
-
|
|
123
|
-
|
|
140
|
+
// 4. Connect client to server for same-process optimization
|
|
141
|
+
client.attach(server);
|
|
142
|
+
|
|
143
|
+
// 5. Start the server
|
|
144
|
+
await server.start();
|
|
145
|
+
|
|
146
|
+
// 6. Submit jobs and wait for results
|
|
147
|
+
const handle = await client.submit({ text: "Hello World", uppercase: true });
|
|
148
|
+
const result = await handle.waitFor();
|
|
124
149
|
console.log(result); // { processedText: "HELLO WORLD", wordCount: 2 }
|
|
125
150
|
|
|
126
|
-
|
|
151
|
+
// 7. Clean up
|
|
152
|
+
await server.stop();
|
|
127
153
|
```
|
|
128
154
|
|
|
129
155
|
## Core Concepts
|
|
130
156
|
|
|
131
157
|
### Jobs
|
|
132
158
|
|
|
133
|
-
Jobs are units of work
|
|
159
|
+
Jobs are units of work with strongly typed input and output. Extend the `Job` class and implement the `execute` method:
|
|
134
160
|
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
161
|
+
```typescript
|
|
162
|
+
class MyJob extends Job<MyInput, MyOutput> {
|
|
163
|
+
async execute(input: MyInput, context: IJobExecuteContext): Promise<MyOutput> {
|
|
164
|
+
// Check for abort signal
|
|
165
|
+
if (context.signal.aborted) {
|
|
166
|
+
throw new AbortSignalJobError("Job was aborted");
|
|
167
|
+
}
|
|
140
168
|
|
|
141
|
-
|
|
169
|
+
// Update progress
|
|
170
|
+
await context.updateProgress(50, "Halfway there", { stage: "processing" });
|
|
142
171
|
|
|
143
|
-
|
|
172
|
+
// Do work and return result
|
|
173
|
+
return { result: "done" };
|
|
174
|
+
}
|
|
175
|
+
}
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
### JobQueueClient
|
|
179
|
+
|
|
180
|
+
The client submits jobs and monitors their progress. It can operate in two modes:
|
|
181
|
+
|
|
182
|
+
1. **Attached to server** (same process): Direct event forwarding for optimal performance
|
|
183
|
+
2. **Connected via storage** (cross process): Uses storage subscriptions for updates
|
|
184
|
+
|
|
185
|
+
```typescript
|
|
186
|
+
const client = new JobQueueClient<Input, Output>({
|
|
187
|
+
storage,
|
|
188
|
+
queueName: "my-queue",
|
|
189
|
+
});
|
|
190
|
+
|
|
191
|
+
// Option 1: Attach to local server (recommended for same-process)
|
|
192
|
+
client.attach(server);
|
|
193
|
+
|
|
194
|
+
// Option 2: Connect via storage (for cross-process scenarios)
|
|
195
|
+
client.connect();
|
|
196
|
+
```
|
|
144
197
|
|
|
145
|
-
|
|
146
|
-
- **Rate limiting**: Controls job execution rate
|
|
147
|
-
- **Event system**: Lifecycle notifications
|
|
148
|
-
- **Queue modes**: CLIENT (submit only), SERVER (process only), BOTH
|
|
198
|
+
### JobQueueServer
|
|
149
199
|
|
|
150
|
-
|
|
200
|
+
The server coordinates workers, manages job lifecycle, and handles cleanup:
|
|
151
201
|
|
|
152
|
-
|
|
202
|
+
```typescript
|
|
203
|
+
const server = new JobQueueServer(MyJob, {
|
|
204
|
+
storage,
|
|
205
|
+
queueName: "my-queue",
|
|
206
|
+
workerCount: 4, // Number of concurrent workers
|
|
207
|
+
pollIntervalMs: 100, // How often workers check for new jobs
|
|
208
|
+
deleteAfterCompletionMs: 60_000, // Delete completed jobs after 1 minute
|
|
209
|
+
deleteAfterFailureMs: 300_000, // Delete failed jobs after 5 minutes
|
|
210
|
+
deleteAfterDisabledMs: 60_000, // Delete disabled jobs after 1 minute
|
|
211
|
+
cleanupIntervalMs: 10_000, // How often to run cleanup
|
|
212
|
+
limiter: new ConcurrencyLimiter(10), // Rate limiting
|
|
213
|
+
});
|
|
214
|
+
```
|
|
153
215
|
|
|
154
|
-
|
|
155
|
-
- **IndexedDbQueueStorage**: Browser persistent storage
|
|
156
|
-
- **SqliteQueueStorage**: Local SQLite file
|
|
157
|
-
- **PostgresQueueStorage**: PostgreSQL database
|
|
216
|
+
### JobQueueWorker
|
|
158
217
|
|
|
159
|
-
|
|
218
|
+
Workers are created and managed by the server. You typically don't interact with them directly, but they can be used standalone for custom scenarios:
|
|
160
219
|
|
|
161
|
-
|
|
220
|
+
```typescript
|
|
221
|
+
const worker = new JobQueueWorker(MyJob, {
|
|
222
|
+
storage,
|
|
223
|
+
queueName: "my-queue",
|
|
224
|
+
limiter: new ConcurrencyLimiter(5),
|
|
225
|
+
pollIntervalMs: 100,
|
|
226
|
+
});
|
|
162
227
|
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
228
|
+
await worker.start();
|
|
229
|
+
// Worker processes jobs until stopped
|
|
230
|
+
await worker.stop();
|
|
231
|
+
```
|
|
167
232
|
|
|
168
233
|
## Usage Examples
|
|
169
234
|
|
|
170
235
|
### Creating Custom Jobs
|
|
171
236
|
|
|
172
237
|
```typescript
|
|
173
|
-
import { Job, IJobExecuteContext } from "@workglow/job-queue";
|
|
238
|
+
import { Job, IJobExecuteContext, RetryableJobError, PermanentJobError } from "@workglow/job-queue";
|
|
174
239
|
|
|
175
240
|
interface DownloadInput {
|
|
176
241
|
url: string;
|
|
@@ -186,18 +251,19 @@ class DownloadJob extends Job<DownloadInput, DownloadOutput> {
|
|
|
186
251
|
async execute(input: DownloadInput, context: IJobExecuteContext): Promise<DownloadOutput> {
|
|
187
252
|
const { url, filename } = input;
|
|
188
253
|
|
|
189
|
-
//
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
254
|
+
// Handle abort signal
|
|
255
|
+
const checkAbort = () => {
|
|
256
|
+
if (context.signal.aborted) {
|
|
257
|
+
throw new AbortSignalJobError("Download aborted");
|
|
258
|
+
}
|
|
259
|
+
};
|
|
193
260
|
|
|
194
|
-
|
|
261
|
+
checkAbort();
|
|
195
262
|
await context.updateProgress(10, "Starting download");
|
|
196
263
|
|
|
197
264
|
// Simulate download with progress
|
|
198
265
|
for (let i = 20; i <= 90; i += 10) {
|
|
199
|
-
|
|
200
|
-
|
|
266
|
+
checkAbort();
|
|
201
267
|
await new Promise((resolve) => setTimeout(resolve, 100));
|
|
202
268
|
await context.updateProgress(i, `Downloaded ${i}%`);
|
|
203
269
|
}
|
|
@@ -206,95 +272,82 @@ class DownloadJob extends Job<DownloadInput, DownloadOutput> {
|
|
|
206
272
|
|
|
207
273
|
return {
|
|
208
274
|
filepath: `/downloads/${filename}`,
|
|
209
|
-
size: 1024 * 1024,
|
|
275
|
+
size: 1024 * 1024,
|
|
210
276
|
};
|
|
211
277
|
}
|
|
212
278
|
}
|
|
213
279
|
```
|
|
214
280
|
|
|
215
|
-
###
|
|
281
|
+
### Submitting Jobs
|
|
216
282
|
|
|
217
283
|
```typescript
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
284
|
+
// Submit a single job
|
|
285
|
+
const handle = await client.submit(
|
|
286
|
+
{ url: "https://example.com/file.zip", filename: "file.zip" },
|
|
287
|
+
{
|
|
288
|
+
maxRetries: 5, // Override default retry count
|
|
289
|
+
jobRunId: "batch-001", // Group related jobs
|
|
290
|
+
runAfter: new Date(Date.now() + 60000), // Delay execution by 1 minute
|
|
291
|
+
deadlineAt: new Date(Date.now() + 3600000), // Must complete within 1 hour
|
|
292
|
+
}
|
|
293
|
+
);
|
|
294
|
+
|
|
295
|
+
// The handle provides methods to interact with the job
|
|
296
|
+
console.log(handle.id); // Job ID
|
|
297
|
+
const output = await handle.waitFor(); // Wait for completion
|
|
298
|
+
await handle.abort(); // Abort the job
|
|
299
|
+
handle.onProgress((progress, message, details) => {
|
|
300
|
+
console.log(`${progress}%: ${message}`);
|
|
226
301
|
});
|
|
227
302
|
|
|
228
|
-
//
|
|
229
|
-
await
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
})
|
|
237
|
-
),
|
|
238
|
-
queue.add(
|
|
239
|
-
new DownloadJob({
|
|
240
|
-
input: { url: "https://example.com/file2.zip", filename: "file2.zip" },
|
|
241
|
-
})
|
|
242
|
-
),
|
|
243
|
-
]);
|
|
244
|
-
|
|
245
|
-
// Check queue status
|
|
246
|
-
const queueSize = await queue.size(); // Total jobs
|
|
247
|
-
const pendingJobs = await queue.size(JobStatus.PENDING);
|
|
248
|
-
const runningJobs = await queue.size(JobStatus.RUNNING);
|
|
249
|
-
|
|
250
|
-
// Peek at jobs
|
|
251
|
-
const nextJobs = await queue.peek(JobStatus.PENDING, 5);
|
|
252
|
-
|
|
253
|
-
// Get queue statistics
|
|
254
|
-
const stats = queue.getStats();
|
|
255
|
-
console.log(`Completed: ${stats.completedJobs}, Failed: ${stats.failedJobs}`);
|
|
303
|
+
// Submit multiple jobs
|
|
304
|
+
const handles = await client.submitBatch(
|
|
305
|
+
[
|
|
306
|
+
{ url: "https://example.com/file1.zip", filename: "file1.zip" },
|
|
307
|
+
{ url: "https://example.com/file2.zip", filename: "file2.zip" },
|
|
308
|
+
],
|
|
309
|
+
{ jobRunId: "batch-002" }
|
|
310
|
+
);
|
|
256
311
|
```
|
|
257
312
|
|
|
258
313
|
### Progress Tracking
|
|
259
314
|
|
|
260
315
|
```typescript
|
|
261
|
-
//
|
|
262
|
-
const
|
|
263
|
-
|
|
316
|
+
// Method 1: Using the job handle
|
|
317
|
+
const handle = await client.submit(input);
|
|
318
|
+
const cleanup = handle.onProgress((progress, message, details) => {
|
|
319
|
+
console.log(`Job ${handle.id}: ${progress}% - ${message}`);
|
|
264
320
|
if (details) {
|
|
265
321
|
console.log("Details:", details);
|
|
266
322
|
}
|
|
267
323
|
});
|
|
268
324
|
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
job.onJobProgress((progress, message, details) => {
|
|
272
|
-
console.log(`Progress: ${progress}% - ${message}`);
|
|
273
|
-
});
|
|
325
|
+
await handle.waitFor();
|
|
326
|
+
cleanup(); // Remove listener
|
|
274
327
|
|
|
275
|
-
|
|
328
|
+
// Method 2: Using client events
|
|
329
|
+
client.on("job_progress", (queueName, jobId, progress, message, details) => {
|
|
330
|
+
console.log(`[${queueName}] Job ${jobId}: ${progress}% - ${message}`);
|
|
331
|
+
});
|
|
276
332
|
|
|
277
|
-
//
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
} finally {
|
|
282
|
-
removeListener(); // Clean up listener
|
|
283
|
-
}
|
|
333
|
+
// Method 3: Using onJobProgress for a specific job
|
|
334
|
+
const removeListener = client.onJobProgress(jobId, (progress, message, details) => {
|
|
335
|
+
console.log(`Progress: ${progress}%`);
|
|
336
|
+
});
|
|
284
337
|
```
|
|
285
338
|
|
|
286
339
|
### Error Handling and Retries
|
|
287
340
|
|
|
288
341
|
```typescript
|
|
289
|
-
import { RetryableJobError, PermanentJobError } from "@workglow/job-queue";
|
|
342
|
+
import { RetryableJobError, PermanentJobError, AbortSignalJobError } from "@workglow/job-queue";
|
|
290
343
|
|
|
291
|
-
class ApiCallJob extends Job<{ endpoint: string }, { data:
|
|
344
|
+
class ApiCallJob extends Job<{ endpoint: string }, { data: unknown }> {
|
|
292
345
|
async execute(input: { endpoint: string }, context: IJobExecuteContext) {
|
|
293
346
|
try {
|
|
294
|
-
const response = await fetch(input.endpoint);
|
|
347
|
+
const response = await fetch(input.endpoint, { signal: context.signal });
|
|
295
348
|
|
|
296
349
|
if (response.status === 429) {
|
|
297
|
-
// Rate limited - retry
|
|
350
|
+
// Rate limited - retry after delay
|
|
298
351
|
throw new RetryableJobError(
|
|
299
352
|
"Rate limited",
|
|
300
353
|
new Date(Date.now() + 60000) // Retry in 1 minute
|
|
@@ -307,104 +360,83 @@ class ApiCallJob extends Job<{ endpoint: string }, { data: any }> {
|
|
|
307
360
|
}
|
|
308
361
|
|
|
309
362
|
if (!response.ok) {
|
|
310
|
-
// Server error - allow retries
|
|
363
|
+
// Server error - allow retries (uses default retry logic)
|
|
311
364
|
throw new RetryableJobError(`HTTP ${response.status}`);
|
|
312
365
|
}
|
|
313
366
|
|
|
314
367
|
return { data: await response.json() };
|
|
315
368
|
} catch (error) {
|
|
316
|
-
if (
|
|
369
|
+
if (
|
|
370
|
+
error instanceof RetryableJobError ||
|
|
371
|
+
error instanceof PermanentJobError ||
|
|
372
|
+
error instanceof AbortSignalJobError
|
|
373
|
+
) {
|
|
317
374
|
throw error;
|
|
318
375
|
}
|
|
319
|
-
// Network errors
|
|
320
|
-
throw new RetryableJobError(error
|
|
376
|
+
// Network errors - allow retries
|
|
377
|
+
throw new RetryableJobError(String(error));
|
|
321
378
|
}
|
|
322
379
|
}
|
|
323
380
|
}
|
|
324
|
-
|
|
325
|
-
// Create job with retry configuration
|
|
326
|
-
const apiJob = new ApiCallJob({
|
|
327
|
-
input: { endpoint: "https://api.example.com/data" },
|
|
328
|
-
maxRetries: 5, // Try up to 5 times
|
|
329
|
-
});
|
|
330
381
|
```
|
|
331
382
|
|
|
332
383
|
### Event Listeners
|
|
333
384
|
|
|
334
385
|
```typescript
|
|
335
|
-
//
|
|
336
|
-
|
|
337
|
-
console.log(`
|
|
386
|
+
// Client events
|
|
387
|
+
client.on("job_start", (queueName, jobId) => {
|
|
388
|
+
console.log(`Job ${jobId} started`);
|
|
338
389
|
});
|
|
339
390
|
|
|
340
|
-
|
|
341
|
-
console.log(`Job ${jobId}
|
|
391
|
+
client.on("job_complete", (queueName, jobId, output) => {
|
|
392
|
+
console.log(`Job ${jobId} completed:`, output);
|
|
342
393
|
});
|
|
343
394
|
|
|
344
|
-
|
|
345
|
-
console.
|
|
395
|
+
client.on("job_error", (queueName, jobId, error) => {
|
|
396
|
+
console.error(`Job ${jobId} failed: ${error}`);
|
|
346
397
|
});
|
|
347
398
|
|
|
348
|
-
|
|
349
|
-
console.
|
|
399
|
+
client.on("job_retry", (queueName, jobId, runAfter) => {
|
|
400
|
+
console.log(`Job ${jobId} will retry at ${runAfter}`);
|
|
350
401
|
});
|
|
351
402
|
|
|
352
|
-
|
|
353
|
-
console.log(`Job ${jobId}
|
|
403
|
+
client.on("job_disabled", (queueName, jobId) => {
|
|
404
|
+
console.log(`Job ${jobId} was disabled`);
|
|
405
|
+
});
|
|
406
|
+
|
|
407
|
+
client.on("job_aborting", (queueName, jobId) => {
|
|
408
|
+
console.log(`Job ${jobId} abort requested`);
|
|
409
|
+
});
|
|
410
|
+
|
|
411
|
+
// Server events
|
|
412
|
+
server.on("server_start", (queueName) => {
|
|
413
|
+
console.log(`Server ${queueName} started`);
|
|
354
414
|
});
|
|
355
415
|
|
|
356
|
-
|
|
357
|
-
console.log(`
|
|
416
|
+
server.on("server_stop", (queueName) => {
|
|
417
|
+
console.log(`Server ${queueName} stopped`);
|
|
358
418
|
});
|
|
359
419
|
|
|
360
|
-
|
|
420
|
+
server.on("stats_update", (queueName, stats) => {
|
|
361
421
|
console.log(`Queue stats:`, stats);
|
|
362
422
|
});
|
|
363
423
|
|
|
364
424
|
// Wait for specific events
|
|
365
|
-
const [queueName] = await
|
|
366
|
-
const [queueName, jobId, output] = await queue.waitOn("job_complete");
|
|
425
|
+
const [queueName, jobId, output] = await client.waitOn("job_complete");
|
|
367
426
|
```
|
|
368
427
|
|
|
369
|
-
###
|
|
428
|
+
### Aborting Jobs
|
|
370
429
|
|
|
371
430
|
```typescript
|
|
372
|
-
//
|
|
373
|
-
const
|
|
374
|
-
|
|
375
|
-
try {
|
|
376
|
-
// This will resolve with the job output or reject with an error
|
|
377
|
-
const output = await queue.waitFor(jobId);
|
|
378
|
-
console.log("Job completed successfully:", output);
|
|
379
|
-
} catch (error) {
|
|
380
|
-
console.error("Job failed:", error);
|
|
381
|
-
}
|
|
382
|
-
|
|
383
|
-
// Check if output already exists for given input (caching)
|
|
384
|
-
const existingOutput = await queue.outputForInput({
|
|
385
|
-
url: "https://example.com/file.zip",
|
|
386
|
-
filename: "file.zip",
|
|
387
|
-
});
|
|
388
|
-
|
|
389
|
-
if (existingOutput) {
|
|
390
|
-
console.log("Already processed:", existingOutput);
|
|
391
|
-
} else {
|
|
392
|
-
// Add new job
|
|
393
|
-
const newJobId = await queue.add(
|
|
394
|
-
new DownloadJob({
|
|
395
|
-
input: { url: "https://example.com/file.zip", filename: "file.zip" },
|
|
396
|
-
})
|
|
397
|
-
);
|
|
398
|
-
}
|
|
431
|
+
// Abort a single job
|
|
432
|
+
const handle = await client.submit({ taskType: "long_running" });
|
|
433
|
+
await handle.abort();
|
|
399
434
|
|
|
400
|
-
//
|
|
401
|
-
await
|
|
435
|
+
// Or using the client directly
|
|
436
|
+
await client.abort(jobId);
|
|
402
437
|
|
|
403
|
-
//
|
|
404
|
-
|
|
405
|
-
if (job) {
|
|
406
|
-
console.log(`Job status: ${job.status}, progress: ${job.progress}%`);
|
|
407
|
-
}
|
|
438
|
+
// Abort all jobs in a job run
|
|
439
|
+
await client.abortJobRun("batch-001");
|
|
408
440
|
```
|
|
409
441
|
|
|
410
442
|
## Storage Configurations
|
|
@@ -412,44 +444,33 @@ if (job) {
|
|
|
412
444
|
### In-Memory Storage
|
|
413
445
|
|
|
414
446
|
```typescript
|
|
415
|
-
import { JobQueue } from "@workglow/job-queue";
|
|
416
447
|
import { InMemoryQueueStorage } from "@workglow/storage";
|
|
417
448
|
|
|
418
|
-
const
|
|
419
|
-
|
|
420
|
-
// Jobs are lost when the process restarts
|
|
421
|
-
});
|
|
449
|
+
const storage = new InMemoryQueueStorage<Input, Output>("my-queue");
|
|
450
|
+
await storage.setupDatabase();
|
|
422
451
|
```
|
|
423
452
|
|
|
424
453
|
### IndexedDB Storage (Browser)
|
|
425
454
|
|
|
426
455
|
```typescript
|
|
427
|
-
import { JobQueue } from "@workglow/job-queue";
|
|
428
456
|
import { IndexedDbQueueStorage } from "@workglow/storage";
|
|
429
457
|
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
storage: new IndexedDbQueueStorage("my-queue"),
|
|
433
|
-
// Jobs persist in browser storage
|
|
434
|
-
});
|
|
458
|
+
const storage = new IndexedDbQueueStorage<Input, Output>("my-queue");
|
|
459
|
+
await storage.setupDatabase();
|
|
435
460
|
```
|
|
436
461
|
|
|
437
462
|
### SQLite Storage (Node.js/Bun)
|
|
438
463
|
|
|
439
464
|
```typescript
|
|
440
|
-
import { JobQueue } from "@workglow/job-queue";
|
|
441
465
|
import { SqliteQueueStorage } from "@workglow/storage";
|
|
442
466
|
|
|
443
|
-
const
|
|
444
|
-
|
|
445
|
-
// Jobs persist in SQLite file
|
|
446
|
-
});
|
|
467
|
+
const storage = new SqliteQueueStorage<Input, Output>("./jobs.db", "my-queue");
|
|
468
|
+
await storage.setupDatabase();
|
|
447
469
|
```
|
|
448
470
|
|
|
449
471
|
### PostgreSQL Storage (Node.js/Bun)
|
|
450
472
|
|
|
451
473
|
```typescript
|
|
452
|
-
import { JobQueue } from "@workglow/job-queue";
|
|
453
474
|
import { PostgresQueueStorage } from "@workglow/storage";
|
|
454
475
|
import { Pool } from "pg";
|
|
455
476
|
|
|
@@ -461,10 +482,8 @@ const pool = new Pool({
|
|
|
461
482
|
password: "password",
|
|
462
483
|
});
|
|
463
484
|
|
|
464
|
-
const
|
|
465
|
-
|
|
466
|
-
// Jobs persist in PostgreSQL
|
|
467
|
-
});
|
|
485
|
+
const storage = new PostgresQueueStorage<Input, Output>(pool, "my-queue");
|
|
486
|
+
await storage.setupDatabase();
|
|
468
487
|
```
|
|
469
488
|
|
|
470
489
|
## Rate Limiting Strategies
|
|
@@ -474,13 +493,8 @@ const queue = new JobQueue("my-queue", MyJob, {
|
|
|
474
493
|
```typescript
|
|
475
494
|
import { ConcurrencyLimiter } from "@workglow/job-queue";
|
|
476
495
|
|
|
477
|
-
// Limit to 5 concurrent jobs
|
|
478
|
-
const limiter = new ConcurrencyLimiter(5
|
|
479
|
-
|
|
480
|
-
const queue = new JobQueue("my-queue", MyJob, {
|
|
481
|
-
storage: new InMemoryQueueStorage("my-queue"),
|
|
482
|
-
limiter,
|
|
483
|
-
});
|
|
496
|
+
// Limit to 5 concurrent jobs
|
|
497
|
+
const limiter = new ConcurrencyLimiter(5);
|
|
484
498
|
```
|
|
485
499
|
|
|
486
500
|
### Delay Limiter
|
|
@@ -501,9 +515,9 @@ import { InMemoryRateLimiter } from "@workglow/job-queue";
|
|
|
501
515
|
const limiter = new InMemoryRateLimiter({
|
|
502
516
|
maxExecutions: 10,
|
|
503
517
|
windowSizeInSeconds: 60,
|
|
504
|
-
initialBackoffDelay: 1000,
|
|
505
|
-
backoffMultiplier: 2,
|
|
506
|
-
maxBackoffDelay: 60000,
|
|
518
|
+
initialBackoffDelay: 1000,
|
|
519
|
+
backoffMultiplier: 2,
|
|
520
|
+
maxBackoffDelay: 60000,
|
|
507
521
|
});
|
|
508
522
|
```
|
|
509
523
|
|
|
@@ -514,66 +528,130 @@ import { CompositeLimiter, ConcurrencyLimiter, DelayLimiter } from "@workglow/jo
|
|
|
514
528
|
|
|
515
529
|
// Combine multiple limiting strategies
|
|
516
530
|
const limiter = new CompositeLimiter([
|
|
517
|
-
new ConcurrencyLimiter(3),
|
|
518
|
-
new DelayLimiter(100),
|
|
531
|
+
new ConcurrencyLimiter(3),
|
|
532
|
+
new DelayLimiter(100),
|
|
519
533
|
new InMemoryRateLimiter({
|
|
520
|
-
// Max 20 per minute
|
|
521
534
|
maxExecutions: 20,
|
|
522
535
|
windowSizeInSeconds: 60,
|
|
523
536
|
}),
|
|
524
537
|
]);
|
|
525
538
|
```
|
|
526
539
|
|
|
527
|
-
##
|
|
540
|
+
## Scaling Workers
|
|
528
541
|
|
|
529
542
|
```typescript
|
|
530
|
-
|
|
543
|
+
// Start with 2 workers
|
|
544
|
+
const server = new JobQueueServer(MyJob, {
|
|
545
|
+
storage,
|
|
546
|
+
queueName: "my-queue",
|
|
547
|
+
workerCount: 2,
|
|
548
|
+
});
|
|
549
|
+
|
|
550
|
+
await server.start();
|
|
531
551
|
|
|
532
|
-
//
|
|
533
|
-
await
|
|
552
|
+
// Scale up to 5 workers
|
|
553
|
+
await server.scaleWorkers(5);
|
|
534
554
|
|
|
535
|
-
//
|
|
536
|
-
await
|
|
555
|
+
// Scale down to 1 worker
|
|
556
|
+
await server.scaleWorkers(1);
|
|
537
557
|
|
|
538
|
-
//
|
|
539
|
-
|
|
558
|
+
// Check current worker count
|
|
559
|
+
console.log(server.getWorkerCount());
|
|
540
560
|
```
|
|
541
561
|
|
|
542
|
-
##
|
|
562
|
+
## Cross-Process Communication
|
|
543
563
|
|
|
544
|
-
|
|
564
|
+
When the client and server run in different processes, use storage subscriptions:
|
|
545
565
|
|
|
546
566
|
```typescript
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
stop(): Promise<this>;
|
|
551
|
-
clear(): Promise<this>;
|
|
552
|
-
restart(): Promise<this>;
|
|
567
|
+
// Process A: Server
|
|
568
|
+
const server = new JobQueueServer(MyJob, { storage, queueName });
|
|
569
|
+
await server.start();
|
|
553
570
|
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
waitFor(jobId: unknown): Promise<Output | undefined>;
|
|
558
|
-
abort(jobId: unknown): Promise<void>;
|
|
571
|
+
// Process B: Client
|
|
572
|
+
const client = new JobQueueClient<Input, Output>({ storage, queueName });
|
|
573
|
+
client.connect(); // Uses storage subscriptions instead of direct attachment
|
|
559
574
|
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
575
|
+
const handle = await client.submit(input);
|
|
576
|
+
await handle.waitFor(); // Works across processes
|
|
577
|
+
|
|
578
|
+
// Don't forget to disconnect when done
|
|
579
|
+
client.disconnect();
|
|
580
|
+
```
|
|
564
581
|
|
|
565
|
-
|
|
582
|
+
## API Reference
|
|
583
|
+
|
|
584
|
+
### JobQueueClient
|
|
585
|
+
|
|
586
|
+
```typescript
|
|
587
|
+
class JobQueueClient<Input, Output> {
|
|
588
|
+
// Connection management
|
|
589
|
+
attach(server: JobQueueServer<Input, Output>): void;
|
|
590
|
+
detach(): void;
|
|
591
|
+
connect(): void;
|
|
592
|
+
disconnect(): void;
|
|
593
|
+
|
|
594
|
+
// Job submission
|
|
595
|
+
submit(input: Input, options?: SubmitOptions): Promise<JobHandle<Output>>;
|
|
596
|
+
submitBatch(
|
|
597
|
+
inputs: readonly Input[],
|
|
598
|
+
options?: BatchOptions
|
|
599
|
+
): Promise<readonly JobHandle<Output>[]>;
|
|
600
|
+
|
|
601
|
+
// Job queries
|
|
602
|
+
getJob(id: unknown): Promise<Job<Input, Output> | undefined>;
|
|
603
|
+
getJobsByRunId(runId: string): Promise<readonly Job<Input, Output>[]>;
|
|
604
|
+
peek(status?: JobStatus, num?: number): Promise<readonly Job<Input, Output>[]>;
|
|
605
|
+
size(status?: JobStatus): Promise<number>;
|
|
566
606
|
outputForInput(input: Input): Promise<Output | null>;
|
|
567
|
-
|
|
607
|
+
|
|
608
|
+
// Job control
|
|
609
|
+
waitFor(jobId: unknown): Promise<Output>;
|
|
610
|
+
abort(jobId: unknown): Promise<void>;
|
|
611
|
+
abortJobRun(jobRunId: string): Promise<void>;
|
|
568
612
|
|
|
569
613
|
// Progress tracking
|
|
570
|
-
updateProgress(
|
|
571
|
-
jobId: unknown,
|
|
572
|
-
progress: number,
|
|
573
|
-
message?: string,
|
|
574
|
-
details?: Record<string, any>
|
|
575
|
-
): Promise<void>;
|
|
576
614
|
onJobProgress(jobId: unknown, listener: JobProgressListener): () => void;
|
|
615
|
+
|
|
616
|
+
// Events
|
|
617
|
+
on<Event extends JobQueueEvents>(event: Event, listener: Listener): void;
|
|
618
|
+
off<Event extends JobQueueEvents>(event: Event, listener: Listener): void;
|
|
619
|
+
once<Event extends JobQueueEvents>(event: Event, listener: Listener): void;
|
|
620
|
+
waitOn<Event extends JobQueueEvents>(event: Event): Promise<Parameters>;
|
|
621
|
+
}
|
|
622
|
+
```
|
|
623
|
+
|
|
624
|
+
### JobQueueServer
|
|
625
|
+
|
|
626
|
+
```typescript
|
|
627
|
+
class JobQueueServer<Input, Output> {
|
|
628
|
+
// Lifecycle
|
|
629
|
+
start(): Promise<this>;
|
|
630
|
+
stop(): Promise<this>;
|
|
631
|
+
isRunning(): boolean;
|
|
632
|
+
|
|
633
|
+
// Workers
|
|
634
|
+
scaleWorkers(count: number): Promise<void>;
|
|
635
|
+
getWorkerCount(): number;
|
|
636
|
+
|
|
637
|
+
// Statistics
|
|
638
|
+
getStats(): JobQueueStats;
|
|
639
|
+
getStorage(): IQueueStorage<Input, Output>;
|
|
640
|
+
|
|
641
|
+
// Events
|
|
642
|
+
on<Event extends JobQueueServerEvents>(event: Event, listener: Listener): void;
|
|
643
|
+
off<Event extends JobQueueServerEvents>(event: Event, listener: Listener): void;
|
|
644
|
+
}
|
|
645
|
+
```
|
|
646
|
+
|
|
647
|
+
### JobHandle
|
|
648
|
+
|
|
649
|
+
```typescript
|
|
650
|
+
interface JobHandle<Output> {
|
|
651
|
+
readonly id: unknown;
|
|
652
|
+
waitFor(): Promise<Output>;
|
|
653
|
+
abort(): Promise<void>;
|
|
654
|
+
onProgress(callback: JobProgressListener): () => void;
|
|
577
655
|
}
|
|
578
656
|
```
|
|
579
657
|
|
|
@@ -588,17 +666,21 @@ class Job<Input, Output> {
|
|
|
588
666
|
status: JobStatus;
|
|
589
667
|
progress: number;
|
|
590
668
|
progressMessage: string;
|
|
591
|
-
progressDetails: Record<string,
|
|
669
|
+
progressDetails: Record<string, unknown> | null;
|
|
592
670
|
maxRetries: number;
|
|
593
671
|
runAttempts: number;
|
|
594
672
|
error: string | null;
|
|
673
|
+
errorCode: string | null;
|
|
595
674
|
createdAt: Date;
|
|
596
675
|
completedAt: Date | null;
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
676
|
+
runAfter: Date;
|
|
677
|
+
deadlineAt: Date | null;
|
|
678
|
+
lastRanAt: Date | null;
|
|
679
|
+
jobRunId: string | undefined;
|
|
680
|
+
fingerprint: string | undefined;
|
|
681
|
+
|
|
682
|
+
// Methods (override in subclass)
|
|
683
|
+
execute(input: Input, context: IJobExecuteContext): Promise<Output>;
|
|
602
684
|
}
|
|
603
685
|
```
|
|
604
686
|
|
|
@@ -606,24 +688,7 @@ class Job<Input, Output> {
|
|
|
606
688
|
|
|
607
689
|
```typescript
|
|
608
690
|
// Job statuses
|
|
609
|
-
|
|
610
|
-
PENDING = "PENDING",
|
|
611
|
-
RUNNING = "RUNNING",
|
|
612
|
-
COMPLETED = "COMPLETED",
|
|
613
|
-
FAILED = "FAILED",
|
|
614
|
-
ABORTING = "ABORTING",
|
|
615
|
-
DISABLED = "DISABLED",
|
|
616
|
-
}
|
|
617
|
-
|
|
618
|
-
// Queue options
|
|
619
|
-
interface JobQueueOptions<Input, Output> {
|
|
620
|
-
deleteAfterCompletionMs?: number;
|
|
621
|
-
deleteAfterFailureMs?: number;
|
|
622
|
-
deleteAfterDisabledMs?: number;
|
|
623
|
-
waitDurationInMilliseconds?: number;
|
|
624
|
-
limiter?: ILimiter;
|
|
625
|
-
storage?: IQueueStorage<Input, Output>;
|
|
626
|
-
}
|
|
691
|
+
type JobStatus = "PENDING" | "PROCESSING" | "COMPLETED" | "FAILED" | "ABORTING" | "DISABLED";
|
|
627
692
|
|
|
628
693
|
// Job execution context
|
|
629
694
|
interface IJobExecuteContext {
|
|
@@ -631,7 +696,7 @@ interface IJobExecuteContext {
|
|
|
631
696
|
updateProgress: (
|
|
632
697
|
progress: number,
|
|
633
698
|
message?: string,
|
|
634
|
-
details?: Record<string,
|
|
699
|
+
details?: Record<string, unknown> | null
|
|
635
700
|
) => Promise<void>;
|
|
636
701
|
}
|
|
637
702
|
|
|
@@ -639,19 +704,38 @@ interface IJobExecuteContext {
|
|
|
639
704
|
type JobProgressListener = (
|
|
640
705
|
progress: number,
|
|
641
706
|
message: string,
|
|
642
|
-
details: Record<string,
|
|
707
|
+
details: Record<string, unknown> | null
|
|
643
708
|
) => void;
|
|
644
709
|
|
|
645
710
|
// Queue statistics
|
|
646
711
|
interface JobQueueStats {
|
|
647
|
-
totalJobs: number;
|
|
648
|
-
completedJobs: number;
|
|
649
|
-
failedJobs: number;
|
|
650
|
-
abortedJobs: number;
|
|
651
|
-
retriedJobs: number;
|
|
652
|
-
disabledJobs: number;
|
|
653
|
-
averageProcessingTime?: number;
|
|
654
|
-
lastUpdateTime: Date;
|
|
712
|
+
readonly totalJobs: number;
|
|
713
|
+
readonly completedJobs: number;
|
|
714
|
+
readonly failedJobs: number;
|
|
715
|
+
readonly abortedJobs: number;
|
|
716
|
+
readonly retriedJobs: number;
|
|
717
|
+
readonly disabledJobs: number;
|
|
718
|
+
readonly averageProcessingTime?: number;
|
|
719
|
+
readonly lastUpdateTime: Date;
|
|
720
|
+
}
|
|
721
|
+
|
|
722
|
+
// Client options
|
|
723
|
+
interface JobQueueClientOptions<Input, Output> {
|
|
724
|
+
readonly storage: IQueueStorage<Input, Output>;
|
|
725
|
+
readonly queueName: string;
|
|
726
|
+
}
|
|
727
|
+
|
|
728
|
+
// Server options
|
|
729
|
+
interface JobQueueServerOptions<Input, Output> {
|
|
730
|
+
readonly storage: IQueueStorage<Input, Output>;
|
|
731
|
+
readonly queueName: string;
|
|
732
|
+
readonly limiter?: ILimiter;
|
|
733
|
+
readonly workerCount?: number;
|
|
734
|
+
readonly pollIntervalMs?: number;
|
|
735
|
+
readonly deleteAfterCompletionMs?: number;
|
|
736
|
+
readonly deleteAfterFailureMs?: number;
|
|
737
|
+
readonly deleteAfterDisabledMs?: number;
|
|
738
|
+
readonly cleanupIntervalMs?: number;
|
|
655
739
|
}
|
|
656
740
|
```
|
|
657
741
|
|
|
@@ -666,25 +750,67 @@ bun test
|
|
|
666
750
|
Example test:
|
|
667
751
|
|
|
668
752
|
```typescript
|
|
669
|
-
import { describe, it, expect } from "vitest";
|
|
670
|
-
import {
|
|
753
|
+
import { describe, it, expect, beforeEach, afterEach } from "vitest";
|
|
754
|
+
import { Job, JobQueueClient, JobQueueServer, IJobExecuteContext } from "@workglow/job-queue";
|
|
671
755
|
import { InMemoryQueueStorage } from "@workglow/storage";
|
|
672
756
|
|
|
757
|
+
class TestJob extends Job<{ data: string }, { result: string }> {
|
|
758
|
+
async execute(input: { data: string }, context: IJobExecuteContext) {
|
|
759
|
+
await context.updateProgress(50, "Processing");
|
|
760
|
+
return { result: input.data.toUpperCase() };
|
|
761
|
+
}
|
|
762
|
+
}
|
|
763
|
+
|
|
673
764
|
describe("JobQueue", () => {
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
765
|
+
let server: JobQueueServer<{ data: string }, { result: string }>;
|
|
766
|
+
let client: JobQueueClient<{ data: string }, { result: string }>;
|
|
767
|
+
let storage: InMemoryQueueStorage<{ data: string }, { result: string }>;
|
|
768
|
+
|
|
769
|
+
beforeEach(async () => {
|
|
770
|
+
storage = new InMemoryQueueStorage("test-queue");
|
|
771
|
+
await storage.setupDatabase();
|
|
772
|
+
|
|
773
|
+
server = new JobQueueServer(TestJob, {
|
|
774
|
+
storage,
|
|
775
|
+
queueName: "test-queue",
|
|
776
|
+
pollIntervalMs: 1,
|
|
777
|
+
});
|
|
778
|
+
|
|
779
|
+
client = new JobQueueClient({
|
|
780
|
+
storage,
|
|
781
|
+
queueName: "test-queue",
|
|
677
782
|
});
|
|
678
783
|
|
|
679
|
-
|
|
784
|
+
client.attach(server);
|
|
785
|
+
});
|
|
786
|
+
|
|
787
|
+
afterEach(async () => {
|
|
788
|
+
await server.stop();
|
|
789
|
+
await storage.deleteAll();
|
|
790
|
+
});
|
|
791
|
+
|
|
792
|
+
it("should process jobs successfully", async () => {
|
|
793
|
+
await server.start();
|
|
794
|
+
|
|
795
|
+
const handle = await client.submit({ data: "hello" });
|
|
796
|
+
const result = await handle.waitFor();
|
|
797
|
+
|
|
798
|
+
expect(result).toEqual({ result: "HELLO" });
|
|
799
|
+
});
|
|
800
|
+
|
|
801
|
+
it("should track progress", async () => {
|
|
802
|
+
await server.start();
|
|
680
803
|
|
|
681
|
-
const
|
|
682
|
-
const
|
|
683
|
-
|
|
804
|
+
const progressUpdates: number[] = [];
|
|
805
|
+
const handle = await client.submit({ data: "test" });
|
|
806
|
+
|
|
807
|
+
handle.onProgress((progress) => {
|
|
808
|
+
progressUpdates.push(progress);
|
|
809
|
+
});
|
|
684
810
|
|
|
685
|
-
|
|
811
|
+
await handle.waitFor();
|
|
686
812
|
|
|
687
|
-
|
|
813
|
+
expect(progressUpdates).toContain(50);
|
|
688
814
|
});
|
|
689
815
|
});
|
|
690
816
|
```
|