@monque/core 0.1.0
- package/README.md +124 -0
- package/dist/errors-BX3oWYfZ.cjs +155 -0
- package/dist/errors-BX3oWYfZ.cjs.map +1 -0
- package/dist/errors-Ca92IlaL.cjs +3 -0
- package/dist/errors-DW20rHWR.mjs +125 -0
- package/dist/errors-DW20rHWR.mjs.map +1 -0
- package/dist/errors-DWkXsP3R.mjs +3 -0
- package/dist/index.cjs +1618 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +1460 -0
- package/dist/index.d.cts.map +1 -0
- package/dist/index.d.mts +1460 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +1599 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +91 -0
package/dist/index.cjs
ADDED
@@ -0,0 +1,1618 @@
const require_errors = require('./errors-BX3oWYfZ.cjs');
let node_crypto = require("node:crypto");
let node_events = require("node:events");
let cron_parser = require("cron-parser");

//#region src/jobs/types.ts
/**
 * Represents the lifecycle states of a job in the queue.
 *
 * Jobs transition through states as follows:
 * - PENDING → PROCESSING (when picked up by a worker)
 * - PROCESSING → COMPLETED (on success)
 * - PROCESSING → PENDING (on failure, if retries remain)
 * - PROCESSING → FAILED (on failure, after max retries exhausted)
 *
 * @example
 * ```typescript
 * if (job.status === JobStatus.PENDING) {
 *   // job is waiting to be picked up
 * }
 * ```
 */
const JobStatus = {
  PENDING: "pending",
  PROCESSING: "processing",
  COMPLETED: "completed",
  FAILED: "failed"
};

//#endregion
//#region src/jobs/guards.ts
/**
 * Type guard to check if a job has been persisted to MongoDB.
 *
 * A persisted job is guaranteed to have an `_id` field, which means it has been
 * successfully inserted into the database. This is useful when you need to ensure
 * a job can be updated or referenced by its ID.
 *
 * @template T - The type of the job's data payload
 * @param job - The job to check
 * @returns `true` if the job has a valid `_id`, narrowing the type to `PersistedJob<T>`
 *
 * @example Basic usage
 * ```typescript
 * const job: Job<EmailData> = await monque.enqueue('send-email', emailData);
 *
 * if (isPersistedJob(job)) {
 *   // TypeScript knows job._id exists
 *   console.log(`Job ID: ${job._id.toString()}`);
 * }
 * ```
 *
 * @example In a conditional
 * ```typescript
 * function logJobId(job: Job) {
 *   if (!isPersistedJob(job)) {
 *     console.log('Job not yet persisted');
 *     return;
 *   }
 *   // TypeScript knows job is PersistedJob here
 *   console.log(`Processing job ${job._id.toString()}`);
 * }
 * ```
 */
function isPersistedJob(job) {
  return "_id" in job && job._id !== void 0 && job._id !== null;
}
/**
 * Type guard to check if a value is a valid job status.
 *
 * Validates that a value is one of the four valid job statuses: `'pending'`,
 * `'processing'`, `'completed'`, or `'failed'`. Useful for runtime validation
 * of user input or external data.
 *
 * @param value - The value to check
 * @returns `true` if the value is a valid `JobStatusType`, narrowing the type
 *
 * @example Validating user input
 * ```typescript
 * function filterByStatus(status: string) {
 *   if (!isValidJobStatus(status)) {
 *     throw new Error(`Invalid status: ${status}`);
 *   }
 *   // TypeScript knows status is JobStatusType here
 *   return db.jobs.find({ status });
 * }
 * ```
 *
 * @example Runtime validation
 * ```typescript
 * const statusFromApi = externalData.status;
 *
 * if (isValidJobStatus(statusFromApi)) {
 *   job.status = statusFromApi;
 * } else {
 *   job.status = JobStatus.PENDING;
 * }
 * ```
 */
function isValidJobStatus(value) {
  return typeof value === "string" && Object.values(JobStatus).includes(value);
}
/**
 * Type guard to check if a job is in pending status.
 *
 * A convenience helper for checking if a job is waiting to be processed.
 * Equivalent to `job.status === JobStatus.PENDING` but with better semantics.
 *
 * @template T - The type of the job's data payload
 * @param job - The job to check
 * @returns `true` if the job status is `'pending'`
 *
 * @example Filter pending jobs
 * ```typescript
 * const jobs = await monque.getJobs();
 * const pendingJobs = jobs.filter(isPendingJob);
 * console.log(`${pendingJobs.length} jobs waiting to be processed`);
 * ```
 *
 * @example Conditional logic
 * ```typescript
 * if (isPendingJob(job)) {
 *   await monque.now(job.name, job.data);
 * }
 * ```
 */
function isPendingJob(job) {
  return job.status === JobStatus.PENDING;
}
/**
 * Type guard to check if a job is currently being processed.
 *
 * A convenience helper for checking if a job is actively running.
 * Equivalent to `job.status === JobStatus.PROCESSING` but with better semantics.
 *
 * @template T - The type of the job's data payload
 * @param job - The job to check
 * @returns `true` if the job status is `'processing'`
 *
 * @example Monitor active jobs
 * ```typescript
 * const jobs = await monque.getJobs();
 * const activeJobs = jobs.filter(isProcessingJob);
 * console.log(`${activeJobs.length} jobs currently running`);
 * ```
 */
function isProcessingJob(job) {
  return job.status === JobStatus.PROCESSING;
}
/**
 * Type guard to check if a job has completed successfully.
 *
 * A convenience helper for checking if a job finished without errors.
 * Equivalent to `job.status === JobStatus.COMPLETED` but with better semantics.
 *
 * @template T - The type of the job's data payload
 * @param job - The job to check
 * @returns `true` if the job status is `'completed'`
 *
 * @example Find completed jobs
 * ```typescript
 * const jobs = await monque.getJobs();
 * const completedJobs = jobs.filter(isCompletedJob);
 * console.log(`${completedJobs.length} jobs completed successfully`);
 * ```
 */
function isCompletedJob(job) {
  return job.status === JobStatus.COMPLETED;
}
/**
 * Type guard to check if a job has permanently failed.
 *
 * A convenience helper for checking if a job exhausted all retries.
 * Equivalent to `job.status === JobStatus.FAILED` but with better semantics.
 *
 * @template T - The type of the job's data payload
 * @param job - The job to check
 * @returns `true` if the job status is `'failed'`
 *
 * @example Handle failed jobs
 * ```typescript
 * const jobs = await monque.getJobs();
 * const failedJobs = jobs.filter(isFailedJob);
 *
 * for (const job of failedJobs) {
 *   console.error(`Job ${job.name} failed: ${job.failReason}`);
 *   await sendAlert(job);
 * }
 * ```
 */
function isFailedJob(job) {
  return job.status === JobStatus.FAILED;
}
/**
 * Type guard to check if a job is a recurring scheduled job.
 *
 * A recurring job has a `repeatInterval` cron expression and will be automatically
 * rescheduled after each successful completion.
 *
 * @template T - The type of the job's data payload
 * @param job - The job to check
 * @returns `true` if the job has a `repeatInterval` defined
 *
 * @example Filter recurring jobs
 * ```typescript
 * const jobs = await monque.getJobs();
 * const recurringJobs = jobs.filter(isRecurringJob);
 * console.log(`${recurringJobs.length} jobs will repeat automatically`);
 * ```
 *
 * @example Conditional cleanup
 * ```typescript
 * if (!isRecurringJob(job) && isCompletedJob(job)) {
 *   // Safe to delete one-time completed jobs
 *   await deleteJob(job._id);
 * }
 * ```
 */
function isRecurringJob(job) {
  return job.repeatInterval !== void 0 && job.repeatInterval !== null;
}

//#endregion
//#region src/shared/utils/backoff.ts
/**
 * Default base interval for exponential backoff in milliseconds.
 * @default 1000
 */
const DEFAULT_BASE_INTERVAL = 1e3;
/**
 * Default maximum delay cap for exponential backoff in milliseconds.
 *
 * This prevents unbounded delays (e.g. failCount=20 is >11 days at 1s base)
 * and avoids precision/overflow issues for very large fail counts.
 * @default 86400000 (24 hours)
 */
const DEFAULT_MAX_BACKOFF_DELAY = 1440 * 60 * 1e3;
/**
 * Calculate the next run time using exponential backoff.
 *
 * Formula: nextRunAt = now + (2^failCount × baseInterval)
 *
 * @param failCount - Number of previous failed attempts
 * @param baseInterval - Base interval in milliseconds (default: 1000ms)
 * @param maxDelay - Maximum delay in milliseconds (optional)
 * @returns The next run date
 *
 * @example
 * ```typescript
 * // First retry (failCount=1): 2^1 * 1000 = 2000ms delay
 * const nextRun = calculateBackoff(1);
 *
 * // Second retry (failCount=2): 2^2 * 1000 = 4000ms delay
 * const nextRun = calculateBackoff(2);
 *
 * // With custom base interval
 * const nextRun = calculateBackoff(3, 500); // 2^3 * 500 = 4000ms delay
 *
 * // With max delay
 * const nextRun = calculateBackoff(10, 1000, 60000); // capped at 60000ms
 * ```
 */
function calculateBackoff(failCount, baseInterval = DEFAULT_BASE_INTERVAL, maxDelay) {
  const effectiveMaxDelay = maxDelay ?? DEFAULT_MAX_BACKOFF_DELAY;
  let delay = 2 ** failCount * baseInterval;
  if (delay > effectiveMaxDelay) delay = effectiveMaxDelay;
  return new Date(Date.now() + delay);
}
/**
 * Calculate just the delay in milliseconds for a given fail count.
 *
 * @param failCount - Number of previous failed attempts
 * @param baseInterval - Base interval in milliseconds (default: 1000ms)
 * @param maxDelay - Maximum delay in milliseconds (optional)
 * @returns The delay in milliseconds
 */
function calculateBackoffDelay(failCount, baseInterval = DEFAULT_BASE_INTERVAL, maxDelay) {
  const effectiveMaxDelay = maxDelay ?? DEFAULT_MAX_BACKOFF_DELAY;
  let delay = 2 ** failCount * baseInterval;
  if (delay > effectiveMaxDelay) delay = effectiveMaxDelay;
  return delay;
}

//#endregion
//#region src/shared/utils/cron.ts
/**
 * Parse a cron expression and return the next scheduled run date.
 *
 * @param expression - A 5-field cron expression (minute hour day-of-month month day-of-week) or a predefined expression
 * @param currentDate - The reference date for calculating next run (default: now)
 * @returns The next scheduled run date
 * @throws {InvalidCronError} If the cron expression is invalid
 *
 * @example
 * ```typescript
 * // Every minute
 * const nextRun = getNextCronDate('* * * * *');
 *
 * // Every day at midnight
 * const nextRun = getNextCronDate('0 0 * * *');
 *
 * // Using predefined expression
 * const nextRun = getNextCronDate('@daily');
 *
 * // Every Monday at 9am
 * const nextRun = getNextCronDate('0 9 * * 1');
 * ```
 */
function getNextCronDate(expression, currentDate) {
  try {
    return cron_parser.CronExpressionParser.parse(expression, { currentDate: currentDate ?? /* @__PURE__ */ new Date() }).next().toDate();
  } catch (error) {
    handleCronParseError(expression, error);
  }
}
/**
 * Validate a cron expression without calculating the next run date.
 *
 * @param expression - A 5-field cron expression
 * @throws {InvalidCronError} If the cron expression is invalid
 *
 * @example
 * ```typescript
 * validateCronExpression('0 9 * * 1'); // Throws if invalid
 * ```
 */
function validateCronExpression(expression) {
  try {
    cron_parser.CronExpressionParser.parse(expression);
  } catch (error) {
    handleCronParseError(expression, error);
  }
}
function handleCronParseError(expression, error) {
  throw new require_errors.InvalidCronError(expression, `Invalid cron expression "${expression}": ${error instanceof Error ? error.message : "Unknown parsing error"}. Expected 5-field format: "minute hour day-of-month month day-of-week" or predefined expression (e.g. @daily). Example: "0 9 * * 1" (every Monday at 9am)`);
}

//#endregion
//#region src/scheduler/monque.ts
/**
 * Default configuration values
 */
const DEFAULTS = {
  collectionName: "monque_jobs",
  pollInterval: 1e3,
  maxRetries: 10,
  baseRetryInterval: 1e3,
  shutdownTimeout: 3e4,
  defaultConcurrency: 5,
  lockTimeout: 18e5,
  recoverStaleJobs: true,
  heartbeatInterval: 3e4,
  retentionInterval: 36e5
};
/**
 * Monque - MongoDB-backed job scheduler
 *
 * A type-safe job scheduler with atomic locking, exponential backoff, cron scheduling,
 * stale job recovery, and event-driven observability. Built on native MongoDB driver.
 *
 * @example Complete lifecycle
 * ```typescript
 * import { Monque } from '@monque/core';
 * import { MongoClient } from 'mongodb';
 *
 * const client = new MongoClient('mongodb://localhost:27017');
 * await client.connect();
 * const db = client.db('myapp');
 *
 * // Create instance with options
 * const monque = new Monque(db, {
 *   collectionName: 'jobs',
 *   pollInterval: 1000,
 *   maxRetries: 10,
 *   shutdownTimeout: 30000,
 * });
 *
 * // Initialize (sets up indexes and recovers stale jobs)
 * await monque.initialize();
 *
 * // Register workers with type safety
 * type EmailJob = {
 *   to: string;
 *   subject: string;
 *   body: string;
 * };
 *
 * monque.worker<EmailJob>('send-email', async (job) => {
 *   await emailService.send(job.data.to, job.data.subject, job.data.body);
 * });
 *
 * // Monitor events for observability
 * monque.on('job:complete', ({ job, duration }) => {
 *   logger.info(`Job ${job.name} completed in ${duration}ms`);
 * });
 *
 * monque.on('job:fail', ({ job, error, willRetry }) => {
 *   logger.error(`Job ${job.name} failed:`, error);
 * });
 *
 * // Start processing
 * monque.start();
 *
 * // Enqueue jobs
 * await monque.enqueue('send-email', {
 *   to: 'user@example.com',
 *   subject: 'Welcome!',
 *   body: 'Thanks for signing up.'
 * });
 *
 * // Graceful shutdown
 * process.on('SIGTERM', async () => {
 *   await monque.stop();
 *   await client.close();
 *   process.exit(0);
 * });
 * ```
 */
var Monque = class extends node_events.EventEmitter {
|
|
439
|
+
db;
|
|
440
|
+
options;
|
|
441
|
+
collection = null;
|
|
442
|
+
workers = /* @__PURE__ */ new Map();
|
|
443
|
+
pollIntervalId = null;
|
|
444
|
+
heartbeatIntervalId = null;
|
|
445
|
+
cleanupIntervalId = null;
|
|
446
|
+
isRunning = false;
|
|
447
|
+
isInitialized = false;
|
|
448
|
+
/**
|
|
449
|
+
* MongoDB Change Stream for real-time job notifications.
|
|
450
|
+
* When available, provides instant job processing without polling delay.
|
|
451
|
+
*/
|
|
452
|
+
changeStream = null;
|
|
453
|
+
/**
|
|
454
|
+
* Number of consecutive reconnection attempts for change stream.
|
|
455
|
+
* Used for exponential backoff during reconnection.
|
|
456
|
+
*/
|
|
457
|
+
changeStreamReconnectAttempts = 0;
|
|
458
|
+
/**
|
|
459
|
+
* Maximum reconnection attempts before falling back to polling-only mode.
|
|
460
|
+
*/
|
|
461
|
+
maxChangeStreamReconnectAttempts = 3;
|
|
462
|
+
/**
|
|
463
|
+
* Debounce timer for change stream event processing.
|
|
464
|
+
* Prevents claim storms when multiple events arrive in quick succession.
|
|
465
|
+
*/
|
|
466
|
+
changeStreamDebounceTimer = null;
|
|
467
|
+
/**
|
|
468
|
+
* Whether the scheduler is currently using change streams for notifications.
|
|
469
|
+
*/
|
|
470
|
+
usingChangeStreams = false;
|
|
471
|
+
/**
|
|
472
|
+
* Timer ID for change stream reconnection with exponential backoff.
|
|
473
|
+
* Tracked to allow cancellation during shutdown.
|
|
474
|
+
*/
|
|
475
|
+
changeStreamReconnectTimer = null;
|
|
476
|
+
constructor(db, options = {}) {
|
|
477
|
+
super();
|
|
478
|
+
this.db = db;
|
|
479
|
+
this.options = {
|
|
480
|
+
collectionName: options.collectionName ?? DEFAULTS.collectionName,
|
|
481
|
+
pollInterval: options.pollInterval ?? DEFAULTS.pollInterval,
|
|
482
|
+
maxRetries: options.maxRetries ?? DEFAULTS.maxRetries,
|
|
483
|
+
baseRetryInterval: options.baseRetryInterval ?? DEFAULTS.baseRetryInterval,
|
|
484
|
+
shutdownTimeout: options.shutdownTimeout ?? DEFAULTS.shutdownTimeout,
|
|
485
|
+
defaultConcurrency: options.defaultConcurrency ?? DEFAULTS.defaultConcurrency,
|
|
486
|
+
lockTimeout: options.lockTimeout ?? DEFAULTS.lockTimeout,
|
|
487
|
+
recoverStaleJobs: options.recoverStaleJobs ?? DEFAULTS.recoverStaleJobs,
|
|
488
|
+
maxBackoffDelay: options.maxBackoffDelay,
|
|
489
|
+
schedulerInstanceId: options.schedulerInstanceId ?? (0, node_crypto.randomUUID)(),
|
|
490
|
+
heartbeatInterval: options.heartbeatInterval ?? DEFAULTS.heartbeatInterval,
|
|
491
|
+
jobRetention: options.jobRetention
|
|
492
|
+
};
|
|
493
|
+
}
|
|
494
|
+
/**
|
|
495
|
+
* Initialize the scheduler by setting up the MongoDB collection and indexes.
|
|
496
|
+
* Must be called before start().
|
|
497
|
+
*
|
|
498
|
+
* @throws {ConnectionError} If collection or index creation fails
|
|
499
|
+
*/
|
|
500
|
+
async initialize() {
|
|
501
|
+
if (this.isInitialized) return;
|
|
502
|
+
try {
|
|
503
|
+
this.collection = this.db.collection(this.options.collectionName);
|
|
504
|
+
await this.createIndexes();
|
|
505
|
+
if (this.options.recoverStaleJobs) await this.recoverStaleJobs();
|
|
506
|
+
this.isInitialized = true;
|
|
507
|
+
} catch (error) {
|
|
508
|
+
throw new require_errors.ConnectionError(`Failed to initialize Monque: ${error instanceof Error ? error.message : "Unknown error during initialization"}`);
|
|
509
|
+
}
|
|
510
|
+
}
|
|
511
|
+
/**
|
|
512
|
+
* Create required MongoDB indexes for efficient job processing.
|
|
513
|
+
*
|
|
514
|
+
* The following indexes are created:
|
|
515
|
+
* - `{status, nextRunAt}` - For efficient job polling queries
|
|
516
|
+
* - `{name, uniqueKey}` - Partial unique index for deduplication (pending/processing only)
|
|
517
|
+
* - `{name, status}` - For job lookup by type
|
|
518
|
+
* - `{claimedBy, status}` - For finding jobs owned by a specific scheduler instance
|
|
519
|
+
* - `{lastHeartbeat, status}` - For monitoring/debugging queries (e.g., inspecting heartbeat age)
|
|
520
|
+
* - `{status, nextRunAt, claimedBy}` - For atomic claim queries (find unclaimed pending jobs)
|
|
521
|
+
* - `{lockedAt, lastHeartbeat, status}` - Supports recovery scans and monitoring access patterns
|
|
522
|
+
*/
|
|
523
|
+
async createIndexes() {
|
|
524
|
+
if (!this.collection) throw new require_errors.ConnectionError("Collection not initialized");
|
|
525
|
+
await this.collection.createIndex({
|
|
526
|
+
status: 1,
|
|
527
|
+
nextRunAt: 1
|
|
528
|
+
}, { background: true });
|
|
529
|
+
await this.collection.createIndex({
|
|
530
|
+
name: 1,
|
|
531
|
+
uniqueKey: 1
|
|
532
|
+
}, {
|
|
533
|
+
unique: true,
|
|
534
|
+
partialFilterExpression: {
|
|
535
|
+
uniqueKey: { $exists: true },
|
|
536
|
+
status: { $in: [JobStatus.PENDING, JobStatus.PROCESSING] }
|
|
537
|
+
},
|
|
538
|
+
background: true
|
|
539
|
+
});
|
|
540
|
+
await this.collection.createIndex({
|
|
541
|
+
name: 1,
|
|
542
|
+
status: 1
|
|
543
|
+
}, { background: true });
|
|
544
|
+
await this.collection.createIndex({
|
|
545
|
+
claimedBy: 1,
|
|
546
|
+
status: 1
|
|
547
|
+
}, { background: true });
|
|
548
|
+
await this.collection.createIndex({
|
|
549
|
+
lastHeartbeat: 1,
|
|
550
|
+
status: 1
|
|
551
|
+
}, { background: true });
|
|
552
|
+
await this.collection.createIndex({
|
|
553
|
+
status: 1,
|
|
554
|
+
nextRunAt: 1,
|
|
555
|
+
claimedBy: 1
|
|
556
|
+
}, { background: true });
|
|
557
|
+
await this.collection.createIndex({
|
|
558
|
+
status: 1,
|
|
559
|
+
lockedAt: 1,
|
|
560
|
+
lastHeartbeat: 1
|
|
561
|
+
}, { background: true });
|
|
562
|
+
}
|
|
563
|
+
/**
|
|
564
|
+
* Recover stale jobs that were left in 'processing' status.
|
|
565
|
+
* A job is considered stale if its `lockedAt` timestamp exceeds the configured `lockTimeout`.
|
|
566
|
+
* Stale jobs are reset to 'pending' so they can be picked up by workers again.
|
|
567
|
+
*/
|
|
568
|
+
async recoverStaleJobs() {
|
|
569
|
+
if (!this.collection) return;
|
|
570
|
+
const staleThreshold = new Date(Date.now() - this.options.lockTimeout);
|
|
571
|
+
const result = await this.collection.updateMany({
|
|
572
|
+
status: JobStatus.PROCESSING,
|
|
573
|
+
lockedAt: { $lt: staleThreshold }
|
|
574
|
+
}, {
|
|
575
|
+
$set: {
|
|
576
|
+
status: JobStatus.PENDING,
|
|
577
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
578
|
+
},
|
|
579
|
+
$unset: {
|
|
580
|
+
lockedAt: "",
|
|
581
|
+
claimedBy: "",
|
|
582
|
+
lastHeartbeat: "",
|
|
583
|
+
heartbeatInterval: ""
|
|
584
|
+
}
|
|
585
|
+
});
|
|
586
|
+
if (result.modifiedCount > 0) this.emit("stale:recovered", { count: result.modifiedCount });
|
|
587
|
+
}
|
|
588
|
+
/**
|
|
589
|
+
* Clean up old completed and failed jobs based on retention policy.
|
|
590
|
+
*
|
|
591
|
+
* - Removes completed jobs older than `jobRetention.completed`
|
|
592
|
+
* - Removes failed jobs older than `jobRetention.failed`
|
|
593
|
+
*
|
|
594
|
+
* The cleanup runs concurrently for both statuses if configured.
|
|
595
|
+
*
|
|
596
|
+
* @returns Promise resolving when all deletion operations complete
|
|
597
|
+
*/
|
|
598
|
+
async cleanupJobs() {
|
|
599
|
+
if (!this.collection || !this.options.jobRetention) return;
|
|
600
|
+
const { completed, failed } = this.options.jobRetention;
|
|
601
|
+
const now = Date.now();
|
|
602
|
+
const deletions = [];
|
|
603
|
+
if (completed) {
|
|
604
|
+
const cutoff = new Date(now - completed);
|
|
605
|
+
deletions.push(this.collection.deleteMany({
|
|
606
|
+
status: JobStatus.COMPLETED,
|
|
607
|
+
updatedAt: { $lt: cutoff }
|
|
608
|
+
}));
|
|
609
|
+
}
|
|
610
|
+
if (failed) {
|
|
611
|
+
const cutoff = new Date(now - failed);
|
|
612
|
+
deletions.push(this.collection.deleteMany({
|
|
613
|
+
status: JobStatus.FAILED,
|
|
614
|
+
updatedAt: { $lt: cutoff }
|
|
615
|
+
}));
|
|
616
|
+
}
|
|
617
|
+
if (deletions.length > 0) await Promise.all(deletions);
|
|
618
|
+
}
|
|
619
|
+
/**
|
|
620
|
+
* Enqueue a job for processing.
|
|
621
|
+
*
|
|
622
|
+
* Jobs are stored in MongoDB and processed by registered workers. Supports
|
|
623
|
+
* delayed execution via `runAt` and deduplication via `uniqueKey`.
|
|
624
|
+
*
|
|
625
|
+
* When a `uniqueKey` is provided, only one pending or processing job with that key
|
|
626
|
+
* can exist. Completed or failed jobs don't block new jobs with the same key.
|
|
627
|
+
*
|
|
628
|
+
* Failed jobs are automatically retried with exponential backoff up to `maxRetries`
|
|
629
|
+
* (default: 10 attempts). The delay between retries is calculated as `2^failCount × baseRetryInterval`.
|
|
630
|
+
*
|
|
631
|
+
* @template T - The job data payload type (must be JSON-serializable)
|
|
632
|
+
* @param name - Job type identifier, must match a registered worker
|
|
633
|
+
* @param data - Job payload, will be passed to the worker handler
|
|
634
|
+
* @param options - Scheduling and deduplication options
|
|
635
|
+
* @returns Promise resolving to the created or existing job document
|
|
636
|
+
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
637
|
+
*
|
|
638
|
+
* @example Basic job enqueueing
|
|
639
|
+
* ```typescript
|
|
640
|
+
* await monque.enqueue('send-email', {
|
|
641
|
+
* to: 'user@example.com',
|
|
642
|
+
* subject: 'Welcome!',
|
|
643
|
+
* body: 'Thanks for signing up.'
|
|
644
|
+
* });
|
|
645
|
+
* ```
|
|
646
|
+
*
|
|
647
|
+
* @example Delayed execution
|
|
648
|
+
* ```typescript
|
|
649
|
+
* const oneHourLater = new Date(Date.now() + 3600000);
|
|
650
|
+
* await monque.enqueue('reminder', { message: 'Check in!' }, {
|
|
651
|
+
* runAt: oneHourLater
|
|
652
|
+
* });
|
|
653
|
+
* ```
|
|
654
|
+
*
|
|
655
|
+
* @example Prevent duplicates with unique key
|
|
656
|
+
* ```typescript
|
|
657
|
+
* await monque.enqueue('sync-user', { userId: '123' }, {
|
|
658
|
+
* uniqueKey: 'sync-user-123'
|
|
659
|
+
* });
|
|
660
|
+
* // Subsequent enqueues with same uniqueKey return existing pending/processing job
|
|
661
|
+
* ```
|
|
662
|
+
*/
|
|
663
|
+
async enqueue(name, data, options = {}) {
|
|
664
|
+
this.ensureInitialized();
|
|
665
|
+
const now = /* @__PURE__ */ new Date();
|
|
666
|
+
const job = {
|
|
667
|
+
name,
|
|
668
|
+
data,
|
|
669
|
+
status: JobStatus.PENDING,
|
|
670
|
+
nextRunAt: options.runAt ?? now,
|
|
671
|
+
failCount: 0,
|
|
672
|
+
createdAt: now,
|
|
673
|
+
updatedAt: now
|
|
674
|
+
};
|
|
675
|
+
if (options.uniqueKey) job.uniqueKey = options.uniqueKey;
|
|
676
|
+
try {
|
|
677
|
+
if (options.uniqueKey) {
|
|
678
|
+
if (!this.collection) throw new require_errors.ConnectionError("Failed to enqueue job: collection not available");
|
|
679
|
+
const result$1 = await this.collection.findOneAndUpdate({
|
|
680
|
+
name,
|
|
681
|
+
uniqueKey: options.uniqueKey,
|
|
682
|
+
status: { $in: [JobStatus.PENDING, JobStatus.PROCESSING] }
|
|
683
|
+
}, { $setOnInsert: job }, {
|
|
684
|
+
upsert: true,
|
|
685
|
+
returnDocument: "after"
|
|
686
|
+
});
|
|
687
|
+
if (!result$1) throw new require_errors.ConnectionError("Failed to enqueue job: findOneAndUpdate returned no document");
|
|
688
|
+
return this.documentToPersistedJob(result$1);
|
|
689
|
+
}
|
|
690
|
+
const result = await this.collection?.insertOne(job);
|
|
691
|
+
if (!result) throw new require_errors.ConnectionError("Failed to enqueue job: collection not available");
|
|
692
|
+
return {
|
|
693
|
+
...job,
|
|
694
|
+
_id: result.insertedId
|
|
695
|
+
};
|
|
696
|
+
} catch (error) {
|
|
697
|
+
if (error instanceof require_errors.ConnectionError) throw error;
|
|
698
|
+
throw new require_errors.ConnectionError(`Failed to enqueue job: ${error instanceof Error ? error.message : "Unknown error during enqueue"}`, error instanceof Error ? { cause: error } : void 0);
|
|
699
|
+
}
|
|
700
|
+
}
|
|
701
|
+
/**
|
|
702
|
+
* Enqueue a job for immediate processing.
|
|
703
|
+
*
|
|
704
|
+
* Convenience method equivalent to `enqueue(name, data, { runAt: new Date() })`.
|
|
705
|
+
* Jobs are picked up on the next poll cycle (typically within 1 second based on `pollInterval`).
|
|
706
|
+
*
|
|
707
|
+
* @template T - The job data payload type (must be JSON-serializable)
|
|
708
|
+
* @param name - Job type identifier, must match a registered worker
|
|
709
|
+
* @param data - Job payload, will be passed to the worker handler
|
|
710
|
+
* @returns Promise resolving to the created job document
|
|
711
|
+
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
712
|
+
*
|
|
713
|
+
* @example Send email immediately
|
|
714
|
+
* ```typescript
|
|
715
|
+
* await monque.now('send-email', {
|
|
716
|
+
* to: 'admin@example.com',
|
|
717
|
+
* subject: 'Alert',
|
|
718
|
+
* body: 'Immediate attention required'
|
|
719
|
+
* });
|
|
720
|
+
* ```
|
|
721
|
+
*
|
|
722
|
+
* @example Process order in background
|
|
723
|
+
* ```typescript
|
|
724
|
+
* const order = await createOrder(data);
|
|
725
|
+
* await monque.now('process-order', { orderId: order.id });
|
|
726
|
+
* return order; // Return immediately, processing happens async
|
|
727
|
+
* ```
|
|
728
|
+
*/
|
|
729
|
+
async now(name, data) {
|
|
730
|
+
return this.enqueue(name, data, { runAt: /* @__PURE__ */ new Date() });
|
|
731
|
+
}
|
|
732
|
+
/**
|
|
733
|
+
* Schedule a recurring job with a cron expression.
|
|
734
|
+
*
|
|
735
|
+
* Creates a job that automatically re-schedules itself based on the cron pattern.
|
|
736
|
+
* Uses standard 5-field cron format: minute, hour, day of month, month, day of week.
|
|
737
|
+
* Also supports predefined expressions like `@daily`, `@weekly`, `@monthly`, etc.
|
|
738
|
+
* After successful completion, the job is reset to `pending` status and scheduled
|
|
739
|
+
* for its next run based on the cron expression.
|
|
740
|
+
*
|
|
741
|
+
* When a `uniqueKey` is provided, only one pending or processing job with that key
|
|
742
|
+
* can exist. This prevents duplicate scheduled jobs on application restart.
|
|
743
|
+
*
|
|
744
|
+
* @template T - The job data payload type (must be JSON-serializable)
|
|
745
|
+
* @param cron - Cron expression (5 fields or predefined expression)
|
|
746
|
+
* @param name - Job type identifier, must match a registered worker
|
|
747
|
+
* @param data - Job payload, will be passed to the worker handler on each run
|
|
748
|
+
* @param options - Scheduling options (uniqueKey for deduplication)
|
|
749
|
+
* @returns Promise resolving to the created job document with `repeatInterval` set
|
|
750
|
+
* @throws {InvalidCronError} If cron expression is invalid
|
|
751
|
+
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
752
|
+
*
|
|
753
|
+
* @example Hourly cleanup job
|
|
754
|
+
* ```typescript
|
|
755
|
+
* await monque.schedule('0 * * * *', 'cleanup-temp-files', {
|
|
756
|
+
* directory: '/tmp/uploads'
|
|
757
|
+
* });
|
|
758
|
+
* ```
|
|
759
|
+
*
|
|
760
|
+
* @example Prevent duplicate scheduled jobs with unique key
|
|
761
|
+
* ```typescript
|
|
762
|
+
* await monque.schedule('0 * * * *', 'hourly-report', { type: 'sales' }, {
|
|
763
|
+
* uniqueKey: 'hourly-report-sales'
|
|
764
|
+
* });
|
|
765
|
+
* // Subsequent calls with same uniqueKey return existing pending/processing job
|
|
766
|
+
* ```
|
|
767
|
+
*
|
|
768
|
+
* @example Daily report at midnight (using predefined expression)
|
|
769
|
+
* ```typescript
|
|
770
|
+
* await monque.schedule('@daily', 'daily-report', {
|
|
771
|
+
* reportType: 'sales',
|
|
772
|
+
* recipients: ['analytics@example.com']
|
|
773
|
+
* });
|
|
774
|
+
* ```
|
|
775
|
+
*/
|
|
776
|
+
async schedule(cron, name, data, options = {}) {
|
|
777
|
+
this.ensureInitialized();
|
|
778
|
+
const nextRunAt = getNextCronDate(cron);
|
|
779
|
+
const now = /* @__PURE__ */ new Date();
|
|
780
|
+
const job = {
|
|
781
|
+
name,
|
|
782
|
+
data,
|
|
783
|
+
status: JobStatus.PENDING,
|
|
784
|
+
nextRunAt,
|
|
785
|
+
repeatInterval: cron,
|
|
786
|
+
failCount: 0,
|
|
787
|
+
createdAt: now,
|
|
788
|
+
updatedAt: now
|
|
789
|
+
};
|
|
790
|
+
if (options.uniqueKey) job.uniqueKey = options.uniqueKey;
|
|
791
|
+
try {
|
|
792
|
+
if (options.uniqueKey) {
|
|
793
|
+
if (!this.collection) throw new require_errors.ConnectionError("Failed to schedule job: collection not available");
|
|
794
|
+
const result$1 = await this.collection.findOneAndUpdate({
|
|
795
|
+
name,
|
|
796
|
+
uniqueKey: options.uniqueKey,
|
|
797
|
+
status: { $in: [JobStatus.PENDING, JobStatus.PROCESSING] }
|
|
798
|
+
}, { $setOnInsert: job }, {
|
|
799
|
+
upsert: true,
|
|
800
|
+
returnDocument: "after"
|
|
801
|
+
});
|
|
802
|
+
if (!result$1) throw new require_errors.ConnectionError("Failed to schedule job: findOneAndUpdate returned no document");
|
|
803
|
+
return this.documentToPersistedJob(result$1);
|
|
804
|
+
}
|
|
805
|
+
const result = await this.collection?.insertOne(job);
|
|
806
|
+
if (!result) throw new require_errors.ConnectionError("Failed to schedule job: collection not available");
|
|
807
|
+
return {
|
|
808
|
+
...job,
|
|
809
|
+
_id: result.insertedId
|
|
810
|
+
};
|
|
811
|
+
} catch (error) {
|
|
812
|
+
if (error instanceof require_errors.MonqueError) throw error;
|
|
813
|
+
throw new require_errors.ConnectionError(`Failed to schedule job: ${error instanceof Error ? error.message : "Unknown error during schedule"}`, error instanceof Error ? { cause: error } : void 0);
|
|
814
|
+
}
|
|
815
|
+
}
|
|
816
|
+
/**
|
|
817
|
+
* Register a worker to process jobs of a specific type.
|
|
818
|
+
*
|
|
819
|
+
* Workers can be registered before or after calling `start()`. Each worker
|
|
820
|
+
* processes jobs concurrently up to its configured concurrency limit (default: 5).
|
|
821
|
+
*
|
|
822
|
+
* The handler function receives the full job object including metadata (`_id`, `status`,
|
|
823
|
+
* `failCount`, etc.). If the handler throws an error, the job is retried with exponential
|
|
824
|
+
* backoff up to `maxRetries` times. After exhausting retries, the job is marked as `failed`.
|
|
825
|
+
*
|
|
826
|
+
* Events are emitted during job processing: `job:start`, `job:complete`, `job:fail`, and `job:error`.
|
|
827
|
+
*
|
|
828
|
+
* **Duplicate Registration**: By default, registering a worker for a job name that already has
|
|
829
|
+
* a worker will throw a `WorkerRegistrationError`. This fail-fast behavior prevents accidental
|
|
830
|
+
* replacement of handlers. To explicitly replace a worker, pass `{ replace: true }`.
|
|
831
|
+
*
|
|
832
|
+
* @template T - The job data payload type for type-safe access to `job.data`
|
|
833
|
+
* @param name - Job type identifier to handle
|
|
834
|
+
* @param handler - Async function to execute for each job
|
|
835
|
+
* @param options - Worker configuration
|
|
836
|
+
* @param options.concurrency - Maximum concurrent jobs for this worker (default: `defaultConcurrency`)
|
|
837
|
+
* @param options.replace - When `true`, replace existing worker instead of throwing error
|
|
838
|
+
* @throws {WorkerRegistrationError} When a worker is already registered for `name` and `replace` is not `true`
|
|
839
|
+
*
|
|
840
|
+
* @example Basic email worker
|
|
841
|
+
* ```typescript
|
|
842
|
+
* interface EmailJob {
|
|
843
|
+
* to: string;
|
|
844
|
+
* subject: string;
|
|
845
|
+
* body: string;
|
|
846
|
+
* }
|
|
847
|
+
*
|
|
848
|
+
* monque.worker<EmailJob>('send-email', async (job) => {
|
|
849
|
+
* await emailService.send(job.data.to, job.data.subject, job.data.body);
|
|
850
|
+
* });
|
|
851
|
+
* ```
|
|
852
|
+
*
|
|
853
|
+
* @example Worker with custom concurrency
|
|
854
|
+
* ```typescript
|
|
855
|
+
* // Limit to 2 concurrent video processing jobs (resource-intensive)
|
|
856
|
+
* monque.worker('process-video', async (job) => {
|
|
857
|
+
* await videoProcessor.transcode(job.data.videoId);
|
|
858
|
+
* }, { concurrency: 2 });
|
|
859
|
+
* ```
|
|
860
|
+
*
|
|
861
|
+
* @example Replacing an existing worker
|
|
862
|
+
* ```typescript
|
|
863
|
+
* // Replace the existing handler for 'send-email'
|
|
864
|
+
* monque.worker('send-email', newEmailHandler, { replace: true });
|
|
865
|
+
* ```
|
|
866
|
+
*
|
|
867
|
+
* @example Worker with error handling
|
|
868
|
+
* ```typescript
|
|
869
|
+
* monque.worker('sync-user', async (job) => {
|
|
870
|
+
* try {
|
|
871
|
+
* await externalApi.syncUser(job.data.userId);
|
|
872
|
+
* } catch (error) {
|
|
873
|
+
* // Job will retry with exponential backoff
|
|
874
|
+
* // Delay = 2^failCount × baseRetryInterval (default: 1000ms)
|
|
875
|
+
* throw new Error(`Sync failed: ${error.message}`);
|
|
876
|
+
* }
|
|
877
|
+
* });
|
|
878
|
+
* ```
|
|
879
|
+
*/
|
|
880
|
+
worker(name, handler, options = {}) {
|
|
881
|
+
const concurrency = options.concurrency ?? this.options.defaultConcurrency;
|
|
882
|
+
if (this.workers.has(name) && options.replace !== true) throw new require_errors.WorkerRegistrationError(`Worker already registered for job name "${name}". Use { replace: true } to replace.`, name);
|
|
883
|
+
this.workers.set(name, {
|
|
884
|
+
handler,
|
|
885
|
+
concurrency,
|
|
886
|
+
activeJobs: /* @__PURE__ */ new Map()
|
|
887
|
+
});
|
|
888
|
+
}
|
|
889
|
+
/**
|
|
890
|
+
* Start polling for and processing jobs.
|
|
891
|
+
*
|
|
892
|
+
* Begins polling MongoDB at the configured interval (default: 1 second) to pick up
|
|
893
|
+
* pending jobs and dispatch them to registered workers. Must call `initialize()` first.
|
|
894
|
+
* Workers can be registered before or after calling `start()`.
|
|
895
|
+
*
|
|
896
|
+
* Jobs are processed concurrently up to each worker's configured concurrency limit.
|
|
897
|
+
* The scheduler continues running until `stop()` is called.
|
|
898
|
+
*
|
|
899
|
+
* @example Basic startup
|
|
900
|
+
* ```typescript
|
|
901
|
+
* const monque = new Monque(db);
|
|
902
|
+
* await monque.initialize();
|
|
903
|
+
*
|
|
904
|
+
* monque.worker('send-email', emailHandler);
|
|
905
|
+
* monque.worker('process-order', orderHandler);
|
|
906
|
+
*
|
|
907
|
+
* monque.start(); // Begin processing jobs
|
|
908
|
+
* ```
|
|
909
|
+
*
|
|
910
|
+
* @example With event monitoring
|
|
911
|
+
* ```typescript
|
|
912
|
+
* monque.on('job:start', (job) => {
|
|
913
|
+
* logger.info(`Starting job ${job.name}`);
|
|
914
|
+
* });
|
|
915
|
+
*
|
|
916
|
+
* monque.on('job:complete', ({ job, duration }) => {
|
|
917
|
+
* metrics.recordJobDuration(job.name, duration);
|
|
918
|
+
* });
|
|
919
|
+
*
|
|
920
|
+
* monque.on('job:fail', ({ job, error, willRetry }) => {
|
|
921
|
+
* logger.error(`Job ${job.name} failed:`, error);
|
|
922
|
+
* if (!willRetry) {
|
|
923
|
+
* alerting.sendAlert(`Job permanently failed: ${job.name}`);
|
|
924
|
+
* }
|
|
925
|
+
* });
|
|
926
|
+
*
|
|
927
|
+
* monque.start();
|
|
928
|
+
* ```
|
|
929
|
+
*
|
|
930
|
+
* @throws {ConnectionError} If scheduler not initialized (call `initialize()` first)
|
|
931
|
+
*/
|
|
932
|
+
start() {
|
|
933
|
+
if (this.isRunning) return;
|
|
934
|
+
if (!this.isInitialized) throw new require_errors.ConnectionError("Monque not initialized. Call initialize() before start().");
|
|
935
|
+
this.isRunning = true;
|
|
936
|
+
this.setupChangeStream();
|
|
937
|
+
this.pollIntervalId = setInterval(() => {
|
|
938
|
+
this.poll().catch((error) => {
|
|
939
|
+
this.emit("job:error", { error });
|
|
940
|
+
});
|
|
941
|
+
}, this.options.pollInterval);
|
|
942
|
+
this.heartbeatIntervalId = setInterval(() => {
|
|
943
|
+
this.updateHeartbeats().catch((error) => {
|
|
944
|
+
this.emit("job:error", { error });
|
|
945
|
+
});
|
|
946
|
+
}, this.options.heartbeatInterval);
|
|
947
|
+
if (this.options.jobRetention) {
|
|
948
|
+
const interval = this.options.jobRetention.interval ?? DEFAULTS.retentionInterval;
|
|
949
|
+
this.cleanupJobs().catch((error) => {
|
|
950
|
+
this.emit("job:error", { error });
|
|
951
|
+
});
|
|
952
|
+
this.cleanupIntervalId = setInterval(() => {
|
|
953
|
+
this.cleanupJobs().catch((error) => {
|
|
954
|
+
this.emit("job:error", { error });
|
|
955
|
+
});
|
|
956
|
+
}, interval);
|
|
957
|
+
}
|
|
958
|
+
this.poll().catch((error) => {
|
|
959
|
+
this.emit("job:error", { error });
|
|
960
|
+
});
|
|
961
|
+
}
|
|
962
|
+
/**
|
|
963
|
+
* Stop the scheduler gracefully, waiting for in-progress jobs to complete.
|
|
964
|
+
*
|
|
965
|
+
* Stops polling for new jobs and waits for all active jobs to finish processing.
|
|
966
|
+
* Times out after the configured `shutdownTimeout` (default: 30 seconds), emitting
|
|
967
|
+
* a `job:error` event with a `ShutdownTimeoutError` containing incomplete jobs.
|
|
968
|
+
* On timeout, jobs still in progress are left as `processing` for stale job recovery.
|
|
969
|
+
*
|
|
970
|
+
* It's safe to call `stop()` multiple times - subsequent calls are no-ops if already stopped.
|
|
971
|
+
*
|
|
972
|
+
* @returns Promise that resolves when all jobs complete or timeout is reached
|
|
973
|
+
*
|
|
974
|
+
* @example Graceful application shutdown
|
|
975
|
+
* ```typescript
|
|
976
|
+
* process.on('SIGTERM', async () => {
|
|
977
|
+
* console.log('Shutting down gracefully...');
|
|
978
|
+
* await monque.stop(); // Wait for jobs to complete
|
|
979
|
+
* await mongoClient.close();
|
|
980
|
+
* process.exit(0);
|
|
981
|
+
* });
|
|
982
|
+
* ```
|
|
983
|
+
*
|
|
984
|
+
* @example With timeout handling
|
|
985
|
+
* ```typescript
|
|
986
|
+
* monque.on('job:error', ({ error }) => {
|
|
987
|
+
* if (error.name === 'ShutdownTimeoutError') {
|
|
988
|
+
* logger.warn('Forced shutdown after timeout:', error.incompleteJobs);
|
|
989
|
+
* }
|
|
990
|
+
* });
|
|
991
|
+
*
|
|
992
|
+
* await monque.stop();
|
|
993
|
+
* ```
|
|
994
|
+
*/
|
|
995
|
+
async stop() {
|
|
996
|
+
if (!this.isRunning) return;
|
|
997
|
+
this.isRunning = false;
|
|
998
|
+
await this.closeChangeStream();
|
|
999
|
+
if (this.changeStreamDebounceTimer) {
|
|
1000
|
+
clearTimeout(this.changeStreamDebounceTimer);
|
|
1001
|
+
this.changeStreamDebounceTimer = null;
|
|
1002
|
+
}
|
|
1003
|
+
if (this.changeStreamReconnectTimer) {
|
|
1004
|
+
clearTimeout(this.changeStreamReconnectTimer);
|
|
1005
|
+
this.changeStreamReconnectTimer = null;
|
|
1006
|
+
}
|
|
1007
|
+
if (this.cleanupIntervalId) {
|
|
1008
|
+
clearInterval(this.cleanupIntervalId);
|
|
1009
|
+
this.cleanupIntervalId = null;
|
|
1010
|
+
}
|
|
1011
|
+
if (this.pollIntervalId) {
|
|
1012
|
+
clearInterval(this.pollIntervalId);
|
|
1013
|
+
this.pollIntervalId = null;
|
|
1014
|
+
}
|
|
1015
|
+
if (this.heartbeatIntervalId) {
|
|
1016
|
+
clearInterval(this.heartbeatIntervalId);
|
|
1017
|
+
this.heartbeatIntervalId = null;
|
|
1018
|
+
}
|
|
1019
|
+
if (this.getActiveJobs().length === 0) return;
|
|
1020
|
+
let checkInterval;
|
|
1021
|
+
const waitForJobs = new Promise((resolve) => {
|
|
1022
|
+
checkInterval = setInterval(() => {
|
|
1023
|
+
if (this.getActiveJobs().length === 0) {
|
|
1024
|
+
clearInterval(checkInterval);
|
|
1025
|
+
resolve(void 0);
|
|
1026
|
+
}
|
|
1027
|
+
}, 100);
|
|
1028
|
+
});
|
|
1029
|
+
const timeout = new Promise((resolve) => {
|
|
1030
|
+
setTimeout(() => resolve("timeout"), this.options.shutdownTimeout);
|
|
1031
|
+
});
|
|
1032
|
+
let result;
|
|
1033
|
+
try {
|
|
1034
|
+
result = await Promise.race([waitForJobs, timeout]);
|
|
1035
|
+
} finally {
|
|
1036
|
+
if (checkInterval) clearInterval(checkInterval);
|
|
1037
|
+
}
|
|
1038
|
+
if (result === "timeout") {
|
|
1039
|
+
const incompleteJobs = this.getActiveJobsList();
|
|
1040
|
+
const { ShutdownTimeoutError: ShutdownTimeoutError$1 } = await Promise.resolve().then(() => require("./errors-Ca92IlaL.cjs"));
|
|
1041
|
+
const error = new ShutdownTimeoutError$1(`Shutdown timed out after ${this.options.shutdownTimeout}ms with ${incompleteJobs.length} incomplete jobs`, incompleteJobs);
|
|
1042
|
+
this.emit("job:error", { error });
|
|
1043
|
+
}
|
|
1044
|
+
}
|
|
1045
|
+
/**
|
|
1046
|
+
* Check if the scheduler is healthy (running and connected).
|
|
1047
|
+
*
|
|
1048
|
+
* Returns `true` when the scheduler is started, initialized, and has an active
|
|
1049
|
+
* MongoDB collection reference. Useful for health check endpoints and monitoring.
|
|
1050
|
+
*
|
|
1051
|
+
* A healthy scheduler:
|
|
1052
|
+
* - Has called `initialize()` successfully
|
|
1053
|
+
* - Has called `start()` and is actively polling
|
|
1054
|
+
* - Has a valid MongoDB collection reference
|
|
1055
|
+
*
|
|
1056
|
+
* @returns `true` if scheduler is running and connected, `false` otherwise
|
|
1057
|
+
*
|
|
1058
|
+
* @example Express health check endpoint
|
|
1059
|
+
* ```typescript
|
|
1060
|
+
* app.get('/health', (req, res) => {
|
|
1061
|
+
* const healthy = monque.isHealthy();
|
|
1062
|
+
* res.status(healthy ? 200 : 503).json({
|
|
1063
|
+
* status: healthy ? 'ok' : 'unavailable',
|
|
1064
|
+
* scheduler: healthy,
|
|
1065
|
+
* timestamp: new Date().toISOString()
|
|
1066
|
+
* });
|
|
1067
|
+
* });
|
|
1068
|
+
* ```
|
|
1069
|
+
*
|
|
1070
|
+
* @example Kubernetes readiness probe
|
|
1071
|
+
* ```typescript
|
|
1072
|
+
* app.get('/readyz', (req, res) => {
|
|
1073
|
+
* if (monque.isHealthy() && dbConnected) {
|
|
1074
|
+
* res.status(200).send('ready');
|
|
1075
|
+
* } else {
|
|
1076
|
+
* res.status(503).send('not ready');
|
|
1077
|
+
* }
|
|
1078
|
+
* });
|
|
1079
|
+
* ```
|
|
1080
|
+
*
|
|
1081
|
+
* @example Periodic health monitoring
|
|
1082
|
+
* ```typescript
|
|
1083
|
+
* setInterval(() => {
|
|
1084
|
+
* if (!monque.isHealthy()) {
|
|
1085
|
+
* logger.error('Scheduler unhealthy');
|
|
1086
|
+
* metrics.increment('scheduler.unhealthy');
|
|
1087
|
+
* }
|
|
1088
|
+
* }, 60000); // Check every minute
|
|
1089
|
+
* ```
|
|
1090
|
+
*/
|
|
1091
|
+
isHealthy() {
|
|
1092
|
+
return this.isRunning && this.isInitialized && this.collection !== null;
|
|
1093
|
+
}
|
|
1094
|
+
/**
|
|
1095
|
+
* Query jobs from the queue with optional filters.
|
|
1096
|
+
*
|
|
1097
|
+
* Provides read-only access to job data for monitoring, debugging, and
|
|
1098
|
+
* administrative purposes. Results are ordered by `nextRunAt` ascending.
|
|
1099
|
+
*
|
|
1100
|
+
* @template T - The expected type of the job data payload
|
|
1101
|
+
* @param filter - Optional filter criteria
|
|
1102
|
+
* @returns Promise resolving to array of matching jobs
|
|
1103
|
+
* @throws {ConnectionError} If scheduler not initialized
|
|
1104
|
+
*
|
|
1105
|
+
* @example Get all pending jobs
|
|
1106
|
+
* ```typescript
|
|
1107
|
+
* const pendingJobs = await monque.getJobs({ status: JobStatus.PENDING });
|
|
1108
|
+
* console.log(`${pendingJobs.length} jobs waiting`);
|
|
1109
|
+
* ```
|
|
1110
|
+
*
|
|
1111
|
+
* @example Get failed email jobs
|
|
1112
|
+
* ```typescript
|
|
1113
|
+
* const failedEmails = await monque.getJobs({
|
|
1114
|
+
* name: 'send-email',
|
|
1115
|
+
* status: JobStatus.FAILED,
|
|
1116
|
+
* });
|
|
1117
|
+
* for (const job of failedEmails) {
|
|
1118
|
+
* console.error(`Job ${job._id} failed: ${job.failReason}`);
|
|
1119
|
+
* }
|
|
1120
|
+
* ```
|
|
1121
|
+
*
|
|
1122
|
+
* @example Paginated job listing
|
|
1123
|
+
* ```typescript
|
|
1124
|
+
* const page1 = await monque.getJobs({ limit: 50, skip: 0 });
|
|
1125
|
+
* const page2 = await monque.getJobs({ limit: 50, skip: 50 });
|
|
1126
|
+
* ```
|
|
1127
|
+
*
|
|
1128
|
+
* @example Use with type guards from @monque/core
|
|
1129
|
+
* ```typescript
|
|
1130
|
+
* import { isPendingJob, isRecurringJob } from '@monque/core';
|
|
1131
|
+
*
|
|
1132
|
+
* const jobs = await monque.getJobs();
|
|
1133
|
+
* const pendingRecurring = jobs.filter(job => isPendingJob(job) && isRecurringJob(job));
|
|
1134
|
+
* ```
|
|
1135
|
+
*/
|
|
1136
|
+
async getJobs(filter = {}) {
|
|
1137
|
+
this.ensureInitialized();
|
|
1138
|
+
if (!this.collection) throw new require_errors.ConnectionError("Failed to query jobs: collection not available");
|
|
1139
|
+
const query = {};
|
|
1140
|
+
if (filter.name !== void 0) query["name"] = filter.name;
|
|
1141
|
+
if (filter.status !== void 0) if (Array.isArray(filter.status)) query["status"] = { $in: filter.status };
|
|
1142
|
+
else query["status"] = filter.status;
|
|
1143
|
+
const limit = filter.limit ?? 100;
|
|
1144
|
+
const skip = filter.skip ?? 0;
|
|
1145
|
+
try {
|
|
1146
|
+
return (await this.collection.find(query).sort({ nextRunAt: 1 }).skip(skip).limit(limit).toArray()).map((doc) => this.documentToPersistedJob(doc));
|
|
1147
|
+
} catch (error) {
|
|
1148
|
+
throw new require_errors.ConnectionError(`Failed to query jobs: ${error instanceof Error ? error.message : "Unknown error during getJobs"}`, error instanceof Error ? { cause: error } : void 0);
|
|
1149
|
+
}
|
|
1150
|
+
}
|
|
1151
|
+
/**
|
|
1152
|
+
* Get a single job by its MongoDB ObjectId.
|
|
1153
|
+
*
|
|
1154
|
+
* Useful for retrieving job details when you have a job ID from events,
|
|
1155
|
+
* logs, or stored references.
|
|
1156
|
+
*
|
|
1157
|
+
* @template T - The expected type of the job data payload
|
|
1158
|
+
* @param id - The job's ObjectId
|
|
1159
|
+
* @returns Promise resolving to the job if found, null otherwise
|
|
1160
|
+
* @throws {ConnectionError} If scheduler not initialized
|
|
1161
|
+
*
|
|
1162
|
+
* @example Look up job from event
|
|
1163
|
+
* ```typescript
|
|
1164
|
+
* monque.on('job:fail', async ({ job }) => {
|
|
1165
|
+
* // Later, retrieve the job to check its status
|
|
1166
|
+
* const currentJob = await monque.getJob(job._id);
|
|
1167
|
+
* console.log(`Job status: ${currentJob?.status}`);
|
|
1168
|
+
* });
|
|
1169
|
+
* ```
|
|
1170
|
+
*
|
|
1171
|
+
* @example Admin endpoint
|
|
1172
|
+
* ```typescript
|
|
1173
|
+
* app.get('/jobs/:id', async (req, res) => {
|
|
1174
|
+
* const job = await monque.getJob(new ObjectId(req.params.id));
|
|
1175
|
+
* if (!job) {
|
|
1176
|
+
* return res.status(404).json({ error: 'Job not found' });
|
|
1177
|
+
* }
|
|
1178
|
+
* res.json(job);
|
|
1179
|
+
* });
|
|
1180
|
+
* ```
|
|
1181
|
+
*/
|
|
1182
|
+
async getJob(id) {
|
|
1183
|
+
this.ensureInitialized();
|
|
1184
|
+
if (!this.collection) throw new require_errors.ConnectionError("Failed to get job: collection not available");
|
|
1185
|
+
try {
|
|
1186
|
+
const doc = await this.collection.findOne({ _id: id });
|
|
1187
|
+
if (!doc) return null;
|
|
1188
|
+
return this.documentToPersistedJob(doc);
|
|
1189
|
+
} catch (error) {
|
|
1190
|
+
throw new require_errors.ConnectionError(`Failed to get job: ${error instanceof Error ? error.message : "Unknown error during getJob"}`, error instanceof Error ? { cause: error } : void 0);
|
|
1191
|
+
}
|
|
1192
|
+
}
|
|
1193
|
+
/**
|
|
1194
|
+
* Poll for available jobs and process them.
|
|
1195
|
+
*
|
|
1196
|
+
* Called at regular intervals (configured by `pollInterval`). For each registered worker,
|
|
1197
|
+
* attempts to acquire jobs up to the worker's available concurrency slots.
|
|
1198
|
+
*
|
|
1199
|
+
* @private
|
|
1200
|
+
*/
|
|
1201
|
+
async poll() {
|
|
1202
|
+
if (!this.isRunning || !this.collection) return;
|
|
1203
|
+
for (const [name, worker] of this.workers) {
|
|
1204
|
+
const availableSlots = worker.concurrency - worker.activeJobs.size;
|
|
1205
|
+
if (availableSlots <= 0) continue;
|
|
1206
|
+
for (let i = 0; i < availableSlots; i++) {
|
|
1207
|
+
const job = await this.acquireJob(name);
|
|
1208
|
+
if (job) this.processJob(job, worker).catch((error) => {
|
|
1209
|
+
this.emit("job:error", {
|
|
1210
|
+
error,
|
|
1211
|
+
job
|
|
1212
|
+
});
|
|
1213
|
+
});
|
|
1214
|
+
else break;
|
|
1215
|
+
}
|
|
1216
|
+
}
|
|
1217
|
+
}
|
|
1218
|
+
/**
|
|
1219
|
+
* Atomically acquire a pending job for processing using the claimedBy pattern.
|
|
1220
|
+
*
|
|
1221
|
+
* Uses MongoDB's `findOneAndUpdate` with atomic operations to ensure only one scheduler
|
|
1222
|
+
* instance can claim a job. The query ensures the job is:
|
|
1223
|
+
* - In pending status
|
|
1224
|
+
* - Has nextRunAt <= now
|
|
1225
|
+
* - Is not claimed by another instance (claimedBy is null/undefined)
|
|
1226
|
+
*
|
|
1227
|
+
* @private
|
|
1228
|
+
* @param name - The job type to acquire
|
|
1229
|
+
* @returns The acquired job with updated status, claimedBy, and heartbeat info, or `null` if no jobs available
|
|
1230
|
+
*/
|
|
1231
|
+
async acquireJob(name) {
|
|
1232
|
+
if (!this.collection) return null;
|
|
1233
|
+
const now = /* @__PURE__ */ new Date();
|
|
1234
|
+
const result = await this.collection.findOneAndUpdate({
|
|
1235
|
+
name,
|
|
1236
|
+
status: JobStatus.PENDING,
|
|
1237
|
+
nextRunAt: { $lte: now },
|
|
1238
|
+
$or: [{ claimedBy: null }, { claimedBy: { $exists: false } }]
|
|
1239
|
+
}, { $set: {
|
|
1240
|
+
status: JobStatus.PROCESSING,
|
|
1241
|
+
claimedBy: this.options.schedulerInstanceId,
|
|
1242
|
+
lockedAt: now,
|
|
1243
|
+
lastHeartbeat: now,
|
|
1244
|
+
heartbeatInterval: this.options.heartbeatInterval,
|
|
1245
|
+
updatedAt: now
|
|
1246
|
+
} }, {
|
|
1247
|
+
sort: { nextRunAt: 1 },
|
|
1248
|
+
returnDocument: "after"
|
|
1249
|
+
});
|
|
1250
|
+
if (!result) return null;
|
|
1251
|
+
return this.documentToPersistedJob(result);
|
|
1252
|
+
}
|
|
1253
|
+
/**
 * Execute a job using its registered worker handler.
 *
 * Tracks the job as active during processing, emits lifecycle events, and handles
 * both success and failure cases. On success, calls `completeJob()`. On failure,
 * calls `failJob()` which implements exponential backoff retry logic.
 *
 * @private
 * @param job - The job to process
 * @param worker - The worker registration containing the handler and active job tracking
 */
async processJob(job, worker) {
  const jobId = job._id.toString();
  worker.activeJobs.set(jobId, job);
  const startTime = Date.now();
  this.emit("job:start", job);
  try {
    await worker.handler(job);
    const duration = Date.now() - startTime;
    await this.completeJob(job);
    this.emit("job:complete", { job, duration });
  } catch (error) {
    const err = error instanceof Error ? error : new Error(String(error));
    await this.failJob(job, err);
    const willRetry = job.failCount + 1 < this.options.maxRetries;
    this.emit("job:fail", { job, error: err, willRetry });
  } finally {
    worker.activeJobs.delete(jobId);
  }
}
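The `emit()` calls above define the lifecycle events a consumer can observe. A minimal sketch of subscribing to them; the `monque` instance is assumed to be already constructed and initialized, and the payload shapes simply mirror the emits above:

```typescript
import { Monque } from "@monque/core";

// Assumed: a configured Monque instance (construction options omitted here).
declare const monque: Monque;

monque.on("job:start", (job) => {
  console.log(`started ${job.name}`);
});
monque.on("job:complete", ({ job, duration }) => {
  console.log(`completed ${job.name} in ${duration}ms`);
});
monque.on("job:fail", ({ job, error, willRetry }) => {
  console.warn(`job ${job.name} failed (${error.message}); will retry: ${willRetry}`);
});
```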
/**
 * Mark a job as completed successfully.
 *
 * For recurring jobs (with `repeatInterval`), schedules the next run based on the cron
 * expression and resets `failCount` to 0. For one-time jobs, sets status to `completed`.
 * Clears `lockedAt` and `failReason` fields in both cases.
 *
 * @private
 * @param job - The job that completed successfully
 */
async completeJob(job) {
  if (!this.collection || !isPersistedJob(job)) return;
  if (job.repeatInterval) {
    const nextRunAt = getNextCronDate(job.repeatInterval);
    await this.collection.updateOne({ _id: job._id }, {
      $set: {
        status: JobStatus.PENDING,
        nextRunAt,
        failCount: 0,
        updatedAt: /* @__PURE__ */ new Date()
      },
      $unset: {
        lockedAt: "",
        claimedBy: "",
        lastHeartbeat: "",
        heartbeatInterval: "",
        failReason: ""
      }
    });
  } else {
    await this.collection.updateOne({ _id: job._id }, {
      $set: {
        status: JobStatus.COMPLETED,
        updatedAt: /* @__PURE__ */ new Date()
      },
      $unset: {
        lockedAt: "",
        claimedBy: "",
        lastHeartbeat: "",
        heartbeatInterval: "",
        failReason: ""
      }
    });
    job.status = JobStatus.COMPLETED;
  }
}
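Recurring jobs are re-queued rather than completed, with `nextRunAt` derived from the cron expression. A sketch using the exported `getNextCronDate` helper, assuming (as the single-argument call above suggests) that it accepts a cron string and returns the next run `Date`:

```typescript
import { getNextCronDate } from "@monque/core";

// "0 3 * * *" = every day at 03:00 — the same kind of expression stored in a
// recurring job's repeatInterval field.
const nextRunAt: Date = getNextCronDate("0 3 * * *");
console.log(`next run scheduled for ${nextRunAt.toISOString()}`);
```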
/**
 * Handle job failure with exponential backoff retry logic.
 *
 * Increments `failCount` and calculates the next retry time using exponential backoff:
 * the retry delay is `2^failCount × baseRetryInterval` (capped by the optional
 * `maxBackoffDelay`), and `nextRunAt` is pushed that far into the future.
 *
 * If `failCount >= maxRetries`, marks the job as permanently `failed`. Otherwise, resets
 * it to `pending` status for retry. Stores the error message in the `failReason` field.
 *
 * @private
 * @param job - The job that failed
 * @param error - The error that caused the failure
 */
async failJob(job, error) {
  if (!this.collection || !isPersistedJob(job)) return;
  const newFailCount = job.failCount + 1;
  if (newFailCount >= this.options.maxRetries) {
    await this.collection.updateOne({ _id: job._id }, {
      $set: {
        status: JobStatus.FAILED,
        failCount: newFailCount,
        failReason: error.message,
        updatedAt: /* @__PURE__ */ new Date()
      },
      $unset: {
        lockedAt: "",
        claimedBy: "",
        lastHeartbeat: "",
        heartbeatInterval: ""
      }
    });
  } else {
    const nextRunAt = calculateBackoff(newFailCount, this.options.baseRetryInterval, this.options.maxBackoffDelay);
    await this.collection.updateOne({ _id: job._id }, {
      $set: {
        status: JobStatus.PENDING,
        failCount: newFailCount,
        failReason: error.message,
        nextRunAt,
        updatedAt: /* @__PURE__ */ new Date()
      },
      $unset: {
        lockedAt: "",
        claimedBy: "",
        lastHeartbeat: "",
        heartbeatInterval: ""
      }
    });
  }
}
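The retry schedule this produces can be reasoned about directly from the documented formula. The helper below only illustrates that formula; the exported `calculateBackoff` above is the authoritative implementation (and, as used above, returns the actual `nextRunAt` date rather than a raw delay):

```typescript
// Illustration of the documented backoff formula:
// delay = 2^failCount × baseRetryInterval, capped at maxBackoffDelay.
function retryDelayMs(failCount: number, baseRetryInterval: number, maxBackoffDelay: number): number {
  return Math.min(2 ** failCount * baseRetryInterval, maxBackoffDelay);
}

// With a 1s base and a 5-minute cap, the first few retries are spaced:
for (let attempt = 1; attempt <= 5; attempt++) {
  console.log(`attempt ${attempt}: ${retryDelayMs(attempt, 1000, 300_000)}ms`);
}
// → 2000ms, 4000ms, 8000ms, 16000ms, 32000ms
```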
/**
 * Ensure the scheduler is initialized before operations.
 *
 * @private
 * @throws {ConnectionError} If scheduler not initialized or collection unavailable
 */
ensureInitialized() {
  if (!this.isInitialized || !this.collection) throw new require_errors.ConnectionError("Monque not initialized. Call initialize() first.");
}
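Any operation that runs this guard surfaces a `ConnectionError` when called before `initialize()`. The sketch below assumes `enqueue()` is one such operation; that assumption is not visible in this file.

```typescript
import { Monque, ConnectionError } from "@monque/core";

// Assumed: an instance that has been constructed but whose initialize()
// has not yet been awaited.
declare const monque: Monque;

async function tryEnqueue(): Promise<void> {
  try {
    await monque.enqueue("generate-report", { reportId: 42 });
  } catch (err) {
    if (err instanceof ConnectionError) {
      console.error("Call initialize() before enqueueing jobs:", err.message);
    } else {
      throw err;
    }
  }
}
```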
/**
 * Update heartbeats for all jobs claimed by this scheduler instance.
 *
 * This method runs periodically while the scheduler is running to indicate
 * that jobs are still being actively processed.
 *
 * `lastHeartbeat` is primarily an observability signal (monitoring/debugging).
 * Stale recovery is based on `lockedAt` + `lockTimeout`.
 *
 * @private
 */
async updateHeartbeats() {
  if (!this.collection || !this.isRunning) return;
  const now = /* @__PURE__ */ new Date();
  await this.collection.updateMany({
    claimedBy: this.options.schedulerInstanceId,
    status: JobStatus.PROCESSING
  }, { $set: {
    lastHeartbeat: now,
    updatedAt: now
  } });
}
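Because claimed jobs carry `claimedBy`, `lockedAt`, and `lastHeartbeat`, stuck workers are easy to spot from outside the library. An ad-hoc monitoring query sketch (not part of the package; the database and collection names are placeholders):

```typescript
import { MongoClient } from "mongodb";

// Ad-hoc ops/debugging query: find "processing" jobs whose heartbeat is older
// than a threshold. Field names mirror the code above.
async function findStaleJobs(client: MongoClient, thresholdMs = 60_000) {
  const cutoff = new Date(Date.now() - thresholdMs);
  return client
    .db("app")
    .collection("jobs")
    .find({ status: "processing", lastHeartbeat: { $lt: cutoff } })
    .toArray();
}
```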
/**
 * Set up MongoDB Change Stream for real-time job notifications.
 *
 * Change streams provide instant notifications when jobs are inserted or when
 * job status changes to pending (e.g., after a retry). This eliminates the
 * polling delay for reactive job processing.
 *
 * The change stream watches for:
 * - Insert operations (new jobs)
 * - Update operations where status field changes
 *
 * If change streams are unavailable (e.g., standalone MongoDB), the system
 * gracefully falls back to polling-only mode.
 *
 * @private
 */
setupChangeStream() {
  if (!this.collection || !this.isRunning) return;
  try {
    this.changeStream = this.collection.watch([{ $match: { $or: [{ operationType: "insert" }, {
      operationType: "update",
      "updateDescription.updatedFields.status": { $exists: true }
    }] } }], { fullDocument: "updateLookup" });
    this.changeStream.on("change", (change) => {
      this.handleChangeStreamEvent(change);
    });
    this.changeStream.on("error", (error) => {
      this.emit("changestream:error", { error });
      this.handleChangeStreamError(error);
    });
    this.usingChangeStreams = true;
    this.changeStreamReconnectAttempts = 0;
    this.emit("changestream:connected", void 0);
  } catch (error) {
    this.usingChangeStreams = false;
    const reason = error instanceof Error ? error.message : "Unknown error";
    this.emit("changestream:fallback", { reason });
  }
}
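The change-stream lifecycle is also surfaced as events, which is the simplest way to confirm whether a deployment is running in reactive mode or has fallen back to polling. A sketch of subscribing to them (instance construction omitted; payloads mirror the emits above):

```typescript
import { Monque } from "@monque/core";

// Assumed: a configured Monque instance.
declare const monque: Monque;

monque.on("changestream:connected", () => {
  console.log("reactive mode: change stream connected");
});
monque.on("changestream:fallback", ({ reason }) => {
  console.warn(`polling-only mode: ${reason}`);
});
monque.on("changestream:error", ({ error }) => {
  console.error("change stream error:", error);
});
```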
/**
 * Handle a change stream event by triggering a debounced poll.
 *
 * Events are debounced to prevent "claim storms" when multiple changes arrive
 * in rapid succession (e.g., bulk job inserts). A 100ms debounce window
 * collects multiple events and triggers a single poll.
 *
 * @private
 * @param change - The change stream event document
 */
handleChangeStreamEvent(change) {
  if (!this.isRunning) return;
  const isInsert = change.operationType === "insert";
  const isUpdate = change.operationType === "update";
  const isPendingStatus = ("fullDocument" in change ? change.fullDocument : void 0)?.["status"] === JobStatus.PENDING;
  if (isInsert || isUpdate && isPendingStatus) {
    if (this.changeStreamDebounceTimer) clearTimeout(this.changeStreamDebounceTimer);
    this.changeStreamDebounceTimer = setTimeout(() => {
      this.changeStreamDebounceTimer = null;
      this.poll().catch((error) => {
        this.emit("job:error", { error });
      });
    }, 100);
  }
}
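The debounce here is the standard trailing-edge pattern: every event inside the window resets the timer, and only one poll fires once the burst settles. A generic, self-contained illustration of the same idea:

```typescript
// Trailing-edge debounce: bursts of calls within the window collapse into a
// single call after the last one.
function debounce(fn: () => void, windowMs: number): () => void {
  let timer: ReturnType<typeof setTimeout> | null = null;
  return () => {
    if (timer) clearTimeout(timer);
    timer = setTimeout(() => {
      timer = null;
      fn();
    }, windowMs);
  };
}

const schedulePoll = debounce(() => console.log("poll once for the whole burst"), 100);
schedulePoll();
schedulePoll();
schedulePoll(); // → one "poll once..." roughly 100ms after the last call
```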
/**
 * Handle change stream errors with exponential backoff reconnection.
 *
 * Attempts to reconnect up to `maxChangeStreamReconnectAttempts` times with
 * exponential backoff (base 1000ms). After exhausting retries, falls back to
 * polling-only mode.
 *
 * @private
 * @param error - The error that caused the change stream failure
 */
handleChangeStreamError(error) {
  if (!this.isRunning) return;
  this.changeStreamReconnectAttempts++;
  if (this.changeStreamReconnectAttempts > this.maxChangeStreamReconnectAttempts) {
    this.usingChangeStreams = false;
    this.emit("changestream:fallback", { reason: `Exhausted ${this.maxChangeStreamReconnectAttempts} reconnection attempts: ${error.message}` });
    return;
  }
  const delay = 2 ** (this.changeStreamReconnectAttempts - 1) * 1e3;
  if (this.changeStreamReconnectTimer) clearTimeout(this.changeStreamReconnectTimer);
  this.changeStreamReconnectTimer = setTimeout(() => {
    this.changeStreamReconnectTimer = null;
    if (this.isRunning) {
      if (this.changeStream) {
        this.changeStream.close().catch(() => {});
        this.changeStream = null;
      }
      this.setupChangeStream();
    }
  }, delay);
}
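For reference, the `2 ** (attempts - 1) * 1e3` expression above yields the reconnect delays shown in this short worked example:

```typescript
// Reconnect delay per attempt, as computed above.
const delays = [1, 2, 3, 4, 5].map((attempt) => 2 ** (attempt - 1) * 1000);
console.log(delays); // [1000, 2000, 4000, 8000, 16000]
```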
/**
 * Close the change stream cursor and emit closed event.
 *
 * @private
 */
async closeChangeStream() {
  if (this.changeStream) {
    try {
      await this.changeStream.close();
    } catch {}
    this.changeStream = null;
    if (this.usingChangeStreams) this.emit("changestream:closed", void 0);
  }
  this.usingChangeStreams = false;
  this.changeStreamReconnectAttempts = 0;
}
/**
 * Get array of active job IDs across all workers.
 *
 * @private
 * @returns Array of job ID strings currently being processed
 */
getActiveJobs() {
  const activeJobs = [];
  for (const worker of this.workers.values()) activeJobs.push(...worker.activeJobs.keys());
  return activeJobs;
}
/**
 * Get list of active job documents (for shutdown timeout error).
 *
 * @private
 * @returns Array of active Job objects
 */
getActiveJobsList() {
  const activeJobs = [];
  for (const worker of this.workers.values()) activeJobs.push(...worker.activeJobs.values());
  return activeJobs;
}
/**
 * Convert a MongoDB document to a typed PersistedJob object.
 *
 * Maps raw MongoDB document fields to the strongly-typed `PersistedJob<T>` interface,
 * ensuring type safety and handling optional fields (`lockedAt`, `failReason`, etc.).
 *
 * @private
 * @template T - The job data payload type
 * @param doc - The raw MongoDB document with `_id`
 * @returns A strongly-typed PersistedJob object with guaranteed `_id`
 */
documentToPersistedJob(doc) {
  const job = {
    _id: doc._id,
    name: doc["name"],
    data: doc["data"],
    status: doc["status"],
    nextRunAt: doc["nextRunAt"],
    failCount: doc["failCount"],
    createdAt: doc["createdAt"],
    updatedAt: doc["updatedAt"]
  };
  if (doc["lockedAt"] !== void 0) job.lockedAt = doc["lockedAt"];
  if (doc["claimedBy"] !== void 0) job.claimedBy = doc["claimedBy"];
  if (doc["lastHeartbeat"] !== void 0) job.lastHeartbeat = doc["lastHeartbeat"];
  if (doc["heartbeatInterval"] !== void 0) job.heartbeatInterval = doc["heartbeatInterval"];
  if (doc["failReason"] !== void 0) job.failReason = doc["failReason"];
  if (doc["repeatInterval"] !== void 0) job.repeatInterval = doc["repeatInterval"];
  if (doc["uniqueKey"] !== void 0) job.uniqueKey = doc["uniqueKey"];
  return job;
}
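The mapping above implies a stored document shape along the following lines. This is an illustrative sketch only; the published `.d.cts`/`.d.mts` typings are the authoritative definition of `PersistedJob<T>`.

```typescript
import type { ObjectId } from "mongodb";

// Sketch of the document shape handled by documentToPersistedJob above.
// Field names simply mirror the mapping code; the real types live in the
// package's declaration files.
interface PersistedJobSketch<T> {
  _id: ObjectId;
  name: string;
  data: T;
  status: "pending" | "processing" | "completed" | "failed";
  nextRunAt: Date;
  failCount: number;
  createdAt: Date;
  updatedAt: Date;
  // Optional fields, only present when set:
  lockedAt?: Date;
  claimedBy?: string;
  lastHeartbeat?: Date;
  heartbeatInterval?: number;
  failReason?: string;
  repeatInterval?: string;
  uniqueKey?: string;
}
```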
/**
 * Type-safe event emitter methods
 */
emit(event, payload) {
  return super.emit(event, payload);
}
on(event, listener) {
  return super.on(event, listener);
}
once(event, listener) {
  return super.once(event, listener);
}
off(event, listener) {
  return super.off(event, listener);
}
};

//#endregion
exports.ConnectionError = require_errors.ConnectionError;
exports.DEFAULT_BASE_INTERVAL = DEFAULT_BASE_INTERVAL;
exports.DEFAULT_MAX_BACKOFF_DELAY = DEFAULT_MAX_BACKOFF_DELAY;
exports.InvalidCronError = require_errors.InvalidCronError;
exports.JobStatus = JobStatus;
exports.Monque = Monque;
exports.MonqueError = require_errors.MonqueError;
exports.ShutdownTimeoutError = require_errors.ShutdownTimeoutError;
exports.WorkerRegistrationError = require_errors.WorkerRegistrationError;
exports.calculateBackoff = calculateBackoff;
exports.calculateBackoffDelay = calculateBackoffDelay;
exports.getNextCronDate = getNextCronDate;
exports.isCompletedJob = isCompletedJob;
exports.isFailedJob = isFailedJob;
exports.isPendingJob = isPendingJob;
exports.isPersistedJob = isPersistedJob;
exports.isProcessingJob = isProcessingJob;
exports.isRecurringJob = isRecurringJob;
exports.isValidJobStatus = isValidJobStatus;
exports.validateCronExpression = validateCronExpression;
//# sourceMappingURL=index.cjs.map
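Everything wired up in this exports block is reachable from the package root; the ESM build in `dist/index.mjs` mirrors the same names. A minimal consumption sketch:

```typescript
// Named exports mirror the exports list above; `require("@monque/core")`
// resolves to this CJS file, `import` to dist/index.mjs.
import { Monque, JobStatus, ConnectionError, isPersistedJob } from "@monque/core";

console.log(JobStatus.PENDING); // "pending"
console.log(typeof Monque, typeof ConnectionError, typeof isPersistedJob); // "function" "function" "function"
```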