@monque/core 0.3.0 → 1.1.0
- package/README.md +20 -3
- package/dist/index.cjs +2209 -942
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +532 -267
- package/dist/index.d.cts.map +1 -1
- package/dist/index.d.mts +533 -268
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +2212 -950
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -2
- package/dist/errors-D5ZGG2uI.cjs +0 -155
- package/dist/errors-D5ZGG2uI.cjs.map +0 -1
- package/dist/errors-DEvnqoOC.mjs +0 -3
- package/dist/errors-DQ2_gprw.mjs +0 -125
- package/dist/errors-DQ2_gprw.mjs.map +0 -1
- package/dist/errors-Dfli-u59.cjs +0 -3
package/dist/index.cjs
CHANGED
@@ -1,7 +1,7 @@
-
+let mongodb = require("mongodb");
+let cron_parser = require("cron-parser");
 let node_crypto = require("node:crypto");
 let node_events = require("node:events");
-let cron_parser = require("cron-parser");
 
 //#region src/jobs/types.ts
 /**
@@ -12,6 +12,7 @@ let cron_parser = require("cron-parser");
 * - PROCESSING → COMPLETED (on success)
 * - PROCESSING → PENDING (on failure, if retries remain)
 * - PROCESSING → FAILED (on failure, after max retries exhausted)
+* - PENDING → CANCELLED (on manual cancellation)
 *
 * @example
 * ```typescript
@@ -24,7 +25,20 @@ const JobStatus = {
 PENDING: "pending",
 PROCESSING: "processing",
 COMPLETED: "completed",
-FAILED: "failed"
+FAILED: "failed",
+CANCELLED: "cancelled"
+};
+/**
+* Valid cursor directions for pagination.
+*
+* @example
+* ```typescript
+* const direction = CursorDirection.FORWARD;
+* ```
+*/
+const CursorDirection = {
+FORWARD: "forward",
+BACKWARD: "backward"
 };
 
 //#endregion
@@ -68,8 +82,8 @@ function isPersistedJob(job) {
 /**
 * Type guard to check if a value is a valid job status.
 *
-* Validates that a value is one of the
-* `'processing'`, `'completed'`, or `'
+* Validates that a value is one of the five valid job statuses: `'pending'`,
+* `'processing'`, `'completed'`, `'failed'`, or `'cancelled'`. Useful for runtime validation
 * of user input or external data.
 *
 * @param value - The value to check
@@ -192,6 +206,26 @@ function isFailedJob(job) {
 return job.status === JobStatus.FAILED;
 }
 /**
+* Type guard to check if a job has been manually cancelled.
+*
+* A convenience helper for checking if a job was cancelled by an operator.
+* Equivalent to `job.status === JobStatus.CANCELLED` but with better semantics.
+*
+* @template T - The type of the job's data payload
+* @param job - The job to check
+* @returns `true` if the job status is `'cancelled'`
+*
+* @example Filter cancelled jobs
+* ```typescript
+* const jobs = await monque.getJobs();
+* const cancelledJobs = jobs.filter(isCancelledJob);
+* console.log(`${cancelledJobs.length} jobs were cancelled`);
+* ```
+*/
+function isCancelledJob(job) {
+return job.status === JobStatus.CANCELLED;
+}
+/**
 * Type guard to check if a job is a recurring scheduled job.
 *
 * A recurring job has a `repeatInterval` cron expression and will be automatically
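The hunks above add a `cancelled` terminal status and an `isCancelledJob` type guard. A minimal usage sketch, assuming these symbols are exported from `@monque/core` as the bundled JSDoc examples suggest, and that `monque` is an already-initialized scheduler instance:

```typescript
// Sketch only: assumes isCancelledJob, isFailedJob, JobStatus, and Monque are
// exported from @monque/core, as the package's own JSDoc examples imply.
import { isCancelledJob, isFailedJob, JobStatus, Monque } from '@monque/core';

async function reportTerminalJobs(monque: Monque): Promise<void> {
  // getJobs accepts an array of statuses (translated to a $in query in the bundle).
  const jobs = await monque.getJobs({
    status: [JobStatus.FAILED, JobStatus.CANCELLED],
  });
  const cancelled = jobs.filter(isCancelledJob); // status === 'cancelled'
  const failed = jobs.filter(isFailedJob);       // status === 'failed'
  console.log(`${cancelled.length} cancelled, ${failed.length} failed`);
}
```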
@@ -220,6 +254,198 @@ function isRecurringJob(job) {
 return job.repeatInterval !== void 0 && job.repeatInterval !== null;
 }
 
+//#endregion
+//#region src/shared/errors.ts
+/**
+* Base error class for all Monque-related errors.
+*
+* @example
+* ```typescript
+* try {
+* await monque.enqueue('job', data);
+* } catch (error) {
+* if (error instanceof MonqueError) {
+* console.error('Monque error:', error.message);
+* }
+* }
+* ```
+*/
+var MonqueError = class MonqueError extends Error {
+constructor(message) {
+super(message);
+this.name = "MonqueError";
+/* istanbul ignore next -- @preserve captureStackTrace is always available in Node.js */
+if (Error.captureStackTrace) Error.captureStackTrace(this, MonqueError);
+}
+};
+/**
+* Error thrown when an invalid cron expression is provided.
+*
+* @example
+* ```typescript
+* try {
+* await monque.schedule('invalid cron', 'job', data);
+* } catch (error) {
+* if (error instanceof InvalidCronError) {
+* console.error('Invalid expression:', error.expression);
+* }
+* }
+* ```
+*/
+var InvalidCronError = class InvalidCronError extends MonqueError {
+constructor(expression, message) {
+super(message);
+this.expression = expression;
+this.name = "InvalidCronError";
+/* istanbul ignore next -- @preserve captureStackTrace is always available in Node.js */
+if (Error.captureStackTrace) Error.captureStackTrace(this, InvalidCronError);
+}
+};
+/**
+* Error thrown when there's a database connection issue.
+*
+* @example
+* ```typescript
+* try {
+* await monque.enqueue('job', data);
+* } catch (error) {
+* if (error instanceof ConnectionError) {
+* console.error('Database connection lost');
+* }
+* }
+* ```
+*/
+var ConnectionError = class ConnectionError extends MonqueError {
+constructor(message, options) {
+super(message);
+this.name = "ConnectionError";
+if (options?.cause) this.cause = options.cause;
+/* istanbul ignore next -- @preserve captureStackTrace is always available in Node.js */
+if (Error.captureStackTrace) Error.captureStackTrace(this, ConnectionError);
+}
+};
+/**
+* Error thrown when graceful shutdown times out.
+* Includes information about jobs that were still in progress.
+*
+* @example
+* ```typescript
+* try {
+* await monque.stop();
+* } catch (error) {
+* if (error instanceof ShutdownTimeoutError) {
+* console.error('Incomplete jobs:', error.incompleteJobs.length);
+* }
+* }
+* ```
+*/
+var ShutdownTimeoutError = class ShutdownTimeoutError extends MonqueError {
+constructor(message, incompleteJobs) {
+super(message);
+this.incompleteJobs = incompleteJobs;
+this.name = "ShutdownTimeoutError";
+/* istanbul ignore next -- @preserve captureStackTrace is always available in Node.js */
+if (Error.captureStackTrace) Error.captureStackTrace(this, ShutdownTimeoutError);
+}
+};
+/**
+* Error thrown when attempting to register a worker for a job name
+* that already has a registered worker, without explicitly allowing replacement.
+*
+* @example
+* ```typescript
+* try {
+* monque.register('send-email', handler1);
+* monque.register('send-email', handler2); // throws
+* } catch (error) {
+* if (error instanceof WorkerRegistrationError) {
+* console.error('Worker already registered for:', error.jobName);
+* }
+* }
+*
+* // To intentionally replace a worker:
+* monque.register('send-email', handler2, { replace: true });
+* ```
+*/
+var WorkerRegistrationError = class WorkerRegistrationError extends MonqueError {
+constructor(message, jobName) {
+super(message);
+this.jobName = jobName;
+this.name = "WorkerRegistrationError";
+/* istanbul ignore next -- @preserve captureStackTrace is always available in Node.js */
+if (Error.captureStackTrace) Error.captureStackTrace(this, WorkerRegistrationError);
+}
+};
+/**
+* Error thrown when a state transition is invalid.
+*
+* @example
+* ```typescript
+* try {
+* await monque.cancelJob(jobId);
+* } catch (error) {
+* if (error instanceof JobStateError) {
+* console.error(`Cannot cancel job in state: ${error.currentStatus}`);
+* }
+* }
+* ```
+*/
+var JobStateError = class JobStateError extends MonqueError {
+constructor(message, jobId, currentStatus, attemptedAction) {
+super(message);
+this.jobId = jobId;
+this.currentStatus = currentStatus;
+this.attemptedAction = attemptedAction;
+this.name = "JobStateError";
+/* istanbul ignore next -- @preserve captureStackTrace is always available in Node.js */
+if (Error.captureStackTrace) Error.captureStackTrace(this, JobStateError);
+}
+};
+/**
+* Error thrown when a pagination cursor is invalid or malformed.
+*
+* @example
+* ```typescript
+* try {
+* await monque.listJobs({ cursor: 'invalid-cursor' });
+* } catch (error) {
+* if (error instanceof InvalidCursorError) {
+* console.error('Invalid cursor provided');
+* }
+* }
+* ```
+*/
+var InvalidCursorError = class InvalidCursorError extends MonqueError {
+constructor(message) {
+super(message);
+this.name = "InvalidCursorError";
+/* istanbul ignore next -- @preserve captureStackTrace is always available in Node.js */
+if (Error.captureStackTrace) Error.captureStackTrace(this, InvalidCursorError);
+}
+};
+/**
+* Error thrown when a statistics aggregation times out.
+*
+* @example
+* ```typescript
+* try {
+* const stats = await monque.getQueueStats();
+* } catch (error) {
+* if (error instanceof AggregationTimeoutError) {
+* console.error('Stats took too long to calculate');
+* }
+* }
+* ```
+*/
+var AggregationTimeoutError = class AggregationTimeoutError extends MonqueError {
+constructor(message = "Statistics aggregation exceeded 30 second timeout") {
+super(message);
+this.name = "AggregationTimeoutError";
+/* istanbul ignore next -- @preserve captureStackTrace is always available in Node.js */
+if (Error.captureStackTrace) Error.captureStackTrace(this, AggregationTimeoutError);
+}
+};
+
 //#endregion
 //#region src/shared/utils/backoff.ts
 /**
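The hunk above inlines the former `errors-*` chunk files into `index.cjs` (those chunks are removed in the file list at the top). A minimal sketch of consuming these error classes from application code, assuming the error classes and the `Monque` class are exported from `@monque/core` as the bundled JSDoc examples suggest:

```typescript
// Sketch only: MonqueError, JobStateError, and Monque are assumed to be
// public exports of @monque/core, as implied by the bundled JSDoc examples.
import { Monque, MonqueError, JobStateError } from '@monque/core';

async function cancelIfPossible(monque: Monque, jobId: string): Promise<void> {
  try {
    await monque.cancelJob(jobId);
  } catch (error) {
    if (error instanceof JobStateError) {
      // Only pending jobs can be cancelled; the error carries jobId and currentStatus.
      console.warn(`Skip ${error.jobId}: status is '${error.currentStatus}'`);
    } else if (error instanceof MonqueError) {
      // Any other library-level failure (e.g. a ConnectionError).
      console.error('Monque error:', error.message);
    } else {
      throw error;
    }
  }
}
```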
@@ -332,764 +558,1853 @@ function validateCronExpression(expression) {
|
|
|
332
558
|
}
|
|
333
559
|
}
|
|
334
560
|
function handleCronParseError(expression, error) {
|
|
335
|
-
throw new
|
|
561
|
+
throw new InvalidCronError(expression, `Invalid cron expression "${expression}": ${error instanceof Error ? error.message : "Unknown parsing error"}. Expected 5-field format: "minute hour day-of-month month day-of-week" or predefined expression (e.g. @daily). Example: "0 9 * * 1" (every Monday at 9am)`);
|
|
336
562
|
}
|
|
337
563
|
|
|
338
564
|
//#endregion
|
|
339
|
-
//#region src/scheduler/
|
|
340
|
-
/**
|
|
341
|
-
* Default configuration values
|
|
342
|
-
*/
|
|
343
|
-
const DEFAULTS = {
|
|
344
|
-
collectionName: "monque_jobs",
|
|
345
|
-
pollInterval: 1e3,
|
|
346
|
-
maxRetries: 10,
|
|
347
|
-
baseRetryInterval: 1e3,
|
|
348
|
-
shutdownTimeout: 3e4,
|
|
349
|
-
defaultConcurrency: 5,
|
|
350
|
-
lockTimeout: 18e5,
|
|
351
|
-
recoverStaleJobs: true,
|
|
352
|
-
heartbeatInterval: 3e4,
|
|
353
|
-
retentionInterval: 36e5
|
|
354
|
-
};
|
|
565
|
+
//#region src/scheduler/helpers.ts
|
|
355
566
|
/**
|
|
356
|
-
*
|
|
357
|
-
*
|
|
358
|
-
* A type-safe job scheduler with atomic locking, exponential backoff, cron scheduling,
|
|
359
|
-
* stale job recovery, and event-driven observability. Built on native MongoDB driver.
|
|
360
|
-
*
|
|
361
|
-
* @example Complete lifecycle
|
|
362
|
-
* ```;
|
|
363
|
-
typescript
|
|
364
|
-
*
|
|
365
|
-
|
|
366
|
-
import { Monque } from '@monque/core';
|
|
367
|
-
|
|
368
|
-
*
|
|
369
|
-
|
|
370
|
-
import { MongoClient } from 'mongodb';
|
|
371
|
-
|
|
372
|
-
*
|
|
373
|
-
*
|
|
374
|
-
const client = new MongoClient('mongodb://localhost:27017');
|
|
375
|
-
* await client.connect()
|
|
376
|
-
*
|
|
377
|
-
const db = client.db('myapp');
|
|
378
|
-
*
|
|
379
|
-
* // Create instance with options
|
|
380
|
-
*
|
|
381
|
-
const monque = new Monque(db, {
|
|
382
|
-
* collectionName: 'jobs',
|
|
383
|
-
* pollInterval: 1000,
|
|
384
|
-
* maxRetries: 10,
|
|
385
|
-
* shutdownTimeout: 30000,
|
|
386
|
-
* });
|
|
387
|
-
*
|
|
388
|
-
* // Initialize (sets up indexes and recovers stale jobs)
|
|
389
|
-
* await monque.initialize()
|
|
390
|
-
*
|
|
391
|
-
* // Register workers with type safety
|
|
392
|
-
*
|
|
393
|
-
type EmailJob = {};
|
|
394
|
-
* to: string
|
|
395
|
-
* subject: string
|
|
396
|
-
* body: string
|
|
397
|
-
* }
|
|
567
|
+
* Build a MongoDB query filter from a JobSelector.
|
|
398
568
|
*
|
|
399
|
-
*
|
|
400
|
-
|
|
401
|
-
* await emailService.send(job.data.to, job.data.subject, job.data.body)
|
|
569
|
+
* Translates the high-level `JobSelector` interface into a MongoDB `Filter<Document>`.
|
|
570
|
+
* Handles array values for status (using `$in`) and date range filtering.
|
|
402
571
|
*
|
|
572
|
+
* @param filter - The user-provided job selector
|
|
573
|
+
* @returns A standard MongoDB filter object
|
|
574
|
+
*/
|
|
575
|
+
function buildSelectorQuery(filter) {
|
|
576
|
+
const query = {};
|
|
577
|
+
if (filter.name) query["name"] = filter.name;
|
|
578
|
+
if (filter.status) if (Array.isArray(filter.status)) query["status"] = { $in: filter.status };
|
|
579
|
+
else query["status"] = filter.status;
|
|
580
|
+
if (filter.olderThan || filter.newerThan) {
|
|
581
|
+
query["createdAt"] = {};
|
|
582
|
+
if (filter.olderThan) query["createdAt"].$lt = filter.olderThan;
|
|
583
|
+
if (filter.newerThan) query["createdAt"].$gt = filter.newerThan;
|
|
584
|
+
}
|
|
585
|
+
return query;
|
|
403
586
|
}
|
|
404
|
-
|
|
587
|
+
/**
|
|
588
|
+
* Encode an ObjectId and direction into an opaque cursor string.
|
|
405
589
|
*
|
|
406
|
-
*
|
|
407
|
-
*
|
|
408
|
-
|
|
409
|
-
job
|
|
590
|
+
* Format: `prefix` + `base64url(objectId)`
|
|
591
|
+
* Prefix: 'F' (forward) or 'B' (backward)
|
|
592
|
+
*
|
|
593
|
+
* @param id - The job ID to use as the cursor anchor (exclusive)
|
|
594
|
+
* @param direction - 'forward' or 'backward'
|
|
595
|
+
* @returns Base64url-encoded cursor string
|
|
596
|
+
*/
|
|
597
|
+
function encodeCursor(id, direction) {
|
|
598
|
+
return (direction === "forward" ? "F" : "B") + Buffer.from(id.toHexString(), "hex").toString("base64url");
|
|
410
599
|
}
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
* logger.info(`Job $job.namecompleted in $durationms`);
|
|
414
|
-
* });
|
|
600
|
+
/**
|
|
601
|
+
* Decode an opaque cursor string into an ObjectId and direction.
|
|
415
602
|
*
|
|
416
|
-
*
|
|
417
|
-
* logger.error(`Job $job.namefailed:`, error);
|
|
418
|
-
* });
|
|
603
|
+
* Validates format and returns the components.
|
|
419
604
|
*
|
|
420
|
-
*
|
|
421
|
-
*
|
|
605
|
+
* @param cursor - The opaque cursor string
|
|
606
|
+
* @returns The decoded ID and direction
|
|
607
|
+
* @throws {InvalidCursorError} If the cursor format is invalid or ID is malformed
|
|
608
|
+
*/
|
|
609
|
+
function decodeCursor(cursor) {
|
|
610
|
+
if (!cursor || cursor.length < 2) throw new InvalidCursorError("Cursor is empty or too short");
|
|
611
|
+
const prefix = cursor.charAt(0);
|
|
612
|
+
const payload = cursor.slice(1);
|
|
613
|
+
let direction;
|
|
614
|
+
if (prefix === "F") direction = CursorDirection.FORWARD;
|
|
615
|
+
else if (prefix === "B") direction = CursorDirection.BACKWARD;
|
|
616
|
+
else throw new InvalidCursorError(`Invalid cursor prefix: ${prefix}`);
|
|
617
|
+
try {
|
|
618
|
+
const hex = Buffer.from(payload, "base64url").toString("hex");
|
|
619
|
+
if (hex.length !== 24) throw new InvalidCursorError("Invalid length");
|
|
620
|
+
return {
|
|
621
|
+
id: new mongodb.ObjectId(hex),
|
|
622
|
+
direction
|
|
623
|
+
};
|
|
624
|
+
} catch (error) {
|
|
625
|
+
if (error instanceof InvalidCursorError) throw error;
|
|
626
|
+
throw new InvalidCursorError("Invalid cursor payload");
|
|
627
|
+
}
|
|
628
|
+
}
|
|
629
|
+
|
|
630
|
+
//#endregion
|
|
631
|
+
//#region src/scheduler/services/change-stream-handler.ts
|
|
632
|
+
/**
|
|
633
|
+
* Internal service for MongoDB Change Stream lifecycle.
|
|
422
634
|
*
|
|
423
|
-
*
|
|
424
|
-
*
|
|
425
|
-
* to: 'user@example.com',
|
|
426
|
-
* subject: 'Welcome!',
|
|
427
|
-
* body: 'Thanks for signing up.'
|
|
428
|
-
* });
|
|
635
|
+
* Provides real-time job notifications when available, with automatic
|
|
636
|
+
* reconnection and graceful fallback to polling-only mode.
|
|
429
637
|
*
|
|
430
|
-
*
|
|
431
|
-
* process.on('SIGTERM', async () => {
|
|
432
|
-
* await monque.stop();
|
|
433
|
-
* await client.close();
|
|
434
|
-
* process.exit(0);
|
|
435
|
-
* });
|
|
436
|
-
* ```
|
|
638
|
+
* @internal Not part of public API.
|
|
437
639
|
*/
|
|
438
|
-
var
|
|
439
|
-
|
|
440
|
-
options;
|
|
441
|
-
collection = null;
|
|
442
|
-
workers = /* @__PURE__ */ new Map();
|
|
443
|
-
pollIntervalId = null;
|
|
444
|
-
heartbeatIntervalId = null;
|
|
445
|
-
cleanupIntervalId = null;
|
|
446
|
-
isRunning = false;
|
|
447
|
-
isInitialized = false;
|
|
448
|
-
/**
|
|
449
|
-
* MongoDB Change Stream for real-time job notifications.
|
|
450
|
-
* When available, provides instant job processing without polling delay.
|
|
451
|
-
*/
|
|
640
|
+
var ChangeStreamHandler = class {
|
|
641
|
+
/** MongoDB Change Stream for real-time job notifications */
|
|
452
642
|
changeStream = null;
|
|
453
|
-
/**
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
/**
|
|
463
|
-
* Debounce timer for change stream event processing.
|
|
464
|
-
* Prevents claim storms when multiple events arrive in quick succession.
|
|
465
|
-
*/
|
|
466
|
-
changeStreamDebounceTimer = null;
|
|
467
|
-
/**
|
|
468
|
-
* Whether the scheduler is currently using change streams for notifications.
|
|
469
|
-
*/
|
|
643
|
+
/** Number of consecutive reconnection attempts */
|
|
644
|
+
reconnectAttempts = 0;
|
|
645
|
+
/** Maximum reconnection attempts before falling back to polling-only mode */
|
|
646
|
+
maxReconnectAttempts = 3;
|
|
647
|
+
/** Debounce timer for change stream event processing */
|
|
648
|
+
debounceTimer = null;
|
|
649
|
+
/** Timer ID for reconnection with exponential backoff */
|
|
650
|
+
reconnectTimer = null;
|
|
651
|
+
/** Whether the scheduler is currently using change streams */
|
|
470
652
|
usingChangeStreams = false;
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
*/
|
|
475
|
-
changeStreamReconnectTimer = null;
|
|
476
|
-
constructor(db, options = {}) {
|
|
477
|
-
super();
|
|
478
|
-
this.db = db;
|
|
479
|
-
this.options = {
|
|
480
|
-
collectionName: options.collectionName ?? DEFAULTS.collectionName,
|
|
481
|
-
pollInterval: options.pollInterval ?? DEFAULTS.pollInterval,
|
|
482
|
-
maxRetries: options.maxRetries ?? DEFAULTS.maxRetries,
|
|
483
|
-
baseRetryInterval: options.baseRetryInterval ?? DEFAULTS.baseRetryInterval,
|
|
484
|
-
shutdownTimeout: options.shutdownTimeout ?? DEFAULTS.shutdownTimeout,
|
|
485
|
-
defaultConcurrency: options.defaultConcurrency ?? DEFAULTS.defaultConcurrency,
|
|
486
|
-
lockTimeout: options.lockTimeout ?? DEFAULTS.lockTimeout,
|
|
487
|
-
recoverStaleJobs: options.recoverStaleJobs ?? DEFAULTS.recoverStaleJobs,
|
|
488
|
-
maxBackoffDelay: options.maxBackoffDelay,
|
|
489
|
-
schedulerInstanceId: options.schedulerInstanceId ?? (0, node_crypto.randomUUID)(),
|
|
490
|
-
heartbeatInterval: options.heartbeatInterval ?? DEFAULTS.heartbeatInterval,
|
|
491
|
-
jobRetention: options.jobRetention
|
|
492
|
-
};
|
|
653
|
+
constructor(ctx, onPoll) {
|
|
654
|
+
this.ctx = ctx;
|
|
655
|
+
this.onPoll = onPoll;
|
|
493
656
|
}
|
|
494
657
|
/**
|
|
495
|
-
*
|
|
496
|
-
* Must be called before start().
|
|
658
|
+
* Set up MongoDB Change Stream for real-time job notifications.
|
|
497
659
|
*
|
|
498
|
-
*
|
|
660
|
+
* Change streams provide instant notifications when jobs are inserted or when
|
|
661
|
+
* job status changes to pending (e.g., after a retry). This eliminates the
|
|
662
|
+
* polling delay for reactive job processing.
|
|
663
|
+
*
|
|
664
|
+
* The change stream watches for:
|
|
665
|
+
* - Insert operations (new jobs)
|
|
666
|
+
* - Update operations where status field changes
|
|
667
|
+
*
|
|
668
|
+
* If change streams are unavailable (e.g., standalone MongoDB), the system
|
|
669
|
+
* gracefully falls back to polling-only mode.
|
|
499
670
|
*/
|
|
500
|
-
|
|
501
|
-
if (this.
|
|
671
|
+
setup() {
|
|
672
|
+
if (!this.ctx.isRunning()) return;
|
|
502
673
|
try {
|
|
503
|
-
this.
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
674
|
+
this.changeStream = this.ctx.collection.watch([{ $match: { $or: [{ operationType: "insert" }, {
|
|
675
|
+
operationType: "update",
|
|
676
|
+
"updateDescription.updatedFields.status": { $exists: true }
|
|
677
|
+
}] } }], { fullDocument: "updateLookup" });
|
|
678
|
+
this.changeStream.on("change", (change) => {
|
|
679
|
+
this.handleEvent(change);
|
|
680
|
+
});
|
|
681
|
+
this.changeStream.on("error", (error) => {
|
|
682
|
+
this.ctx.emit("changestream:error", { error });
|
|
683
|
+
this.handleError(error);
|
|
684
|
+
});
|
|
685
|
+
this.usingChangeStreams = true;
|
|
686
|
+
this.reconnectAttempts = 0;
|
|
687
|
+
this.ctx.emit("changestream:connected", void 0);
|
|
507
688
|
} catch (error) {
|
|
508
|
-
|
|
689
|
+
this.usingChangeStreams = false;
|
|
690
|
+
const reason = error instanceof Error ? error.message : "Unknown error";
|
|
691
|
+
this.ctx.emit("changestream:fallback", { reason });
|
|
509
692
|
}
|
|
510
693
|
}
|
|
511
694
|
/**
|
|
512
|
-
*
|
|
695
|
+
* Handle a change stream event by triggering a debounced poll.
|
|
513
696
|
*
|
|
514
|
-
*
|
|
515
|
-
*
|
|
516
|
-
*
|
|
517
|
-
*
|
|
518
|
-
*
|
|
519
|
-
* - `{lastHeartbeat, status}` - For monitoring/debugging queries (e.g., inspecting heartbeat age)
|
|
520
|
-
* - `{status, nextRunAt, claimedBy}` - For atomic claim queries (find unclaimed pending jobs)
|
|
521
|
-
* - `{lockedAt, lastHeartbeat, status}` - Supports recovery scans and monitoring access patterns
|
|
697
|
+
* Events are debounced to prevent "claim storms" when multiple changes arrive
|
|
698
|
+
* in rapid succession (e.g., bulk job inserts). A 100ms debounce window
|
|
699
|
+
* collects multiple events and triggers a single poll.
|
|
700
|
+
*
|
|
701
|
+
* @param change - The change stream event document
|
|
522
702
|
*/
|
|
523
|
-
|
|
524
|
-
if (!this.
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
},
|
|
538
|
-
background: true
|
|
539
|
-
});
|
|
540
|
-
await this.collection.createIndex({
|
|
541
|
-
name: 1,
|
|
542
|
-
status: 1
|
|
543
|
-
}, { background: true });
|
|
544
|
-
await this.collection.createIndex({
|
|
545
|
-
claimedBy: 1,
|
|
546
|
-
status: 1
|
|
547
|
-
}, { background: true });
|
|
548
|
-
await this.collection.createIndex({
|
|
549
|
-
lastHeartbeat: 1,
|
|
550
|
-
status: 1
|
|
551
|
-
}, { background: true });
|
|
552
|
-
await this.collection.createIndex({
|
|
553
|
-
status: 1,
|
|
554
|
-
nextRunAt: 1,
|
|
555
|
-
claimedBy: 1
|
|
556
|
-
}, { background: true });
|
|
557
|
-
await this.collection.createIndex({
|
|
558
|
-
status: 1,
|
|
559
|
-
lockedAt: 1,
|
|
560
|
-
lastHeartbeat: 1
|
|
561
|
-
}, { background: true });
|
|
703
|
+
handleEvent(change) {
|
|
704
|
+
if (!this.ctx.isRunning()) return;
|
|
705
|
+
const isInsert = change.operationType === "insert";
|
|
706
|
+
const isUpdate = change.operationType === "update";
|
|
707
|
+
const isPendingStatus = ("fullDocument" in change ? change.fullDocument : void 0)?.["status"] === JobStatus.PENDING;
|
|
708
|
+
if (isInsert || isUpdate && isPendingStatus) {
|
|
709
|
+
if (this.debounceTimer) clearTimeout(this.debounceTimer);
|
|
710
|
+
this.debounceTimer = setTimeout(() => {
|
|
711
|
+
this.debounceTimer = null;
|
|
712
|
+
this.onPoll().catch((error) => {
|
|
713
|
+
this.ctx.emit("job:error", { error });
|
|
714
|
+
});
|
|
715
|
+
}, 100);
|
|
716
|
+
}
|
|
562
717
|
}
|
|
563
718
|
/**
|
|
564
|
-
*
|
|
565
|
-
*
|
|
566
|
-
*
|
|
719
|
+
* Handle change stream errors with exponential backoff reconnection.
|
|
720
|
+
*
|
|
721
|
+
* Attempts to reconnect up to `maxReconnectAttempts` times with
|
|
722
|
+
* exponential backoff (base 1000ms). After exhausting retries, falls back to
|
|
723
|
+
* polling-only mode.
|
|
724
|
+
*
|
|
725
|
+
* @param error - The error that caused the change stream failure
|
|
567
726
|
*/
|
|
568
|
-
|
|
569
|
-
if (!this.
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
727
|
+
handleError(error) {
|
|
728
|
+
if (!this.ctx.isRunning()) return;
|
|
729
|
+
this.reconnectAttempts++;
|
|
730
|
+
if (this.reconnectAttempts > this.maxReconnectAttempts) {
|
|
731
|
+
this.usingChangeStreams = false;
|
|
732
|
+
if (this.reconnectTimer) {
|
|
733
|
+
clearTimeout(this.reconnectTimer);
|
|
734
|
+
this.reconnectTimer = null;
|
|
735
|
+
}
|
|
736
|
+
if (this.changeStream) {
|
|
737
|
+
this.changeStream.close().catch(() => {});
|
|
738
|
+
this.changeStream = null;
|
|
739
|
+
}
|
|
740
|
+
this.ctx.emit("changestream:fallback", { reason: `Exhausted ${this.maxReconnectAttempts} reconnection attempts: ${error.message}` });
|
|
741
|
+
return;
|
|
742
|
+
}
|
|
743
|
+
const delay = 2 ** (this.reconnectAttempts - 1) * 1e3;
|
|
744
|
+
if (this.reconnectTimer) clearTimeout(this.reconnectTimer);
|
|
745
|
+
this.reconnectTimer = setTimeout(() => {
|
|
746
|
+
this.reconnectTimer = null;
|
|
747
|
+
if (this.ctx.isRunning()) {
|
|
748
|
+
if (this.changeStream) {
|
|
749
|
+
this.changeStream.close().catch(() => {});
|
|
750
|
+
this.changeStream = null;
|
|
751
|
+
}
|
|
752
|
+
this.setup();
|
|
753
|
+
}
|
|
754
|
+
}, delay);
|
|
755
|
+
}
|
|
756
|
+
/**
|
|
757
|
+
* Close the change stream cursor and emit closed event.
|
|
758
|
+
*/
|
|
759
|
+
async close() {
|
|
760
|
+
if (this.debounceTimer) {
|
|
761
|
+
clearTimeout(this.debounceTimer);
|
|
762
|
+
this.debounceTimer = null;
|
|
763
|
+
}
|
|
764
|
+
if (this.reconnectTimer) {
|
|
765
|
+
clearTimeout(this.reconnectTimer);
|
|
766
|
+
this.reconnectTimer = null;
|
|
767
|
+
}
|
|
768
|
+
if (this.changeStream) {
|
|
769
|
+
try {
|
|
770
|
+
await this.changeStream.close();
|
|
771
|
+
} catch {}
|
|
772
|
+
this.changeStream = null;
|
|
773
|
+
if (this.usingChangeStreams) this.ctx.emit("changestream:closed", void 0);
|
|
774
|
+
}
|
|
775
|
+
this.usingChangeStreams = false;
|
|
776
|
+
this.reconnectAttempts = 0;
|
|
777
|
+
}
|
|
778
|
+
/**
|
|
779
|
+
* Check if change streams are currently active.
|
|
780
|
+
*/
|
|
781
|
+
isActive() {
|
|
782
|
+
return this.usingChangeStreams;
|
|
783
|
+
}
|
|
784
|
+
};
|
|
785
|
+
|
|
786
|
+
//#endregion
|
|
787
|
+
//#region src/scheduler/services/job-manager.ts
|
|
788
|
+
/**
|
|
789
|
+
* Internal service for job lifecycle management operations.
|
|
790
|
+
*
|
|
791
|
+
* Provides atomic state transitions (cancel, retry, reschedule) and deletion.
|
|
792
|
+
* Emits appropriate events on each operation.
|
|
793
|
+
*
|
|
794
|
+
* @internal Not part of public API - use Monque class methods instead.
|
|
795
|
+
*/
|
|
796
|
+
var JobManager = class {
|
|
797
|
+
constructor(ctx) {
|
|
798
|
+
this.ctx = ctx;
|
|
799
|
+
}
|
|
800
|
+
/**
|
|
801
|
+
* Cancel a pending or scheduled job.
|
|
802
|
+
*
|
|
803
|
+
* Sets the job status to 'cancelled' and emits a 'job:cancelled' event.
|
|
804
|
+
* If the job is already cancelled, this is a no-op and returns the job.
|
|
805
|
+
* Cannot cancel jobs that are currently 'processing', 'completed', or 'failed'.
|
|
806
|
+
*
|
|
807
|
+
* @param jobId - The ID of the job to cancel
|
|
808
|
+
* @returns The cancelled job, or null if not found
|
|
809
|
+
* @throws {JobStateError} If job is in an invalid state for cancellation
|
|
810
|
+
*
|
|
811
|
+
* @example Cancel a pending job
|
|
812
|
+
* ```typescript
|
|
813
|
+
* const job = await monque.enqueue('report', { type: 'daily' });
|
|
814
|
+
* await monque.cancelJob(job._id.toString());
|
|
815
|
+
* ```
|
|
816
|
+
*/
|
|
817
|
+
async cancelJob(jobId) {
|
|
818
|
+
if (!mongodb.ObjectId.isValid(jobId)) return null;
|
|
819
|
+
const _id = new mongodb.ObjectId(jobId);
|
|
820
|
+
const jobDoc = await this.ctx.collection.findOne({ _id });
|
|
821
|
+
if (!jobDoc) return null;
|
|
822
|
+
const currentJob = jobDoc;
|
|
823
|
+
if (currentJob.status === JobStatus.CANCELLED) return this.ctx.documentToPersistedJob(currentJob);
|
|
824
|
+
if (currentJob.status !== JobStatus.PENDING) throw new JobStateError(`Cannot cancel job in status '${currentJob.status}'`, jobId, currentJob.status, "cancel");
|
|
825
|
+
const result = await this.ctx.collection.findOneAndUpdate({
|
|
826
|
+
_id,
|
|
827
|
+
status: JobStatus.PENDING
|
|
828
|
+
}, { $set: {
|
|
829
|
+
status: JobStatus.CANCELLED,
|
|
830
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
831
|
+
} }, { returnDocument: "after" });
|
|
832
|
+
if (!result) throw new JobStateError("Job status changed during cancellation attempt", jobId, "unknown", "cancel");
|
|
833
|
+
const job = this.ctx.documentToPersistedJob(result);
|
|
834
|
+
this.ctx.emit("job:cancelled", { job });
|
|
835
|
+
return job;
|
|
836
|
+
}
|
|
837
|
+
/**
|
|
838
|
+
* Retry a failed or cancelled job.
|
|
839
|
+
*
|
|
840
|
+
* Resets the job to 'pending' status, clears failure count/reason, and sets
|
|
841
|
+
* nextRunAt to now (immediate retry). Emits a 'job:retried' event.
|
|
842
|
+
*
|
|
843
|
+
* @param jobId - The ID of the job to retry
|
|
844
|
+
* @returns The updated job, or null if not found
|
|
845
|
+
* @throws {JobStateError} If job is in an invalid state for retry (must be failed or cancelled)
|
|
846
|
+
*
|
|
847
|
+
* @example Retry a failed job
|
|
848
|
+
* ```typescript
|
|
849
|
+
* monque.on('job:fail', async ({ job }) => {
|
|
850
|
+
* console.log(`Job ${job._id} failed, retrying manually...`);
|
|
851
|
+
* await monque.retryJob(job._id.toString());
|
|
852
|
+
* });
|
|
853
|
+
* ```
|
|
854
|
+
*/
|
|
855
|
+
async retryJob(jobId) {
|
|
856
|
+
if (!mongodb.ObjectId.isValid(jobId)) return null;
|
|
857
|
+
const _id = new mongodb.ObjectId(jobId);
|
|
858
|
+
const currentJob = await this.ctx.collection.findOne({ _id });
|
|
859
|
+
if (!currentJob) return null;
|
|
860
|
+
if (currentJob["status"] !== JobStatus.FAILED && currentJob["status"] !== JobStatus.CANCELLED) throw new JobStateError(`Cannot retry job in status '${currentJob["status"]}'`, jobId, currentJob["status"], "retry");
|
|
861
|
+
const previousStatus = currentJob["status"];
|
|
862
|
+
const result = await this.ctx.collection.findOneAndUpdate({
|
|
863
|
+
_id,
|
|
864
|
+
status: { $in: [JobStatus.FAILED, JobStatus.CANCELLED] }
|
|
574
865
|
}, {
|
|
575
866
|
$set: {
|
|
576
867
|
status: JobStatus.PENDING,
|
|
868
|
+
failCount: 0,
|
|
869
|
+
nextRunAt: /* @__PURE__ */ new Date(),
|
|
577
870
|
updatedAt: /* @__PURE__ */ new Date()
|
|
578
871
|
},
|
|
579
872
|
$unset: {
|
|
873
|
+
failReason: "",
|
|
580
874
|
lockedAt: "",
|
|
581
875
|
claimedBy: "",
|
|
582
876
|
lastHeartbeat: "",
|
|
583
877
|
heartbeatInterval: ""
|
|
584
878
|
}
|
|
879
|
+
}, { returnDocument: "after" });
|
|
880
|
+
if (!result) throw new JobStateError("Job status changed during retry attempt", jobId, "unknown", "retry");
|
|
881
|
+
const job = this.ctx.documentToPersistedJob(result);
|
|
882
|
+
this.ctx.emit("job:retried", {
|
|
883
|
+
job,
|
|
884
|
+
previousStatus
|
|
585
885
|
});
|
|
586
|
-
|
|
886
|
+
return job;
|
|
587
887
|
}
|
|
588
888
|
/**
|
|
589
|
-
*
|
|
889
|
+
* Reschedule a pending job to run at a different time.
|
|
590
890
|
*
|
|
591
|
-
*
|
|
592
|
-
* - Removes failed jobs older than `jobRetention.failed`
|
|
891
|
+
* Only works for jobs in 'pending' status.
|
|
593
892
|
*
|
|
594
|
-
*
|
|
893
|
+
* @param jobId - The ID of the job to reschedule
|
|
894
|
+
* @param runAt - The new Date when the job should run
|
|
895
|
+
* @returns The updated job, or null if not found
|
|
896
|
+
* @throws {JobStateError} If job is not in pending state
|
|
595
897
|
*
|
|
596
|
-
* @
|
|
898
|
+
* @example Delay a job by 1 hour
|
|
899
|
+
* ```typescript
|
|
900
|
+
* const nextHour = new Date(Date.now() + 60 * 60 * 1000);
|
|
901
|
+
* await monque.rescheduleJob(jobId, nextHour);
|
|
902
|
+
* ```
|
|
597
903
|
*/
|
|
598
|
-
async
|
|
599
|
-
if (!
|
|
600
|
-
const
|
|
601
|
-
const
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
updatedAt: { $lt: cutoff }
|
|
615
|
-
}));
|
|
616
|
-
}
|
|
617
|
-
if (deletions.length > 0) await Promise.all(deletions);
|
|
904
|
+
async rescheduleJob(jobId, runAt) {
|
|
905
|
+
if (!mongodb.ObjectId.isValid(jobId)) return null;
|
|
906
|
+
const _id = new mongodb.ObjectId(jobId);
|
|
907
|
+
const currentJobDoc = await this.ctx.collection.findOne({ _id });
|
|
908
|
+
if (!currentJobDoc) return null;
|
|
909
|
+
const currentJob = currentJobDoc;
|
|
910
|
+
if (currentJob.status !== JobStatus.PENDING) throw new JobStateError(`Cannot reschedule job in status '${currentJob.status}'`, jobId, currentJob.status, "reschedule");
|
|
911
|
+
const result = await this.ctx.collection.findOneAndUpdate({
|
|
912
|
+
_id,
|
|
913
|
+
status: JobStatus.PENDING
|
|
914
|
+
}, { $set: {
|
|
915
|
+
nextRunAt: runAt,
|
|
916
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
917
|
+
} }, { returnDocument: "after" });
|
|
918
|
+
if (!result) throw new JobStateError("Job status changed during reschedule attempt", jobId, "unknown", "reschedule");
|
|
919
|
+
return this.ctx.documentToPersistedJob(result);
|
|
618
920
|
}
|
|
619
921
|
/**
|
|
620
|
-
*
|
|
621
|
-
*
|
|
622
|
-
* Jobs are stored in MongoDB and processed by registered workers. Supports
|
|
623
|
-
* delayed execution via `runAt` and deduplication via `uniqueKey`.
|
|
922
|
+
* Permanently delete a job.
|
|
624
923
|
*
|
|
625
|
-
*
|
|
626
|
-
*
|
|
627
|
-
*
|
|
628
|
-
* Failed jobs are automatically retried with exponential backoff up to `maxRetries`
|
|
629
|
-
* (default: 10 attempts). The delay between retries is calculated as `2^failCount × baseRetryInterval`.
|
|
924
|
+
* This action is irreversible. Emits a 'job:deleted' event upon success.
|
|
925
|
+
* Can delete a job in any state.
|
|
630
926
|
*
|
|
631
|
-
* @
|
|
632
|
-
* @
|
|
633
|
-
* @param data - Job payload, will be passed to the worker handler
|
|
634
|
-
* @param options - Scheduling and deduplication options
|
|
635
|
-
* @returns Promise resolving to the created or existing job document
|
|
636
|
-
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
927
|
+
* @param jobId - The ID of the job to delete
|
|
928
|
+
* @returns true if deleted, false if job not found
|
|
637
929
|
*
|
|
638
|
-
* @example
|
|
930
|
+
* @example Delete a cleanup job
|
|
639
931
|
* ```typescript
|
|
640
|
-
* await monque.
|
|
641
|
-
*
|
|
642
|
-
*
|
|
643
|
-
*
|
|
644
|
-
* });
|
|
932
|
+
* const deleted = await monque.deleteJob(jobId);
|
|
933
|
+
* if (deleted) {
|
|
934
|
+
* console.log('Job permanently removed');
|
|
935
|
+
* }
|
|
645
936
|
* ```
|
|
937
|
+
*/
|
|
938
|
+
async deleteJob(jobId) {
|
|
939
|
+
if (!mongodb.ObjectId.isValid(jobId)) return false;
|
|
940
|
+
const _id = new mongodb.ObjectId(jobId);
|
|
941
|
+
if ((await this.ctx.collection.deleteOne({ _id })).deletedCount > 0) {
|
|
942
|
+
this.ctx.emit("job:deleted", { jobId });
|
|
943
|
+
return true;
|
|
944
|
+
}
|
|
945
|
+
return false;
|
|
946
|
+
}
|
|
947
|
+
/**
|
|
948
|
+
* Cancel multiple jobs matching the given filter.
|
|
646
949
|
*
|
|
647
|
-
*
|
|
648
|
-
*
|
|
649
|
-
*
|
|
650
|
-
* await monque.enqueue('reminder', { message: 'Check in!' }, {
|
|
651
|
-
* runAt: oneHourLater
|
|
652
|
-
* });
|
|
653
|
-
* ```
|
|
950
|
+
* Only cancels jobs in 'pending' status. Jobs in other states are collected
|
|
951
|
+
* as errors in the result. Emits a 'jobs:cancelled' event with the IDs of
|
|
952
|
+
* successfully cancelled jobs.
|
|
654
953
|
*
|
|
655
|
-
* @
|
|
954
|
+
* @param filter - Selector for which jobs to cancel (name, status, date range)
|
|
955
|
+
* @returns Result with count of cancelled jobs and any errors encountered
|
|
956
|
+
*
|
|
957
|
+
* @example Cancel all pending jobs for a queue
|
|
656
958
|
* ```typescript
|
|
657
|
-
* await monque.
|
|
658
|
-
*
|
|
959
|
+
* const result = await monque.cancelJobs({
|
|
960
|
+
* name: 'email-queue',
|
|
961
|
+
* status: 'pending'
|
|
659
962
|
* });
|
|
660
|
-
*
|
|
963
|
+
* console.log(`Cancelled ${result.count} jobs`);
|
|
661
964
|
* ```
|
|
662
965
|
*/
|
|
663
|
-
async
|
|
664
|
-
|
|
665
|
-
const
|
|
666
|
-
const
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
if (options.uniqueKey) job.uniqueKey = options.uniqueKey;
|
|
676
|
-
try {
|
|
677
|
-
if (options.uniqueKey) {
|
|
678
|
-
if (!this.collection) throw new require_errors.ConnectionError("Failed to enqueue job: collection not available");
|
|
679
|
-
const result$1 = await this.collection.findOneAndUpdate({
|
|
680
|
-
name,
|
|
681
|
-
uniqueKey: options.uniqueKey,
|
|
682
|
-
status: { $in: [JobStatus.PENDING, JobStatus.PROCESSING] }
|
|
683
|
-
}, { $setOnInsert: job }, {
|
|
684
|
-
upsert: true,
|
|
685
|
-
returnDocument: "after"
|
|
966
|
+
async cancelJobs(filter) {
|
|
967
|
+
const baseQuery = buildSelectorQuery(filter);
|
|
968
|
+
const errors = [];
|
|
969
|
+
const cancelledIds = [];
|
|
970
|
+
const cursor = this.ctx.collection.find(baseQuery);
|
|
971
|
+
for await (const doc of cursor) {
|
|
972
|
+
const job = doc;
|
|
973
|
+
const jobId = job._id.toString();
|
|
974
|
+
if (job.status !== JobStatus.PENDING && job.status !== JobStatus.CANCELLED) {
|
|
975
|
+
errors.push({
|
|
976
|
+
jobId,
|
|
977
|
+
error: `Cannot cancel job in status '${job.status}'`
|
|
686
978
|
});
|
|
687
|
-
|
|
688
|
-
return this.documentToPersistedJob(result$1);
|
|
979
|
+
continue;
|
|
689
980
|
}
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
981
|
+
if (job.status === JobStatus.CANCELLED) {
|
|
982
|
+
cancelledIds.push(jobId);
|
|
983
|
+
continue;
|
|
984
|
+
}
|
|
985
|
+
if (await this.ctx.collection.findOneAndUpdate({
|
|
986
|
+
_id: job._id,
|
|
987
|
+
status: JobStatus.PENDING
|
|
988
|
+
}, { $set: {
|
|
989
|
+
status: JobStatus.CANCELLED,
|
|
990
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
991
|
+
} }, { returnDocument: "after" })) cancelledIds.push(jobId);
|
|
992
|
+
else errors.push({
|
|
993
|
+
jobId,
|
|
994
|
+
error: "Job status changed during cancellation"
|
|
995
|
+
});
|
|
699
996
|
}
|
|
997
|
+
if (cancelledIds.length > 0) this.ctx.emit("jobs:cancelled", {
|
|
998
|
+
jobIds: cancelledIds,
|
|
999
|
+
count: cancelledIds.length
|
|
1000
|
+
});
|
|
1001
|
+
return {
|
|
1002
|
+
count: cancelledIds.length,
|
|
1003
|
+
errors
|
|
1004
|
+
};
|
|
700
1005
|
}
|
|
701
1006
|
/**
|
|
702
|
-
*
|
|
1007
|
+
* Retry multiple jobs matching the given filter.
|
|
703
1008
|
*
|
|
704
|
-
*
|
|
705
|
-
*
|
|
1009
|
+
* Only retries jobs in 'failed' or 'cancelled' status. Jobs in other states
|
|
1010
|
+
* are collected as errors in the result. Emits a 'jobs:retried' event with
|
|
1011
|
+
* the IDs of successfully retried jobs.
|
|
706
1012
|
*
|
|
707
|
-
* @
|
|
708
|
-
* @
|
|
709
|
-
* @param data - Job payload, will be passed to the worker handler
|
|
710
|
-
* @returns Promise resolving to the created job document
|
|
711
|
-
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
1013
|
+
* @param filter - Selector for which jobs to retry (name, status, date range)
|
|
1014
|
+
* @returns Result with count of retried jobs and any errors encountered
|
|
712
1015
|
*
|
|
713
|
-
* @example
|
|
1016
|
+
* @example Retry all failed jobs
|
|
714
1017
|
* ```typescript
|
|
715
|
-
* await monque.
|
|
716
|
-
*
|
|
717
|
-
* subject: 'Alert',
|
|
718
|
-
* body: 'Immediate attention required'
|
|
1018
|
+
* const result = await monque.retryJobs({
|
|
1019
|
+
* status: 'failed'
|
|
719
1020
|
* });
|
|
720
|
-
*
|
|
721
|
-
*
|
|
722
|
-
* @example Process order in background
|
|
723
|
-
* ```typescript
|
|
724
|
-
* const order = await createOrder(data);
|
|
725
|
-
* await monque.now('process-order', { orderId: order.id });
|
|
726
|
-
* return order; // Return immediately, processing happens async
|
|
1021
|
+
* console.log(`Retried ${result.count} jobs`);
|
|
727
1022
|
* ```
|
|
728
1023
|
*/
|
|
729
|
-
async
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
1024
|
+
async retryJobs(filter) {
|
|
1025
|
+
const baseQuery = buildSelectorQuery(filter);
|
|
1026
|
+
const errors = [];
|
|
1027
|
+
const retriedIds = [];
|
|
1028
|
+
const cursor = this.ctx.collection.find(baseQuery);
|
|
1029
|
+
for await (const doc of cursor) {
|
|
1030
|
+
const job = doc;
|
|
1031
|
+
const jobId = job._id.toString();
|
|
1032
|
+
if (job.status !== JobStatus.FAILED && job.status !== JobStatus.CANCELLED) {
|
|
1033
|
+
errors.push({
|
|
1034
|
+
jobId,
|
|
1035
|
+
error: `Cannot retry job in status '${job.status}'`
|
|
1036
|
+
});
|
|
1037
|
+
continue;
|
|
1038
|
+
}
|
|
1039
|
+
if (await this.ctx.collection.findOneAndUpdate({
|
|
1040
|
+
_id: job._id,
|
|
1041
|
+
status: { $in: [JobStatus.FAILED, JobStatus.CANCELLED] }
|
|
1042
|
+
}, {
|
|
1043
|
+
$set: {
|
|
1044
|
+
status: JobStatus.PENDING,
|
|
1045
|
+
failCount: 0,
|
|
1046
|
+
nextRunAt: /* @__PURE__ */ new Date(),
|
|
1047
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
1048
|
+
},
|
|
1049
|
+
$unset: {
|
|
1050
|
+
failReason: "",
|
|
1051
|
+
lockedAt: "",
|
|
1052
|
+
claimedBy: "",
|
|
1053
|
+
lastHeartbeat: "",
|
|
1054
|
+
heartbeatInterval: ""
|
|
1055
|
+
}
|
|
1056
|
+
}, { returnDocument: "after" })) retriedIds.push(jobId);
|
|
1057
|
+
else errors.push({
|
|
1058
|
+
jobId,
|
|
1059
|
+
error: "Job status changed during retry attempt"
|
|
1060
|
+
});
|
|
1061
|
+
}
|
|
1062
|
+
if (retriedIds.length > 0) this.ctx.emit("jobs:retried", {
|
|
1063
|
+
jobIds: retriedIds,
|
|
1064
|
+
count: retriedIds.length
|
|
1065
|
+
});
|
|
1066
|
+
return {
|
|
1067
|
+
count: retriedIds.length,
|
|
1068
|
+
errors
|
|
1069
|
+
};
|
|
1070
|
+
}
|
|
1071
|
+
/**
|
|
1072
|
+
* Delete multiple jobs matching the given filter.
|
|
1073
|
+
*
|
|
1074
|
+
* Deletes jobs in any status. Uses a batch delete for efficiency.
|
|
1075
|
+
* Emits a 'jobs:deleted' event with the count of deleted jobs.
|
|
1076
|
+
* Does not emit individual 'job:deleted' events to avoid noise.
|
|
1077
|
+
*
|
|
1078
|
+
* @param filter - Selector for which jobs to delete (name, status, date range)
|
|
1079
|
+
* @returns Result with count of deleted jobs (errors array always empty for delete)
|
|
1080
|
+
*
|
|
1081
|
+
* @example Delete old completed jobs
|
|
1082
|
+
* ```typescript
|
|
1083
|
+
* const weekAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000);
|
|
1084
|
+
* const result = await monque.deleteJobs({
|
|
1085
|
+
* status: 'completed',
|
|
1086
|
+
* olderThan: weekAgo
|
|
1087
|
+
* });
|
|
1088
|
+
* console.log(`Deleted ${result.count} jobs`);
|
|
1089
|
+
* ```
|
|
1090
|
+
*/
|
|
1091
|
+
async deleteJobs(filter) {
|
|
1092
|
+
const query = buildSelectorQuery(filter);
|
|
1093
|
+
const result = await this.ctx.collection.deleteMany(query);
|
|
1094
|
+
if (result.deletedCount > 0) this.ctx.emit("jobs:deleted", { count: result.deletedCount });
|
|
1095
|
+
return {
|
|
1096
|
+
count: result.deletedCount,
|
|
1097
|
+
errors: []
|
|
1098
|
+
};
|
|
1099
|
+
}
|
|
1100
|
+
};
|
|
1101
|
+
|
|
1102
|
+
//#endregion
|
|
1103
|
+
//#region src/scheduler/services/job-processor.ts
|
|
1104
|
+
/**
|
|
1105
|
+
* Internal service for job processing and execution.
|
|
1106
|
+
*
|
|
1107
|
+
* Manages the poll loop, atomic job acquisition, handler execution,
|
|
1108
|
+
* and job completion/failure with exponential backoff retry logic.
|
|
1109
|
+
*
|
|
1110
|
+
* @internal Not part of public API.
|
|
1111
|
+
*/
|
|
1112
|
+
var JobProcessor = class {
|
|
1113
|
+
constructor(ctx) {
|
|
1114
|
+
this.ctx = ctx;
|
|
1115
|
+
}
|
|
1116
|
+
/**
|
|
1117
|
+
* Poll for available jobs and process them.
|
|
1118
|
+
*
|
|
1119
|
+
* Called at regular intervals (configured by `pollInterval`). For each registered worker,
|
|
1120
|
+
* attempts to acquire jobs up to the worker's available concurrency slots.
|
|
1121
|
+
* Aborts early if the scheduler is stopping (`isRunning` is false).
|
|
1122
|
+
*/
|
|
1123
|
+
async poll() {
|
|
1124
|
+
if (!this.ctx.isRunning()) return;
|
|
1125
|
+
for (const [name, worker] of this.ctx.workers) {
|
|
1126
|
+
const availableSlots = worker.concurrency - worker.activeJobs.size;
|
|
1127
|
+
if (availableSlots <= 0) continue;
|
|
1128
|
+
for (let i = 0; i < availableSlots; i++) {
|
|
1129
|
+
if (!this.ctx.isRunning()) return;
|
|
1130
|
+
const job = await this.acquireJob(name);
|
|
1131
|
+
if (job) this.processJob(job, worker).catch((error) => {
|
|
1132
|
+
this.ctx.emit("job:error", {
|
|
1133
|
+
error,
|
|
1134
|
+
job
|
|
1135
|
+
});
|
|
1136
|
+
});
|
|
1137
|
+
else break;
|
|
1138
|
+
}
|
|
1139
|
+
}
|
|
1140
|
+
}
|
|
1141
|
+
/**
|
|
1142
|
+
* Atomically acquire a pending job for processing using the claimedBy pattern.
|
|
1143
|
+
*
|
|
1144
|
+
* Uses MongoDB's `findOneAndUpdate` with atomic operations to ensure only one scheduler
|
|
1145
|
+
* instance can claim a job. The query ensures the job is:
|
|
1146
|
+
* - In pending status
|
|
1147
|
+
* - Has nextRunAt <= now
|
|
1148
|
+
* - Is not claimed by another instance (claimedBy is null/undefined)
|
|
1149
|
+
*
|
|
1150
|
+
* Returns `null` immediately if scheduler is stopping (`isRunning` is false).
|
|
1151
|
+
*
|
|
1152
|
+
* @param name - The job type to acquire
|
|
1153
|
+
* @returns The acquired job with updated status, claimedBy, and heartbeat info, or `null` if no jobs available
|
|
1154
|
+
*/
|
|
1155
|
+
async acquireJob(name) {
|
|
1156
|
+
if (!this.ctx.isRunning()) return null;
|
|
1157
|
+
const now = /* @__PURE__ */ new Date();
|
|
1158
|
+
const result = await this.ctx.collection.findOneAndUpdate({
|
|
1159
|
+
name,
|
|
1160
|
+
status: JobStatus.PENDING,
|
|
1161
|
+
nextRunAt: { $lte: now },
|
|
1162
|
+
$or: [{ claimedBy: null }, { claimedBy: { $exists: false } }]
|
|
1163
|
+
}, { $set: {
|
|
1164
|
+
status: JobStatus.PROCESSING,
|
|
1165
|
+
claimedBy: this.ctx.instanceId,
|
|
1166
|
+
lockedAt: now,
|
|
1167
|
+
lastHeartbeat: now,
|
|
1168
|
+
heartbeatInterval: this.ctx.options.heartbeatInterval,
|
|
1169
|
+
updatedAt: now
|
|
1170
|
+
} }, {
|
|
1171
|
+
sort: { nextRunAt: 1 },
|
|
1172
|
+
returnDocument: "after"
|
|
1173
|
+
});
|
|
1174
|
+
if (!this.ctx.isRunning()) return null;
|
|
1175
|
+
if (!result) return null;
|
|
1176
|
+
return this.ctx.documentToPersistedJob(result);
|
|
1177
|
+
}
|
|
1178
|
+
/**
|
|
1179
|
+
* Execute a job using its registered worker handler.
|
|
1180
|
+
*
|
|
1181
|
+
* Tracks the job as active during processing, emits lifecycle events, and handles
|
|
1182
|
+
* both success and failure cases. On success, calls `completeJob()`. On failure,
|
|
1183
|
+
* calls `failJob()` which implements exponential backoff retry logic.
|
|
1184
|
+
*
|
|
1185
|
+
* @param job - The job to process
|
|
1186
|
+
* @param worker - The worker registration containing the handler and active job tracking
|
|
1187
|
+
*/
|
|
1188
|
+
async processJob(job, worker) {
|
|
1189
|
+
const jobId = job._id.toString();
|
|
1190
|
+
worker.activeJobs.set(jobId, job);
|
|
1191
|
+
const startTime = Date.now();
|
|
1192
|
+
this.ctx.emit("job:start", job);
|
|
1193
|
+
try {
|
|
1194
|
+
await worker.handler(job);
|
|
1195
|
+
const duration = Date.now() - startTime;
|
|
1196
|
+
await this.completeJob(job);
|
|
1197
|
+
this.ctx.emit("job:complete", {
|
|
1198
|
+
job,
|
|
1199
|
+
duration
|
|
1200
|
+
});
|
|
1201
|
+
} catch (error) {
|
|
1202
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
1203
|
+
await this.failJob(job, err);
|
|
1204
|
+
const willRetry = job.failCount + 1 < this.ctx.options.maxRetries;
|
|
1205
|
+
this.ctx.emit("job:fail", {
|
|
1206
|
+
job,
|
|
1207
|
+
error: err,
|
|
1208
|
+
willRetry
|
|
1209
|
+
});
|
|
1210
|
+
} finally {
|
|
1211
|
+
worker.activeJobs.delete(jobId);
|
|
1212
|
+
}
|
|
1213
|
+
}
|
|
1214
|
+
/**
|
|
1215
|
+
* Mark a job as completed successfully.
|
|
1216
|
+
*
|
|
1217
|
+
* For recurring jobs (with `repeatInterval`), schedules the next run based on the cron
|
|
1218
|
+
* expression and resets `failCount` to 0. For one-time jobs, sets status to `completed`.
|
|
1219
|
+
* Clears `lockedAt` and `failReason` fields in both cases.
|
|
1220
|
+
*
|
|
1221
|
+
* @param job - The job that completed successfully
|
|
1222
|
+
*/
|
|
1223
|
+
async completeJob(job) {
|
|
1224
|
+
if (!isPersistedJob(job)) return;
|
|
1225
|
+
if (job.repeatInterval) {
|
|
1226
|
+
const nextRunAt = getNextCronDate(job.repeatInterval);
|
|
1227
|
+
await this.ctx.collection.updateOne({ _id: job._id }, {
|
|
1228
|
+
$set: {
|
|
1229
|
+
status: JobStatus.PENDING,
|
|
1230
|
+
nextRunAt,
|
|
1231
|
+
failCount: 0,
|
|
1232
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
1233
|
+
},
|
|
1234
|
+
$unset: {
|
|
1235
|
+
lockedAt: "",
|
|
1236
|
+
claimedBy: "",
|
|
1237
|
+
lastHeartbeat: "",
|
|
1238
|
+
heartbeatInterval: "",
|
|
1239
|
+
failReason: ""
|
|
1240
|
+
}
|
|
1241
|
+
});
|
|
1242
|
+
} else {
|
|
1243
|
+
await this.ctx.collection.updateOne({ _id: job._id }, {
|
|
1244
|
+
$set: {
|
|
1245
|
+
status: JobStatus.COMPLETED,
|
|
1246
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
1247
|
+
},
|
|
1248
|
+
$unset: {
|
|
1249
|
+
lockedAt: "",
|
|
1250
|
+
claimedBy: "",
|
|
1251
|
+
lastHeartbeat: "",
|
|
1252
|
+
heartbeatInterval: "",
|
|
1253
|
+
failReason: ""
|
|
1254
|
+
}
|
|
1255
|
+
});
|
|
1256
|
+
job.status = JobStatus.COMPLETED;
|
|
1257
|
+
}
|
|
1258
|
+
}
|
|
1259
|
+
/**
|
|
1260
|
+
* Handle job failure with exponential backoff retry logic.
|
|
1261
|
+
*
|
|
1262
|
+
* Increments `failCount` and calculates next retry time using exponential backoff:
|
|
1263
|
+
* `nextRunAt = 2^failCount × baseRetryInterval` (capped by optional `maxBackoffDelay`).
|
|
1264
|
+
*
|
|
1265
|
+
* If `failCount >= maxRetries`, marks job as permanently `failed`. Otherwise, resets
|
|
1266
|
+
* to `pending` status for retry. Stores error message in `failReason` field.
|
|
1267
|
+
*
|
|
1268
|
+
* @param job - The job that failed
|
|
1269
|
+
* @param error - The error that caused the failure
|
|
1270
|
+
*/
|
|
1271
|
+
async failJob(job, error) {
|
|
1272
|
+
if (!isPersistedJob(job)) return;
|
|
1273
|
+
const newFailCount = job.failCount + 1;
|
|
1274
|
+
if (newFailCount >= this.ctx.options.maxRetries) await this.ctx.collection.updateOne({ _id: job._id }, {
|
|
1275
|
+
$set: {
|
|
1276
|
+
status: JobStatus.FAILED,
|
|
1277
|
+
failCount: newFailCount,
|
|
1278
|
+
failReason: error.message,
|
|
1279
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
1280
|
+
},
|
|
1281
|
+
$unset: {
|
|
1282
|
+
lockedAt: "",
|
|
1283
|
+
claimedBy: "",
|
|
1284
|
+
lastHeartbeat: "",
|
|
1285
|
+
heartbeatInterval: ""
|
|
1286
|
+
}
|
|
1287
|
+
});
|
|
1288
|
+
else {
|
|
1289
|
+
const nextRunAt = calculateBackoff(newFailCount, this.ctx.options.baseRetryInterval, this.ctx.options.maxBackoffDelay);
|
|
1290
|
+
await this.ctx.collection.updateOne({ _id: job._id }, {
|
|
1291
|
+
$set: {
|
|
1292
|
+
status: JobStatus.PENDING,
|
|
1293
|
+
failCount: newFailCount,
|
|
1294
|
+
failReason: error.message,
|
|
1295
|
+
nextRunAt,
|
|
1296
|
+
updatedAt: /* @__PURE__ */ new Date()
|
|
1297
|
+
},
|
|
1298
|
+
$unset: {
|
|
1299
|
+
lockedAt: "",
|
|
1300
|
+
claimedBy: "",
|
|
1301
|
+
lastHeartbeat: "",
|
|
1302
|
+
heartbeatInterval: ""
|
|
1303
|
+
}
|
|
1304
|
+
});
|
|
1305
|
+
}
|
|
1306
|
+
}
|
|
1307
|
+
/**
|
|
1308
|
+
* Update heartbeats for all jobs claimed by this scheduler instance.
|
|
1309
|
+
*
|
|
1310
|
+
* This method runs periodically while the scheduler is running to indicate
|
|
1311
|
+
* that jobs are still being actively processed.
|
|
1312
|
+
*
|
|
1313
|
+
* `lastHeartbeat` is primarily an observability signal (monitoring/debugging).
|
|
1314
|
+
* Stale recovery is based on `lockedAt` + `lockTimeout`.
|
|
1315
|
+
*/
|
|
1316
|
+
async updateHeartbeats() {
|
|
1317
|
+
if (!this.ctx.isRunning()) return;
|
|
1318
|
+
const now = /* @__PURE__ */ new Date();
|
|
1319
|
+
await this.ctx.collection.updateMany({
|
|
1320
|
+
claimedBy: this.ctx.instanceId,
|
|
1321
|
+
status: JobStatus.PROCESSING
|
|
1322
|
+
}, { $set: {
|
|
1323
|
+
lastHeartbeat: now,
|
|
1324
|
+
updatedAt: now
|
|
1325
|
+
} });
|
|
1326
|
+
}
|
|
1327
|
+
};

//#endregion
//#region src/scheduler/services/job-query.ts
/**
* Internal service for job query operations.
*
* Provides read-only access to jobs with filtering and cursor-based pagination.
* All queries use efficient index-backed access patterns.
*
* @internal Not part of public API - use Monque class methods instead.
*/
var JobQueryService = class {
constructor(ctx) {
this.ctx = ctx;
}
/**
* Get a single job by its MongoDB ObjectId.
*
* Useful for retrieving job details when you have a job ID from events,
* logs, or stored references.
*
* @template T - The expected type of the job data payload
* @param id - The job's ObjectId
* @returns Promise resolving to the job if found, null otherwise
* @throws {ConnectionError} If scheduler not initialized
*
* @example Look up job from event
* ```typescript
* monque.on('job:fail', async ({ job }) => {
* // Later, retrieve the job to check its status
* const currentJob = await monque.getJob(job._id);
* console.log(`Job status: ${currentJob?.status}`);
* });
* ```
*
* @example Admin endpoint
* ```typescript
* app.get('/jobs/:id', async (req, res) => {
* const job = await monque.getJob(new ObjectId(req.params.id));
* if (!job) {
* return res.status(404).json({ error: 'Job not found' });
* }
* res.json(job);
* });
* ```
*/
async getJob(id) {
try {
const doc = await this.ctx.collection.findOne({ _id: id });
if (!doc) return null;
return this.ctx.documentToPersistedJob(doc);
} catch (error) {
throw new ConnectionError(`Failed to get job: ${error instanceof Error ? error.message : "Unknown error during getJob"}`, error instanceof Error ? { cause: error } : void 0);
}
}
/**
* Query jobs from the queue with optional filters.
*
* Provides read-only access to job data for monitoring, debugging, and
* administrative purposes. Results are ordered by `nextRunAt` ascending.
*
* @template T - The expected type of the job data payload
* @param filter - Optional filter criteria
* @returns Promise resolving to array of matching jobs
* @throws {ConnectionError} If scheduler not initialized
*
* @example Get all pending jobs
* ```typescript
* const pendingJobs = await monque.getJobs({ status: JobStatus.PENDING });
* console.log(`${pendingJobs.length} jobs waiting`);
* ```
*
* @example Get failed email jobs
* ```typescript
* const failedEmails = await monque.getJobs({
* name: 'send-email',
* status: JobStatus.FAILED,
* });
* for (const job of failedEmails) {
* console.error(`Job ${job._id} failed: ${job.failReason}`);
* }
* ```
*
* @example Paginated job listing
* ```typescript
* const page1 = await monque.getJobs({ limit: 50, skip: 0 });
* const page2 = await monque.getJobs({ limit: 50, skip: 50 });
* ```
*
* @example Use with type guards from @monque/core
* ```typescript
* import { isPendingJob, isRecurringJob } from '@monque/core';
*
* const jobs = await monque.getJobs();
* const pendingRecurring = jobs.filter(job => isPendingJob(job) && isRecurringJob(job));
* ```
*/
async getJobs(filter = {}) {
const query = {};
if (filter.name !== void 0) query["name"] = filter.name;
if (filter.status !== void 0) if (Array.isArray(filter.status)) query["status"] = { $in: filter.status };
else query["status"] = filter.status;
const limit = filter.limit ?? 100;
const skip = filter.skip ?? 0;
try {
return (await this.ctx.collection.find(query).sort({ nextRunAt: 1 }).skip(skip).limit(limit).toArray()).map((doc) => this.ctx.documentToPersistedJob(doc));
} catch (error) {
throw new ConnectionError(`Failed to query jobs: ${error instanceof Error ? error.message : "Unknown error during getJobs"}`, error instanceof Error ? { cause: error } : void 0);
}
}
/**
* Get a paginated list of jobs using opaque cursors.
*
* Provides stable pagination for large job lists. Supports forward and backward
* navigation, filtering, and efficient database access via index-based cursor queries.
*
* @template T - The job data payload type
* @param options - Pagination options (cursor, limit, direction, filter)
* @returns Page of jobs with next/prev cursors
* @throws {InvalidCursorError} If the provided cursor is malformed
* @throws {ConnectionError} If database operation fails or scheduler not initialized
*
* @example List pending jobs
* ```typescript
* const page = await monque.getJobsWithCursor({
* limit: 20,
* filter: { status: 'pending' }
* });
* const jobs = page.jobs;
*
* // Get next page
* if (page.hasNextPage) {
* const page2 = await monque.getJobsWithCursor({
* cursor: page.cursor,
* limit: 20
* });
* }
* ```
*/
async getJobsWithCursor(options = {}) {
const limit = options.limit ?? 50;
const direction = options.direction ?? CursorDirection.FORWARD;
let anchorId = null;
if (options.cursor) anchorId = decodeCursor(options.cursor).id;
const query = options.filter ? buildSelectorQuery(options.filter) : {};
const sortDir = direction === CursorDirection.FORWARD ? 1 : -1;
if (anchorId) if (direction === CursorDirection.FORWARD) query._id = {
...query._id,
$gt: anchorId
};
else query._id = {
...query._id,
$lt: anchorId
};
const fetchLimit = limit + 1;
let docs;
try {
docs = await this.ctx.collection.find(query).sort({ _id: sortDir }).limit(fetchLimit).toArray();
} catch (error) {
throw new ConnectionError(`Failed to query jobs with cursor: ${error instanceof Error ? error.message : "Unknown error during getJobsWithCursor"}`, error instanceof Error ? { cause: error } : void 0);
}
let hasMore = false;
if (docs.length > limit) {
hasMore = true;
docs.pop();
}
if (direction === CursorDirection.BACKWARD) docs.reverse();
const jobs = docs.map((doc) => this.ctx.documentToPersistedJob(doc));
let nextCursor = null;
if (jobs.length > 0) {
const lastJob = jobs[jobs.length - 1];
if (lastJob) nextCursor = encodeCursor(lastJob._id, direction);
}
let hasNextPage = false;
let hasPreviousPage = false;
if (direction === CursorDirection.FORWARD) {
hasNextPage = hasMore;
hasPreviousPage = !!anchorId;
} else {
hasNextPage = !!anchorId;
hasPreviousPage = hasMore;
}
return {
jobs,
cursor: nextCursor,
hasNextPage,
hasPreviousPage
};
}
/**
* Get aggregate statistics for the job queue.
*
* Uses MongoDB aggregation pipeline for efficient server-side calculation.
* Returns counts per status and optional average processing duration for completed jobs.
*
* @param filter - Optional filter to scope statistics by job name
* @returns Promise resolving to queue statistics
* @throws {AggregationTimeoutError} If aggregation exceeds 30 second timeout
* @throws {ConnectionError} If database operation fails
*
* @example Get overall queue statistics
* ```typescript
* const stats = await monque.getQueueStats();
* console.log(`Pending: ${stats.pending}, Failed: ${stats.failed}`);
* ```
*
* @example Get statistics for a specific job type
* ```typescript
* const emailStats = await monque.getQueueStats({ name: 'send-email' });
* console.log(`${emailStats.total} email jobs in queue`);
* ```
*/
async getQueueStats(filter) {
const matchStage = {};
if (filter?.name) matchStage["name"] = filter.name;
const pipeline = [...Object.keys(matchStage).length > 0 ? [{ $match: matchStage }] : [], { $facet: {
statusCounts: [{ $group: {
_id: "$status",
count: { $sum: 1 }
} }],
avgDuration: [{ $match: { status: JobStatus.COMPLETED } }, { $group: {
_id: null,
avgMs: { $avg: { $subtract: ["$updatedAt", "$createdAt"] } }
} }],
total: [{ $count: "count" }]
} }];
try {
const result = (await this.ctx.collection.aggregate(pipeline, { maxTimeMS: 3e4 }).toArray())[0];
const stats = {
pending: 0,
processing: 0,
completed: 0,
failed: 0,
cancelled: 0,
total: 0
};
if (!result) return stats;
const statusCounts = result["statusCounts"];
for (const entry of statusCounts) {
const status = entry._id;
const count = entry.count;
switch (status) {
case JobStatus.PENDING:
stats.pending = count;
break;
case JobStatus.PROCESSING:
stats.processing = count;
break;
case JobStatus.COMPLETED:
stats.completed = count;
break;
case JobStatus.FAILED:
stats.failed = count;
break;
case JobStatus.CANCELLED:
stats.cancelled = count;
break;
}
}
const totalResult = result["total"];
if (totalResult.length > 0 && totalResult[0]) stats.total = totalResult[0].count;
const avgDurationResult = result["avgDuration"];
if (avgDurationResult.length > 0 && avgDurationResult[0]) {
const avgMs = avgDurationResult[0].avgMs;
if (typeof avgMs === "number" && !Number.isNaN(avgMs)) stats.avgProcessingDurationMs = Math.round(avgMs);
}
return stats;
} catch (error) {
if (error instanceof Error && error.message.includes("exceeded time limit")) throw new AggregationTimeoutError();
throw new ConnectionError(`Failed to get queue stats: ${error instanceof Error ? error.message : "Unknown error during getQueueStats"}`, error instanceof Error ? { cause: error } : void 0);
}
}
};
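/*
* Editorial note: an illustrative pattern (not part of the bundle) for walking
* every page with the cursor API above. Field names follow the documented page
* shape (jobs, cursor, hasNextPage); the page size and inspect() callback are
* placeholders.
*
*   let cursor;
*   while (true) {
*     const page = await monque.getJobsWithCursor({ cursor, limit: 100 });
*     for (const job of page.jobs) inspect(job); // inspect() is a placeholder
*     if (!page.hasNextPage || !page.cursor) break;
*     cursor = page.cursor;
*   }
*/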

//#endregion
//#region src/scheduler/services/job-scheduler.ts
/**
* Internal service for job scheduling operations.
*
* Handles enqueueing new jobs, immediate dispatch, and cron scheduling.
* All operations are atomic and support deduplication via uniqueKey.
*
* @internal Not part of public API - use Monque class methods instead.
*/
var JobScheduler = class {
constructor(ctx) {
this.ctx = ctx;
}
/**
* Enqueue a job for processing.
*
* Jobs are stored in MongoDB and processed by registered workers. Supports
* delayed execution via `runAt` and deduplication via `uniqueKey`.
*
* When a `uniqueKey` is provided, only one pending or processing job with that key
* can exist. Completed or failed jobs don't block new jobs with the same key.
*
* Failed jobs are automatically retried with exponential backoff up to `maxRetries`
* (default: 10 attempts). The delay between retries is calculated as `2^failCount × baseRetryInterval`.
*
* @template T - The job data payload type (must be JSON-serializable)
* @param name - Job type identifier, must match a registered worker
* @param data - Job payload, will be passed to the worker handler
* @param options - Scheduling and deduplication options
* @returns Promise resolving to the created or existing job document
* @throws {ConnectionError} If database operation fails or scheduler not initialized
*
* @example Basic job enqueueing
* ```typescript
* await monque.enqueue('send-email', {
* to: 'user@example.com',
* subject: 'Welcome!',
* body: 'Thanks for signing up.'
* });
* ```
*
* @example Delayed execution
* ```typescript
* const oneHourLater = new Date(Date.now() + 3600000);
* await monque.enqueue('reminder', { message: 'Check in!' }, {
* runAt: oneHourLater
* });
* ```
*
* @example Prevent duplicates with unique key
* ```typescript
* await monque.enqueue('sync-user', { userId: '123' }, {
* uniqueKey: 'sync-user-123'
* });
* // Subsequent enqueues with same uniqueKey return existing pending/processing job
* ```
*/
async enqueue(name, data, options = {}) {
const now = /* @__PURE__ */ new Date();
const job = {
name,
data,
status: JobStatus.PENDING,
nextRunAt: options.runAt ?? now,
failCount: 0,
createdAt: now,
updatedAt: now
};
if (options.uniqueKey) job.uniqueKey = options.uniqueKey;
try {
if (options.uniqueKey) {
const result$1 = await this.ctx.collection.findOneAndUpdate({
name,
uniqueKey: options.uniqueKey,
status: { $in: [JobStatus.PENDING, JobStatus.PROCESSING] }
}, { $setOnInsert: job }, {
upsert: true,
returnDocument: "after"
});
if (!result$1) throw new ConnectionError("Failed to enqueue job: findOneAndUpdate returned no document");
return this.ctx.documentToPersistedJob(result$1);
}
const result = await this.ctx.collection.insertOne(job);
return {
...job,
_id: result.insertedId
};
} catch (error) {
if (error instanceof ConnectionError) throw error;
throw new ConnectionError(`Failed to enqueue job: ${error instanceof Error ? error.message : "Unknown error during enqueue"}`, error instanceof Error ? { cause: error } : void 0);
}
}
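/*
* Editorial note: an illustrative consequence of the findOneAndUpdate upsert
* above (not part of the bundle). Sequential enqueues that share a uniqueKey
* resolve to the same document while it is still pending or processing, so the
* returned ObjectIds compare equal.
*
*   const first = await monque.enqueue('sync-user', { userId: '123' }, { uniqueKey: 'sync-user-123' });
*   const second = await monque.enqueue('sync-user', { userId: '123' }, { uniqueKey: 'sync-user-123' });
*   console.log(first._id.equals(second._id)); // true while the first job is pending/processing
*/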
/**
* Enqueue a job for immediate processing.
*
* Convenience method equivalent to `enqueue(name, data, { runAt: new Date() })`.
* Jobs are picked up on the next poll cycle (typically within 1 second based on `pollInterval`).
*
* @template T - The job data payload type (must be JSON-serializable)
* @param name - Job type identifier, must match a registered worker
* @param data - Job payload, will be passed to the worker handler
* @returns Promise resolving to the created job document
* @throws {ConnectionError} If database operation fails or scheduler not initialized
*
* @example Send email immediately
* ```typescript
* await monque.now('send-email', {
* to: 'admin@example.com',
* subject: 'Alert',
* body: 'Immediate attention required'
* });
* ```
*
* @example Process order in background
* ```typescript
* const order = await createOrder(data);
* await monque.now('process-order', { orderId: order.id });
* return order; // Return immediately, processing happens async
* ```
*/
async now(name, data) {
return this.enqueue(name, data, { runAt: /* @__PURE__ */ new Date() });
}
/**
* Schedule a recurring job with a cron expression.
*
* Creates a job that automatically re-schedules itself based on the cron pattern.
* Uses standard 5-field cron format: minute, hour, day of month, month, day of week.
* Also supports predefined expressions like `@daily`, `@weekly`, `@monthly`, etc.
* After successful completion, the job is reset to `pending` status and scheduled
* for its next run based on the cron expression.
*
* When a `uniqueKey` is provided, only one pending or processing job with that key
* can exist. This prevents duplicate scheduled jobs on application restart.
*
* @template T - The job data payload type (must be JSON-serializable)
* @param cron - Cron expression (5 fields or predefined expression)
* @param name - Job type identifier, must match a registered worker
* @param data - Job payload, will be passed to the worker handler on each run
* @param options - Scheduling options (uniqueKey for deduplication)
* @returns Promise resolving to the created job document with `repeatInterval` set
* @throws {InvalidCronError} If cron expression is invalid
* @throws {ConnectionError} If database operation fails or scheduler not initialized
*
* @example Hourly cleanup job
* ```typescript
* await monque.schedule('0 * * * *', 'cleanup-temp-files', {
* directory: '/tmp/uploads'
* });
* ```
*
* @example Prevent duplicate scheduled jobs with unique key
* ```typescript
* await monque.schedule('0 * * * *', 'hourly-report', { type: 'sales' }, {
* uniqueKey: 'hourly-report-sales'
* });
* // Subsequent calls with same uniqueKey return existing pending/processing job
* ```
*
* @example Daily report at midnight (using predefined expression)
* ```typescript
* await monque.schedule('@daily', 'daily-report', {
* reportType: 'sales',
* recipients: ['analytics@example.com']
* });
* ```
*/
async schedule(cron, name, data, options = {}) {
const nextRunAt = getNextCronDate(cron);
const now = /* @__PURE__ */ new Date();
const job = {
name,
data,
status: JobStatus.PENDING,
nextRunAt,
repeatInterval: cron,
failCount: 0,
createdAt: now,
updatedAt: now
};
if (options.uniqueKey) job.uniqueKey = options.uniqueKey;
try {
if (options.uniqueKey) {
const result$1 = await this.ctx.collection.findOneAndUpdate({
name,
uniqueKey: options.uniqueKey,
status: { $in: [JobStatus.PENDING, JobStatus.PROCESSING] }
}, { $setOnInsert: job }, {
upsert: true,
returnDocument: "after"
});
if (!result$1) throw new ConnectionError("Failed to schedule job: findOneAndUpdate returned no document");
return this.ctx.documentToPersistedJob(result$1);
}
const result = await this.ctx.collection.insertOne(job);
return {
...job,
_id: result.insertedId
};
} catch (error) {
if (error instanceof MonqueError) throw error;
throw new ConnectionError(`Failed to schedule job: ${error instanceof Error ? error.message : "Unknown error during schedule"}`, error instanceof Error ? { cause: error } : void 0);
}
}
};

//#endregion
//#region src/scheduler/monque.ts
/**
* Default configuration values
*/
const DEFAULTS = {
collectionName: "monque_jobs",
pollInterval: 1e3,
maxRetries: 10,
baseRetryInterval: 1e3,
shutdownTimeout: 3e4,
defaultConcurrency: 5,
lockTimeout: 18e5,
recoverStaleJobs: true,
heartbeatInterval: 3e4,
retentionInterval: 36e5
};
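/*
* Editorial note: the exponent literals above expressed in human units (all
* values are milliseconds): pollInterval 1e3 = 1 s, baseRetryInterval 1e3 = 1 s,
* shutdownTimeout 3e4 = 30 s, lockTimeout 18e5 = 30 min, heartbeatInterval
* 3e4 = 30 s, retentionInterval 36e5 = 1 h. The constructor below accepts the
* same keys as overrides, e.g.:
*
*   const monque = new Monque(db, { lockTimeout: 10 * 60 * 1000 }); // 10 min
*/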
/**
* Monque - MongoDB-backed job scheduler
*
* A type-safe job scheduler with atomic locking, exponential backoff, cron scheduling,
* stale job recovery, and event-driven observability. Built on native MongoDB driver.
*
* @example Complete lifecycle
* ```typescript
* import { Monque } from '@monque/core';
* import { MongoClient } from 'mongodb';
*
* const client = new MongoClient('mongodb://localhost:27017');
* await client.connect();
* const db = client.db('myapp');
*
* // Create instance with options
* const monque = new Monque(db, {
* collectionName: 'jobs',
* pollInterval: 1000,
* maxRetries: 10,
* shutdownTimeout: 30000,
* });
*
* // Initialize (sets up indexes and recovers stale jobs)
* await monque.initialize();
*
* // Register workers with type safety
* type EmailJob = {
* to: string;
* subject: string;
* body: string;
* };
*
* monque.register<EmailJob>('send-email', async (job) => {
* await emailService.send(job.data.to, job.data.subject, job.data.body);
* });
*
* // Monitor events for observability
* monque.on('job:complete', ({ job, duration }) => {
* logger.info(`Job ${job.name} completed in ${duration}ms`);
* });
*
* monque.on('job:fail', ({ job, error, willRetry }) => {
* logger.error(`Job ${job.name} failed:`, error);
* });
*
* // Start processing
* monque.start();
*
* // Enqueue jobs
* await monque.enqueue('send-email', {
* to: 'user@example.com',
* subject: 'Welcome!',
* body: 'Thanks for signing up.'
* });
*
* // Graceful shutdown
* process.on('SIGTERM', async () => {
* await monque.stop();
* await client.close();
* process.exit(0);
* });
* ```
*/
var Monque = class extends node_events.EventEmitter {
db;
options;
collection = null;
workers = /* @__PURE__ */ new Map();
pollIntervalId = null;
heartbeatIntervalId = null;
cleanupIntervalId = null;
isRunning = false;
isInitialized = false;
_scheduler = null;
_manager = null;
_query = null;
_processor = null;
_changeStreamHandler = null;
constructor(db, options = {}) {
super();
this.db = db;
this.options = {
collectionName: options.collectionName ?? DEFAULTS.collectionName,
pollInterval: options.pollInterval ?? DEFAULTS.pollInterval,
maxRetries: options.maxRetries ?? DEFAULTS.maxRetries,
baseRetryInterval: options.baseRetryInterval ?? DEFAULTS.baseRetryInterval,
shutdownTimeout: options.shutdownTimeout ?? DEFAULTS.shutdownTimeout,
defaultConcurrency: options.defaultConcurrency ?? DEFAULTS.defaultConcurrency,
lockTimeout: options.lockTimeout ?? DEFAULTS.lockTimeout,
recoverStaleJobs: options.recoverStaleJobs ?? DEFAULTS.recoverStaleJobs,
maxBackoffDelay: options.maxBackoffDelay,
schedulerInstanceId: options.schedulerInstanceId ?? (0, node_crypto.randomUUID)(),
heartbeatInterval: options.heartbeatInterval ?? DEFAULTS.heartbeatInterval,
jobRetention: options.jobRetention
};
}
/**
* Initialize the scheduler by setting up the MongoDB collection and indexes.
* Must be called before start().
*
* @throws {ConnectionError} If collection or index creation fails
*/
async initialize() {
if (this.isInitialized) return;
try {
this.collection = this.db.collection(this.options.collectionName);
await this.createIndexes();
if (this.options.recoverStaleJobs) await this.recoverStaleJobs();
const ctx = this.buildContext();
this._scheduler = new JobScheduler(ctx);
this._manager = new JobManager(ctx);
this._query = new JobQueryService(ctx);
this._processor = new JobProcessor(ctx);
this._changeStreamHandler = new ChangeStreamHandler(ctx, () => this.processor.poll());
this.isInitialized = true;
} catch (error) {
throw new ConnectionError(`Failed to initialize Monque: ${error instanceof Error ? error.message : "Unknown error during initialization"}`);
}
}
/** @throws {ConnectionError} if not initialized */
get scheduler() {
if (!this._scheduler) throw new ConnectionError("Monque not initialized. Call initialize() first.");
return this._scheduler;
}
/** @throws {ConnectionError} if not initialized */
get manager() {
if (!this._manager) throw new ConnectionError("Monque not initialized. Call initialize() first.");
return this._manager;
}
/** @throws {ConnectionError} if not initialized */
get query() {
if (!this._query) throw new ConnectionError("Monque not initialized. Call initialize() first.");
return this._query;
}
/** @throws {ConnectionError} if not initialized */
get processor() {
if (!this._processor) throw new ConnectionError("Monque not initialized. Call initialize() first.");
return this._processor;
}
/** @throws {ConnectionError} if not initialized */
get changeStreamHandler() {
if (!this._changeStreamHandler) throw new ConnectionError("Monque not initialized. Call initialize() first.");
return this._changeStreamHandler;
}
/**
* Build the shared context for internal services.
*/
buildContext() {
if (!this.collection) throw new ConnectionError("Collection not initialized");
return {
collection: this.collection,
options: this.options,
instanceId: this.options.schedulerInstanceId,
workers: this.workers,
isRunning: () => this.isRunning,
emit: (event, payload) => this.emit(event, payload),
documentToPersistedJob: (doc) => this.documentToPersistedJob(doc)
};
}
/**
* Create required MongoDB indexes for efficient job processing.
*
* The following indexes are created:
* - `{status, nextRunAt}` - For efficient job polling queries
* - `{name, uniqueKey}` - Partial unique index for deduplication (pending/processing only)
* - `{name, status}` - For job lookup by type
* - `{claimedBy, status}` - For finding jobs owned by a specific scheduler instance
* - `{lastHeartbeat, status}` - For monitoring/debugging queries (e.g., inspecting heartbeat age)
* - `{status, nextRunAt, claimedBy}` - For atomic claim queries (find unclaimed pending jobs)
* - `{lockedAt, lastHeartbeat, status}` - Supports recovery scans and monitoring access patterns
*/
async createIndexes() {
if (!this.collection) throw new ConnectionError("Collection not initialized");
await this.collection.createIndex({
status: 1,
nextRunAt: 1
}, { background: true });
await this.collection.createIndex({
name: 1,
uniqueKey: 1
}, {
unique: true,
partialFilterExpression: {
uniqueKey: { $exists: true },
status: { $in: [JobStatus.PENDING, JobStatus.PROCESSING] }
},
background: true
});
await this.collection.createIndex({
name: 1,
status: 1
}, { background: true });
await this.collection.createIndex({
claimedBy: 1,
status: 1
}, { background: true });
await this.collection.createIndex({
lastHeartbeat: 1,
status: 1
}, { background: true });
await this.collection.createIndex({
status: 1,
nextRunAt: 1,
claimedBy: 1
}, { background: true });
await this.collection.createIndex({
status: 1,
lockedAt: 1,
lastHeartbeat: 1
}, { background: true });
}
/**
* Recover stale jobs that were left in 'processing' status.
* A job is considered stale if its `lockedAt` timestamp exceeds the configured `lockTimeout`.
* Stale jobs are reset to 'pending' so they can be picked up by workers again.
*/
async recoverStaleJobs() {
if (!this.collection) return;
const staleThreshold = new Date(Date.now() - this.options.lockTimeout);
const result = await this.collection.updateMany({
status: JobStatus.PROCESSING,
lockedAt: { $lt: staleThreshold }
}, {
$set: {
status: JobStatus.PENDING,
updatedAt: /* @__PURE__ */ new Date()
},
$unset: {
lockedAt: "",
claimedBy: "",
lastHeartbeat: "",
heartbeatInterval: ""
}
});
if (result.modifiedCount > 0) this.emit("stale:recovered", { count: result.modifiedCount });
}
/**
* Clean up old completed and failed jobs based on retention policy.
*
* - Removes completed jobs older than `jobRetention.completed`
* - Removes failed jobs older than `jobRetention.failed`
*
* The cleanup runs concurrently for both statuses if configured.
*
* @returns Promise resolving when all deletion operations complete
*/
async cleanupJobs() {
if (!this.collection || !this.options.jobRetention) return;
const { completed, failed } = this.options.jobRetention;
const now = Date.now();
const deletions = [];
if (completed) {
const cutoff = new Date(now - completed);
deletions.push(this.collection.deleteMany({
status: JobStatus.COMPLETED,
updatedAt: { $lt: cutoff }
}));
}
if (failed) {
const cutoff = new Date(now - failed);
deletions.push(this.collection.deleteMany({
status: JobStatus.FAILED,
updatedAt: { $lt: cutoff }
}));
}
if (deletions.length > 0) await Promise.all(deletions);
}
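/*
* Editorial note: an illustrative retention configuration (an assumption based
* on how cleanupJobs() reads the option, not taken from the package docs).
* `jobRetention.completed` and `jobRetention.failed` are millisecond ages; the
* cleanup interval falls back to DEFAULTS.retentionInterval when not set.
*
*   const monque = new Monque(db, {
*     jobRetention: {
*       completed: 24 * 60 * 60 * 1000, // drop completed jobs after 1 day
*       failed: 7 * 24 * 60 * 60 * 1000 // keep failed jobs for 7 days
*     }
*   });
*/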
/**
* Enqueue a job for processing.
*
* Jobs are stored in MongoDB and processed by registered workers. Supports
* delayed execution via `runAt` and deduplication via `uniqueKey`.
740
2099
|
*
|
|
741
2100
|
* When a `uniqueKey` is provided, only one pending or processing job with that key
|
|
742
|
-
* can exist.
|
|
2101
|
+
* can exist. Completed or failed jobs don't block new jobs with the same key.
|
|
2102
|
+
*
|
|
2103
|
+
* Failed jobs are automatically retried with exponential backoff up to `maxRetries`
|
|
2104
|
+
* (default: 10 attempts). The delay between retries is calculated as `2^failCount × baseRetryInterval`.
|
|
743
2105
|
*
|
|
744
2106
|
* @template T - The job data payload type (must be JSON-serializable)
|
|
745
|
-
* @param cron - Cron expression (5 fields or predefined expression)
|
|
746
2107
|
* @param name - Job type identifier, must match a registered worker
|
|
747
|
-
* @param data - Job payload, will be passed to the worker handler
|
|
748
|
-
* @param options - Scheduling
|
|
749
|
-
* @returns Promise resolving to the created job document
|
|
750
|
-
* @throws {InvalidCronError} If cron expression is invalid
|
|
2108
|
+
* @param data - Job payload, will be passed to the worker handler
|
|
2109
|
+
* @param options - Scheduling and deduplication options
|
|
2110
|
+
* @returns Promise resolving to the created or existing job document
|
|
751
2111
|
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
752
2112
|
*
|
|
753
|
-
* @example
|
|
2113
|
+
* @example Basic job enqueueing
|
|
754
2114
|
* ```typescript
|
|
755
|
-
* await monque.
|
|
756
|
-
*
|
|
2115
|
+
* await monque.enqueue('send-email', {
|
|
2116
|
+
* to: 'user@example.com',
|
|
2117
|
+
* subject: 'Welcome!',
|
|
2118
|
+
* body: 'Thanks for signing up.'
|
|
757
2119
|
* });
|
|
758
2120
|
* ```
|
|
759
2121
|
*
|
|
760
|
-
* @example
|
|
2122
|
+
* @example Delayed execution
|
|
761
2123
|
* ```typescript
|
|
762
|
-
*
|
|
763
|
-
*
|
|
2124
|
+
* const oneHourLater = new Date(Date.now() + 3600000);
|
|
2125
|
+
* await monque.enqueue('reminder', { message: 'Check in!' }, {
|
|
2126
|
+
* runAt: oneHourLater
|
|
764
2127
|
* });
|
|
765
|
-
* // Subsequent calls with same uniqueKey return existing pending/processing job
|
|
766
2128
|
* ```
|
|
767
2129
|
*
|
|
768
|
-
* @example
|
|
2130
|
+
* @example Prevent duplicates with unique key
|
|
769
2131
|
* ```typescript
|
|
770
|
-
* await monque.
|
|
771
|
-
*
|
|
772
|
-
* recipients: ['analytics@example.com']
|
|
2132
|
+
* await monque.enqueue('sync-user', { userId: '123' }, {
|
|
2133
|
+
* uniqueKey: 'sync-user-123'
|
|
773
2134
|
* });
|
|
2135
|
+
* // Subsequent enqueues with same uniqueKey return existing pending/processing job
|
|
774
2136
|
* ```
|
|
775
2137
|
*/
|
|
776
|
-
async
|
|
2138
|
+
async enqueue(name, data, options = {}) {
|
|
777
2139
|
this.ensureInitialized();
|
|
778
|
-
|
|
779
|
-
const now = /* @__PURE__ */ new Date();
|
|
780
|
-
const job = {
|
|
781
|
-
name,
|
|
782
|
-
data,
|
|
783
|
-
status: JobStatus.PENDING,
|
|
784
|
-
nextRunAt,
|
|
785
|
-
repeatInterval: cron,
|
|
786
|
-
failCount: 0,
|
|
787
|
-
createdAt: now,
|
|
788
|
-
updatedAt: now
|
|
789
|
-
};
|
|
790
|
-
if (options.uniqueKey) job.uniqueKey = options.uniqueKey;
|
|
791
|
-
try {
|
|
792
|
-
if (options.uniqueKey) {
|
|
793
|
-
if (!this.collection) throw new require_errors.ConnectionError("Failed to schedule job: collection not available");
|
|
794
|
-
const result$1 = await this.collection.findOneAndUpdate({
|
|
795
|
-
name,
|
|
796
|
-
uniqueKey: options.uniqueKey,
|
|
797
|
-
status: { $in: [JobStatus.PENDING, JobStatus.PROCESSING] }
|
|
798
|
-
}, { $setOnInsert: job }, {
|
|
799
|
-
upsert: true,
|
|
800
|
-
returnDocument: "after"
|
|
801
|
-
});
|
|
802
|
-
if (!result$1) throw new require_errors.ConnectionError("Failed to schedule job: findOneAndUpdate returned no document");
|
|
803
|
-
return this.documentToPersistedJob(result$1);
|
|
804
|
-
}
|
|
805
|
-
const result = await this.collection?.insertOne(job);
|
|
806
|
-
if (!result) throw new require_errors.ConnectionError("Failed to schedule job: collection not available");
|
|
807
|
-
return {
|
|
808
|
-
...job,
|
|
809
|
-
_id: result.insertedId
|
|
810
|
-
};
|
|
811
|
-
} catch (error) {
|
|
812
|
-
if (error instanceof require_errors.MonqueError) throw error;
|
|
813
|
-
throw new require_errors.ConnectionError(`Failed to schedule job: ${error instanceof Error ? error.message : "Unknown error during schedule"}`, error instanceof Error ? { cause: error } : void 0);
|
|
814
|
-
}
|
|
2140
|
+
return this.scheduler.enqueue(name, data, options);
|
|
815
2141
|
}
|
|
816
2142
|
/**
|
|
817
|
-
*
|
|
818
|
-
*
|
|
819
|
-
* Workers can be registered before or after calling `start()`. Each worker
|
|
820
|
-
* processes jobs concurrently up to its configured concurrency limit (default: 5).
|
|
821
|
-
*
|
|
822
|
-
* The handler function receives the full job object including metadata (`_id`, `status`,
|
|
823
|
-
* `failCount`, etc.). If the handler throws an error, the job is retried with exponential
|
|
824
|
-
* backoff up to `maxRetries` times. After exhausting retries, the job is marked as `failed`.
|
|
2143
|
+
* Enqueue a job for immediate processing.
|
|
825
2144
|
*
|
|
826
|
-
*
|
|
2145
|
+
* Convenience method equivalent to `enqueue(name, data, { runAt: new Date() })`.
|
|
2146
|
+
* Jobs are picked up on the next poll cycle (typically within 1 second based on `pollInterval`).
|
|
827
2147
|
*
|
|
828
|
-
*
|
|
829
|
-
*
|
|
830
|
-
*
|
|
2148
|
+
* @template T - The job data payload type (must be JSON-serializable)
|
|
2149
|
+
* @param name - Job type identifier, must match a registered worker
|
|
2150
|
+
* @param data - Job payload, will be passed to the worker handler
|
|
2151
|
+
* @returns Promise resolving to the created job document
|
|
2152
|
+
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
831
2153
|
*
|
|
832
|
-
* @
|
|
833
|
-
*
|
|
834
|
-
*
|
|
835
|
-
* @
|
|
836
|
-
*
|
|
837
|
-
*
|
|
838
|
-
*
|
|
2154
|
+
* @example Send email immediately
|
|
2155
|
+
* ```typescript
|
|
2156
|
+
* await monque.now('send-email', {
|
|
2157
|
+
* to: 'admin@example.com',
|
|
2158
|
+
* subject: 'Alert',
|
|
2159
|
+
* body: 'Immediate attention required'
|
|
2160
|
+
* });
|
|
2161
|
+
* ```
|
|
839
2162
|
*
|
|
840
|
-
* @example
|
|
2163
|
+
* @example Process order in background
|
|
841
2164
|
* ```typescript
|
|
842
|
-
*
|
|
843
|
-
*
|
|
844
|
-
*
|
|
845
|
-
*
|
|
846
|
-
|
|
2165
|
+
* const order = await createOrder(data);
|
|
2166
|
+
* await monque.now('process-order', { orderId: order.id });
|
|
2167
|
+
* return order; // Return immediately, processing happens async
|
|
2168
|
+
* ```
|
|
2169
|
+
*/
|
|
2170
|
+
async now(name, data) {
|
|
2171
|
+
this.ensureInitialized();
|
|
2172
|
+
return this.scheduler.now(name, data);
|
|
2173
|
+
}
|
|
2174
|
+
/**
|
|
2175
|
+
* Schedule a recurring job with a cron expression.
|
|
2176
|
+
*
|
|
2177
|
+
* Creates a job that automatically re-schedules itself based on the cron pattern.
|
|
2178
|
+
* Uses standard 5-field cron format: minute, hour, day of month, month, day of week.
|
|
2179
|
+
* Also supports predefined expressions like `@daily`, `@weekly`, `@monthly`, etc.
|
|
2180
|
+
* After successful completion, the job is reset to `pending` status and scheduled
|
|
2181
|
+
* for its next run based on the cron expression.
|
|
847
2182
|
*
|
|
848
|
-
*
|
|
849
|
-
*
|
|
850
|
-
* });
|
|
851
|
-
* ```
|
|
2183
|
+
* When a `uniqueKey` is provided, only one pending or processing job with that key
|
|
2184
|
+
* can exist. This prevents duplicate scheduled jobs on application restart.
|
|
852
2185
|
*
|
|
853
|
-
* @
|
|
2186
|
+
* @template T - The job data payload type (must be JSON-serializable)
|
|
2187
|
+
* @param cron - Cron expression (5 fields or predefined expression)
|
|
2188
|
+
* @param name - Job type identifier, must match a registered worker
|
|
2189
|
+
* @param data - Job payload, will be passed to the worker handler on each run
|
|
2190
|
+
* @param options - Scheduling options (uniqueKey for deduplication)
|
|
2191
|
+
* @returns Promise resolving to the created job document with `repeatInterval` set
|
|
2192
|
+
* @throws {InvalidCronError} If cron expression is invalid
|
|
2193
|
+
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
2194
|
+
*
|
|
2195
|
+
* @example Hourly cleanup job
|
|
854
2196
|
* ```typescript
|
|
855
|
-
*
|
|
856
|
-
*
|
|
857
|
-
*
|
|
858
|
-
* }, { concurrency: 2 });
|
|
2197
|
+
* await monque.schedule('0 * * * *', 'cleanup-temp-files', {
|
|
2198
|
+
* directory: '/tmp/uploads'
|
|
2199
|
+
* });
|
|
859
2200
|
* ```
|
|
860
2201
|
*
|
|
861
|
-
* @example
|
|
2202
|
+
* @example Prevent duplicate scheduled jobs with unique key
|
|
862
2203
|
* ```typescript
|
|
863
|
-
*
|
|
864
|
-
*
|
|
2204
|
+
* await monque.schedule('0 * * * *', 'hourly-report', { type: 'sales' }, {
|
|
2205
|
+
* uniqueKey: 'hourly-report-sales'
|
|
2206
|
+
* });
|
|
2207
|
+
* // Subsequent calls with same uniqueKey return existing pending/processing job
|
|
865
2208
|
* ```
|
|
866
2209
|
*
|
|
867
|
-
* @example
|
|
2210
|
+
* @example Daily report at midnight (using predefined expression)
|
|
868
2211
|
* ```typescript
|
|
869
|
-
* monque.
|
|
870
|
-
*
|
|
871
|
-
*
|
|
872
|
-
* } catch (error) {
|
|
873
|
-
* // Job will retry with exponential backoff
|
|
874
|
-
* // Delay = 2^failCount × baseRetryInterval (default: 1000ms)
|
|
875
|
-
* throw new Error(`Sync failed: ${error.message}`);
|
|
876
|
-
* }
|
|
2212
|
+
* await monque.schedule('@daily', 'daily-report', {
|
|
2213
|
+
* reportType: 'sales',
|
|
2214
|
+
* recipients: ['analytics@example.com']
|
|
877
2215
|
* });
|
|
878
2216
|
* ```
|
|
879
2217
|
*/
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
this.workers.set(name, {
|
|
884
|
-
handler,
|
|
885
|
-
concurrency,
|
|
886
|
-
activeJobs: /* @__PURE__ */ new Map()
|
|
887
|
-
});
|
|
2218
|
+
async schedule(cron, name, data, options = {}) {
|
|
2219
|
+
this.ensureInitialized();
|
|
2220
|
+
return this.scheduler.schedule(cron, name, data, options);
|
|
888
2221
|
}
|
|
889
2222
|
/**
|
|
890
|
-
*
|
|
2223
|
+
* Cancel a pending or scheduled job.
|
|
891
2224
|
*
|
|
892
|
-
*
|
|
893
|
-
*
|
|
894
|
-
*
|
|
2225
|
+
* Sets the job status to 'cancelled' and emits a 'job:cancelled' event.
|
|
2226
|
+
* If the job is already cancelled, this is a no-op and returns the job.
|
|
2227
|
+
* Cannot cancel jobs that are currently 'processing', 'completed', or 'failed'.
|
|
895
2228
|
*
|
|
896
|
-
*
|
|
897
|
-
* The
|
|
2229
|
+
* @param jobId - The ID of the job to cancel
|
|
2230
|
+
* @returns The cancelled job, or null if not found
|
|
2231
|
+
* @throws {JobStateError} If job is in an invalid state for cancellation
|
|
898
2232
|
*
|
|
899
|
-
* @example
|
|
2233
|
+
* @example Cancel a pending job
|
|
900
2234
|
* ```typescript
|
|
901
|
-
* const
|
|
902
|
-
* await monque.
|
|
2235
|
+
* const job = await monque.enqueue('report', { type: 'daily' });
|
|
2236
|
+
* await monque.cancelJob(job._id.toString());
|
|
2237
|
+
* ```
|
|
2238
|
+
*/
|
|
2239
|
+
async cancelJob(jobId) {
|
|
2240
|
+
this.ensureInitialized();
|
|
2241
|
+
return this.manager.cancelJob(jobId);
|
|
2242
|
+
}
|
|
2243
|
+
/**
|
|
2244
|
+
* Retry a failed or cancelled job.
|
|
903
2245
|
*
|
|
904
|
-
*
|
|
905
|
-
*
|
|
2246
|
+
* Resets the job to 'pending' status, clears failure count/reason, and sets
|
|
2247
|
+
* nextRunAt to now (immediate retry). Emits a 'job:retried' event.
|
|
906
2248
|
*
|
|
907
|
-
*
|
|
908
|
-
*
|
|
2249
|
+
* @param jobId - The ID of the job to retry
|
|
2250
|
+
* @returns The updated job, or null if not found
|
|
2251
|
+
* @throws {JobStateError} If job is in an invalid state for retry (must be failed or cancelled)
|
|
909
2252
|
*
|
|
910
|
-
* @example
|
|
2253
|
+
* @example Retry a failed job
|
|
911
2254
|
* ```typescript
|
|
912
|
-
* monque.on('job:
|
|
913
|
-
*
|
|
2255
|
+
* monque.on('job:fail', async ({ job }) => {
|
|
2256
|
+
* console.log(`Job ${job._id} failed, retrying manually...`);
|
|
2257
|
+
* await monque.retryJob(job._id.toString());
|
|
914
2258
|
* });
|
|
2259
|
+
* ```
|
|
2260
|
+
*/
|
|
2261
|
+
async retryJob(jobId) {
|
|
2262
|
+
this.ensureInitialized();
|
|
2263
|
+
return this.manager.retryJob(jobId);
|
|
2264
|
+
}
|
|
2265
|
+
/**
|
|
2266
|
+
* Reschedule a pending job to run at a different time.
|
|
915
2267
|
*
|
|
916
|
-
*
|
|
917
|
-
* metrics.recordJobDuration(job.name, duration);
|
|
918
|
-
* });
|
|
2268
|
+
* Only works for jobs in 'pending' status.
|
|
919
2269
|
*
|
|
920
|
-
*
|
|
921
|
-
*
|
|
922
|
-
*
|
|
923
|
-
*
|
|
924
|
-
* }
|
|
925
|
-
* });
|
|
2270
|
+
* @param jobId - The ID of the job to reschedule
|
|
2271
|
+
* @param runAt - The new Date when the job should run
|
|
2272
|
+
* @returns The updated job, or null if not found
|
|
2273
|
+
* @throws {JobStateError} If job is not in pending state
|
|
926
2274
|
*
|
|
927
|
-
*
|
|
2275
|
+
* @example Delay a job by 1 hour
|
|
2276
|
+
* ```typescript
|
|
2277
|
+
* const nextHour = new Date(Date.now() + 60 * 60 * 1000);
|
|
2278
|
+
* await monque.rescheduleJob(jobId, nextHour);
|
|
928
2279
|
* ```
|
|
929
|
-
*
|
|
930
|
-
* @throws {ConnectionError} If scheduler not initialized (call `initialize()` first)
|
|
931
2280
|
*/
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
this.isRunning = true;
|
|
936
|
-
this.setupChangeStream();
|
|
937
|
-
this.pollIntervalId = setInterval(() => {
|
|
938
|
-
this.poll().catch((error) => {
|
|
939
|
-
this.emit("job:error", { error });
|
|
940
|
-
});
|
|
941
|
-
}, this.options.pollInterval);
|
|
942
|
-
this.heartbeatIntervalId = setInterval(() => {
|
|
943
|
-
this.updateHeartbeats().catch((error) => {
|
|
944
|
-
this.emit("job:error", { error });
|
|
945
|
-
});
|
|
946
|
-
}, this.options.heartbeatInterval);
|
|
947
|
-
if (this.options.jobRetention) {
|
|
948
|
-
const interval = this.options.jobRetention.interval ?? DEFAULTS.retentionInterval;
|
|
949
|
-
this.cleanupJobs().catch((error) => {
|
|
950
|
-
this.emit("job:error", { error });
|
|
951
|
-
});
|
|
952
|
-
this.cleanupIntervalId = setInterval(() => {
|
|
953
|
-
this.cleanupJobs().catch((error) => {
|
|
954
|
-
this.emit("job:error", { error });
|
|
955
|
-
});
|
|
956
|
-
}, interval);
|
|
957
|
-
}
|
|
958
|
-
this.poll().catch((error) => {
|
|
959
|
-
this.emit("job:error", { error });
|
|
960
|
-
});
|
|
2281
|
+
async rescheduleJob(jobId, runAt) {
|
|
2282
|
+
this.ensureInitialized();
|
|
2283
|
+
return this.manager.rescheduleJob(jobId, runAt);
|
|
961
2284
|
}
|
|
962
2285
|
/**
|
|
963
|
-
*
|
|
2286
|
+
* Permanently delete a job.
|
|
964
2287
|
*
|
|
965
|
-
*
|
|
966
|
-
*
|
|
967
|
-
* a `job:error` event with a `ShutdownTimeoutError` containing incomplete jobs.
|
|
968
|
-
* On timeout, jobs still in progress are left as `processing` for stale job recovery.
|
|
2288
|
+
* This action is irreversible. Emits a 'job:deleted' event upon success.
|
|
2289
|
+
* Can delete a job in any state.
|
|
969
2290
|
*
|
|
970
|
-
*
|
|
2291
|
+
* @param jobId - The ID of the job to delete
|
|
2292
|
+
* @returns true if deleted, false if job not found
|
|
971
2293
|
*
|
|
972
|
-
* @
|
|
2294
|
+
* @example Delete a cleanup job
|
|
2295
|
+
* ```typescript
|
|
2296
|
+
* const deleted = await monque.deleteJob(jobId);
|
|
2297
|
+
* if (deleted) {
|
|
2298
|
+
* console.log('Job permanently removed');
|
|
2299
|
+
* }
|
|
2300
|
+
* ```
|
|
2301
|
+
*/
|
|
2302
|
+
async deleteJob(jobId) {
|
|
2303
|
+
this.ensureInitialized();
|
|
2304
|
+
return this.manager.deleteJob(jobId);
|
|
2305
|
+
}
|
|
2306
|
+
/**
|
|
2307
|
+
* Cancel multiple jobs matching the given filter.
|
|
973
2308
|
*
|
|
974
|
-
*
|
|
2309
|
+
* Only cancels jobs in 'pending' status. Jobs in other states are collected
|
|
2310
|
+
* as errors in the result. Emits a 'jobs:cancelled' event with the IDs of
|
|
2311
|
+
* successfully cancelled jobs.
|
|
2312
|
+
*
|
|
2313
|
+
* @param filter - Selector for which jobs to cancel (name, status, date range)
|
|
2314
|
+
* @returns Result with count of cancelled jobs and any errors encountered
|
|
2315
|
+
*
|
|
2316
|
+
* @example Cancel all pending jobs for a queue
|
|
975
2317
|
* ```typescript
|
|
976
|
-
*
|
|
977
|
-
*
|
|
978
|
-
*
|
|
979
|
-
* await mongoClient.close();
|
|
980
|
-
* process.exit(0);
|
|
2318
|
+
* const result = await monque.cancelJobs({
|
|
2319
|
+
* name: 'email-queue',
|
|
2320
|
+
* status: 'pending'
|
|
981
2321
|
* });
|
|
2322
|
+
* console.log(`Cancelled ${result.count} jobs`);
|
|
982
2323
|
* ```
|
|
2324
|
+
*/
|
|
2325
|
+
async cancelJobs(filter) {
|
|
2326
|
+
this.ensureInitialized();
|
|
2327
|
+
return this.manager.cancelJobs(filter);
|
|
2328
|
+
}
|
|
2329
|
+
/**
|
|
2330
|
+
* Retry multiple jobs matching the given filter.
|
|
983
2331
|
*
|
|
984
|
-
*
|
|
2332
|
+
* Only retries jobs in 'failed' or 'cancelled' status. Jobs in other states
|
|
2333
|
+
* are collected as errors in the result. Emits a 'jobs:retried' event with
|
|
2334
|
+
* the IDs of successfully retried jobs.
|
|
2335
|
+
*
|
|
2336
|
+
* @param filter - Selector for which jobs to retry (name, status, date range)
|
|
2337
|
+
* @returns Result with count of retried jobs and any errors encountered
|
|
2338
|
+
*
|
|
2339
|
+
* @example Retry all failed jobs
|
|
985
2340
|
* ```typescript
|
|
986
|
-
*
|
|
987
|
-
*
|
|
988
|
-
* logger.warn('Forced shutdown after timeout:', error.incompleteJobs);
|
|
989
|
-
* }
|
|
2341
|
+
* const result = await monque.retryJobs({
|
|
2342
|
+
* status: 'failed'
|
|
990
2343
|
* });
|
|
991
|
-
*
|
|
992
|
-
* await monque.stop();
|
|
2344
|
+
* console.log(`Retried ${result.count} jobs`);
|
|
993
2345
|
* ```
|
|
994
2346
|
*/
|
|
995
|
-
async
|
|
996
|
-
|
|
997
|
-
this.
|
|
998
|
-
await this.closeChangeStream();
|
|
999
|
-
if (this.changeStreamDebounceTimer) {
|
|
1000
|
-
clearTimeout(this.changeStreamDebounceTimer);
|
|
1001
|
-
this.changeStreamDebounceTimer = null;
|
|
1002
|
-
}
|
|
1003
|
-
if (this.changeStreamReconnectTimer) {
|
|
1004
|
-
clearTimeout(this.changeStreamReconnectTimer);
|
|
1005
|
-
this.changeStreamReconnectTimer = null;
|
|
1006
|
-
}
|
|
1007
|
-
if (this.cleanupIntervalId) {
|
|
1008
|
-
clearInterval(this.cleanupIntervalId);
|
|
1009
|
-
this.cleanupIntervalId = null;
|
|
1010
|
-
}
|
|
1011
|
-
if (this.pollIntervalId) {
|
|
1012
|
-
clearInterval(this.pollIntervalId);
|
|
1013
|
-
this.pollIntervalId = null;
|
|
1014
|
-
}
|
|
1015
|
-
if (this.heartbeatIntervalId) {
|
|
1016
|
-
clearInterval(this.heartbeatIntervalId);
|
|
1017
|
-
this.heartbeatIntervalId = null;
|
|
1018
|
-
}
|
|
1019
|
-
if (this.getActiveJobs().length === 0) return;
|
|
1020
|
-
let checkInterval;
|
|
1021
|
-
const waitForJobs = new Promise((resolve) => {
|
|
1022
|
-
checkInterval = setInterval(() => {
|
|
1023
|
-
if (this.getActiveJobs().length === 0) {
|
|
1024
|
-
clearInterval(checkInterval);
|
|
1025
|
-
resolve(void 0);
|
|
1026
|
-
}
|
|
1027
|
-
}, 100);
|
|
1028
|
-
});
|
|
1029
|
-
const timeout = new Promise((resolve) => {
|
|
1030
|
-
setTimeout(() => resolve("timeout"), this.options.shutdownTimeout);
|
|
1031
|
-
});
|
|
1032
|
-
let result;
|
|
1033
|
-
try {
|
|
1034
|
-
result = await Promise.race([waitForJobs, timeout]);
|
|
1035
|
-
} finally {
|
|
1036
|
-
if (checkInterval) clearInterval(checkInterval);
|
|
1037
|
-
}
|
|
1038
|
-
if (result === "timeout") {
|
|
1039
|
-
const incompleteJobs = this.getActiveJobsList();
|
|
1040
|
-
const { ShutdownTimeoutError: ShutdownTimeoutError$1 } = await Promise.resolve().then(() => require("./errors-Dfli-u59.cjs"));
|
|
1041
|
-
const error = new ShutdownTimeoutError$1(`Shutdown timed out after ${this.options.shutdownTimeout}ms with ${incompleteJobs.length} incomplete jobs`, incompleteJobs);
|
|
1042
|
-
this.emit("job:error", { error });
|
|
1043
|
-
}
|
|
2347
|
+
async retryJobs(filter) {
|
|
2348
|
+
this.ensureInitialized();
|
|
2349
|
+
return this.manager.retryJobs(filter);
|
|
1044
2350
|
}
|
|
1045
2351
|
/**
|
|
1046
|
-
*
|
|
1047
|
-
*
|
|
1048
|
-
* Returns `true` when the scheduler is started, initialized, and has an active
|
|
1049
|
-
* MongoDB collection reference. Useful for health check endpoints and monitoring.
|
|
2352
|
+
* Delete multiple jobs matching the given filter.
|
|
1050
2353
|
*
|
|
1051
|
-
*
|
|
1052
|
-
*
|
|
1053
|
-
* - Has called `start()` and is actively polling
|
|
1054
|
-
* - Has a valid MongoDB collection reference
|
|
2354
|
+
* Deletes jobs in any status. Uses a batch delete for efficiency.
|
|
2355
|
+
* Does not emit individual 'job:deleted' events to avoid noise.
|
|
1055
2356
|
*
|
|
1056
|
-
* @
|
|
2357
|
+
* @param filter - Selector for which jobs to delete (name, status, date range)
|
|
2358
|
+
* @returns Result with count of deleted jobs (errors array always empty for delete)
|
|
1057
2359
|
*
|
|
1058
|
-
* @example
|
|
2360
|
+
* @example Delete old completed jobs
|
|
1059
2361
|
* ```typescript
|
|
1060
|
-
*
|
|
1061
|
-
*
|
|
1062
|
-
*
|
|
1063
|
-
*
|
|
1064
|
-
* scheduler: healthy,
|
|
1065
|
-
* timestamp: new Date().toISOString()
|
|
1066
|
-
* });
|
|
2362
|
+
* const weekAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000);
|
|
2363
|
+
* const result = await monque.deleteJobs({
|
|
2364
|
+
* status: 'completed',
|
|
2365
|
+
* olderThan: weekAgo
|
|
1067
2366
|
* });
|
|
2367
|
+
* console.log(`Deleted ${result.count} jobs`);
|
|
1068
2368
|
* ```
|
|
2369
|
+
*/
|
|
2370
|
+
async deleteJobs(filter) {
|
|
2371
|
+
this.ensureInitialized();
|
|
2372
|
+
return this.manager.deleteJobs(filter);
|
|
2373
|
+
}
|
|
2374
|
+
/**
|
|
2375
|
+
* Get a single job by its MongoDB ObjectId.
|
|
1069
2376
|
*
|
|
1070
|
-
*
|
|
2377
|
+
* Useful for retrieving job details when you have a job ID from events,
|
|
2378
|
+
* logs, or stored references.
|
|
2379
|
+
*
|
|
2380
|
+
* @template T - The expected type of the job data payload
|
|
2381
|
+
* @param id - The job's ObjectId
|
|
2382
|
+
* @returns Promise resolving to the job if found, null otherwise
|
|
2383
|
+
* @throws {ConnectionError} If scheduler not initialized
|
|
2384
|
+
*
|
|
2385
|
+
* @example Look up job from event
|
|
1071
2386
|
* ```typescript
|
|
1072
|
-
*
|
|
1073
|
-
*
|
|
1074
|
-
*
|
|
1075
|
-
*
|
|
1076
|
-
* res.status(503).send('not ready');
|
|
1077
|
-
* }
|
|
2387
|
+
* monque.on('job:fail', async ({ job }) => {
|
|
2388
|
+
* // Later, retrieve the job to check its status
|
|
2389
|
+
* const currentJob = await monque.getJob(job._id);
|
|
2390
|
+
* console.log(`Job status: ${currentJob?.status}`);
|
|
1078
2391
|
* });
|
|
1079
2392
|
* ```
|
|
1080
2393
|
*
|
|
1081
|
-
* @example
|
|
2394
|
+
* @example Admin endpoint
|
|
1082
2395
|
* ```typescript
|
|
1083
|
-
*
|
|
1084
|
-
*
|
|
1085
|
-
*
|
|
1086
|
-
*
|
|
2396
|
+
* app.get('/jobs/:id', async (req, res) => {
|
|
2397
|
+
* const job = await monque.getJob(new ObjectId(req.params.id));
|
|
2398
|
+
* if (!job) {
|
|
2399
|
+
* return res.status(404).json({ error: 'Job not found' });
|
|
1087
2400
|
* }
|
|
1088
|
-
*
|
|
2401
|
+
* res.json(job);
|
|
2402
|
+
* });
|
|
1089
2403
|
* ```
|
|
1090
|
-
*/
|
|
1091
|
-
|
|
1092
|
-
|
|
2404
|
+
*/
|
|
2405
|
+
async getJob(id) {
|
|
2406
|
+
this.ensureInitialized();
|
|
2407
|
+
return this.query.getJob(id);
|
|
1093
2408
|
}
|
|
1094
2409
|
/**
|
|
1095
2410
|
* Query jobs from the queue with optional filters.
|
|
@@ -1135,398 +2450,345 @@ var Monque = class extends node_events.EventEmitter {
|
|
|
1135
2450
|
*/
|
|
1136
2451
|
async getJobs(filter = {}) {
|
|
1137
2452
|
this.ensureInitialized();
|
|
1138
|
-
|
|
1139
|
-
const query = {};
|
|
1140
|
-
if (filter.name !== void 0) query["name"] = filter.name;
|
|
1141
|
-
if (filter.status !== void 0) if (Array.isArray(filter.status)) query["status"] = { $in: filter.status };
|
|
1142
|
-
else query["status"] = filter.status;
|
|
1143
|
-
const limit = filter.limit ?? 100;
|
|
1144
|
-
const skip = filter.skip ?? 0;
|
|
1145
|
-
try {
|
|
1146
|
-
return (await this.collection.find(query).sort({ nextRunAt: 1 }).skip(skip).limit(limit).toArray()).map((doc) => this.documentToPersistedJob(doc));
|
|
1147
|
-
} catch (error) {
|
|
1148
|
-
throw new require_errors.ConnectionError(`Failed to query jobs: ${error instanceof Error ? error.message : "Unknown error during getJobs"}`, error instanceof Error ? { cause: error } : void 0);
|
|
1149
|
-
}
|
|
2453
|
+
return this.query.getJobs(filter);
|
|
1150
2454
|
}
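For context, the removed inline implementation above shows the filter shape `getJobs` accepted: `name`, a single `status` or an array matched with `$in`, `limit` defaulting to 100, and `skip` defaulting to 0. Assuming the new `this.query.getJobs` delegate keeps that shape, a paged status query looks like this sketch:

```typescript
// Hedged sketch: assumes the delegated query layer keeps the historical
// filter shape (name, status | status[], limit, skip) visible in the
// removed code above.
const activeEmailJobs = await monque.getJobs({
  name: 'send-email',
  status: ['pending', 'processing'],
  limit: 50,
  skip: 0,
});

for (const job of activeEmailJobs) {
  console.log(String(job._id), job.status, job.nextRunAt);
}
```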
|
|
1151
2455
|
/**
|
|
1152
|
-
* Get a
|
|
2456
|
+
* Get a paginated list of jobs using opaque cursors.
|
|
1153
2457
|
*
|
|
1154
|
-
*
|
|
1155
|
-
*
|
|
2458
|
+
* Provides stable pagination for large job lists. Supports forward and backward
|
|
2459
|
+
* navigation, filtering, and efficient database access via index-based cursor queries.
|
|
1156
2460
|
*
|
|
1157
|
-
* @template T - The
|
|
1158
|
-
* @param
|
|
1159
|
-
* @returns
|
|
1160
|
-
* @throws {
|
|
2461
|
+
* @template T - The job data payload type
|
|
2462
|
+
* @param options - Pagination options (cursor, limit, direction, filter)
|
|
2463
|
+
* @returns Page of jobs with next/prev cursors
|
|
2464
|
+
* @throws {InvalidCursorError} If the provided cursor is malformed
|
|
2465
|
+
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
1161
2466
|
*
|
|
1162
|
-
* @example
|
|
2467
|
+
* @example List pending jobs
|
|
1163
2468
|
* ```typescript
|
|
1164
|
-
*
|
|
1165
|
-
*
|
|
1166
|
-
*
|
|
1167
|
-
* console.log(`Job status: ${currentJob?.status}`);
|
|
2469
|
+
* const page = await monque.getJobsWithCursor({
|
|
2470
|
+
* limit: 20,
|
|
2471
|
+
* filter: { status: 'pending' }
|
|
1168
2472
|
* });
|
|
1169
|
-
*
|
|
2473
|
+
* const jobs = page.jobs;
|
|
1170
2474
|
*
|
|
1171
|
-
*
|
|
1172
|
-
*
|
|
1173
|
-
*
|
|
1174
|
-
*
|
|
1175
|
-
*
|
|
1176
|
-
*
|
|
1177
|
-
*
|
|
1178
|
-
* res.json(job);
|
|
1179
|
-
* });
|
|
2475
|
+
* // Get next page
|
|
2476
|
+
* if (page.hasNextPage) {
|
|
2477
|
+
* const page2 = await monque.getJobsWithCursor({
|
|
2478
|
+
* cursor: page.cursor,
|
|
2479
|
+
* limit: 20
|
|
2480
|
+
* });
|
|
2481
|
+
* }
|
|
1180
2482
|
* ```
|
|
1181
2483
|
*/
|
|
1182
|
-
async
|
|
2484
|
+
async getJobsWithCursor(options = {}) {
|
|
1183
2485
|
this.ensureInitialized();
|
|
1184
|
-
|
|
1185
|
-
try {
|
|
1186
|
-
const doc = await this.collection.findOne({ _id: id });
|
|
1187
|
-
if (!doc) return null;
|
|
1188
|
-
return this.documentToPersistedJob(doc);
|
|
1189
|
-
} catch (error) {
|
|
1190
|
-
throw new require_errors.ConnectionError(`Failed to get job: ${error instanceof Error ? error.message : "Unknown error during getJob"}`, error instanceof Error ? { cause: error } : void 0);
|
|
1191
|
-
}
|
|
2486
|
+
return this.query.getJobsWithCursor(options);
|
|
1192
2487
|
}
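Backward navigation is documented but not demonstrated. The sketch below uses the exported `CursorDirection` constant; reusing `page.cursor` together with a `direction` option mirrors the forward example, but the exact cursor field and option names for backward paging are assumptions.

```typescript
import { CursorDirection } from '@monque/core';

// Sketch: walk forward one page, then request the previous page.
// The direction option and cursor reuse are assumptions based on the
// documented pagination options (cursor, limit, direction, filter).
const page1 = await monque.getJobsWithCursor({ limit: 20, filter: { status: 'completed' } });
const page2 = await monque.getJobsWithCursor({ cursor: page1.cursor, limit: 20 });

const backToPage1 = await monque.getJobsWithCursor({
  cursor: page2.cursor,
  direction: CursorDirection.BACKWARD,
  limit: 20,
});
```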
|
|
1193
2488
|
/**
|
|
1194
|
-
*
|
|
1195
|
-
*
|
|
1196
|
-
* Called at regular intervals (configured by `pollInterval`). For each registered worker,
|
|
1197
|
-
* attempts to acquire jobs up to the worker's available concurrency slots.
|
|
1198
|
-
* Aborts early if the scheduler is stopping (`isRunning` is false).
|
|
2489
|
+
* Get aggregate statistics for the job queue.
|
|
1199
2490
|
*
|
|
1200
|
-
*
|
|
1201
|
-
|
|
1202
|
-
async poll() {
|
|
1203
|
-
if (!this.isRunning || !this.collection) return;
|
|
1204
|
-
for (const [name, worker] of this.workers) {
|
|
1205
|
-
const availableSlots = worker.concurrency - worker.activeJobs.size;
|
|
1206
|
-
if (availableSlots <= 0) continue;
|
|
1207
|
-
for (let i = 0; i < availableSlots; i++) {
|
|
1208
|
-
if (!this.isRunning) return;
|
|
1209
|
-
const job = await this.acquireJob(name);
|
|
1210
|
-
if (job) this.processJob(job, worker).catch((error) => {
|
|
1211
|
-
this.emit("job:error", {
|
|
1212
|
-
error,
|
|
1213
|
-
job
|
|
1214
|
-
});
|
|
1215
|
-
});
|
|
1216
|
-
else break;
|
|
1217
|
-
}
|
|
1218
|
-
}
|
|
1219
|
-
}
|
|
1220
|
-
/**
|
|
1221
|
-
* Atomically acquire a pending job for processing using the claimedBy pattern.
|
|
2491
|
+
* Uses MongoDB aggregation pipeline for efficient server-side calculation.
|
|
2492
|
+
* Returns counts per status and optional average processing duration for completed jobs.
|
|
1222
2493
|
*
|
|
1223
|
-
*
|
|
1224
|
-
*
|
|
1225
|
-
*
|
|
1226
|
-
*
|
|
1227
|
-
* - Is not claimed by another instance (claimedBy is null/undefined)
|
|
2494
|
+
* @param filter - Optional filter to scope statistics by job name
|
|
2495
|
+
* @returns Promise resolving to queue statistics
|
|
2496
|
+
* @throws {AggregationTimeoutError} If the aggregation exceeds the 30-second timeout
|
|
2497
|
+
* @throws {ConnectionError} If database operation fails
|
|
1228
2498
|
*
|
|
1229
|
-
*
|
|
2499
|
+
* @example Get overall queue statistics
|
|
2500
|
+
* ```typescript
|
|
2501
|
+
* const stats = await monque.getQueueStats();
|
|
2502
|
+
* console.log(`Pending: ${stats.pending}, Failed: ${stats.failed}`);
|
|
2503
|
+
* ```
|
|
1230
2504
|
*
|
|
1231
|
-
* @
|
|
1232
|
-
*
|
|
1233
|
-
*
|
|
2505
|
+
* @example Get statistics for a specific job type
|
|
2506
|
+
* ```typescript
|
|
2507
|
+
* const emailStats = await monque.getQueueStats({ name: 'send-email' });
|
|
2508
|
+
* console.log(`${emailStats.total} email jobs in queue`);
|
|
2509
|
+
* ```
|
|
1234
2510
|
*/
|
|
1235
|
-
async
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
const result = await this.collection.findOneAndUpdate({
|
|
1239
|
-
name,
|
|
1240
|
-
status: JobStatus.PENDING,
|
|
1241
|
-
nextRunAt: { $lte: now },
|
|
1242
|
-
$or: [{ claimedBy: null }, { claimedBy: { $exists: false } }]
|
|
1243
|
-
}, { $set: {
|
|
1244
|
-
status: JobStatus.PROCESSING,
|
|
1245
|
-
claimedBy: this.options.schedulerInstanceId,
|
|
1246
|
-
lockedAt: now,
|
|
1247
|
-
lastHeartbeat: now,
|
|
1248
|
-
heartbeatInterval: this.options.heartbeatInterval,
|
|
1249
|
-
updatedAt: now
|
|
1250
|
-
} }, {
|
|
1251
|
-
sort: { nextRunAt: 1 },
|
|
1252
|
-
returnDocument: "after"
|
|
1253
|
-
});
|
|
1254
|
-
if (!result) return null;
|
|
1255
|
-
return this.documentToPersistedJob(result);
|
|
2511
|
+
async getQueueStats(filter) {
|
|
2512
|
+
this.ensureInitialized();
|
|
2513
|
+
return this.query.getQueueStats(filter);
|
|
1256
2514
|
}
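A usage note: the aggregate counts pair naturally with periodic metrics export, complementing the `isHealthy()` check further down. In the sketch below, only `pending`, `failed` and `total` come from the examples above; `metrics.gauge` and `logger` are stand-ins for whatever clients are actually in use.

```typescript
// Illustrative metrics loop; field names beyond pending/failed/total and
// the metrics.gauge / logger helpers are assumptions.
setInterval(() => {
  monque.getQueueStats()
    .then((stats) => {
      metrics.gauge('monque.jobs.pending', stats.pending);
      metrics.gauge('monque.jobs.failed', stats.failed);
      metrics.gauge('monque.jobs.total', stats.total);
    })
    .catch((error) => logger.warn('getQueueStats failed', error));
}, 30_000);
```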
|
|
1257
2515
|
/**
|
|
1258
|
-
*
|
|
2516
|
+
* Register a worker to process jobs of a specific type.
|
|
1259
2517
|
*
|
|
1260
|
-
*
|
|
1261
|
-
*
|
|
1262
|
-
* calls `failJob()` which implements exponential backoff retry logic.
|
|
2518
|
+
* Workers can be registered before or after calling `start()`. Each worker
|
|
2519
|
+
* processes jobs concurrently up to its configured concurrency limit (default: 5).
|
|
1263
2520
|
*
|
|
1264
|
-
*
|
|
1265
|
-
*
|
|
1266
|
-
*
|
|
1267
|
-
*/
|
|
1268
|
-
async processJob(job, worker) {
|
|
1269
|
-
const jobId = job._id.toString();
|
|
1270
|
-
worker.activeJobs.set(jobId, job);
|
|
1271
|
-
const startTime = Date.now();
|
|
1272
|
-
this.emit("job:start", job);
|
|
1273
|
-
try {
|
|
1274
|
-
await worker.handler(job);
|
|
1275
|
-
const duration = Date.now() - startTime;
|
|
1276
|
-
await this.completeJob(job);
|
|
1277
|
-
this.emit("job:complete", {
|
|
1278
|
-
job,
|
|
1279
|
-
duration
|
|
1280
|
-
});
|
|
1281
|
-
} catch (error) {
|
|
1282
|
-
const err = error instanceof Error ? error : new Error(String(error));
|
|
1283
|
-
await this.failJob(job, err);
|
|
1284
|
-
const willRetry = job.failCount + 1 < this.options.maxRetries;
|
|
1285
|
-
this.emit("job:fail", {
|
|
1286
|
-
job,
|
|
1287
|
-
error: err,
|
|
1288
|
-
willRetry
|
|
1289
|
-
});
|
|
1290
|
-
} finally {
|
|
1291
|
-
worker.activeJobs.delete(jobId);
|
|
1292
|
-
}
|
|
1293
|
-
}
|
|
1294
|
-
/**
|
|
1295
|
-
* Mark a job as completed successfully.
|
|
2521
|
+
* The handler function receives the full job object including metadata (`_id`, `status`,
|
|
2522
|
+
* `failCount`, etc.). If the handler throws an error, the job is retried with exponential
|
|
2523
|
+
* backoff up to `maxRetries` times. After exhausting retries, the job is marked as `failed`.
|
|
1296
2524
|
*
|
|
1297
|
-
*
|
|
1298
|
-
* expression and resets `failCount` to 0. For one-time jobs, sets status to `completed`.
|
|
1299
|
-
* Clears `lockedAt` and `failReason` fields in both cases.
|
|
2525
|
+
* Events are emitted during job processing: `job:start`, `job:complete`, `job:fail`, and `job:error`.
|
|
1300
2526
|
*
|
|
1301
|
-
*
|
|
1302
|
-
*
|
|
1303
|
-
|
|
1304
|
-
async completeJob(job) {
|
|
1305
|
-
if (!this.collection || !isPersistedJob(job)) return;
|
|
1306
|
-
if (job.repeatInterval) {
|
|
1307
|
-
const nextRunAt = getNextCronDate(job.repeatInterval);
|
|
1308
|
-
await this.collection.updateOne({ _id: job._id }, {
|
|
1309
|
-
$set: {
|
|
1310
|
-
status: JobStatus.PENDING,
|
|
1311
|
-
nextRunAt,
|
|
1312
|
-
failCount: 0,
|
|
1313
|
-
updatedAt: /* @__PURE__ */ new Date()
|
|
1314
|
-
},
|
|
1315
|
-
$unset: {
|
|
1316
|
-
lockedAt: "",
|
|
1317
|
-
claimedBy: "",
|
|
1318
|
-
lastHeartbeat: "",
|
|
1319
|
-
heartbeatInterval: "",
|
|
1320
|
-
failReason: ""
|
|
1321
|
-
}
|
|
1322
|
-
});
|
|
1323
|
-
} else {
|
|
1324
|
-
await this.collection.updateOne({ _id: job._id }, {
|
|
1325
|
-
$set: {
|
|
1326
|
-
status: JobStatus.COMPLETED,
|
|
1327
|
-
updatedAt: /* @__PURE__ */ new Date()
|
|
1328
|
-
},
|
|
1329
|
-
$unset: {
|
|
1330
|
-
lockedAt: "",
|
|
1331
|
-
claimedBy: "",
|
|
1332
|
-
lastHeartbeat: "",
|
|
1333
|
-
heartbeatInterval: "",
|
|
1334
|
-
failReason: ""
|
|
1335
|
-
}
|
|
1336
|
-
});
|
|
1337
|
-
job.status = JobStatus.COMPLETED;
|
|
1338
|
-
}
|
|
1339
|
-
}
|
|
1340
|
-
/**
|
|
1341
|
-
* Handle job failure with exponential backoff retry logic.
|
|
2527
|
+
* **Duplicate Registration**: By default, registering a worker for a job name that already has
|
|
2528
|
+
* a worker will throw a `WorkerRegistrationError`. This fail-fast behavior prevents accidental
|
|
2529
|
+
* replacement of handlers. To explicitly replace a worker, pass `{ replace: true }`.
|
|
1342
2530
|
*
|
|
1343
|
-
*
|
|
1344
|
-
*
|
|
2531
|
+
* @template T - The job data payload type for type-safe access to `job.data`
|
|
2532
|
+
* @param name - Job type identifier to handle
|
|
2533
|
+
* @param handler - Async function to execute for each job
|
|
2534
|
+
* @param options - Worker configuration
|
|
2535
|
+
* @param options.concurrency - Maximum concurrent jobs for this worker (default: `defaultConcurrency`)
|
|
2536
|
+
* @param options.replace - When `true`, replace existing worker instead of throwing error
|
|
2537
|
+
* @throws {WorkerRegistrationError} When a worker is already registered for `name` and `replace` is not `true`
|
|
1345
2538
|
*
|
|
1346
|
-
*
|
|
1347
|
-
*
|
|
2539
|
+
* @example Basic email worker
|
|
2540
|
+
* ```typescript
|
|
2541
|
+
* interface EmailJob {
|
|
2542
|
+
* to: string;
|
|
2543
|
+
* subject: string;
|
|
2544
|
+
* body: string;
|
|
2545
|
+
* }
|
|
1348
2546
|
*
|
|
1349
|
-
*
|
|
1350
|
-
*
|
|
1351
|
-
*
|
|
1352
|
-
|
|
1353
|
-
async failJob(job, error) {
|
|
1354
|
-
if (!this.collection || !isPersistedJob(job)) return;
|
|
1355
|
-
const newFailCount = job.failCount + 1;
|
|
1356
|
-
if (newFailCount >= this.options.maxRetries) await this.collection.updateOne({ _id: job._id }, {
|
|
1357
|
-
$set: {
|
|
1358
|
-
status: JobStatus.FAILED,
|
|
1359
|
-
failCount: newFailCount,
|
|
1360
|
-
failReason: error.message,
|
|
1361
|
-
updatedAt: /* @__PURE__ */ new Date()
|
|
1362
|
-
},
|
|
1363
|
-
$unset: {
|
|
1364
|
-
lockedAt: "",
|
|
1365
|
-
claimedBy: "",
|
|
1366
|
-
lastHeartbeat: "",
|
|
1367
|
-
heartbeatInterval: ""
|
|
1368
|
-
}
|
|
1369
|
-
});
|
|
1370
|
-
else {
|
|
1371
|
-
const nextRunAt = calculateBackoff(newFailCount, this.options.baseRetryInterval, this.options.maxBackoffDelay);
|
|
1372
|
-
await this.collection.updateOne({ _id: job._id }, {
|
|
1373
|
-
$set: {
|
|
1374
|
-
status: JobStatus.PENDING,
|
|
1375
|
-
failCount: newFailCount,
|
|
1376
|
-
failReason: error.message,
|
|
1377
|
-
nextRunAt,
|
|
1378
|
-
updatedAt: /* @__PURE__ */ new Date()
|
|
1379
|
-
},
|
|
1380
|
-
$unset: {
|
|
1381
|
-
lockedAt: "",
|
|
1382
|
-
claimedBy: "",
|
|
1383
|
-
lastHeartbeat: "",
|
|
1384
|
-
heartbeatInterval: ""
|
|
1385
|
-
}
|
|
1386
|
-
});
|
|
1387
|
-
}
|
|
1388
|
-
}
|
|
1389
|
-
/**
|
|
1390
|
-
* Ensure the scheduler is initialized before operations.
|
|
2547
|
+
* monque.register<EmailJob>('send-email', async (job) => {
|
|
2548
|
+
* await emailService.send(job.data.to, job.data.subject, job.data.body);
|
|
2549
|
+
* });
|
|
2550
|
+
* ```
|
|
1391
2551
|
*
|
|
1392
|
-
* @
|
|
1393
|
-
*
|
|
2552
|
+
* @example Worker with custom concurrency
|
|
2553
|
+
* ```typescript
|
|
2554
|
+
* // Limit to 2 concurrent video processing jobs (resource-intensive)
|
|
2555
|
+
* monque.register('process-video', async (job) => {
|
|
2556
|
+
* await videoProcessor.transcode(job.data.videoId);
|
|
2557
|
+
* }, { concurrency: 2 });
|
|
2558
|
+
* ```
|
|
2559
|
+
*
|
|
2560
|
+
* @example Replacing an existing worker
|
|
2561
|
+
* ```typescript
|
|
2562
|
+
* // Replace the existing handler for 'send-email'
|
|
2563
|
+
* monque.register('send-email', newEmailHandler, { replace: true });
|
|
2564
|
+
* ```
|
|
2565
|
+
*
|
|
2566
|
+
* @example Worker with error handling
|
|
2567
|
+
* ```typescript
|
|
2568
|
+
* monque.register('sync-user', async (job) => {
|
|
2569
|
+
* try {
|
|
2570
|
+
* await externalApi.syncUser(job.data.userId);
|
|
2571
|
+
* } catch (error) {
|
|
2572
|
+
* // Job will retry with exponential backoff
|
|
2573
|
+
* // Delay = 2^failCount × baseRetryInterval (default: 1000ms)
|
|
2574
|
+
* throw new Error(`Sync failed: ${error.message}`);
|
|
2575
|
+
* }
|
|
2576
|
+
* });
|
|
2577
|
+
* ```
|
|
1394
2578
|
*/
|
|
1395
|
-
|
|
1396
|
-
|
|
2579
|
+
register(name, handler, options = {}) {
|
|
2580
|
+
const concurrency = options.concurrency ?? this.options.defaultConcurrency;
|
|
2581
|
+
if (this.workers.has(name) && options.replace !== true) throw new WorkerRegistrationError(`Worker already registered for job name "${name}". Use { replace: true } to replace.`, name);
|
|
2582
|
+
this.workers.set(name, {
|
|
2583
|
+
handler,
|
|
2584
|
+
concurrency,
|
|
2585
|
+
activeJobs: /* @__PURE__ */ new Map()
|
|
2586
|
+
});
|
|
1397
2587
|
}
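None of the examples above touch the job metadata the handler receives. A short sketch that uses `failCount` to log retries follows; the payload type, `logger`, and `externalApi` are illustrative stand-ins.

```typescript
interface SyncUserJob {
  userId: string;
}

// Sketch: the handler receives the full persisted job, so retry metadata
// such as failCount is available for logging or branching.
monque.register<SyncUserJob>('sync-user', async (job) => {
  if (job.failCount > 0) {
    logger.warn(`Retrying sync-user for ${job.data.userId} (attempt ${job.failCount + 1})`);
  }
  await externalApi.syncUser(job.data.userId);
});
```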
|
|
1398
2588
|
/**
|
|
1399
|
-
*
|
|
2589
|
+
* Start polling for and processing jobs.
|
|
1400
2590
|
*
|
|
1401
|
-
*
|
|
1402
|
-
*
|
|
2591
|
+
* Begins polling MongoDB at the configured interval (default: 1 second) to pick up
|
|
2592
|
+
* pending jobs and dispatch them to registered workers. Must call `initialize()` first.
|
|
2593
|
+
* Workers can be registered before or after calling `start()`.
|
|
1403
2594
|
*
|
|
1404
|
-
*
|
|
1405
|
-
*
|
|
2595
|
+
* Jobs are processed concurrently up to each worker's configured concurrency limit.
|
|
2596
|
+
* The scheduler continues running until `stop()` is called.
|
|
1406
2597
|
*
|
|
1407
|
-
* @
|
|
1408
|
-
|
|
1409
|
-
|
|
1410
|
-
|
|
1411
|
-
const now = /* @__PURE__ */ new Date();
|
|
1412
|
-
await this.collection.updateMany({
|
|
1413
|
-
claimedBy: this.options.schedulerInstanceId,
|
|
1414
|
-
status: JobStatus.PROCESSING
|
|
1415
|
-
}, { $set: {
|
|
1416
|
-
lastHeartbeat: now,
|
|
1417
|
-
updatedAt: now
|
|
1418
|
-
} });
|
|
1419
|
-
}
|
|
1420
|
-
/**
|
|
1421
|
-
* Set up MongoDB Change Stream for real-time job notifications.
|
|
2598
|
+
* @example Basic startup
|
|
2599
|
+
* ```typescript
|
|
2600
|
+
* const monque = new Monque(db);
|
|
2601
|
+
* await monque.initialize();
|
|
1422
2602
|
*
|
|
1423
|
-
*
|
|
1424
|
-
*
|
|
1425
|
-
* polling delay for reactive job processing.
|
|
2603
|
+
* monque.register('send-email', emailHandler);
|
|
2604
|
+
* monque.register('process-order', orderHandler);
|
|
1426
2605
|
*
|
|
1427
|
-
*
|
|
1428
|
-
*
|
|
1429
|
-
* - Update operations where status field changes
|
|
2606
|
+
* monque.start(); // Begin processing jobs
|
|
2607
|
+
* ```
|
|
1430
2608
|
*
|
|
1431
|
-
*
|
|
1432
|
-
*
|
|
2609
|
+
* @example With event monitoring
|
|
2610
|
+
* ```typescript
|
|
2611
|
+
* monque.on('job:start', (job) => {
|
|
2612
|
+
* logger.info(`Starting job ${job.name}`);
|
|
2613
|
+
* });
|
|
1433
2614
|
*
|
|
1434
|
-
*
|
|
2615
|
+
* monque.on('job:complete', ({ job, duration }) => {
|
|
2616
|
+
* metrics.recordJobDuration(job.name, duration);
|
|
2617
|
+
* });
|
|
2618
|
+
*
|
|
2619
|
+
* monque.on('job:fail', ({ job, error, willRetry }) => {
|
|
2620
|
+
* logger.error(`Job ${job.name} failed:`, error);
|
|
2621
|
+
* if (!willRetry) {
|
|
2622
|
+
* alerting.sendAlert(`Job permanently failed: ${job.name}`);
|
|
2623
|
+
* }
|
|
2624
|
+
* });
|
|
2625
|
+
*
|
|
2626
|
+
* monque.start();
|
|
2627
|
+
* ```
|
|
2628
|
+
*
|
|
2629
|
+
* @throws {ConnectionError} If scheduler not initialized (call `initialize()` first)
|
|
1435
2630
|
*/
|
|
1436
|
-
|
|
1437
|
-
if (
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
this.handleChangeStreamEvent(change);
|
|
2631
|
+
start() {
|
|
2632
|
+
if (this.isRunning) return;
|
|
2633
|
+
if (!this.isInitialized) throw new ConnectionError("Monque not initialized. Call initialize() before start().");
|
|
2634
|
+
this.isRunning = true;
|
|
2635
|
+
this.changeStreamHandler.setup();
|
|
2636
|
+
this.pollIntervalId = setInterval(() => {
|
|
2637
|
+
this.processor.poll().catch((error) => {
|
|
2638
|
+
this.emit("job:error", { error });
|
|
1445
2639
|
});
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
2640
|
+
}, this.options.pollInterval);
|
|
2641
|
+
this.heartbeatIntervalId = setInterval(() => {
|
|
2642
|
+
this.processor.updateHeartbeats().catch((error) => {
|
|
2643
|
+
this.emit("job:error", { error });
|
|
1449
2644
|
});
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
this.
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
this.
|
|
2645
|
+
}, this.options.heartbeatInterval);
|
|
2646
|
+
if (this.options.jobRetention) {
|
|
2647
|
+
const interval = this.options.jobRetention.interval ?? DEFAULTS.retentionInterval;
|
|
2648
|
+
this.cleanupJobs().catch((error) => {
|
|
2649
|
+
this.emit("job:error", { error });
|
|
2650
|
+
});
|
|
2651
|
+
this.cleanupIntervalId = setInterval(() => {
|
|
2652
|
+
this.cleanupJobs().catch((error) => {
|
|
2653
|
+
this.emit("job:error", { error });
|
|
2654
|
+
});
|
|
2655
|
+
}, interval);
|
|
1457
2656
|
}
|
|
2657
|
+
this.processor.poll().catch((error) => {
|
|
2658
|
+
this.emit("job:error", { error });
|
|
2659
|
+
});
|
|
1458
2660
|
}
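The `start()` body reads `pollInterval`, `heartbeatInterval` and `jobRetention.interval` from `this.options`. A hedged configuration sketch follows; passing these through a second constructor argument is an assumption (the examples above only show `new Monque(db)`), and every concrete value is illustrative.

```typescript
// Sketch: option names mirror the this.options.* reads in start();
// the constructor signature and the chosen values are assumptions.
const monque = new Monque(db, {
  pollInterval: 1_000,           // poll for due jobs every second
  heartbeatInterval: 15_000,     // refresh claims on in-flight jobs
  jobRetention: {
    interval: 60 * 60 * 1_000,   // run cleanupJobs() hourly
  },
});

await monque.initialize();
monque.start();
```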
|
|
1459
2661
|
/**
|
|
1460
|
-
*
|
|
2662
|
+
* Stop the scheduler gracefully, waiting for in-progress jobs to complete.
|
|
1461
2663
|
*
|
|
1462
|
-
*
|
|
1463
|
-
*
|
|
1464
|
-
*
|
|
2664
|
+
* Stops polling for new jobs and waits for all active jobs to finish processing.
|
|
2665
|
+
* Times out after the configured `shutdownTimeout` (default: 30 seconds), emitting
|
|
2666
|
+
* a `job:error` event with a `ShutdownTimeoutError` containing incomplete jobs.
|
|
2667
|
+
* On timeout, jobs still in progress are left as `processing` for stale job recovery.
|
|
1465
2668
|
*
|
|
1466
|
-
*
|
|
1467
|
-
*
|
|
2669
|
+
* It's safe to call `stop()` multiple times; subsequent calls are no-ops once the scheduler has stopped.
|
|
2670
|
+
*
|
|
2671
|
+
* @returns Promise that resolves when all jobs complete or timeout is reached
|
|
2672
|
+
*
|
|
2673
|
+
* @example Graceful application shutdown
|
|
2674
|
+
* ```typescript
|
|
2675
|
+
* process.on('SIGTERM', async () => {
|
|
2676
|
+
* console.log('Shutting down gracefully...');
|
|
2677
|
+
* await monque.stop(); // Wait for jobs to complete
|
|
2678
|
+
* await mongoClient.close();
|
|
2679
|
+
* process.exit(0);
|
|
2680
|
+
* });
|
|
2681
|
+
* ```
|
|
2682
|
+
*
|
|
2683
|
+
* @example With timeout handling
|
|
2684
|
+
* ```typescript
|
|
2685
|
+
* monque.on('job:error', ({ error }) => {
|
|
2686
|
+
* if (error.name === 'ShutdownTimeoutError') {
|
|
2687
|
+
* logger.warn('Forced shutdown after timeout:', error.incompleteJobs);
|
|
2688
|
+
* }
|
|
2689
|
+
* });
|
|
2690
|
+
*
|
|
2691
|
+
* await monque.stop();
|
|
2692
|
+
* ```
|
|
1468
2693
|
*/
|
|
1469
|
-
|
|
2694
|
+
async stop() {
|
|
1470
2695
|
if (!this.isRunning) return;
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
2696
|
+
this.isRunning = false;
|
|
2697
|
+
await this.changeStreamHandler.close();
|
|
2698
|
+
if (this.cleanupIntervalId) {
|
|
2699
|
+
clearInterval(this.cleanupIntervalId);
|
|
2700
|
+
this.cleanupIntervalId = null;
|
|
2701
|
+
}
|
|
2702
|
+
if (this.pollIntervalId) {
|
|
2703
|
+
clearInterval(this.pollIntervalId);
|
|
2704
|
+
this.pollIntervalId = null;
|
|
2705
|
+
}
|
|
2706
|
+
if (this.heartbeatIntervalId) {
|
|
2707
|
+
clearInterval(this.heartbeatIntervalId);
|
|
2708
|
+
this.heartbeatIntervalId = null;
|
|
2709
|
+
}
|
|
2710
|
+
if (this.getActiveJobs().length === 0) return;
|
|
2711
|
+
let checkInterval;
|
|
2712
|
+
const waitForJobs = new Promise((resolve) => {
|
|
2713
|
+
checkInterval = setInterval(() => {
|
|
2714
|
+
if (this.getActiveJobs().length === 0) {
|
|
2715
|
+
clearInterval(checkInterval);
|
|
2716
|
+
resolve(void 0);
|
|
2717
|
+
}
|
|
1481
2718
|
}, 100);
|
|
2719
|
+
});
|
|
2720
|
+
const timeout = new Promise((resolve) => {
|
|
2721
|
+
setTimeout(() => resolve("timeout"), this.options.shutdownTimeout);
|
|
2722
|
+
});
|
|
2723
|
+
let result;
|
|
2724
|
+
try {
|
|
2725
|
+
result = await Promise.race([waitForJobs, timeout]);
|
|
2726
|
+
} finally {
|
|
2727
|
+
if (checkInterval) clearInterval(checkInterval);
|
|
2728
|
+
}
|
|
2729
|
+
if (result === "timeout") {
|
|
2730
|
+
const incompleteJobs = this.getActiveJobsList();
|
|
2731
|
+
const error = new ShutdownTimeoutError(`Shutdown timed out after ${this.options.shutdownTimeout}ms with ${incompleteJobs.length} incomplete jobs`, incompleteJobs);
|
|
2732
|
+
this.emit("job:error", { error });
|
|
1482
2733
|
}
|
|
1483
2734
|
}
|
|
1484
2735
|
/**
|
|
1485
|
-
*
|
|
2736
|
+
* Check if the scheduler is healthy (running and connected).
|
|
1486
2737
|
*
|
|
1487
|
-
*
|
|
1488
|
-
*
|
|
1489
|
-
* polling-only mode.
|
|
2738
|
+
* Returns `true` when the scheduler is started, initialized, and has an active
|
|
2739
|
+
* MongoDB collection reference. Useful for health check endpoints and monitoring.
|
|
1490
2740
|
*
|
|
1491
|
-
*
|
|
1492
|
-
*
|
|
2741
|
+
* A healthy scheduler:
|
|
2742
|
+
* - Has called `initialize()` successfully
|
|
2743
|
+
* - Has called `start()` and is actively polling
|
|
2744
|
+
* - Has a valid MongoDB collection reference
|
|
2745
|
+
*
|
|
2746
|
+
* @returns `true` if scheduler is running and connected, `false` otherwise
|
|
2747
|
+
*
|
|
2748
|
+
* @example Express health check endpoint
|
|
2749
|
+
* ```typescript
|
|
2750
|
+
* app.get('/health', (req, res) => {
|
|
2751
|
+
* const healthy = monque.isHealthy();
|
|
2752
|
+
* res.status(healthy ? 200 : 503).json({
|
|
2753
|
+
* status: healthy ? 'ok' : 'unavailable',
|
|
2754
|
+
* scheduler: healthy,
|
|
2755
|
+
* timestamp: new Date().toISOString()
|
|
2756
|
+
* });
|
|
2757
|
+
* });
|
|
2758
|
+
* ```
|
|
2759
|
+
*
|
|
2760
|
+
* @example Kubernetes readiness probe
|
|
2761
|
+
* ```typescript
|
|
2762
|
+
* app.get('/readyz', (req, res) => {
|
|
2763
|
+
* if (monque.isHealthy() && dbConnected) {
|
|
2764
|
+
* res.status(200).send('ready');
|
|
2765
|
+
* } else {
|
|
2766
|
+
* res.status(503).send('not ready');
|
|
2767
|
+
* }
|
|
2768
|
+
* });
|
|
2769
|
+
* ```
|
|
2770
|
+
*
|
|
2771
|
+
* @example Periodic health monitoring
|
|
2772
|
+
* ```typescript
|
|
2773
|
+
* setInterval(() => {
|
|
2774
|
+
* if (!monque.isHealthy()) {
|
|
2775
|
+
* logger.error('Scheduler unhealthy');
|
|
2776
|
+
* metrics.increment('scheduler.unhealthy');
|
|
2777
|
+
* }
|
|
2778
|
+
* }, 60000); // Check every minute
|
|
2779
|
+
* ```
|
|
1493
2780
|
*/
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
this.changeStreamReconnectAttempts++;
|
|
1497
|
-
if (this.changeStreamReconnectAttempts > this.maxChangeStreamReconnectAttempts) {
|
|
1498
|
-
this.usingChangeStreams = false;
|
|
1499
|
-
this.emit("changestream:fallback", { reason: `Exhausted ${this.maxChangeStreamReconnectAttempts} reconnection attempts: ${error.message}` });
|
|
1500
|
-
return;
|
|
1501
|
-
}
|
|
1502
|
-
const delay = 2 ** (this.changeStreamReconnectAttempts - 1) * 1e3;
|
|
1503
|
-
if (this.changeStreamReconnectTimer) clearTimeout(this.changeStreamReconnectTimer);
|
|
1504
|
-
this.changeStreamReconnectTimer = setTimeout(() => {
|
|
1505
|
-
this.changeStreamReconnectTimer = null;
|
|
1506
|
-
if (this.isRunning) {
|
|
1507
|
-
if (this.changeStream) {
|
|
1508
|
-
this.changeStream.close().catch(() => {});
|
|
1509
|
-
this.changeStream = null;
|
|
1510
|
-
}
|
|
1511
|
-
this.setupChangeStream();
|
|
1512
|
-
}
|
|
1513
|
-
}, delay);
|
|
2781
|
+
isHealthy() {
|
|
2782
|
+
return this.isRunning && this.isInitialized && this.collection !== null;
|
|
1514
2783
|
}
|
|
1515
2784
|
/**
|
|
1516
|
-
*
|
|
2785
|
+
* Ensure the scheduler is initialized before operations.
|
|
1517
2786
|
*
|
|
1518
2787
|
* @private
|
|
2788
|
+
* @throws {ConnectionError} If scheduler not initialized or collection unavailable
|
|
1519
2789
|
*/
|
|
1520
|
-
|
|
1521
|
-
if (this.
|
|
1522
|
-
try {
|
|
1523
|
-
await this.changeStream.close();
|
|
1524
|
-
} catch {}
|
|
1525
|
-
this.changeStream = null;
|
|
1526
|
-
if (this.usingChangeStreams) this.emit("changestream:closed", void 0);
|
|
1527
|
-
}
|
|
1528
|
-
this.usingChangeStreams = false;
|
|
1529
|
-
this.changeStreamReconnectAttempts = 0;
|
|
2790
|
+
ensureInitialized() {
|
|
2791
|
+
if (!this.isInitialized || !this.collection) throw new ConnectionError("Monque not initialized. Call initialize() first.");
|
|
1530
2792
|
}
|
|
1531
2793
|
/**
|
|
1532
2794
|
* Get array of active job IDs across all workers.
|
|
@@ -1599,18 +2861,23 @@ var Monque = class extends node_events.EventEmitter {
|
|
|
1599
2861
|
};
|
|
1600
2862
|
|
|
1601
2863
|
//#endregion
|
|
1602
|
-
exports.
|
|
2864
|
+
exports.AggregationTimeoutError = AggregationTimeoutError;
|
|
2865
|
+
exports.ConnectionError = ConnectionError;
|
|
2866
|
+
exports.CursorDirection = CursorDirection;
|
|
1603
2867
|
exports.DEFAULT_BASE_INTERVAL = DEFAULT_BASE_INTERVAL;
|
|
1604
2868
|
exports.DEFAULT_MAX_BACKOFF_DELAY = DEFAULT_MAX_BACKOFF_DELAY;
|
|
1605
|
-
exports.InvalidCronError =
|
|
2869
|
+
exports.InvalidCronError = InvalidCronError;
|
|
2870
|
+
exports.InvalidCursorError = InvalidCursorError;
|
|
2871
|
+
exports.JobStateError = JobStateError;
|
|
1606
2872
|
exports.JobStatus = JobStatus;
|
|
1607
2873
|
exports.Monque = Monque;
|
|
1608
|
-
exports.MonqueError =
|
|
1609
|
-
exports.ShutdownTimeoutError =
|
|
1610
|
-
exports.WorkerRegistrationError =
|
|
2874
|
+
exports.MonqueError = MonqueError;
|
|
2875
|
+
exports.ShutdownTimeoutError = ShutdownTimeoutError;
|
|
2876
|
+
exports.WorkerRegistrationError = WorkerRegistrationError;
|
|
1611
2877
|
exports.calculateBackoff = calculateBackoff;
|
|
1612
2878
|
exports.calculateBackoffDelay = calculateBackoffDelay;
|
|
1613
2879
|
exports.getNextCronDate = getNextCronDate;
|
|
2880
|
+
exports.isCancelledJob = isCancelledJob;
|
|
1614
2881
|
exports.isCompletedJob = isCompletedJob;
|
|
1615
2882
|
exports.isFailedJob = isFailedJob;
|
|
1616
2883
|
exports.isPendingJob = isPendingJob;
|