@monque/core 1.4.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,7 +2,7 @@ import { ObjectId } from 'mongodb';
2
2
 
3
3
  import { type BulkOperationResult, type JobSelector, JobStatus, type PersistedJob } from '@/jobs';
4
4
  import { buildSelectorQuery } from '@/scheduler';
5
- import { JobStateError } from '@/shared';
5
+ import { ConnectionError, JobStateError, MonqueError } from '@/shared';
6
6
 
7
7
  import type { SchedulerContext } from './types.js';
8
8
 
@@ -244,14 +244,15 @@ export class JobManager {
244
244
  // ─────────────────────────────────────────────────────────────────────────────
245
245
 
246
246
  /**
247
- * Cancel multiple jobs matching the given filter.
247
+ * Cancel multiple jobs matching the given filter via a single updateMany call.
248
248
  *
249
- * Only cancels jobs in 'pending' status. Jobs in other states are collected
250
- * as errors in the result. Emits a 'jobs:cancelled' event with the IDs of
249
+ * Only cancels jobs in 'pending' status — the status guard is applied regardless
250
+ * of what the filter specifies. Jobs in other states are silently skipped (not
251
+ * matched by the query). Emits a 'jobs:cancelled' event with the count of
251
252
  * successfully cancelled jobs.
252
253
  *
253
254
  * @param filter - Selector for which jobs to cancel (name, status, date range)
254
- * @returns Result with count of cancelled jobs and any errors encountered
255
+ * @returns Result with count of cancelled jobs (errors array always empty for bulk ops)
255
256
  *
256
257
  * @example Cancel all pending jobs for a queue
257
258
  * ```typescript
@@ -263,75 +264,54 @@ export class JobManager {
263
264
  * ```
264
265
  */
265
266
  async cancelJobs(filter: JobSelector): Promise<BulkOperationResult> {
266
- const baseQuery = buildSelectorQuery(filter);
267
- const errors: Array<{ jobId: string; error: string }> = [];
268
- const cancelledIds: string[] = [];
269
-
270
- // Find all matching jobs and stream them to avoid memory pressure
271
- const cursor = this.ctx.collection.find(baseQuery);
272
-
273
- for await (const doc of cursor) {
274
- const jobId = doc._id.toString();
275
-
276
- if (doc['status'] !== JobStatus.PENDING && doc['status'] !== JobStatus.CANCELLED) {
277
- errors.push({
278
- jobId,
279
- error: `Cannot cancel job in status '${doc['status']}'`,
280
- });
281
- continue;
282
- }
267
+ const query = buildSelectorQuery(filter);
283
268
 
284
- // Skip already cancelled jobs (idempotent)
285
- if (doc['status'] === JobStatus.CANCELLED) {
286
- cancelledIds.push(jobId);
287
- continue;
269
+ // Enforce allowed status, but respect explicit status filters
270
+ if (filter.status !== undefined) {
271
+ const requested = Array.isArray(filter.status) ? filter.status : [filter.status];
272
+ if (!requested.includes(JobStatus.PENDING)) {
273
+ return { count: 0, errors: [] };
288
274
  }
275
+ }
276
+ query['status'] = JobStatus.PENDING;
289
277
 
290
- // Atomically update to cancelled
291
- const result = await this.ctx.collection.findOneAndUpdate(
292
- { _id: doc._id, status: JobStatus.PENDING },
293
- {
294
- $set: {
295
- status: JobStatus.CANCELLED,
296
- updatedAt: new Date(),
297
- },
278
+ try {
279
+ const result = await this.ctx.collection.updateMany(query, {
280
+ $set: {
281
+ status: JobStatus.CANCELLED,
282
+ updatedAt: new Date(),
298
283
  },
299
- { returnDocument: 'after' },
300
- );
284
+ });
285
+
286
+ const count = result.modifiedCount;
301
287
 
302
- if (result) {
303
- cancelledIds.push(jobId);
304
- } else {
305
- // Race condition: status changed
306
- errors.push({
307
- jobId,
308
- error: 'Job status changed during cancellation',
309
- });
288
+ if (count > 0) {
289
+ this.ctx.emit('jobs:cancelled', { count });
310
290
  }
311
- }
312
291
 
313
- if (cancelledIds.length > 0) {
314
- this.ctx.emit('jobs:cancelled', {
315
- jobIds: cancelledIds,
316
- count: cancelledIds.length,
317
- });
292
+ return { count, errors: [] };
293
+ } catch (error) {
294
+ if (error instanceof MonqueError) {
295
+ throw error;
296
+ }
297
+ const message = error instanceof Error ? error.message : 'Unknown error during cancelJobs';
298
+ throw new ConnectionError(
299
+ `Failed to cancel jobs: ${message}`,
300
+ error instanceof Error ? { cause: error } : undefined,
301
+ );
318
302
  }
319
-
320
- return {
321
- count: cancelledIds.length,
322
- errors,
323
- };
324
303
  }
325
304
 
326
305
  /**
327
- * Retry multiple jobs matching the given filter.
306
+ * Retry multiple jobs matching the given filter via a single pipeline-style updateMany call.
328
307
  *
329
- * Only retries jobs in 'failed' or 'cancelled' status. Jobs in other states
330
- * are collected as errors in the result. Emits a 'jobs:retried' event with
331
- * the IDs of successfully retried jobs.
308
+ * Only retries jobs in 'failed' or 'cancelled' status — the status guard is applied
309
+ * regardless of what the filter specifies. Jobs in other states are silently skipped.
310
+ * Uses `$rand` for per-document staggered `nextRunAt` to avoid thundering herd on retry.
311
+ * Emits a 'jobs:retried' event with the count of successfully retried jobs.
332
312
  *
333
313
  * @param filter - Selector for which jobs to retry (name, status, date range)
334
- * @returns Result with count of retried jobs and any errors encountered
314
+ * @returns Result with count of retried jobs (errors array always empty for bulk ops)
335
315
  *
336
316
  * @example Retry all failed jobs
337
317
  * ```typescript
@@ -342,67 +322,60 @@ export class JobManager {
342
322
  * ```
343
323
  */
344
324
  async retryJobs(filter: JobSelector): Promise<BulkOperationResult> {
345
- const baseQuery = buildSelectorQuery(filter);
346
- const errors: Array<{ jobId: string; error: string }> = [];
347
- const retriedIds: string[] = [];
348
-
349
- const cursor = this.ctx.collection.find(baseQuery);
350
-
351
- for await (const doc of cursor) {
352
- const jobId = doc._id.toString();
325
+ const query = buildSelectorQuery(filter);
353
326
 
354
- if (doc['status'] !== JobStatus.FAILED && doc['status'] !== JobStatus.CANCELLED) {
355
- errors.push({
356
- jobId,
357
- error: `Cannot retry job in status '${doc['status']}'`,
358
- });
359
- continue;
327
+ // Enforce allowed statuses, but respect explicit status filters
328
+ const retryable = [JobStatus.FAILED, JobStatus.CANCELLED] as const;
329
+ if (filter.status !== undefined) {
330
+ const requested = Array.isArray(filter.status) ? filter.status : [filter.status];
331
+ const allowed = requested.filter(
332
+ (status): status is (typeof retryable)[number] =>
333
+ status === JobStatus.FAILED || status === JobStatus.CANCELLED,
334
+ );
335
+ if (allowed.length === 0) {
336
+ return { count: 0, errors: [] };
360
337
  }
338
+ query['status'] = allowed.length === 1 ? allowed[0] : { $in: allowed };
339
+ } else {
340
+ query['status'] = { $in: retryable };
341
+ }
361
342
 
362
- const result = await this.ctx.collection.findOneAndUpdate(
363
- {
364
- _id: doc._id,
365
- status: { $in: [JobStatus.FAILED, JobStatus.CANCELLED] },
366
- },
343
+ const spreadWindowMs = 30_000; // 30s max spread for staggered retry
344
+
345
+ try {
346
+ const result = await this.ctx.collection.updateMany(query, [
367
347
  {
368
348
  $set: {
369
349
  status: JobStatus.PENDING,
370
350
  failCount: 0,
371
- nextRunAt: new Date(),
351
+ nextRunAt: {
352
+ $add: [new Date(), { $multiply: [{ $rand: {} }, spreadWindowMs] }],
353
+ },
372
354
  updatedAt: new Date(),
373
355
  },
374
- $unset: {
375
- failReason: '',
376
- lockedAt: '',
377
- claimedBy: '',
378
- lastHeartbeat: '',
379
- heartbeatInterval: '',
380
- },
381
356
  },
382
- { returnDocument: 'after' },
383
- );
357
+ {
358
+ $unset: ['failReason', 'lockedAt', 'claimedBy', 'lastHeartbeat', 'heartbeatInterval'],
359
+ },
360
+ ]);
384
361
 
385
- if (result) {
386
- retriedIds.push(jobId);
387
- } else {
388
- errors.push({
389
- jobId,
390
- error: 'Job status changed during retry attempt',
391
- });
362
+ const count = result.modifiedCount;
363
+
364
+ if (count > 0) {
365
+ this.ctx.emit('jobs:retried', { count });
392
366
  }
393
- }
394
367
 
395
- if (retriedIds.length > 0) {
396
- this.ctx.emit('jobs:retried', {
397
- jobIds: retriedIds,
398
- count: retriedIds.length,
399
- });
368
+ return { count, errors: [] };
369
+ } catch (error) {
370
+ if (error instanceof MonqueError) {
371
+ throw error;
372
+ }
373
+ const message = error instanceof Error ? error.message : 'Unknown error during retryJobs';
374
+ throw new ConnectionError(
375
+ `Failed to retry jobs: ${message}`,
376
+ error instanceof Error ? { cause: error } : undefined,
377
+ );
400
378
  }
401
-
402
- return {
403
- count: retriedIds.length,
404
- errors,
405
- };
406
379
  }
407
380
 
408
381
  /**
@@ -428,16 +401,27 @@ export class JobManager {
428
401
  async deleteJobs(filter: JobSelector): Promise<BulkOperationResult> {
429
402
  const query = buildSelectorQuery(filter);
430
403
 
431
- // Use deleteMany for efficiency
432
- const result = await this.ctx.collection.deleteMany(query);
404
+ try {
405
+ // Use deleteMany for efficiency
406
+ const result = await this.ctx.collection.deleteMany(query);
433
407
 
434
- if (result.deletedCount > 0) {
435
- this.ctx.emit('jobs:deleted', { count: result.deletedCount });
436
- }
408
+ if (result.deletedCount > 0) {
409
+ this.ctx.emit('jobs:deleted', { count: result.deletedCount });
410
+ }
437
411
 
438
- return {
439
- count: result.deletedCount,
440
- errors: [],
441
- };
412
+ return {
413
+ count: result.deletedCount,
414
+ errors: [],
415
+ };
416
+ } catch (error) {
417
+ if (error instanceof MonqueError) {
418
+ throw error;
419
+ }
420
+ const message = error instanceof Error ? error.message : 'Unknown error during deleteJobs';
421
+ throw new ConnectionError(
422
+ `Failed to delete jobs: ${message}`,
423
+ error instanceof Error ? { cause: error } : undefined,
424
+ );
425
+ }
442
426
  }
443
427
  }
@@ -16,6 +16,11 @@ import { AggregationTimeoutError, ConnectionError } from '@/shared';
16
16
  import { buildSelectorQuery, decodeCursor, encodeCursor } from '../helpers.js';
17
17
  import type { SchedulerContext } from './types.js';
18
18
 
19
+ interface StatsCacheEntry {
20
+ data: QueueStats;
21
+ expiresAt: number;
22
+ }
23
+
19
24
  /**
20
25
  * Internal service for job query operations.
21
26
  *
@@ -25,6 +30,9 @@ import type { SchedulerContext } from './types.js';
25
30
  * @internal Not part of public API - use Monque class methods instead.
26
31
  */
27
32
  export class JobQueryService {
33
+ private readonly statsCache = new Map<string, StatsCacheEntry>();
34
+ private static readonly MAX_CACHE_SIZE = 100;
35
+
28
36
  constructor(private readonly ctx: SchedulerContext) {}
29
37
 
30
38
  /**
@@ -264,12 +272,24 @@ export class JobQueryService {
264
272
  };
265
273
  }
266
274
 
275
+ /**
276
+ * Clear all cached getQueueStats() results.
277
+ * Called on scheduler stop() for clean state on restart.
278
+ * @internal
279
+ */
280
+ clearStatsCache(): void {
281
+ this.statsCache.clear();
282
+ }
283
+
267
284
  /**
268
285
  * Get aggregate statistics for the job queue.
269
286
  *
270
287
  * Uses MongoDB aggregation pipeline for efficient server-side calculation.
271
288
  * Returns counts per status and optional average processing duration for completed jobs.
272
289
  *
290
+ * Results are cached per unique filter with a configurable TTL (default 5s).
291
+ * Set `statsCacheTtlMs: 0` to disable caching.
292
+ *
273
293
  * @param filter - Optional filter to scope statistics by job name
274
294
  * @returns Promise resolving to queue statistics
275
295
  * @throws {AggregationTimeoutError} If aggregation exceeds 30 second timeout
@@ -288,6 +308,16 @@ export class JobQueryService {
288
308
  * ```
289
309
  */
290
310
  async getQueueStats(filter?: Pick<JobSelector, 'name'>): Promise<QueueStats> {
311
+ const ttl = this.ctx.options.statsCacheTtlMs;
312
+ const cacheKey = filter?.name ?? '';
313
+
314
+ if (ttl > 0) {
315
+ const cached = this.statsCache.get(cacheKey);
316
+ if (cached && cached.expiresAt > Date.now()) {
317
+ return { ...cached.data };
318
+ }
319
+ }
320
+
291
321
  const matchStage: Document = {};
292
322
 
293
323
  if (filter?.name) {
@@ -350,48 +380,63 @@ export class JobQueryService {
350
380
  total: 0,
351
381
  };
352
382
 
353
- if (!result) {
354
- return stats;
355
- }
383
+ if (result) {
384
+ // Map status counts to stats
385
+ const statusCounts = result['statusCounts'] as Array<{ _id: string; count: number }>;
386
+ for (const entry of statusCounts) {
387
+ const status = entry._id;
388
+ const count = entry.count;
389
+
390
+ switch (status) {
391
+ case JobStatus.PENDING:
392
+ stats.pending = count;
393
+ break;
394
+ case JobStatus.PROCESSING:
395
+ stats.processing = count;
396
+ break;
397
+ case JobStatus.COMPLETED:
398
+ stats.completed = count;
399
+ break;
400
+ case JobStatus.FAILED:
401
+ stats.failed = count;
402
+ break;
403
+ case JobStatus.CANCELLED:
404
+ stats.cancelled = count;
405
+ break;
406
+ }
407
+ }
356
408
 
357
- // Map status counts to stats
358
- const statusCounts = result['statusCounts'] as Array<{ _id: string; count: number }>;
359
- for (const entry of statusCounts) {
360
- const status = entry._id;
361
- const count = entry.count;
362
-
363
- switch (status) {
364
- case JobStatus.PENDING:
365
- stats.pending = count;
366
- break;
367
- case JobStatus.PROCESSING:
368
- stats.processing = count;
369
- break;
370
- case JobStatus.COMPLETED:
371
- stats.completed = count;
372
- break;
373
- case JobStatus.FAILED:
374
- stats.failed = count;
375
- break;
376
- case JobStatus.CANCELLED:
377
- stats.cancelled = count;
378
- break;
409
+ // Extract total
410
+ const totalResult = result['total'] as Array<{ count: number }>;
411
+ if (totalResult.length > 0 && totalResult[0]) {
412
+ stats.total = totalResult[0].count;
379
413
  }
380
- }
381
414
 
382
- // Extract total
383
- const totalResult = result['total'] as Array<{ count: number }>;
384
- if (totalResult.length > 0 && totalResult[0]) {
385
- stats.total = totalResult[0].count;
415
+ // Extract average processing duration
416
+ const avgDurationResult = result['avgDuration'] as Array<{ avgMs: number }>;
417
+ if (avgDurationResult.length > 0 && avgDurationResult[0]) {
418
+ const avgMs = avgDurationResult[0].avgMs;
419
+ if (typeof avgMs === 'number' && !Number.isNaN(avgMs)) {
420
+ stats.avgProcessingDurationMs = Math.round(avgMs);
421
+ }
422
+ }
386
423
  }
387
424
 
388
- // Extract average processing duration
389
- const avgDurationResult = result['avgDuration'] as Array<{ avgMs: number }>;
390
- if (avgDurationResult.length > 0 && avgDurationResult[0]) {
391
- const avgMs = avgDurationResult[0].avgMs;
392
- if (typeof avgMs === 'number' && !Number.isNaN(avgMs)) {
393
- stats.avgProcessingDurationMs = Math.round(avgMs);
425
+ // Cache the result if TTL is enabled
426
+ if (ttl > 0) {
427
+ // Delete existing entry first so re-insertion moves it to end (Map insertion order = LRU)
428
+ this.statsCache.delete(cacheKey);
429
+ // LRU eviction: if cache is still full after removing existing key, evict the oldest entry
430
+ if (this.statsCache.size >= JobQueryService.MAX_CACHE_SIZE) {
431
+ const oldestKey = this.statsCache.keys().next().value;
432
+ if (oldestKey !== undefined) {
433
+ this.statsCache.delete(oldestKey);
434
+ }
394
435
  }
436
+ this.statsCache.set(cacheKey, {
437
+ data: { ...stats },
438
+ expiresAt: Date.now() + ttl,
439
+ });
395
440
  }
396
441
 
397
442
  return stats;
@@ -1,4 +1,4 @@
1
- import type { Document } from 'mongodb';
1
+ import { BSON, type Document } from 'mongodb';
2
2
 
3
3
  import {
4
4
  type EnqueueOptions,
@@ -7,7 +7,7 @@ import {
7
7
  type PersistedJob,
8
8
  type ScheduleOptions,
9
9
  } from '@/jobs';
10
- import { ConnectionError, getNextCronDate, MonqueError } from '@/shared';
10
+ import { ConnectionError, getNextCronDate, MonqueError, PayloadTooLargeError } from '@/shared';
11
11
 
12
12
  import type { SchedulerContext } from './types.js';
13
13
 
@@ -22,6 +22,41 @@ import type { SchedulerContext } from './types.js';
22
22
  export class JobScheduler {
23
23
  constructor(private readonly ctx: SchedulerContext) {}
24
24
 
25
+ /**
26
+ * Validate that the job data payload does not exceed the configured maximum BSON byte size.
27
+ *
28
+ * @param data - The job data payload to validate
29
+ * @throws {PayloadTooLargeError} If the payload exceeds `maxPayloadSize`
30
+ */
31
+ private validatePayloadSize(data: unknown): void {
32
+ const maxSize = this.ctx.options.maxPayloadSize;
33
+ if (maxSize === undefined) {
34
+ return;
35
+ }
36
+
37
+ let size: number;
38
+ try {
39
+ size = BSON.calculateObjectSize({ data } as Document);
40
+ } catch (error) {
41
+ const cause = error instanceof Error ? error : new Error(String(error));
42
+ const sizeError = new PayloadTooLargeError(
43
+ `Failed to calculate job payload size: ${cause.message}`,
44
+ -1,
45
+ maxSize,
46
+ );
47
+ sizeError.cause = cause;
48
+ throw sizeError;
49
+ }
50
+
51
+ if (size > maxSize) {
52
+ throw new PayloadTooLargeError(
53
+ `Job payload exceeds maximum size: ${size} bytes > ${maxSize} bytes`,
54
+ size,
55
+ maxSize,
56
+ );
57
+ }
58
+ }
59
+
25
60
  /**
26
61
  * Enqueue a job for processing.
27
62
  *
@@ -40,6 +75,7 @@ export class JobScheduler {
40
75
  * @param options - Scheduling and deduplication options
41
76
  * @returns Promise resolving to the created or existing job document
42
77
  * @throws {ConnectionError} If database operation fails or scheduler not initialized
78
+ * @throws {PayloadTooLargeError} If payload exceeds configured `maxPayloadSize`
43
79
  *
44
80
  * @example Basic job enqueueing
45
81
  * ```typescript
@@ -67,6 +103,7 @@ export class JobScheduler {
67
103
  * ```
68
104
  */
69
105
  async enqueue<T>(name: string, data: T, options: EnqueueOptions = {}): Promise<PersistedJob<T>> {
106
+ this.validatePayloadSize(data);
70
107
  const now = new Date();
71
108
  const job: Omit<Job<T>, '_id'> = {
72
109
  name,
@@ -174,6 +211,7 @@ export class JobScheduler {
174
211
  * @returns Promise resolving to the created job document with `repeatInterval` set
175
212
  * @throws {InvalidCronError} If cron expression is invalid
176
213
  * @throws {ConnectionError} If database operation fails or scheduler not initialized
214
+ * @throws {PayloadTooLargeError} If payload exceeds configured `maxPayloadSize`
177
215
  *
178
216
  * @example Hourly cleanup job
179
217
  * ```typescript
@@ -204,6 +242,8 @@ export class JobScheduler {
204
242
  data: T,
205
243
  options: ScheduleOptions = {},
206
244
  ): Promise<PersistedJob<T>> {
245
+ this.validatePayloadSize(data);
246
+
207
247
  // Validate cron and get next run date (throws InvalidCronError if invalid)
208
248
  const nextRunAt = getNextCronDate(cron);
209
249
 
@@ -0,0 +1,154 @@
1
+ import type { DeleteResult } from 'mongodb';
2
+
3
+ import { JobStatus } from '@/jobs';
4
+ import { toError } from '@/shared';
5
+
6
+ import type { SchedulerContext } from './types.js';
7
+
8
+ /**
9
+ * Default retention check interval (1 hour).
10
+ */
11
+ const DEFAULT_RETENTION_INTERVAL = 3600_000;
12
+
13
+ /**
14
+ * Callbacks for timer-driven operations.
15
+ *
16
+ * These are provided by the Monque facade to wire LifecycleManager's timers
17
+ * to JobProcessor methods without creating a direct dependency.
18
+ */
19
+ interface TimerCallbacks {
20
+ /** Poll for pending jobs */
21
+ poll: () => Promise<void>;
22
+ /** Update heartbeats for claimed jobs */
23
+ updateHeartbeats: () => Promise<void>;
24
+ }
25
+
26
+ /**
27
+ * Manages scheduler lifecycle timers and job cleanup.
28
+ *
29
+ * Owns poll interval, heartbeat interval, cleanup interval, and the
30
+ * cleanupJobs logic. Extracted from Monque to keep the facade thin.
31
+ *
32
+ * @internal Not part of public API.
33
+ */
34
+ export class LifecycleManager {
35
+ private readonly ctx: SchedulerContext;
36
+ private pollIntervalId: ReturnType<typeof setInterval> | null = null;
37
+ private heartbeatIntervalId: ReturnType<typeof setInterval> | null = null;
38
+ private cleanupIntervalId: ReturnType<typeof setInterval> | null = null;
39
+
40
+ constructor(ctx: SchedulerContext) {
41
+ this.ctx = ctx;
42
+ }
43
+
44
+ /**
45
+ * Start all lifecycle timers.
46
+ *
47
+ * Sets up poll interval, heartbeat interval, and (if configured)
48
+ * cleanup interval. Runs an initial poll immediately.
49
+ *
50
+ * @param callbacks - Functions to invoke on each timer tick
51
+ */
52
+ startTimers(callbacks: TimerCallbacks): void {
53
+ // Set up polling as backup (runs at configured interval)
54
+ this.pollIntervalId = setInterval(() => {
55
+ callbacks.poll().catch((error: unknown) => {
56
+ this.ctx.emit('job:error', { error: toError(error) });
57
+ });
58
+ }, this.ctx.options.pollInterval);
59
+
60
+ // Start heartbeat interval for claimed jobs
61
+ this.heartbeatIntervalId = setInterval(() => {
62
+ callbacks.updateHeartbeats().catch((error: unknown) => {
63
+ this.ctx.emit('job:error', { error: toError(error) });
64
+ });
65
+ }, this.ctx.options.heartbeatInterval);
66
+
67
+ // Start cleanup interval if retention is configured
68
+ if (this.ctx.options.jobRetention) {
69
+ const interval = this.ctx.options.jobRetention.interval ?? DEFAULT_RETENTION_INTERVAL;
70
+
71
+ // Run immediately on start
72
+ this.cleanupJobs().catch((error: unknown) => {
73
+ this.ctx.emit('job:error', { error: toError(error) });
74
+ });
75
+
76
+ this.cleanupIntervalId = setInterval(() => {
77
+ this.cleanupJobs().catch((error: unknown) => {
78
+ this.ctx.emit('job:error', { error: toError(error) });
79
+ });
80
+ }, interval);
81
+ }
82
+
83
+ // Run initial poll immediately to pick up any existing jobs
84
+ callbacks.poll().catch((error: unknown) => {
85
+ this.ctx.emit('job:error', { error: toError(error) });
86
+ });
87
+ }
88
+
89
+ /**
90
+ * Stop all lifecycle timers.
91
+ *
92
+ * Clears poll, heartbeat, and cleanup intervals.
93
+ */
94
+ stopTimers(): void {
95
+ if (this.cleanupIntervalId) {
96
+ clearInterval(this.cleanupIntervalId);
97
+ this.cleanupIntervalId = null;
98
+ }
99
+
100
+ if (this.pollIntervalId) {
101
+ clearInterval(this.pollIntervalId);
102
+ this.pollIntervalId = null;
103
+ }
104
+
105
+ if (this.heartbeatIntervalId) {
106
+ clearInterval(this.heartbeatIntervalId);
107
+ this.heartbeatIntervalId = null;
108
+ }
109
+ }
110
+
111
+ /**
112
+ * Clean up old completed and failed jobs based on retention policy.
113
+ *
114
+ * - Removes completed jobs older than `jobRetention.completed`
115
+ * - Removes failed jobs older than `jobRetention.failed`
116
+ *
117
+ * The cleanup runs concurrently for both statuses if configured.
118
+ *
119
+ * @returns Promise resolving when all deletion operations complete
120
+ */
121
+ async cleanupJobs(): Promise<void> {
122
+ if (!this.ctx.options.jobRetention) {
123
+ return;
124
+ }
125
+
126
+ const { completed, failed } = this.ctx.options.jobRetention;
127
+ const now = Date.now();
128
+ const deletions: Promise<DeleteResult>[] = [];
129
+
130
+ if (completed != null) {
131
+ const cutoff = new Date(now - completed);
132
+ deletions.push(
133
+ this.ctx.collection.deleteMany({
134
+ status: JobStatus.COMPLETED,
135
+ updatedAt: { $lt: cutoff },
136
+ }),
137
+ );
138
+ }
139
+
140
+ if (failed != null) {
141
+ const cutoff = new Date(now - failed);
142
+ deletions.push(
143
+ this.ctx.collection.deleteMany({
144
+ status: JobStatus.FAILED,
145
+ updatedAt: { $lt: cutoff },
146
+ }),
147
+ );
148
+ }
149
+
150
+ if (deletions.length > 0) {
151
+ await Promise.all(deletions);
152
+ }
153
+ }
154
+ }