@monque/core 1.1.0 → 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +15 -0
- package/dist/CHANGELOG.md +89 -0
- package/dist/LICENSE +15 -0
- package/dist/README.md +150 -0
- package/dist/index.cjs +6 -6
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +6 -14
- package/dist/index.d.cts.map +1 -1
- package/dist/index.d.mts +6 -14
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +6 -6
- package/dist/index.mjs.map +1 -1
- package/package.json +9 -7
- package/src/events/index.ts +1 -0
- package/src/events/types.ts +113 -0
- package/src/index.ts +51 -0
- package/src/jobs/guards.ts +220 -0
- package/src/jobs/index.ts +29 -0
- package/src/jobs/types.ts +335 -0
- package/src/scheduler/helpers.ts +107 -0
- package/src/scheduler/index.ts +5 -0
- package/src/scheduler/monque.ts +1309 -0
- package/src/scheduler/services/change-stream-handler.ts +239 -0
- package/src/scheduler/services/index.ts +8 -0
- package/src/scheduler/services/job-manager.ts +455 -0
- package/src/scheduler/services/job-processor.ts +301 -0
- package/src/scheduler/services/job-query.ts +411 -0
- package/src/scheduler/services/job-scheduler.ts +267 -0
- package/src/scheduler/services/types.ts +48 -0
- package/src/scheduler/types.ts +123 -0
- package/src/shared/errors.ts +225 -0
- package/src/shared/index.ts +18 -0
- package/src/shared/utils/backoff.ts +77 -0
- package/src/shared/utils/cron.ts +67 -0
- package/src/shared/utils/index.ts +7 -0
- package/src/workers/index.ts +1 -0
- package/src/workers/types.ts +39 -0
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
import { isPersistedJob, type Job, JobStatus, type PersistedJob } from '@/jobs';
|
|
2
|
+
import { calculateBackoff, getNextCronDate } from '@/shared';
|
|
3
|
+
import type { WorkerRegistration } from '@/workers';
|
|
4
|
+
|
|
5
|
+
import type { SchedulerContext } from './types.js';
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Internal service for job processing and execution.
|
|
9
|
+
*
|
|
10
|
+
* Manages the poll loop, atomic job acquisition, handler execution,
|
|
11
|
+
* and job completion/failure with exponential backoff retry logic.
|
|
12
|
+
*
|
|
13
|
+
* @internal Not part of public API.
|
|
14
|
+
*/
|
|
15
|
+
export class JobProcessor {
  constructor(private readonly ctx: SchedulerContext) {}

  /**
   * Poll for available jobs and process them.
   *
   * Called at regular intervals (configured by `pollInterval`). For each registered worker,
   * attempts to acquire jobs up to the worker's available concurrency slots.
   * Aborts early if the scheduler is stopping (`isRunning` is false).
   */
  async poll(): Promise<void> {
    if (!this.ctx.isRunning()) {
      return;
    }

    for (const [name, worker] of this.ctx.workers) {
      // Check if worker has capacity
      const availableSlots = worker.concurrency - worker.activeJobs.size;

      if (availableSlots <= 0) {
        continue;
      }

      // Try to acquire jobs up to available slots
      for (let i = 0; i < availableSlots; i++) {
        if (!this.ctx.isRunning()) {
          return;
        }
        const job = await this.acquireJob(name);

        if (job) {
          // Intentionally not awaited: jobs run concurrently up to the worker's
          // concurrency limit. processJob handles its own success/failure paths,
          // so a rejection here is an unexpected internal error.
          this.processJob(job, worker).catch((error: unknown) => {
            this.ctx.emit('job:error', { error: error as Error, job });
          });
        } else {
          // No more jobs available for this worker
          break;
        }
      }
    }
  }

  /**
   * Atomically acquire a pending job for processing using the claimedBy pattern.
   *
   * Uses MongoDB's `findOneAndUpdate` with atomic operations to ensure only one scheduler
   * instance can claim a job. The query ensures the job is:
   * - In pending status
   * - Has nextRunAt <= now
   * - Is not claimed by another instance (claimedBy is null/undefined)
   *
   * Returns `null` immediately if scheduler is stopping (`isRunning` is false).
   *
   * @param name - The job type to acquire
   * @returns The acquired job with updated status, claimedBy, and heartbeat info, or `null` if no jobs available
   */
  async acquireJob(name: string): Promise<PersistedJob | null> {
    if (!this.ctx.isRunning()) {
      return null;
    }

    const now = new Date();

    const result = await this.ctx.collection.findOneAndUpdate(
      {
        name,
        status: JobStatus.PENDING,
        nextRunAt: { $lte: now },
        $or: [{ claimedBy: null }, { claimedBy: { $exists: false } }],
      },
      {
        $set: {
          status: JobStatus.PROCESSING,
          claimedBy: this.ctx.instanceId,
          lockedAt: now,
          lastHeartbeat: now,
          heartbeatInterval: this.ctx.options.heartbeatInterval,
          updatedAt: now,
        },
      },
      {
        // Oldest-due job first; return the claimed (post-update) document.
        sort: { nextRunAt: 1 },
        returnDocument: 'after',
      },
    );

    // NOTE(review): if the scheduler stopped while the update was in flight, the job
    // was already claimed above but is dropped here without a rollback. Presumably
    // stale-lock recovery (lockedAt + lockTimeout, see updateHeartbeats) reclaims it
    // eventually — confirm.
    if (!this.ctx.isRunning()) {
      return null;
    }

    if (!result) {
      return null;
    }

    return this.ctx.documentToPersistedJob(result);
  }

  /**
   * Execute a job using its registered worker handler.
   *
   * Tracks the job as active during processing, emits lifecycle events, and handles
   * both success and failure cases. On success, calls `completeJob()`. On failure,
   * calls `failJob()` which implements exponential backoff retry logic.
   *
   * @param job - The job to process
   * @param worker - The worker registration containing the handler and active job tracking
   */
  async processJob(job: PersistedJob, worker: WorkerRegistration): Promise<void> {
    const jobId = job._id.toString();
    worker.activeJobs.set(jobId, job);

    const startTime = Date.now();
    this.ctx.emit('job:start', job);

    try {
      await worker.handler(job);

      // Job completed successfully
      const duration = Date.now() - startTime;
      // NOTE(review): completeJob runs inside the try block, so a DB error while
      // marking completion falls through to the catch and is treated as a job
      // failure even though the handler itself succeeded — confirm this is intended.
      await this.completeJob(job);
      this.ctx.emit('job:complete', { job, duration });
    } catch (error) {
      // Job failed
      const err = error instanceof Error ? error : new Error(String(error));
      await this.failJob(job, err);

      // Mirrors failJob's permanent-failure condition (newFailCount >= maxRetries):
      // the job will be retried only while failCount + 1 stays below maxRetries.
      const willRetry = job.failCount + 1 < this.ctx.options.maxRetries;
      this.ctx.emit('job:fail', { job, error: err, willRetry });
    } finally {
      worker.activeJobs.delete(jobId);
    }
  }

  /**
   * Mark a job as completed successfully.
   *
   * For recurring jobs (with `repeatInterval`), schedules the next run based on the cron
   * expression and resets `failCount` to 0. For one-time jobs, sets status to `completed`.
   * Clears `lockedAt` and `failReason` fields in both cases.
   *
   * @param job - The job that completed successfully
   */
  async completeJob(job: Job): Promise<void> {
    // Jobs that were never persisted have nothing to update.
    if (!isPersistedJob(job)) {
      return;
    }

    if (job.repeatInterval) {
      // Recurring job - schedule next run
      const nextRunAt = getNextCronDate(job.repeatInterval);
      await this.ctx.collection.updateOne(
        { _id: job._id },
        {
          $set: {
            status: JobStatus.PENDING,
            nextRunAt,
            failCount: 0,
            updatedAt: new Date(),
          },
          $unset: {
            lockedAt: '',
            claimedBy: '',
            lastHeartbeat: '',
            heartbeatInterval: '',
            failReason: '',
          },
        },
      );
    } else {
      // One-time job - mark as completed
      await this.ctx.collection.updateOne(
        { _id: job._id },
        {
          $set: {
            status: JobStatus.COMPLETED,
            updatedAt: new Date(),
          },
          $unset: {
            lockedAt: '',
            claimedBy: '',
            lastHeartbeat: '',
            heartbeatInterval: '',
            failReason: '',
          },
        },
      );
      // NOTE(review): only the one-time branch mutates the in-memory job object;
      // the recurring branch leaves job.status as-is, so the `job:complete` event
      // payload differs between the two cases — confirm this asymmetry is intended.
      job.status = JobStatus.COMPLETED;
    }
  }

  /**
   * Handle job failure with exponential backoff retry logic.
   *
   * Increments `failCount` and calculates next retry time using exponential backoff:
   * `nextRunAt = 2^failCount × baseRetryInterval` (capped by optional `maxBackoffDelay`).
   *
   * If `failCount >= maxRetries`, marks job as permanently `failed`. Otherwise, resets
   * to `pending` status for retry. Stores error message in `failReason` field.
   *
   * @param job - The job that failed
   * @param error - The error that caused the failure
   */
  async failJob(job: Job, error: Error): Promise<void> {
    // Jobs that were never persisted have nothing to update.
    if (!isPersistedJob(job)) {
      return;
    }

    const newFailCount = job.failCount + 1;

    if (newFailCount >= this.ctx.options.maxRetries) {
      // Permanent failure
      await this.ctx.collection.updateOne(
        { _id: job._id },
        {
          $set: {
            status: JobStatus.FAILED,
            failCount: newFailCount,
            failReason: error.message,
            updatedAt: new Date(),
          },
          $unset: {
            lockedAt: '',
            claimedBy: '',
            lastHeartbeat: '',
            heartbeatInterval: '',
          },
        },
      );
    } else {
      // Schedule retry with exponential backoff
      const nextRunAt = calculateBackoff(
        newFailCount,
        this.ctx.options.baseRetryInterval,
        this.ctx.options.maxBackoffDelay,
      );

      // failReason is $set (not unset) here, so the most recent error message is
      // visible while the job waits for its retry; it is only cleared on completion.
      await this.ctx.collection.updateOne(
        { _id: job._id },
        {
          $set: {
            status: JobStatus.PENDING,
            failCount: newFailCount,
            failReason: error.message,
            nextRunAt,
            updatedAt: new Date(),
          },
          $unset: {
            lockedAt: '',
            claimedBy: '',
            lastHeartbeat: '',
            heartbeatInterval: '',
          },
        },
      );
    }
  }

  /**
   * Update heartbeats for all jobs claimed by this scheduler instance.
   *
   * This method runs periodically while the scheduler is running to indicate
   * that jobs are still being actively processed.
   *
   * `lastHeartbeat` is primarily an observability signal (monitoring/debugging).
   * Stale recovery is based on `lockedAt` + `lockTimeout`.
   */
  async updateHeartbeats(): Promise<void> {
    if (!this.ctx.isRunning()) {
      return;
    }

    const now = new Date();

    // Single bulk update for every job this instance currently holds.
    await this.ctx.collection.updateMany(
      {
        claimedBy: this.ctx.instanceId,
        status: JobStatus.PROCESSING,
      },
      {
        $set: {
          lastHeartbeat: now,
          updatedAt: now,
        },
      },
    );
  }
}
|
|
@@ -0,0 +1,411 @@
|
|
|
1
|
+
import type { Document, Filter, ObjectId, WithId } from 'mongodb';
|
|
2
|
+
|
|
3
|
+
import {
|
|
4
|
+
CursorDirection,
|
|
5
|
+
type CursorDirectionType,
|
|
6
|
+
type CursorOptions,
|
|
7
|
+
type CursorPage,
|
|
8
|
+
type GetJobsFilter,
|
|
9
|
+
type JobSelector,
|
|
10
|
+
JobStatus,
|
|
11
|
+
type PersistedJob,
|
|
12
|
+
type QueueStats,
|
|
13
|
+
} from '@/jobs';
|
|
14
|
+
import { AggregationTimeoutError, ConnectionError } from '@/shared';
|
|
15
|
+
|
|
16
|
+
import { buildSelectorQuery, decodeCursor, encodeCursor } from '../helpers.js';
|
|
17
|
+
import type { SchedulerContext } from './types.js';
|
|
18
|
+
|
|
19
|
+
/**
|
|
20
|
+
* Internal service for job query operations.
|
|
21
|
+
*
|
|
22
|
+
* Provides read-only access to jobs with filtering and cursor-based pagination.
|
|
23
|
+
* All queries use efficient index-backed access patterns.
|
|
24
|
+
*
|
|
25
|
+
* @internal Not part of public API - use Monque class methods instead.
|
|
26
|
+
*/
|
|
27
|
+
export class JobQueryService {
|
|
28
|
+
constructor(private readonly ctx: SchedulerContext) {}
|
|
29
|
+
|
|
30
|
+
/**
|
|
31
|
+
* Get a single job by its MongoDB ObjectId.
|
|
32
|
+
*
|
|
33
|
+
* Useful for retrieving job details when you have a job ID from events,
|
|
34
|
+
* logs, or stored references.
|
|
35
|
+
*
|
|
36
|
+
* @template T - The expected type of the job data payload
|
|
37
|
+
* @param id - The job's ObjectId
|
|
38
|
+
* @returns Promise resolving to the job if found, null otherwise
|
|
39
|
+
* @throws {ConnectionError} If scheduler not initialized
|
|
40
|
+
*
|
|
41
|
+
* @example Look up job from event
|
|
42
|
+
* ```typescript
|
|
43
|
+
* monque.on('job:fail', async ({ job }) => {
|
|
44
|
+
* // Later, retrieve the job to check its status
|
|
45
|
+
* const currentJob = await monque.getJob(job._id);
|
|
46
|
+
* console.log(`Job status: ${currentJob?.status}`);
|
|
47
|
+
* });
|
|
48
|
+
* ```
|
|
49
|
+
*
|
|
50
|
+
* @example Admin endpoint
|
|
51
|
+
* ```typescript
|
|
52
|
+
* app.get('/jobs/:id', async (req, res) => {
|
|
53
|
+
* const job = await monque.getJob(new ObjectId(req.params.id));
|
|
54
|
+
* if (!job) {
|
|
55
|
+
* return res.status(404).json({ error: 'Job not found' });
|
|
56
|
+
* }
|
|
57
|
+
* res.json(job);
|
|
58
|
+
* });
|
|
59
|
+
* ```
|
|
60
|
+
*/
|
|
61
|
+
async getJob<T = unknown>(id: ObjectId): Promise<PersistedJob<T> | null> {
|
|
62
|
+
try {
|
|
63
|
+
const doc = await this.ctx.collection.findOne({ _id: id });
|
|
64
|
+
if (!doc) {
|
|
65
|
+
return null;
|
|
66
|
+
}
|
|
67
|
+
return this.ctx.documentToPersistedJob<T>(doc as WithId<Document>);
|
|
68
|
+
} catch (error) {
|
|
69
|
+
const message = error instanceof Error ? error.message : 'Unknown error during getJob';
|
|
70
|
+
throw new ConnectionError(
|
|
71
|
+
`Failed to get job: ${message}`,
|
|
72
|
+
error instanceof Error ? { cause: error } : undefined,
|
|
73
|
+
);
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
/**
|
|
78
|
+
* Query jobs from the queue with optional filters.
|
|
79
|
+
*
|
|
80
|
+
* Provides read-only access to job data for monitoring, debugging, and
|
|
81
|
+
* administrative purposes. Results are ordered by `nextRunAt` ascending.
|
|
82
|
+
*
|
|
83
|
+
* @template T - The expected type of the job data payload
|
|
84
|
+
* @param filter - Optional filter criteria
|
|
85
|
+
* @returns Promise resolving to array of matching jobs
|
|
86
|
+
* @throws {ConnectionError} If scheduler not initialized
|
|
87
|
+
*
|
|
88
|
+
* @example Get all pending jobs
|
|
89
|
+
* ```typescript
|
|
90
|
+
* const pendingJobs = await monque.getJobs({ status: JobStatus.PENDING });
|
|
91
|
+
* console.log(`${pendingJobs.length} jobs waiting`);
|
|
92
|
+
* ```
|
|
93
|
+
*
|
|
94
|
+
* @example Get failed email jobs
|
|
95
|
+
* ```typescript
|
|
96
|
+
* const failedEmails = await monque.getJobs({
|
|
97
|
+
* name: 'send-email',
|
|
98
|
+
* status: JobStatus.FAILED,
|
|
99
|
+
* });
|
|
100
|
+
* for (const job of failedEmails) {
|
|
101
|
+
* console.error(`Job ${job._id} failed: ${job.failReason}`);
|
|
102
|
+
* }
|
|
103
|
+
* ```
|
|
104
|
+
*
|
|
105
|
+
* @example Paginated job listing
|
|
106
|
+
* ```typescript
|
|
107
|
+
* const page1 = await monque.getJobs({ limit: 50, skip: 0 });
|
|
108
|
+
* const page2 = await monque.getJobs({ limit: 50, skip: 50 });
|
|
109
|
+
* ```
|
|
110
|
+
*
|
|
111
|
+
* @example Use with type guards from @monque/core
|
|
112
|
+
* ```typescript
|
|
113
|
+
* import { isPendingJob, isRecurringJob } from '@monque/core';
|
|
114
|
+
*
|
|
115
|
+
* const jobs = await monque.getJobs();
|
|
116
|
+
* const pendingRecurring = jobs.filter(job => isPendingJob(job) && isRecurringJob(job));
|
|
117
|
+
* ```
|
|
118
|
+
*/
|
|
119
|
+
async getJobs<T = unknown>(filter: GetJobsFilter = {}): Promise<PersistedJob<T>[]> {
|
|
120
|
+
const query: Document = {};
|
|
121
|
+
|
|
122
|
+
if (filter.name !== undefined) {
|
|
123
|
+
query['name'] = filter.name;
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
if (filter.status !== undefined) {
|
|
127
|
+
if (Array.isArray(filter.status)) {
|
|
128
|
+
query['status'] = { $in: filter.status };
|
|
129
|
+
} else {
|
|
130
|
+
query['status'] = filter.status;
|
|
131
|
+
}
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
const limit = filter.limit ?? 100;
|
|
135
|
+
const skip = filter.skip ?? 0;
|
|
136
|
+
|
|
137
|
+
try {
|
|
138
|
+
const cursor = this.ctx.collection.find(query).sort({ nextRunAt: 1 }).skip(skip).limit(limit);
|
|
139
|
+
|
|
140
|
+
const docs = await cursor.toArray();
|
|
141
|
+
return docs.map((doc) => this.ctx.documentToPersistedJob<T>(doc));
|
|
142
|
+
} catch (error) {
|
|
143
|
+
const message = error instanceof Error ? error.message : 'Unknown error during getJobs';
|
|
144
|
+
throw new ConnectionError(
|
|
145
|
+
`Failed to query jobs: ${message}`,
|
|
146
|
+
error instanceof Error ? { cause: error } : undefined,
|
|
147
|
+
);
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
/**
|
|
152
|
+
* Get a paginated list of jobs using opaque cursors.
|
|
153
|
+
*
|
|
154
|
+
* Provides stable pagination for large job lists. Supports forward and backward
|
|
155
|
+
* navigation, filtering, and efficient database access via index-based cursor queries.
|
|
156
|
+
*
|
|
157
|
+
* @template T - The job data payload type
|
|
158
|
+
* @param options - Pagination options (cursor, limit, direction, filter)
|
|
159
|
+
* @returns Page of jobs with next/prev cursors
|
|
160
|
+
* @throws {InvalidCursorError} If the provided cursor is malformed
|
|
161
|
+
* @throws {ConnectionError} If database operation fails or scheduler not initialized
|
|
162
|
+
*
|
|
163
|
+
* @example List pending jobs
|
|
164
|
+
* ```typescript
|
|
165
|
+
* const page = await monque.getJobsWithCursor({
|
|
166
|
+
* limit: 20,
|
|
167
|
+
* filter: { status: 'pending' }
|
|
168
|
+
* });
|
|
169
|
+
* const jobs = page.jobs;
|
|
170
|
+
*
|
|
171
|
+
* // Get next page
|
|
172
|
+
* if (page.hasNextPage) {
|
|
173
|
+
* const page2 = await monque.getJobsWithCursor({
|
|
174
|
+
* cursor: page.cursor,
|
|
175
|
+
* limit: 20
|
|
176
|
+
* });
|
|
177
|
+
* }
|
|
178
|
+
* ```
|
|
179
|
+
*/
|
|
180
|
+
async getJobsWithCursor<T = unknown>(options: CursorOptions = {}): Promise<CursorPage<T>> {
|
|
181
|
+
const limit = options.limit ?? 50;
|
|
182
|
+
// Default to forward if not specified.
|
|
183
|
+
const direction: CursorDirectionType = options.direction ?? CursorDirection.FORWARD;
|
|
184
|
+
let anchorId: ObjectId | null = null;
|
|
185
|
+
|
|
186
|
+
if (options.cursor) {
|
|
187
|
+
const decoded = decodeCursor(options.cursor);
|
|
188
|
+
anchorId = decoded.id;
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
// Build base query from filters
|
|
192
|
+
const query: Filter<Document> = options.filter ? buildSelectorQuery(options.filter) : {};
|
|
193
|
+
|
|
194
|
+
// Add cursor condition to query
|
|
195
|
+
const sortDir = direction === CursorDirection.FORWARD ? 1 : -1;
|
|
196
|
+
|
|
197
|
+
if (anchorId) {
|
|
198
|
+
if (direction === CursorDirection.FORWARD) {
|
|
199
|
+
query._id = { ...query._id, $gt: anchorId };
|
|
200
|
+
} else {
|
|
201
|
+
query._id = { ...query._id, $lt: anchorId };
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
// Fetch limit + 1 to detect hasNext/hasPrev
|
|
206
|
+
const fetchLimit = limit + 1;
|
|
207
|
+
|
|
208
|
+
// Sort: Always deterministic.
|
|
209
|
+
let docs: WithId<Document>[];
|
|
210
|
+
try {
|
|
211
|
+
docs = await this.ctx.collection
|
|
212
|
+
.find(query)
|
|
213
|
+
.sort({ _id: sortDir })
|
|
214
|
+
.limit(fetchLimit)
|
|
215
|
+
.toArray();
|
|
216
|
+
} catch (error) {
|
|
217
|
+
const message =
|
|
218
|
+
error instanceof Error ? error.message : 'Unknown error during getJobsWithCursor';
|
|
219
|
+
throw new ConnectionError(
|
|
220
|
+
`Failed to query jobs with cursor: ${message}`,
|
|
221
|
+
error instanceof Error ? { cause: error } : undefined,
|
|
222
|
+
);
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
let hasMore = false;
|
|
226
|
+
if (docs.length > limit) {
|
|
227
|
+
hasMore = true;
|
|
228
|
+
docs.pop(); // Remove the extra item
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
if (direction === CursorDirection.BACKWARD) {
|
|
232
|
+
docs.reverse();
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
const jobs = docs.map((doc) => this.ctx.documentToPersistedJob<T>(doc as WithId<Document>));
|
|
236
|
+
|
|
237
|
+
let nextCursor: string | null = null;
|
|
238
|
+
|
|
239
|
+
if (jobs.length > 0) {
|
|
240
|
+
const lastJob = jobs[jobs.length - 1];
|
|
241
|
+
// Check for existence to satisfy strict null checks/noUncheckedIndexedAccess
|
|
242
|
+
if (lastJob) {
|
|
243
|
+
nextCursor = encodeCursor(lastJob._id, direction);
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
let hasNextPage = false;
|
|
248
|
+
let hasPreviousPage = false;
|
|
249
|
+
|
|
250
|
+
// Determine availability of next/prev pages
|
|
251
|
+
if (direction === CursorDirection.FORWARD) {
|
|
252
|
+
hasNextPage = hasMore;
|
|
253
|
+
hasPreviousPage = !!anchorId;
|
|
254
|
+
} else {
|
|
255
|
+
hasNextPage = !!anchorId;
|
|
256
|
+
hasPreviousPage = hasMore;
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
return {
|
|
260
|
+
jobs,
|
|
261
|
+
cursor: nextCursor,
|
|
262
|
+
hasNextPage,
|
|
263
|
+
hasPreviousPage,
|
|
264
|
+
};
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
/**
|
|
268
|
+
* Get aggregate statistics for the job queue.
|
|
269
|
+
*
|
|
270
|
+
* Uses MongoDB aggregation pipeline for efficient server-side calculation.
|
|
271
|
+
* Returns counts per status and optional average processing duration for completed jobs.
|
|
272
|
+
*
|
|
273
|
+
* @param filter - Optional filter to scope statistics by job name
|
|
274
|
+
* @returns Promise resolving to queue statistics
|
|
275
|
+
* @throws {AggregationTimeoutError} If aggregation exceeds 30 second timeout
|
|
276
|
+
* @throws {ConnectionError} If database operation fails
|
|
277
|
+
*
|
|
278
|
+
* @example Get overall queue statistics
|
|
279
|
+
* ```typescript
|
|
280
|
+
* const stats = await monque.getQueueStats();
|
|
281
|
+
* console.log(`Pending: ${stats.pending}, Failed: ${stats.failed}`);
|
|
282
|
+
* ```
|
|
283
|
+
*
|
|
284
|
+
* @example Get statistics for a specific job type
|
|
285
|
+
* ```typescript
|
|
286
|
+
* const emailStats = await monque.getQueueStats({ name: 'send-email' });
|
|
287
|
+
* console.log(`${emailStats.total} email jobs in queue`);
|
|
288
|
+
* ```
|
|
289
|
+
*/
|
|
290
|
+
async getQueueStats(filter?: Pick<JobSelector, 'name'>): Promise<QueueStats> {
|
|
291
|
+
const matchStage: Document = {};
|
|
292
|
+
|
|
293
|
+
if (filter?.name) {
|
|
294
|
+
matchStage['name'] = filter.name;
|
|
295
|
+
}
|
|
296
|
+
|
|
297
|
+
const pipeline: Document[] = [
|
|
298
|
+
// Optional match stage for filtering by name
|
|
299
|
+
...(Object.keys(matchStage).length > 0 ? [{ $match: matchStage }] : []),
|
|
300
|
+
// Facet to calculate counts and avg processing duration in parallel
|
|
301
|
+
{
|
|
302
|
+
$facet: {
|
|
303
|
+
// Count by status
|
|
304
|
+
statusCounts: [
|
|
305
|
+
{
|
|
306
|
+
$group: {
|
|
307
|
+
_id: '$status',
|
|
308
|
+
count: { $sum: 1 },
|
|
309
|
+
},
|
|
310
|
+
},
|
|
311
|
+
],
|
|
312
|
+
// Calculate average job lifetime for completed jobs.
|
|
313
|
+
// Uses createdAt → updatedAt (total lifetime = queue wait + processing)
|
|
314
|
+
// since completeJob() unsets lockedAt, making pure processing time unavailable.
|
|
315
|
+
avgDuration: [
|
|
316
|
+
{
|
|
317
|
+
$match: {
|
|
318
|
+
status: JobStatus.COMPLETED,
|
|
319
|
+
},
|
|
320
|
+
},
|
|
321
|
+
{
|
|
322
|
+
$group: {
|
|
323
|
+
_id: null,
|
|
324
|
+
avgMs: {
|
|
325
|
+
$avg: {
|
|
326
|
+
$subtract: ['$updatedAt', '$createdAt'],
|
|
327
|
+
},
|
|
328
|
+
},
|
|
329
|
+
},
|
|
330
|
+
},
|
|
331
|
+
],
|
|
332
|
+
// Total count
|
|
333
|
+
total: [{ $count: 'count' }],
|
|
334
|
+
},
|
|
335
|
+
},
|
|
336
|
+
];
|
|
337
|
+
|
|
338
|
+
try {
|
|
339
|
+
const results = await this.ctx.collection.aggregate(pipeline, { maxTimeMS: 30000 }).toArray();
|
|
340
|
+
|
|
341
|
+
const result = results[0];
|
|
342
|
+
|
|
343
|
+
// Initialize with zeros
|
|
344
|
+
const stats: QueueStats = {
|
|
345
|
+
pending: 0,
|
|
346
|
+
processing: 0,
|
|
347
|
+
completed: 0,
|
|
348
|
+
failed: 0,
|
|
349
|
+
cancelled: 0,
|
|
350
|
+
total: 0,
|
|
351
|
+
};
|
|
352
|
+
|
|
353
|
+
if (!result) {
|
|
354
|
+
return stats;
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
// Map status counts to stats
|
|
358
|
+
const statusCounts = result['statusCounts'] as Array<{ _id: string; count: number }>;
|
|
359
|
+
for (const entry of statusCounts) {
|
|
360
|
+
const status = entry._id;
|
|
361
|
+
const count = entry.count;
|
|
362
|
+
|
|
363
|
+
switch (status) {
|
|
364
|
+
case JobStatus.PENDING:
|
|
365
|
+
stats.pending = count;
|
|
366
|
+
break;
|
|
367
|
+
case JobStatus.PROCESSING:
|
|
368
|
+
stats.processing = count;
|
|
369
|
+
break;
|
|
370
|
+
case JobStatus.COMPLETED:
|
|
371
|
+
stats.completed = count;
|
|
372
|
+
break;
|
|
373
|
+
case JobStatus.FAILED:
|
|
374
|
+
stats.failed = count;
|
|
375
|
+
break;
|
|
376
|
+
case JobStatus.CANCELLED:
|
|
377
|
+
stats.cancelled = count;
|
|
378
|
+
break;
|
|
379
|
+
}
|
|
380
|
+
}
|
|
381
|
+
|
|
382
|
+
// Extract total
|
|
383
|
+
const totalResult = result['total'] as Array<{ count: number }>;
|
|
384
|
+
if (totalResult.length > 0 && totalResult[0]) {
|
|
385
|
+
stats.total = totalResult[0].count;
|
|
386
|
+
}
|
|
387
|
+
|
|
388
|
+
// Extract average processing duration
|
|
389
|
+
const avgDurationResult = result['avgDuration'] as Array<{ avgMs: number }>;
|
|
390
|
+
if (avgDurationResult.length > 0 && avgDurationResult[0]) {
|
|
391
|
+
const avgMs = avgDurationResult[0].avgMs;
|
|
392
|
+
if (typeof avgMs === 'number' && !Number.isNaN(avgMs)) {
|
|
393
|
+
stats.avgProcessingDurationMs = Math.round(avgMs);
|
|
394
|
+
}
|
|
395
|
+
}
|
|
396
|
+
|
|
397
|
+
return stats;
|
|
398
|
+
} catch (error) {
|
|
399
|
+
// Check for timeout error
|
|
400
|
+
if (error instanceof Error && error.message.includes('exceeded time limit')) {
|
|
401
|
+
throw new AggregationTimeoutError();
|
|
402
|
+
}
|
|
403
|
+
|
|
404
|
+
const message = error instanceof Error ? error.message : 'Unknown error during getQueueStats';
|
|
405
|
+
throw new ConnectionError(
|
|
406
|
+
`Failed to get queue stats: ${message}`,
|
|
407
|
+
error instanceof Error ? { cause: error } : undefined,
|
|
408
|
+
);
|
|
409
|
+
}
|
|
410
|
+
}
|
|
411
|
+
}
|