mongo-job-scheduler 0.1.15 → 0.1.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +22 -0
- package/dist/core/scheduler.js +16 -0
- package/dist/store/in-memory-job-store.d.ts +1 -0
- package/dist/store/in-memory-job-store.js +18 -0
- package/dist/store/job-store.d.ts +5 -0
- package/dist/store/mongo/mongo-job-store.d.ts +1 -3
- package/dist/store/mongo/mongo-job-store.js +151 -40
- package/dist/types/job.d.ts +14 -0
- package/dist/types/schedule.d.ts +5 -0
- package/dist/worker/worker.js +32 -25
- package/package.json +1 -2
package/README.md
CHANGED
@@ -11,6 +11,7 @@ A production-grade MongoDB-backed job scheduler for Node.js with distributed loc
 - ✅ **Distributed locking** — safe for multiple instances
 - ✅ **Atomic job execution** — no double processing
 - ✅ **Job priority** — process important jobs first
+- ✅ **Concurrency limits** — rate-limit job execution
 - ✅ **Automatic retries** — with configurable backoff
 - ✅ **Cron scheduling** — timezone-aware, non-drifting
 - ✅ **Interval jobs** — repeated execution
@@ -207,6 +208,27 @@ await scheduler.updateJob(jobId, { priority: 2 });
 
 > **Priority Scale**: 1 (highest) → 10 (lowest). Jobs with equal priority run in FIFO order by `nextRunAt`.
 
+### Concurrency Limits
+
+Limit how many instances of a job type can run simultaneously (useful for rate-limiting API calls):
+
+```typescript
+// Max 5 concurrent "api-sync" jobs globally
+await scheduler.schedule({
+  name: "api-sync",
+  concurrency: 5,
+});
+
+// Max 2 concurrent "webhook" jobs
+await scheduler.schedule({
+  name: "webhook",
+  data: { url: "https://..." },
+  concurrency: 2,
+});
+```
+
+> **Note**: Concurrency is enforced globally across all workers. Jobs exceeding the limit wait until a slot frees up.
+
 ### Retries with Backoff
 
 ```typescript
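The limit is not fixed at schedule time: the `JobUpdates` type further down gains a `concurrency` field and both bundled stores apply it in `update()`, so the cap can apparently be adjusted after scheduling. A minimal sketch, assuming `scheduler.updateJob` forwards the field the same way it forwards `priority`, and that the job id is available as in the `updateJob` example above:

```typescript
// Sketch only: relies on the new JobUpdates.concurrency field added in this release.
const jobId = await scheduler.schedule({
  name: "api-sync",
  concurrency: 5, // must be a positive integer, otherwise schedule() throws
}); // assumes schedule() resolves with the job id

// Later: lower the cap to 2; subsequent polls respect the new limit.
await scheduler.updateJob(jobId, { concurrency: 2 });
```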
package/dist/core/scheduler.js
CHANGED
@@ -42,6 +42,12 @@ class Scheduler {
                 throw new Error("Priority must be an integer between 1 and 10");
             }
         }
+        // Concurrency validation
+        if (options.concurrency !== undefined) {
+            if (!Number.isInteger(options.concurrency) || options.concurrency < 1) {
+                throw new Error("Concurrency must be a positive integer");
+            }
+        }
         // ------------------------
         // Normalize run time
         // ------------------------
@@ -59,6 +65,8 @@ class Scheduler {
             repeat: options.repeat,
             dedupeKey: options.dedupeKey,
             priority: options.priority,
+            concurrency: options.concurrency,
+            lockVersion: 0,
             createdAt: now,
             updatedAt: now,
         };
@@ -88,6 +96,12 @@ class Scheduler {
                 throw new Error("Priority must be an integer between 1 and 10");
             }
         }
+        // Concurrency validation
+        if (options.concurrency !== undefined) {
+            if (!Number.isInteger(options.concurrency) || options.concurrency < 1) {
+                throw new Error("Concurrency must be a positive integer");
+            }
+        }
         const job = {
             name: options.name,
             data: options.data,
@@ -97,6 +111,8 @@ class Scheduler {
             retry: options.retry,
             dedupeKey: options.dedupeKey,
             priority: options.priority,
+            concurrency: options.concurrency,
+            lockVersion: 0,
         };
         if (isNaN(job.nextRunAt.getTime())) {
             throw new Error("Invalid Date provided for runAt");
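Both scheduling paths now reject anything that is not a positive integer. A quick illustration of the rule (not part of the package):

```typescript
// Mirrors the check added above; failing values make the scheduler throw
// "Concurrency must be a positive integer".
const isValidConcurrency = (value: unknown): boolean =>
  value === undefined || (Number.isInteger(value) && (value as number) >= 1);

isValidConcurrency(undefined); // true (no limit)
isValidConcurrency(5);         // true
isValidConcurrency(0);         // false
isValidConcurrency(1.5);       // false
isValidConcurrency(-2);        // false
```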
package/dist/store/in-memory-job-store.d.ts
CHANGED

@@ -25,5 +25,6 @@ export declare class InMemoryJobStore implements JobStore {
     findById(jobId: unknown): Promise<Job | null>;
     renewLock(jobId: unknown, workerId: string): Promise<void>;
     update(jobId: unknown, updates: JobUpdates): Promise<void>;
+    countRunning(jobName: string): Promise<number>;
     findAll(query: JobQuery): Promise<Job[]>;
 }
package/dist/store/in-memory-job-store.js
CHANGED

@@ -24,6 +24,7 @@ class InMemoryJobStore {
             ...job,
             _id: id,
             priority: job.priority ?? 5,
+            lockVersion: job.lockVersion ?? 0,
             createdAt: job.createdAt ?? new Date(),
             updatedAt: job.updatedAt ?? new Date(),
         };
@@ -54,9 +55,19 @@ class InMemoryJobStore {
                 now.getTime() - job.lockedAt.getTime() < lockTimeoutMs) {
                 continue;
             }
+            // Check concurrency limit if defined
+            if (job.concurrency !== undefined && job.concurrency > 0) {
+                const runningCount = Array.from(this.jobs.values()).filter((j) => j.name === job.name && j.status === "running").length;
+                if (runningCount >= job.concurrency) {
+                    // At concurrency limit, skip this job
+                    continue;
+                }
+            }
             job.status = "running";
             job.lockedAt = now;
             job.lockedBy = workerId;
+            job.lockUntil = new Date(now.getTime() + lockTimeoutMs);
+            job.lockVersion = (job.lockVersion ?? 0) + 1;
             job.updatedAt = new Date();
             job.lastRunAt = now;
             return { ...job };
@@ -109,6 +120,7 @@ class InMemoryJobStore {
                 job.status = "pending";
                 job.lockedAt = undefined;
                 job.lockedBy = undefined;
+                job.lockUntil = undefined;
                 job.updatedAt = new Date();
                 recovered++;
             }
@@ -165,8 +177,14 @@ class InMemoryJobStore {
         if (updates.priority !== undefined) {
             job.priority = updates.priority;
         }
+        if (updates.concurrency !== undefined) {
+            job.concurrency = updates.concurrency;
+        }
         job.updatedAt = new Date();
     }
+    async countRunning(jobName) {
+        return Array.from(this.jobs.values()).filter((j) => j.name === jobName && j.status === "running").length;
+    }
     async findAll(query) {
         let jobs = Array.from(this.jobs.values());
         // Filter
package/dist/store/job-store.d.ts
CHANGED

@@ -60,6 +60,10 @@ export interface JobStore {
      * Find all jobs matching query
      */
     findAll(query: JobQuery): Promise<Job[]>;
+    /**
+     * Count running jobs by name (for concurrency limits)
+     */
+    countRunning(jobName: string): Promise<number>;
 }
 import { RetryOptions } from "../types/retry";
 import { RepeatOptions } from "../types/repeat";
@@ -72,4 +76,5 @@ export interface JobUpdates {
     status?: JobStatus;
     attempts?: number;
     priority?: number;
+    concurrency?: number;
 }
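`countRunning` is now part of the `JobStore` contract, documented as a count of running jobs by name for concurrency limits, so any custom store must implement it alongside the existing methods. A minimal sketch against a hypothetical array-backed store (names are illustrative, not from the package):

```typescript
// Illustrative only: shows the shape of the new countRunning contract.
type StoredJob = { name: string; status: string };

class ArrayBackedStore /* implements JobStore */ {
  private jobs: StoredJob[] = [];

  // Resolve with the number of jobs of this name whose status is "running".
  async countRunning(jobName: string): Promise<number> {
    return this.jobs.filter((j) => j.name === jobName && j.status === "running").length;
  }

  // ...remaining JobStore methods omitted for brevity.
}
```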
package/dist/store/mongo/mongo-job-store.d.ts
CHANGED

@@ -10,9 +10,6 @@ export declare class MongoJobStore implements JobStore {
     private readonly collection;
     private readonly defaultLockTimeoutMs;
     constructor(db: Db, options?: MongoJobStoreOptions);
-    /**
-     * Create necessary indexes for optimal query performance
-     */
     private ensureIndexes;
     create(job: Job): Promise<Job>;
     createBulk(jobs: Job[]): Promise<Job[]>;
@@ -35,5 +32,6 @@ export declare class MongoJobStore implements JobStore {
     }): Promise<number>;
     renewLock(id: ObjectId, workerId: string): Promise<void>;
     update(id: ObjectId, updates: JobUpdates): Promise<void>;
+    countRunning(jobName: string): Promise<number>;
     findAll(query: JobQuery): Promise<Job[]>;
 }
package/dist/store/mongo/mongo-job-store.js
CHANGED

@@ -10,31 +10,23 @@ class MongoJobStore {
             console.error("Failed to create indexes:", err);
         });
     }
-    /**
-     * Create necessary indexes for optimal query performance
-     */
     async ensureIndexes() {
         await Promise.all([
-            // Primary index for job polling (findAndLockNext) with priority
             this.collection.createIndex({ status: 1, priority: 1, nextRunAt: 1 }, { background: true }),
-            // Index for deduplication
             this.collection.createIndex({ dedupeKey: 1 }, { unique: true, sparse: true, background: true }),
-
-            this.collection.createIndex({
+            this.collection.createIndex({ lockUntil: 1 }, { sparse: true, background: true }),
+            this.collection.createIndex({ name: 1, status: 1 }, { background: true }),
         ]);
     }
-    // --------------------------------------------------
-    // CREATE
-    // --------------------------------------------------
     async create(job) {
         const now = new Date();
-        // IMPORTANT: strip _id completely
         const { _id, ...jobWithoutId } = job;
         const doc = {
             ...jobWithoutId,
             status: job.status ?? "pending",
             attempts: job.attempts ?? 0,
             priority: job.priority ?? 5,
+            lockVersion: job.lockVersion ?? 0,
             createdAt: now,
             updatedAt: now,
         };
@@ -42,7 +34,6 @@ class MongoJobStore {
             delete doc.dedupeKey;
         }
         if (job.dedupeKey) {
-            // Idempotent insert
             const result = await this.collection.findOneAndUpdate({ dedupeKey: job.dedupeKey }, { $setOnInsert: doc }, { upsert: true, returnDocument: "after" });
             return result;
         }
@@ -52,13 +43,13 @@ class MongoJobStore {
     async createBulk(jobs) {
         const now = new Date();
         const docs = jobs.map((job) => {
-            // IMPORTANT: strip _id completely
             const { _id, ...jobWithoutId } = job;
             const doc = {
                 ...jobWithoutId,
                 status: job.status ?? "pending",
                 attempts: job.attempts ?? 0,
                 priority: job.priority ?? 5,
+                lockVersion: job.lockVersion ?? 0,
                 createdAt: now,
                 updatedAt: now,
             };
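The rewritten `ensureIndexes` keeps the polling and dedupe indexes and adds two more that the `lockUntil`-based recovery queries and the per-name running counts can use: a sparse index on `lockUntil` and a compound `{ name, status }` index. If indexes are managed outside the library, the equivalent Node driver calls would look like this (connection string, database, and collection names are assumptions):

```typescript
import { MongoClient } from "mongodb";

// Assumed deployment details; adjust to your own setup.
const client = await MongoClient.connect("mongodb://localhost:27017");
const jobs = client.db("app").collection("jobs");

// Same index definitions the package creates in 0.1.17.
await jobs.createIndex({ lockUntil: 1 }, { sparse: true, background: true });
await jobs.createIndex({ name: 1, status: 1 }, { background: true });

await client.close();
```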
@@ -75,36 +66,150 @@ class MongoJobStore {
             _id: result.insertedIds[index],
         }));
     }
-    //
-    // ATOMIC FIND & LOCK
-    // --------------------------------------------------
+    // Atomic find & lock with version-based optimistic locking
     async findAndLockNext(options) {
         const { now, workerId, lockTimeoutMs } = options;
-        const
-
-
-            nextRunAt: { $lte: now },
+        const lockUntil = new Date(now.getTime() + lockTimeoutMs);
+        // Fast path: jobs without concurrency limits
+        const simpleQuery = {
             $or: [
-
-                {
+                // Pending jobs (not locked)
+                {
+                    status: "pending",
+                    nextRunAt: { $lte: now },
+                    $or: [{ lockedBy: { $exists: false } }, { lockedBy: null }],
+                },
+                // Stale running jobs (lock expired - crash recovery)
+                {
+                    status: "running",
+                    nextRunAt: { $lte: now },
+                    lockUntil: { $lte: now },
+                },
             ],
-
+            $and: [
+                { $or: [{ concurrency: { $exists: false } }, { concurrency: null }] },
+            ],
+        };
+        const simpleResult = await this.collection.findOneAndUpdate(simpleQuery, {
             $set: {
                 lockedAt: now,
                 lockedBy: workerId,
+                lockUntil: lockUntil,
                 status: "running",
                 lastRunAt: now,
                 updatedAt: now,
             },
+            $inc: { lockVersion: 1 },
         }, {
             sort: { priority: 1, nextRunAt: 1 },
             returnDocument: "after",
         });
-
+        if (simpleResult) {
+            return simpleResult;
+        }
+        // Now handle jobs with concurrency limits
+        // We need to check concurrency before locking
+        const maxAttempts = 20;
+        const checkedNames = new Set();
+        for (let attempt = 0; attempt < maxAttempts; attempt++) {
+            // Find a candidate with concurrency limit that we haven't checked yet
+            const concurrencyQuery = {
+                $or: [
+                    {
+                        status: "pending",
+                        nextRunAt: { $lte: now },
+                        // Pending jobs should not have lockedBy set
+                        $or: [{ lockedBy: { $exists: false } }, { lockedBy: null }],
+                    },
+                    {
+                        // Stale running jobs (lock expired)
+                        status: "running",
+                        nextRunAt: { $lte: now },
+                        lockUntil: { $lte: now },
+                    },
+                ],
+                concurrency: { $exists: true, $gt: 0 },
+            };
+            if (checkedNames.size > 0) {
+                concurrencyQuery.name = { $nin: Array.from(checkedNames) };
+            }
+            const candidate = await this.collection.findOne(concurrencyQuery, {
+                sort: { priority: 1, nextRunAt: 1 },
+                projection: { name: 1, concurrency: 1, lockVersion: 1 },
+            });
+            if (!candidate) {
+                return null; // No more candidates with concurrency limits
+            }
+            const runningCount = await this.collection.countDocuments({
+                name: candidate.name,
+                status: "running",
+            });
+            if (runningCount >= candidate.concurrency) {
+                // At limit for this job name, skip all jobs with this name
+                checkedNames.add(candidate.name);
+                continue;
+            }
+            const lockResult = await this.collection.findOneAndUpdate({
+                name: candidate.name,
+                concurrency: candidate.concurrency,
+                $or: [
+                    {
+                        status: "pending",
+                        $or: [{ lockedBy: { $exists: false } }, { lockedBy: null }],
+                    },
+                    {
+                        status: "running",
+                        lockUntil: { $lte: now },
+                    },
+                ],
+                nextRunAt: { $lte: now },
+            }, {
+                $set: {
+                    lockedAt: now,
+                    lockedBy: workerId,
+                    lockUntil: lockUntil,
+                    status: "running",
+                    lastRunAt: now,
+                    updatedAt: now,
+                },
+                $inc: { lockVersion: 1 },
+            }, {
+                sort: { priority: 1, nextRunAt: 1 },
+                returnDocument: "after",
+            });
+            if (lockResult) {
+                // Verify concurrency wasn't exceeded by race condition
+                const currentRunning = await this.collection.countDocuments({
+                    name: lockResult.name,
+                    status: "running",
+                });
+                if (currentRunning > lockResult.concurrency) {
+                    // We exceeded concurrency - release this job back to pending
+                    await this.collection.updateOne({
+                        _id: lockResult._id,
+                        lockedBy: workerId,
+                        lockVersion: lockResult.lockVersion,
+                    }, {
+                        $set: {
+                            status: "pending",
+                            updatedAt: new Date(),
+                        },
+                        $unset: {
+                            lockedAt: "",
+                            lockedBy: "",
+                            lockUntil: "",
+                            lastRunAt: "",
+                        },
+                    });
+                    continue;
+                }
+                return lockResult;
+            }
+            // Lock failed (another worker got it), try next job name
+            checkedNames.add(candidate.name);
+        }
+        return null;
     }
-    // --------------------------------------------------
-    // MARK COMPLETED
-    // --------------------------------------------------
     async markCompleted(id) {
         await this.collection.updateOne({ _id: id }, {
             $set: {
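The rewritten `findAndLockNext` is the heart of this release. Summarized, it runs in three phases: a single atomic `findOneAndUpdate` for jobs without a `concurrency` field, then a candidate loop (bounded at 20 attempts) for limited jobs, and finally a post-lock recount that releases the job back to pending if a concurrent worker pushed the running count over the limit. A condensed restatement of that control flow, with illustrative helper names (the actual implementation is shown in full above):

```typescript
// Condensed restatement of the locking flow above; helper names are illustrative.
async function findAndLockNextSketch(store: {
  tryLockUnlimited(): Promise<unknown | null>;
  nextLimitedCandidate(skip: Set<string>): Promise<{ name: string; concurrency: number } | null>;
  countRunning(name: string): Promise<number>;
  tryLockByName(name: string): Promise<{ name: string; concurrency: number } | null>;
  releaseBackToPending(job: unknown): Promise<void>;
}): Promise<unknown | null> {
  // Phase 1: fast path. Jobs with no concurrency limit are locked in one atomic update.
  const simple = await store.tryLockUnlimited();
  if (simple) return simple;

  // Phase 2: limited jobs. Check the running count before attempting the lock.
  const checked = new Set<string>();
  for (let attempt = 0; attempt < 20; attempt++) {
    const candidate = await store.nextLimitedCandidate(checked);
    if (!candidate) return null;
    if ((await store.countRunning(candidate.name)) >= candidate.concurrency) {
      checked.add(candidate.name); // at the limit, skip this job name
      continue;
    }
    const locked = await store.tryLockByName(candidate.name);
    if (!locked) {
      checked.add(candidate.name); // another worker won the race, move on
      continue;
    }
    // Phase 3: recount after locking; back off if a race pushed us over the limit.
    if ((await store.countRunning(locked.name)) > locked.concurrency) {
      await store.releaseBackToPending(locked);
      continue;
    }
    return locked;
  }
  return null;
}
```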
@@ -114,12 +219,10 @@ class MongoJobStore {
             $unset: {
                 lockedAt: "",
                 lockedBy: "",
+                lockUntil: "",
             },
         });
     }
-    // --------------------------------------------------
-    // MARK FAILED
-    // --------------------------------------------------
     async markFailed(id, error) {
         await this.collection.updateOne({ _id: id }, {
             $set: {
@@ -130,12 +233,10 @@ class MongoJobStore {
             $unset: {
                 lockedAt: "",
                 lockedBy: "",
+                lockUntil: "",
             },
         });
     }
-    // --------------------------------------------------
-    // RESCHEDULE
-    // --------------------------------------------------
     async reschedule(id, nextRunAt, updates) {
         const result = await this.collection.updateOne({ _id: id }, {
             $set: {
@@ -147,12 +248,10 @@ class MongoJobStore {
             $unset: {
                 lockedAt: "",
                 lockedBy: "",
+                lockUntil: "",
             },
         });
     }
-    // --------------------------------------------------
-    // CANCEL
-    // --------------------------------------------------
     async cancel(id) {
         await this.collection.updateOne({ _id: id }, {
             $set: {
@@ -162,6 +261,7 @@ class MongoJobStore {
             $unset: {
                 lockedAt: "",
                 lockedBy: "",
+                lockUntil: "",
             },
         });
     }
@@ -171,14 +271,14 @@ class MongoJobStore {
             return null;
         return doc;
     }
-    // --------------------------------------------------
-    // RECOVER STALE JOBS
-    // --------------------------------------------------
     async recoverStaleJobs(options) {
         const { now, lockTimeoutMs } = options;
         const expiry = new Date(now.getTime() - lockTimeoutMs);
         const result = await this.collection.updateMany({
-
+            $or: [
+                { lockUntil: { $lte: now } },
+                { lockUntil: { $exists: false }, lockedAt: { $lte: expiry } },
+            ],
         }, {
             $set: {
                 status: "pending",
@@ -187,6 +287,7 @@ class MongoJobStore {
             $unset: {
                 lockedAt: "",
                 lockedBy: "",
+                lockUntil: "",
             },
         });
         return result.modifiedCount;
@@ -201,7 +302,9 @@ class MongoJobStore {
             $set: {
                 lockedAt: now,
                 updatedAt: now,
+                lockUntil: new Date(now.getTime() + this.defaultLockTimeoutMs),
             },
+            $inc: { lockVersion: 1 },
         });
         if (result.matchedCount === 0) {
             throw new Error("Job lock lost or owner changed");
@@ -225,8 +328,16 @@ class MongoJobStore {
             $set.attempts = updates.attempts;
         if (updates.priority !== undefined)
             $set.priority = updates.priority;
+        if (updates.concurrency !== undefined)
+            $set.concurrency = updates.concurrency;
         await this.collection.updateOne({ _id: id }, { $set });
     }
+    async countRunning(jobName) {
+        return this.collection.countDocuments({
+            name: jobName,
+            status: "running",
+        });
+    }
     async findAll(query) {
         const filter = {};
         if (query.name) {
package/dist/types/job.d.ts
CHANGED
@@ -11,6 +11,15 @@ export interface Job<Data = unknown> {
     lastScheduledAt?: Date;
     lockedAt?: Date;
     lockedBy?: string;
+    /**
+     * Lock expiry time. Job can be taken by another worker after this time.
+     */
+    lockUntil?: Date;
+    /**
+     * Optimistic locking version. Incremented on each lock acquisition.
+     * Prevents race conditions in distributed environments.
+     */
+    lockVersion?: number;
     attempts: number;
     lastError?: string;
     retry?: RetryOptions | number;
@@ -21,6 +30,11 @@ export interface Job<Data = unknown> {
      * Default: 5
      */
     priority?: number;
+    /**
+     * Max concurrent running jobs with this name.
+     * undefined = no limit.
+     */
+    concurrency?: number;
     createdAt: Date;
     updatedAt: Date;
 }
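Taken together, a job document that a worker currently holds looks roughly like this (field values are illustrative, not taken from the package):

```typescript
// Illustrative shape of a locked job in 0.1.17; ids and dates are made up.
const lockedJob = {
  name: "webhook",
  data: { url: "https://example.com/hook" },
  status: "running",
  priority: 5,
  concurrency: 2,                              // at most two "webhook" jobs run at once
  attempts: 0,
  nextRunAt: new Date("2024-01-01T00:00:00Z"),
  lockedAt: new Date("2024-01-01T00:00:01Z"),
  lockedBy: "worker-1",
  lockUntil: new Date("2024-01-01T00:00:31Z"), // lock expiry; others may take over after this
  lockVersion: 3,                              // bumped on every lock acquisition and renewal
  createdAt: new Date("2023-12-31T23:59:00Z"),
  updatedAt: new Date("2024-01-01T00:00:01Z"),
};
```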
package/dist/types/schedule.d.ts
CHANGED
package/dist/worker/worker.js
CHANGED
@@ -44,7 +44,6 @@ class Worker {
     }
     async loop() {
         while (this.running) {
-            // stop requested before poll
             if (!this.running)
                 break;
             const job = await this.store.findAndLockNext({
@@ -52,7 +51,6 @@ class Worker {
                 workerId: this.workerId,
                 lockTimeoutMs: this.lockTimeout,
             });
-            // stop requested after polling
             if (!this.running)
                 break;
             if (!job) {
@@ -65,24 +63,7 @@ class Worker {
     async execute(job) {
         this.emitter.emitSafe("job:start", job);
         const now = Date.now();
-        //
-        // CRON: pre-schedule BEFORE execution
-        // ---------------------------
-        if (job.repeat?.cron) {
-            let base = job.lastScheduledAt ?? job.nextRunAt ?? new Date(now);
-            let next = (0, repeat_1.getNextRunAt)(job.repeat, base, this.defaultTimezone);
-            // skip missed cron slots
-            while (next.getTime() <= now) {
-                base = next;
-                next = (0, repeat_1.getNextRunAt)(job.repeat, base, this.defaultTimezone);
-            }
-            // persist schedule immediately
-            job.lastScheduledAt = next;
-            await this.store.reschedule(job._id, next);
-        }
-        // ---------------------------
-        // HEARTBEAT
-        // ---------------------------
+        // Heartbeat to prevent lock expiry during long jobs
         const heartbeatIntervalMs = Math.max(50, this.lockTimeout / 2);
         const heartbeatParams = {
             jobId: job._id,
@@ -105,16 +86,42 @@ class Worker {
         };
         const heartbeatPromise = heartbeatLoop();
         try {
+            // Verify we still own the lock before any modifications
+            // (another worker might have stolen it via stale recovery)
             const current = await this.store.findById(job._id);
-            if (current
+            if (!current) {
+                stopHeartbeat = true;
+                return;
+            }
+            if (current.status === "cancelled") {
                 this.emitter.emitSafe("job:complete", job);
-                stopHeartbeat = true;
+                stopHeartbeat = true;
+                return;
+            }
+            if (current.lockedBy !== this.workerId) {
+                this.emitter.emitSafe("worker:error", new Error(`Lock stolen for job ${job._id}: owned by ${current.lockedBy}, we are ${this.workerId}`));
+                stopHeartbeat = true;
                 return;
             }
+            if (current.status !== "running") {
+                this.emitter.emitSafe("worker:error", new Error(`Job ${job._id} is no longer running (status: ${current.status})`));
+                stopHeartbeat = true;
+                return;
+            }
+            // CRON: pre-schedule before execution (after lock verification)
+            if (job.repeat?.cron) {
+                let base = job.lastScheduledAt ?? job.nextRunAt ?? new Date(now);
+                let next = (0, repeat_1.getNextRunAt)(job.repeat, base, this.defaultTimezone);
+                // skip missed cron slots
+                while (next.getTime() <= now) {
+                    base = next;
+                    next = (0, repeat_1.getNextRunAt)(job.repeat, base, this.defaultTimezone);
+                }
+                job.lastScheduledAt = next;
+                await this.store.reschedule(job._id, next);
+            }
             await this.handler(job);
-            //
-            // INTERVAL: schedule AFTER execution
-            // ---------------------------
+            // INTERVAL: schedule after execution
             if (job.repeat?.every != null) {
                 const next = new Date(Date.now() + Math.max(job.repeat.every, 100));
                 await this.store.reschedule(job._id, next);
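The worker now re-reads the job right after locking it and refuses to run the handler unless it still owns a live lock; only then does the cron pre-scheduling (moved down from the top of `execute`) take place. A condensed restatement of that guard sequence, with an illustrative helper name:

```typescript
// Condensed restatement of the checks added to Worker.execute above.
type CurrentJob = { status: string; lockedBy?: string } | null;

function decide(current: CurrentJob, workerId: string): "run" | "complete" | "abort" {
  if (!current) return "abort";                          // job disappeared
  if (current.status === "cancelled") return "complete"; // emit job:complete, stop heartbeat
  if (current.lockedBy !== workerId) return "abort";     // lock stolen via stale recovery
  if (current.status !== "running") return "abort";      // state changed underneath us
  return "run"; // safe: pre-schedule cron, then invoke the handler
}
```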
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "mongo-job-scheduler",
-  "version": "0.1.15",
+  "version": "0.1.17",
   "description": "Production-grade MongoDB-backed job scheduler with retries, cron, timezone support, and crash recovery",
   "license": "MIT",
   "author": "Darshan Bhut",
@@ -43,7 +43,6 @@
     "build": "tsc -p tsconfig.build.json",
     "test": "jest",
     "test:mongo": "jest tests/mongo",
-    "test:stress": "jest tests/stress",
     "prepublishOnly": "npm run build && npm test"
   },
   "dependencies": {