@valentinkolb/sync 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/publish.yml +72 -0
- package/CLAUDE.md +106 -0
- package/LICENSE +21 -0
- package/README.md +292 -0
- package/bun.lock +29 -0
- package/compose.test.yml +7 -0
- package/index.ts +18 -0
- package/package.json +21 -0
- package/src/jobs.ts +568 -0
- package/src/mutex.ts +203 -0
- package/src/ratelimit.ts +143 -0
- package/tests/jobs.test.ts +465 -0
- package/tests/mutex.test.ts +223 -0
- package/tests/preload.ts +2 -0
- package/tests/ratelimit.test.ts +119 -0
- package/tsconfig.json +31 -0
package/src/jobs.ts
ADDED
|
@@ -0,0 +1,568 @@
|
|
|
1
|
+
import { redis, sleep } from "bun";
|
|
2
|
+
import { randomBytes } from "crypto";
|
|
3
|
+
import type { z } from "zod";
|
|
4
|
+
|
|
5
|
+
// ==========================
// Types
// ==========================

/** Lifecycle states a job moves through. */
type JobStatus = "waiting" | "delayed" | "active" | "completed" | "failed";

/** A job record as stored in Redis (JSON-serialized into the jobs data hash). */
export type Job<T> = {
  /** Unique id: "<counter>-<random hex>" (see generateId). */
  id: string;
  /** Schema-validated payload. */
  data: T;
  status: JobStatus;
  /** Attempts so far; incremented each time a worker claims the job. */
  attempts: number;
  /** Max retries allowed after the initial attempt. */
  maxRetries: number;
  /** Max processing time in ms before the job is hard-failed. */
  timeout: number;
  /** Repeat period in ms for periodic jobs; absent for one-shot jobs. */
  interval?: number;
  /** Creation timestamp (ms since epoch). */
  createdAt: number;
  /** When a delayed/interval job is due to run (ms since epoch). */
  scheduledAt?: number;
  /** When the current attempt started. */
  startedAt?: number;
  /** When the job completed or permanently failed. */
  completedAt?: number;
  /** Error message from the most recent failed attempt, if any. */
  error?: string;
};

/** Options shared by every scheduling mode. */
type BaseOptions = {
  /** Number of retries on failure (default: 0) */
  retries?: number;
  /** Max processing time in ms before job is marked as failed (default: 30000) */
  timeout?: number;
};

// The `?: never` fields below make the four scheduling modes mutually
// exclusive: supplying fields from two modes is a compile-time error.

/** Run once after a relative delay. */
type DelayOptions = BaseOptions & {
  /** Delay execution by this many milliseconds */
  delay: number;
  at?: never;
  interval?: never;
  startImmediately?: never;
};

/** Run once at an absolute timestamp. */
type AtOptions = BaseOptions & {
  /** Schedule execution at this timestamp (ms since epoch) */
  at: number;
  delay?: never;
  interval?: never;
  startImmediately?: never;
};

/** Run periodically. */
type IntervalOptions = BaseOptions & {
  /** Repeat job every X milliseconds (for periodic tasks) */
  interval: number;
  /** Start first run immediately instead of waiting for first interval (default: false) */
  startImmediately?: boolean;
  delay?: never;
  at?: never;
};

/** Run as soon as a worker is available (the default mode). */
type ImmediateOptions = BaseOptions & {
  delay?: never;
  at?: never;
  interval?: never;
  startImmediately?: never;
};

/** Scheduling options: exactly one of the four mutually exclusive modes. */
export type SendOptions = DelayOptions | AtOptions | IntervalOptions | ImmediateOptions;

/** Configuration for `jobs.create`. */
export type JobsConfig<T extends z.ZodType> = {
  /** Job queue name (used as Redis key prefix) */
  name: string;
  /** Zod schema for job data validation */
  schema: T;
  /** Prefix for all Redis keys (default: "jobs") */
  prefix?: string;
};

/** Worker options for `process`. */
export type ProcessOptions = {
  /** Number of concurrent jobs to process (default: 1) */
  concurrency?: number;
  /** Poll interval in ms when queue is empty (default: 1000) */
  pollInterval?: number;
  /** Called when a job completes successfully */
  onSuccess?: (job: Job<unknown>) => void | Promise<void>;
  /** Called when a job fails (after all retries exhausted, or interval job rescheduled after failure) */
  onError?: (job: Job<unknown>, error: Error) => void | Promise<void>;
  /** Called after every job attempt (success or failure) */
  onFinally?: (job: Job<unknown>) => void | Promise<void>;
};

/** Public queue handle returned by `jobs.create`. */
export type Jobs<T> = {
  /** Send a job to the queue */
  send: (data: T, options?: SendOptions) => Promise<Job<T>>;
  /** Process jobs from the queue; returns a function that stops the workers */
  process: (handler: (job: Job<T>) => Promise<void> | void, options?: ProcessOptions) => () => void;
};
|
|
95
|
+
|
|
96
|
+
// ==========================
// Defaults
// ==========================

/** Default max processing time per attempt (ms). */
const DEFAULT_TIMEOUT = 30000;
/** Default number of retries after the initial attempt. */
const DEFAULT_RETRIES = 0;
/** Default worker poll interval when the queue is empty (ms). */
const DEFAULT_POLL_INTERVAL = 1000;
/** Default number of concurrent worker loops per `process()` call. */
const DEFAULT_CONCURRENCY = 1;
|
|
104
|
+
|
|
105
|
+
// ==========================
|
|
106
|
+
// Errors
|
|
107
|
+
// ==========================
|
|
108
|
+
|
|
109
|
+
export class JobsError extends Error {
|
|
110
|
+
readonly code: string;
|
|
111
|
+
|
|
112
|
+
constructor(message: string, code: string) {
|
|
113
|
+
super(message);
|
|
114
|
+
this.name = "JobsError";
|
|
115
|
+
this.code = code;
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
export class ValidationError extends JobsError {
|
|
120
|
+
readonly issues: z.ZodIssue[];
|
|
121
|
+
|
|
122
|
+
constructor(issues: z.ZodIssue[]) {
|
|
123
|
+
super("Job data validation failed", "VALIDATION_ERROR");
|
|
124
|
+
this.name = "ValidationError";
|
|
125
|
+
this.issues = issues;
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
// ==========================
// Lua Scripts for Atomic Operations
// ==========================

// Atomically move job from waiting to active and update job status.
// KEYS: [1] waiting list, [2] active list, [3] jobs hash, [4] deadlines zset
// ARGV: [1] now (ms since epoch)
// Returns the claimed job as JSON (status "active", attempts incremented),
// or nil when the waiting list is empty. Also records the job's deadline
// (now + timeout) in the deadlines zset so a stalled worker can be detected.
const CLAIM_JOB_SCRIPT = `
local jobId = redis.call("RPOPLPUSH", KEYS[1], KEYS[2])
if not jobId then
return nil
end

local jobData = redis.call("HGET", KEYS[3], jobId)
if not jobData then
redis.call("LREM", KEYS[2], 1, jobId)
return nil
end

local job = cjson.decode(jobData)
job.status = "active"
job.startedAt = tonumber(ARGV[1])
job.attempts = job.attempts + 1

redis.call("HSET", KEYS[3], jobId, cjson.encode(job))

-- Store deadline based on job's timeout
local deadline = tonumber(ARGV[1]) + job.timeout
redis.call("ZADD", KEYS[4], deadline, jobId)

return cjson.encode(job)
`;
|
|
159
|
+
|
|
160
|
+
// Atomically complete a job (and reschedule if interval).
// KEYS: [1] jobs hash, [2] active list, [3] completed set, [4] deadlines zset, [5] delayed zset
// ARGV: [1] job id, [2] completion timestamp (ms)
// Returns: 0 = job record missing, 1 = marked completed, 2 = interval job rescheduled.
// Note: setting a Lua table field to nil deletes it, so error/startedAt/completedAt
// are dropped from the stored JSON when an interval job is rescheduled.
const COMPLETE_JOB_SCRIPT = `
local jobId = ARGV[1]
local completedAt = tonumber(ARGV[2])

local jobData = redis.call("HGET", KEYS[1], jobId)
if not jobData then
return 0
end

local job = cjson.decode(jobData)

redis.call("LREM", KEYS[2], 1, jobId)
redis.call("ZREM", KEYS[4], jobId)

-- If job has interval, reschedule it
if job.interval and job.interval > 0 then
job.status = "delayed"
job.attempts = 0
job.error = nil
job.startedAt = nil
job.completedAt = nil
job.scheduledAt = completedAt + job.interval

redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
redis.call("ZADD", KEYS[5], job.scheduledAt, jobId)
return 2
else
job.status = "completed"
job.completedAt = completedAt

redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
redis.call("SADD", KEYS[3], jobId)
return 1
end
`;
|
|
196
|
+
|
|
197
|
+
// Atomically fail or retry a job.
// KEYS: [1] jobs hash, [2] active list, [3] waiting list, [4] failed set, [5] deadlines zset, [6] delayed zset
// ARGV: [1] job id, [2] error message, [3] failure timestamp (ms)
// Returns: 0 = job record missing, 1 = retried, 2 = failed permanently, 3 = interval rescheduled
const FAIL_OR_RETRY_SCRIPT = `
local jobId = ARGV[1]
local errorMsg = ARGV[2]
local completedAt = tonumber(ARGV[3])

local jobData = redis.call("HGET", KEYS[1], jobId)
if not jobData then
return 0
end

local job = cjson.decode(jobData)
job.error = errorMsg

redis.call("LREM", KEYS[2], 1, jobId)
redis.call("ZREM", KEYS[5], jobId)

-- Check if we can retry (attempts <= maxRetries means 1 initial + N retries)
if job.attempts <= job.maxRetries then
job.status = "waiting"
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
redis.call("LPUSH", KEYS[3], jobId)
return 1
end

-- No more retries - check if interval job should be rescheduled
if job.interval and job.interval > 0 then
job.status = "delayed"
job.attempts = 0
job.startedAt = nil
job.scheduledAt = completedAt + job.interval
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
redis.call("ZADD", KEYS[6], job.scheduledAt, jobId)
return 3
end

-- Permanent failure
job.status = "failed"
job.completedAt = completedAt
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
redis.call("SADD", KEYS[4], jobId)
return 2
`;
|
|
241
|
+
|
|
242
|
+
// Fail timed out jobs (hard fail, no recovery).
// KEYS: [1] deadlines zset, [2] jobs hash, [3] active list, [4] failed set
// ARGV: [1] now (ms)
// Scans the deadline index for entries due at or before now, removes each from
// the active list, marks it failed with error "Job timed out", and returns the
// number of jobs failed. Orphaned deadline entries (no job record) are pruned.
const FAIL_TIMED_OUT_SCRIPT = `
local now = tonumber(ARGV[1])

local timedOutJobs = redis.call("ZRANGEBYSCORE", KEYS[1], "0", tostring(now))
local failed = 0

for _, jobId in ipairs(timedOutJobs) do
local jobData = redis.call("HGET", KEYS[2], jobId)
if jobData then
local job = cjson.decode(jobData)

redis.call("ZREM", KEYS[1], jobId)
redis.call("LREM", KEYS[3], 0, jobId)

job.status = "failed"
job.completedAt = now
job.error = "Job timed out"
redis.call("HSET", KEYS[2], jobId, cjson.encode(job))
redis.call("SADD", KEYS[4], jobId)
failed = failed + 1
else
redis.call("ZREM", KEYS[1], jobId)
end
end

return failed
`;
|
|
270
|
+
|
|
271
|
+
// Promote delayed jobs that are ready.
// KEYS: [1] delayed zset, [2] waiting list, [3] jobs hash
// ARGV: [1] now (ms)
// Moves every job with scheduledAt <= now from the delayed zset onto the
// waiting list. The ZREM result guards against double promotion if another
// worker races on the same id. Returns the number of jobs promoted.
const PROMOTE_DELAYED_SCRIPT = `
local now = tonumber(ARGV[1])
local readyJobs = redis.call("ZRANGEBYSCORE", KEYS[1], "0", tostring(now))
local promoted = 0

for _, jobId in ipairs(readyJobs) do
local removed = redis.call("ZREM", KEYS[1], jobId)
if removed > 0 then
local jobData = redis.call("HGET", KEYS[3], jobId)
if jobData then
local job = cjson.decode(jobData)
job.status = "waiting"
redis.call("HSET", KEYS[3], jobId, cjson.encode(job))
redis.call("LPUSH", KEYS[2], jobId)
promoted = promoted + 1
end
end
end

return promoted
`;
|
|
293
|
+
|
|
294
|
+
// ==========================
// Jobs Factory
// ==========================

/**
 * Create a typed job queue with Zod schema validation.
 * All state is stored in Redis, making it safe to use across multiple processes.
 *
 * @example
 * ```ts
 * import { z } from "zod";
 * import { jobs } from "@valentinkolb/sync";
 *
 * const emailJobs = jobs.create({
 *   name: "emails",
 *   schema: z.object({
 *     to: z.string().email(),
 *     subject: z.string(),
 *     body: z.string(),
 *   }),
 * });
 *
 * // Send a job immediately
 * await emailJobs.send({ to: "user@example.com", subject: "Hello", body: "World" });
 *
 * // Send with delay and retries
 * await emailJobs.send(data, { delay: 5000, retries: 3 });
 *
 * // Send scheduled with custom timeout
 * await emailJobs.send(data, { at: Date.now() + 60000, timeout: 60000 });
 *
 * // Send periodic job (runs every hour)
 * await emailJobs.send(data, { interval: 3600000 });
 *
 * // Process jobs (in worker process)
 * const stop = emailJobs.process(async (job) => {
 *   await sendEmail(job.data);
 * }, { concurrency: 10 });
 *
 * // Stop processing
 * stop();
 * ```
 */
const create = <T extends z.ZodType>(config: JobsConfig<T>): Jobs<z.infer<T>> => {
  type Data = z.infer<T>;

  const { name, schema, prefix = "jobs" } = config;

  // Redis keys — the key ORDER passed to each EVAL below must match the
  // KEYS[n] ordinals expected by the corresponding Lua script.
  const keys = {
    jobs: `${prefix}:${name}:data`,           // hash: jobId -> job JSON
    waiting: `${prefix}:${name}:waiting`,     // list: jobIds ready to run
    delayed: `${prefix}:${name}:delayed`,     // zset: jobId scored by scheduledAt
    active: `${prefix}:${name}:active`,       // list: jobIds being processed
    deadlines: `${prefix}:${name}:deadlines`, // zset: jobId scored by timeout deadline
    completed: `${prefix}:${name}:completed`, // set: completed jobIds
    failed: `${prefix}:${name}:failed`,       // set: permanently failed jobIds
    id: `${prefix}:${name}:id`,               // counter for id generation
  };

  // ==========================
  // Internal Helpers
  // ==========================

  // Generate a unique job id: monotonically increasing Redis counter plus
  // a random hex suffix.
  const generateId = async (): Promise<string> => {
    const counter = await redis.incr(keys.id);
    const random = randomBytes(4).toString("hex");
    return `${counter}-${random}`;
  };

  // Parse a stored job and re-validate its payload against the schema.
  // Throws ValidationError on mismatch.
  // NOTE(review): if this throws after a job was claimed, the job stays in
  // the active list until FAIL_TIMED_OUT_SCRIPT hard-fails it — confirm this
  // is the intended handling for schema drift.
  const deserializeJob = (data: string): Job<Data> => {
    const job = JSON.parse(data) as Job<Data>;
    const result = schema.safeParse(job.data);
    if (!result.success) {
      throw new ValidationError(result.error.issues);
    }
    return job;
  };

  // ==========================
  // Public API
  // ==========================

  /**
   * Validate and enqueue a job. Depending on `options` the job lands either
   * on the waiting list (immediate) or in the delayed zset (delay/at/interval).
   * @throws ValidationError when `data` fails schema validation.
   */
  const send = async (data: Data, options: SendOptions = {}): Promise<Job<Data>> => {
    const result = schema.safeParse(data);
    if (!result.success) {
      throw new ValidationError(result.error.issues);
    }

    const id = await generateId();
    const now = Date.now();

    // Extract interval from options (TypeScript knows it's only present in IntervalOptions)
    const interval = "interval" in options ? options.interval : undefined;
    const startImmediately = "startImmediately" in options ? options.startImmediately : undefined;

    const job: Job<Data> = {
      id,
      data: result.data, // use parsed data so schema transforms/defaults apply
      status: "waiting",
      attempts: 0,
      maxRetries: options.retries ?? DEFAULT_RETRIES,
      timeout: options.timeout ?? DEFAULT_TIMEOUT,
      interval,
      createdAt: now,
    };

    // Handle scheduling
    if ("delay" in options && options.delay !== undefined) {
      // Delayed job: due `delay` ms from now
      job.status = "delayed";
      job.scheduledAt = now + options.delay;
      await redis.hset(keys.jobs, id, JSON.stringify(job));
      await redis.send("ZADD", [keys.delayed, job.scheduledAt.toString(), id]);
    } else if ("at" in options && options.at !== undefined) {
      // Scheduled job: due at an absolute timestamp
      job.status = "delayed";
      job.scheduledAt = options.at;
      await redis.hset(keys.jobs, id, JSON.stringify(job));
      await redis.send("ZADD", [keys.delayed, job.scheduledAt.toString(), id]);
    } else if (interval !== undefined && !startImmediately) {
      // Interval job, start after first interval
      job.status = "delayed";
      job.scheduledAt = now + interval;
      await redis.hset(keys.jobs, id, JSON.stringify(job));
      await redis.send("ZADD", [keys.delayed, job.scheduledAt.toString(), id]);
    } else {
      // Immediate job (or interval with startImmediately)
      await redis.hset(keys.jobs, id, JSON.stringify(job));
      await redis.lpush(keys.waiting, id);
    }

    return job;
  };

  /**
   * Start `concurrency` polling worker loops. Each loop: promotes due delayed
   * jobs, reaps timed-out jobs, claims one job, runs `handler`, then completes
   * or fails/retries it atomically via the Lua scripts.
   * Returns a stop function that flips the shared `running` flag; loops exit
   * after their current iteration (in-flight handlers are not interrupted).
   */
  const process = (handler: (job: Job<Data>) => Promise<void> | void, options: ProcessOptions = {}): (() => void) => {
    const {
      concurrency = DEFAULT_CONCURRENCY,
      pollInterval = DEFAULT_POLL_INTERVAL,
      onSuccess,
      onError,
      onFinally,
    } = options;

    // Shared across all worker loops started by this call.
    let running = true;

    const processLoop = async (): Promise<void> => {
      while (running) {
        try {
          // Promote delayed jobs (KEYS: delayed, waiting, jobs)
          await redis.send("EVAL", [
            PROMOTE_DELAYED_SCRIPT,
            "3",
            keys.delayed,
            keys.waiting,
            keys.jobs,
            Date.now().toString(),
          ]);

          // Fail timed out jobs (KEYS: deadlines, jobs, active, failed)
          await redis.send("EVAL", [
            FAIL_TIMED_OUT_SCRIPT,
            "4",
            keys.deadlines,
            keys.jobs,
            keys.active,
            keys.failed,
            Date.now().toString(),
          ]);

          // Claim a job (KEYS: waiting, active, jobs, deadlines)
          const now = Date.now();

          const jobJson = (await redis.send("EVAL", [
            CLAIM_JOB_SCRIPT,
            "4",
            keys.waiting,
            keys.active,
            keys.jobs,
            keys.deadlines,
            now.toString(),
          ])) as string | null;

          if (!jobJson) {
            // Queue empty — back off before polling again
            await sleep(pollInterval);
            continue;
          }

          const job = deserializeJob(jobJson);

          try {
            await handler(job);

            // Success: complete (or reschedule interval job)
            // (KEYS: jobs, active, completed, deadlines, delayed)
            await redis.send("EVAL", [
              COMPLETE_JOB_SCRIPT,
              "5",
              keys.jobs,
              keys.active,
              keys.completed,
              keys.deadlines,
              keys.delayed,
              job.id,
              Date.now().toString(),
            ]);

            // Call onSuccess; callback failures are logged, never rethrown
            if (onSuccess) {
              try {
                await onSuccess(job);
              } catch (onSuccessError) {
                console.error("onSuccess callback failed:", onSuccessError);
              }
            }
          } catch (error) {
            const err = error instanceof Error ? error : new Error(String(error));

            // Returns: 1 = retried, 2 = failed permanently, 3 = interval rescheduled
            // (KEYS: jobs, active, waiting, failed, deadlines, delayed)
            const result = (await redis.send("EVAL", [
              FAIL_OR_RETRY_SCRIPT,
              "6",
              keys.jobs,
              keys.active,
              keys.waiting,
              keys.failed,
              keys.deadlines,
              keys.delayed,
              job.id,
              err.message,
              Date.now().toString(),
            ])) as number;

            // Call onError if job permanently failed or interval rescheduled after failure
            // (NOT on retry — only the final outcome of a failure is reported)
            if (onError && (result === 2 || result === 3)) {
              try {
                await onError(job, err);
              } catch (onErrorError) {
                console.error("onError callback failed:", onErrorError);
              }
            }
          } finally {
            // Call onFinally after every attempt
            if (onFinally) {
              try {
                await onFinally(job);
              } catch (onFinallyError) {
                console.error("onFinally callback failed:", onFinallyError);
              }
            }
          }
        } catch (error) {
          // Unexpected infrastructure error (Redis down, deserialize failure):
          // log and back off; the loop keeps running.
          console.error("Worker error:", error);
          await sleep(pollInterval);
        }
      }
    };

    // Start workers (fire-and-forget; each loop exits when `running` flips)
    for (let i = 0; i < concurrency; i++) {
      processLoop();
    }

    // Return stop function
    return () => {
      running = false;
    };
  };

  return { send, process };
};
|
|
563
|
+
|
|
564
|
+
// ==========================
// Export
// ==========================

/** Public entry point: `jobs.create(config)` builds a typed, Redis-backed queue. */
export const jobs = { create };
|