@valentinkolb/sync 0.1.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.js +1059 -0
- package/package.json +15 -11
- package/.github/workflows/publish.yml +0 -72
- package/CLAUDE.md +0 -106
- package/LICENSE +0 -21
- package/README.md +0 -292
- package/bun.lock +0 -29
- package/compose.test.yml +0 -7
- package/index.ts +0 -18
- package/src/jobs.ts +0 -568
- package/src/mutex.ts +0 -203
- package/src/ratelimit.ts +0 -143
- package/tests/jobs.test.ts +0 -465
- package/tests/mutex.test.ts +0 -223
- package/tests/preload.ts +0 -2
- package/tests/ratelimit.test.ts +0 -119
- package/tsconfig.json +0 -31
package/index.js
ADDED
|
@@ -0,0 +1,1059 @@
|
|
|
1
|
+
// @bun
|
|
2
|
+
// src/ratelimit.ts
|
|
3
|
+
var {redis } = globalThis.Bun;
|
|
4
|
+
import { createHash } from "crypto";
|
|
5
|
+
|
|
6
|
+
/**
 * Error thrown when a rate-limit check rejects a request.
 * Carries the remaining request budget and the time until the
 * current window resets.
 */
class RateLimitError extends Error {
  /** Requests still allowed in the current window (floored, never negative). */
  remaining;
  /** Milliseconds until the current window resets. */
  resetIn;

  constructor(result) {
    super("Rate limit exceeded");
    const { remaining, resetIn } = result;
    this.name = "RateLimitError";
    this.remaining = remaining;
    this.resetIn = resetIn;
  }
}
|
|
16
|
+
// Sliding-window rate-limit script (run via EVAL).
// KEYS[1] = current-window counter key, KEYS[2] = previous-window counter key.
// ARGV[1] = window length in seconds, ARGV[2] = limit (parsed but not used
// here — the caller applies the limit client-side; kept for compatibility),
// ARGV[3] = fraction of the current window already elapsed.
//
// FIX: Redis converts Lua numbers to *integer* replies, truncating the
// fractional part — so returning the weighted count as a number silently
// dropped its fraction. It is now returned as a string; the caller parses
// it with Number(), which also accepts a plain integer reply.
var RATE_LIMIT_SCRIPT = `
local currentKey = KEYS[1]
local previousKey = KEYS[2]
local windowSecs = tonumber(ARGV[1])
local limit = tonumber(ARGV[2])
local elapsedRatio = tonumber(ARGV[3])

-- Get previous window count
local previousCount = tonumber(redis.call("GET", previousKey) or "0")

-- Increment current window and set expiry atomically
local currentCount = redis.call("INCR", currentKey)
if currentCount == 1 then
  redis.call("EXPIRE", currentKey, windowSecs * 2)
end

-- Calculate weighted count (fractional; returned as a string because Redis
-- truncates Lua numbers to integers in script replies)
local weightedCount = previousCount * (1 - elapsedRatio) + currentCount

return {currentCount, previousCount, tostring(weightedCount)}
`;
|
|
37
|
+
// Identifiers longer than this are hashed to keep Redis key length bounded.
var MAX_IDENTIFIER_LENGTH = 128;

/**
 * Returns the identifier unchanged when it fits within the key-length
 * budget; otherwise returns a stable `hash:<sha256-hex>` digest of it.
 */
var normalizeIdentifier = (identifier) => {
  if (identifier.length > MAX_IDENTIFIER_LENGTH) {
    const digest = createHash("sha256").update(identifier).digest("hex");
    return `hash:${digest}`;
  }
  return identifier;
};
|
|
44
|
+
/**
 * Creates a sliding-window rate limiter backed by Redis.
 *
 * config:
 *   limit      - max weighted requests per window; a request is rejected
 *                when the weighted count strictly exceeds this value
 *   windowSecs - window length in seconds (default 1)
 *   prefix     - Redis key namespace (default "ratelimit")
 *
 * Returns { check, checkOrThrow }.
 */
var create = (config) => {
  const { limit, windowSecs = 1, prefix = "ratelimit" } = config;

  /**
   * Counts one request for `identifier` and reports the limiter state.
   * Resolves to { limited, remaining, resetIn } — `resetIn` is in ms.
   */
  const check = async (identifier) => {
    const safeIdentifier = normalizeIdentifier(identifier);
    const now = Date.now();
    const windowMs = windowSecs * 1000;
    const currentWindow = Math.floor(now / windowMs);
    const previousWindow = currentWindow - 1;
    const elapsedInWindow = now % windowMs;
    const elapsedRatio = elapsedInWindow / windowMs;
    const currentKey = `${prefix}:${safeIdentifier}:${currentWindow}`;
    const previousKey = `${prefix}:${safeIdentifier}:${previousWindow}`;
    const result = await redis.send("EVAL", [
      RATE_LIMIT_SCRIPT,
      "2",
      currentKey,
      previousKey,
      windowSecs.toString(),
      limit.toString(),
      elapsedRatio.toString()
    ]);
    // FIX: parse explicitly with Number(). Redis truncates Lua numbers to
    // integers in script replies, so the script may return the weighted
    // count as a string to preserve the fraction; Number() handles both a
    // string reply and a legacy integer reply.
    const weightedCount = Number(result[2]);
    const limited = weightedCount > limit;
    const remaining = Math.max(0, Math.floor(limit - weightedCount));
    const resetIn = windowMs - elapsedInWindow;
    return { limited, remaining, resetIn };
  };

  /** Like `check`, but throws RateLimitError when the request is limited. */
  const checkOrThrow = async (identifier) => {
    const result = await check(identifier);
    if (result.limited) {
      throw new RateLimitError(result);
    }
    return result;
  };

  return { check, checkOrThrow };
};
|
|
80
|
+
var ratelimit = { create, RateLimitError };
|
|
81
|
+
// src/mutex.ts
|
|
82
|
+
var {redis: redis2, sleep } = globalThis.Bun;
|
|
83
|
+
import { createHash as createHash2, randomBytes } from "crypto";
|
|
84
|
+
// Lock defaults: extra acquire attempts after the first try, base backoff
// between attempts (ms), and lock lifetime (ms — used as the PX argument).
var DEFAULT_RETRY_COUNT = 10;
var DEFAULT_RETRY_DELAY = 200;
var DEFAULT_TTL = 1e4;
// Resource names longer than this are hashed to keep Redis key length bounded.
var MAX_RESOURCE_LENGTH = 128;

/**
 * Returns the resource name unchanged when it fits the key-length budget,
 * otherwise a stable `hash:<sha256-hex>` digest of it.
 */
var normalizeResource = (resource) => {
  if (resource.length > MAX_RESOURCE_LENGTH) {
    const digest = createHash2("sha256").update(resource).digest("hex");
    return `hash:${digest}`;
  }
  return resource;
};
|
|
94
|
+
|
|
95
|
+
/**
 * Error thrown when a lock cannot be acquired within the configured retry
 * budget (see `withLockOrThrow`).
 */
class LockError extends Error {
  /** The resource name the caller tried to lock (as passed in, un-normalized). */
  resource;

  constructor(resource) {
    super(`Failed to acquire lock on resource: ${resource}`);
    this.name = "LockError";
    this.resource = resource;
  }
}
|
|
103
|
+
/**
 * Creates a distributed mutex backed by a single Redis instance
 * (SET NX PX to acquire; compare-and-delete / compare-and-pexpire Lua
 * scripts so only the lock owner can release or extend).
 *
 * config:
 *   prefix     - key namespace (default "mutex")
 *   retryCount - extra acquire attempts after the first (default 10)
 *   retryDelay - base delay in ms between attempts; up to 100 ms of random
 *                jitter is added (default 200)
 *   defaultTtl - lock lifetime in ms when no ttl is given (default 10000)
 *
 * Returns { acquire, release, withLock, withLockOrThrow, extend }.
 */
var create2 = (config = {}) => {
  const {
    prefix = "mutex",
    retryCount = DEFAULT_RETRY_COUNT,
    retryDelay = DEFAULT_RETRY_DELAY,
    defaultTtl = DEFAULT_TTL
  } = config;
  // Tries to take the lock; resolves to a lock handle or null when every
  // attempt fails. The random token stored in `value` is what proves
  // ownership on release/extend.
  const acquire = async (resource, ttl = defaultTtl) => {
    const safeResource = normalizeResource(resource);
    const key = `${prefix}:${safeResource}`;
    const value = randomBytes(16).toString("hex");
    for (let attempt = 0;attempt <= retryCount; attempt++) {
      try {
        // NX = only set if the key is absent; PX = auto-expire after ttl ms.
        const result = await redis2.send("SET", [key, value, "NX", "PX", ttl.toString()]);
        if (result === "OK") {
          return {
            resource: key,
            value,
            ttl,
            expiration: Date.now() + ttl
          };
        }
      } catch (error) {
        // Transient Redis errors are logged and retried like a contended lock.
        console.error(`Lock acquire attempt ${attempt} failed:`, error);
      }
      if (attempt < retryCount) {
        // Jittered backoff so competing clients don't retry in lockstep.
        await sleep(retryDelay + Math.random() * 100);
      }
    }
    return null;
  };
  // Releases the lock only if it is still owned (stored value matches).
  // Best-effort: errors are logged and swallowed so callers' finally blocks
  // never throw.
  const release = async (lock) => {
    try {
      const releaseScript = `
      if redis.call("get", KEYS[1]) == ARGV[1] then
        return redis.call("del", KEYS[1])
      else
        return 0
      end
      `;
      await redis2.send("EVAL", [releaseScript, "1", lock.resource, lock.value]);
    } catch (error) {
      console.error("Error releasing lock:", error);
    }
  };
  // Extends a still-held lock's TTL. Returns true on success; false when the
  // lock is no longer owned (expired or taken over) or Redis errors. On
  // success the lock handle's ttl/expiration are updated in place.
  const extend = async (lock, ttl = defaultTtl) => {
    try {
      const extendScript = `
      if redis.call("get", KEYS[1]) == ARGV[1] then
        return redis.call("pexpire", KEYS[1], ARGV[2])
      else
        return 0
      end
      `;
      const result = await redis2.send("EVAL", [extendScript, "1", lock.resource, lock.value, ttl.toString()]);
      if (result === 1) {
        lock.ttl = ttl;
        lock.expiration = Date.now() + ttl;
        return true;
      }
      return false;
    } catch (error) {
      console.error("Error extending lock:", error);
      return false;
    }
  };
  // Runs fn while holding the lock; resolves to null (without running fn)
  // when the lock cannot be acquired. The lock is always released afterwards.
  const withLock = async (resource, fn, ttl) => {
    const lock = await acquire(resource, ttl);
    if (!lock)
      return null;
    try {
      return await fn(lock);
    } finally {
      await release(lock);
    }
  };
  // Like withLock, but throws LockError instead of returning null when the
  // lock cannot be acquired.
  const withLockOrThrow = async (resource, fn, ttl) => {
    const lock = await acquire(resource, ttl);
    if (!lock) {
      throw new LockError(resource);
    }
    try {
      return await fn(lock);
    } finally {
      await release(lock);
    }
  };
  return { acquire, release, withLock, withLockOrThrow, extend };
};
|
|
192
|
+
var mutex = { create: create2 };
|
|
193
|
+
// src/jobs.ts
|
|
194
|
+
var {redis: redis3, RedisClient, sleep: sleep2 } = globalThis.Bun;
|
|
195
|
+
import { randomBytes as randomBytes2 } from "crypto";
|
|
196
|
+
var DEFAULT_TIMEOUT = 30000;
|
|
197
|
+
var DEFAULT_RETRIES = 0;
|
|
198
|
+
var DEFAULT_POLL_INTERVAL = 1000;
|
|
199
|
+
var DEFAULT_CONCURRENCY = 1;
|
|
200
|
+
|
|
201
|
+
/**
 * Base error for the jobs module. `code` is a machine-readable
 * discriminator (e.g. "VALIDATION_ERROR", "DUPLICATE_JOB").
 */
class JobsError extends Error {
  /** Machine-readable error code for programmatic handling. */
  code;

  constructor(message, code) {
    super(message);
    this.name = "JobsError";
    this.code = code;
  }
}
|
|
209
|
+
|
|
210
|
+
/**
 * Raised when job data fails schema validation (on send or when reading a
 * stored job back). `issues` carries the schema library's issue list verbatim.
 */
class ValidationError extends JobsError {
  /** Validation issues as reported by `schema.safeParse`. */
  issues;

  constructor(issues) {
    super("Job data validation failed", "VALIDATION_ERROR");
    this.name = "ValidationError";
    this.issues = issues;
  }
}
|
|
218
|
+
|
|
219
|
+
/**
 * Raised by `send` when a job with the same unique `key` already exists
 * (the HSETNX reservation on the unique-key hash returned 0).
 */
class DuplicateJobError extends JobsError {
  /** The unique key that collided. */
  key;

  constructor(key) {
    super(`Job with key "${key}" already exists`, "DUPLICATE_JOB");
    this.name = "DuplicateJobError";
    this.key = key;
  }
}
|
|
227
|
+
var CLAIM_JOB_BY_ID_SCRIPT = `
|
|
228
|
+
local jobId = ARGV[1]
|
|
229
|
+
local jobData = redis.call("HGET", KEYS[1], jobId)
|
|
230
|
+
if not jobData then
|
|
231
|
+
redis.call("LREM", KEYS[2], 1, jobId)
|
|
232
|
+
return nil
|
|
233
|
+
end
|
|
234
|
+
|
|
235
|
+
local ok, job = pcall(cjson.decode, jobData)
|
|
236
|
+
if not ok then
|
|
237
|
+
local now = tonumber(ARGV[2])
|
|
238
|
+
local failedJob = {
|
|
239
|
+
id = jobId,
|
|
240
|
+
data = cjson.null,
|
|
241
|
+
status = "failed",
|
|
242
|
+
attempts = 1,
|
|
243
|
+
maxRetries = 0,
|
|
244
|
+
timeout = 0,
|
|
245
|
+
createdAt = now,
|
|
246
|
+
completedAt = now,
|
|
247
|
+
error = "Invalid job payload in Redis",
|
|
248
|
+
invalidData = true
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
redis.call("LREM", KEYS[2], 1, jobId)
|
|
252
|
+
redis.call("HSET", KEYS[1], jobId, cjson.encode(failedJob))
|
|
253
|
+
redis.call("SADD", KEYS[4], jobId)
|
|
254
|
+
|
|
255
|
+
return {jobId, cjson.encode(failedJob), "invalid"}
|
|
256
|
+
end
|
|
257
|
+
|
|
258
|
+
job.status = "active"
|
|
259
|
+
job.startedAt = tonumber(ARGV[2])
|
|
260
|
+
job.attempts = job.attempts + 1
|
|
261
|
+
|
|
262
|
+
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
|
|
263
|
+
|
|
264
|
+
local deadline = tonumber(ARGV[2]) + job.timeout
|
|
265
|
+
redis.call("ZADD", KEYS[3], deadline, jobId)
|
|
266
|
+
|
|
267
|
+
return {jobId, cjson.encode(job)}
|
|
268
|
+
`;
|
|
269
|
+
var COMPLETE_JOB_SCRIPT = `
|
|
270
|
+
local jobId = ARGV[1]
|
|
271
|
+
local completedAt = tonumber(ARGV[2])
|
|
272
|
+
|
|
273
|
+
local jobData = redis.call("HGET", KEYS[1], jobId)
|
|
274
|
+
if not jobData then
|
|
275
|
+
return 0
|
|
276
|
+
end
|
|
277
|
+
|
|
278
|
+
local job = cjson.decode(jobData)
|
|
279
|
+
|
|
280
|
+
redis.call("LREM", KEYS[2], 1, jobId)
|
|
281
|
+
redis.call("ZREM", KEYS[4], jobId)
|
|
282
|
+
|
|
283
|
+
-- If job has interval, reschedule it (keep unique key active)
|
|
284
|
+
if job.interval and job.interval > 0 then
|
|
285
|
+
job.status = "delayed"
|
|
286
|
+
job.attempts = 0
|
|
287
|
+
job.error = cjson.null
|
|
288
|
+
job.startedAt = cjson.null
|
|
289
|
+
job.completedAt = cjson.null
|
|
290
|
+
job.scheduledAt = completedAt + job.interval
|
|
291
|
+
|
|
292
|
+
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
|
|
293
|
+
redis.call("ZADD", KEYS[5], job.scheduledAt, jobId)
|
|
294
|
+
return 2
|
|
295
|
+
else
|
|
296
|
+
job.status = "completed"
|
|
297
|
+
job.completedAt = completedAt
|
|
298
|
+
|
|
299
|
+
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
|
|
300
|
+
redis.call("SADD", KEYS[3], jobId)
|
|
301
|
+
|
|
302
|
+
-- Release unique key
|
|
303
|
+
if job.key then
|
|
304
|
+
redis.call("HDEL", KEYS[6], job.key)
|
|
305
|
+
end
|
|
306
|
+
|
|
307
|
+
return 1
|
|
308
|
+
end
|
|
309
|
+
`;
|
|
310
|
+
var FAIL_OR_RETRY_SCRIPT = `
|
|
311
|
+
local jobId = ARGV[1]
|
|
312
|
+
local errorMsg = ARGV[2]
|
|
313
|
+
local completedAt = tonumber(ARGV[3])
|
|
314
|
+
|
|
315
|
+
local jobData = redis.call("HGET", KEYS[1], jobId)
|
|
316
|
+
if not jobData then
|
|
317
|
+
return 0
|
|
318
|
+
end
|
|
319
|
+
|
|
320
|
+
local job = cjson.decode(jobData)
|
|
321
|
+
job.error = errorMsg
|
|
322
|
+
|
|
323
|
+
redis.call("LREM", KEYS[2], 1, jobId)
|
|
324
|
+
redis.call("ZREM", KEYS[5], jobId)
|
|
325
|
+
|
|
326
|
+
-- Check if we can retry (attempts <= maxRetries means 1 initial + N retries)
|
|
327
|
+
if job.attempts <= job.maxRetries then
|
|
328
|
+
-- Check for backoff
|
|
329
|
+
if job.backoff and job.backoff > 0 then
|
|
330
|
+
-- Exponential backoff: backoff * 2^(attempts-1)
|
|
331
|
+
local delay = job.backoff * math.pow(2, job.attempts - 1)
|
|
332
|
+
job.status = "delayed"
|
|
333
|
+
job.scheduledAt = completedAt + delay
|
|
334
|
+
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
|
|
335
|
+
redis.call("ZADD", KEYS[6], job.scheduledAt, jobId)
|
|
336
|
+
return 4
|
|
337
|
+
else
|
|
338
|
+
job.status = "waiting"
|
|
339
|
+
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
|
|
340
|
+
redis.call("LPUSH", KEYS[3], jobId)
|
|
341
|
+
return 1
|
|
342
|
+
end
|
|
343
|
+
end
|
|
344
|
+
|
|
345
|
+
-- No more retries - check if interval job should be rescheduled
|
|
346
|
+
if job.interval and job.interval > 0 then
|
|
347
|
+
job.status = "delayed"
|
|
348
|
+
job.attempts = 0
|
|
349
|
+
job.startedAt = cjson.null
|
|
350
|
+
job.scheduledAt = completedAt + job.interval
|
|
351
|
+
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
|
|
352
|
+
redis.call("ZADD", KEYS[6], job.scheduledAt, jobId)
|
|
353
|
+
return 3
|
|
354
|
+
end
|
|
355
|
+
|
|
356
|
+
-- Permanent failure
|
|
357
|
+
job.status = "failed"
|
|
358
|
+
job.completedAt = completedAt
|
|
359
|
+
redis.call("HSET", KEYS[1], jobId, cjson.encode(job))
|
|
360
|
+
redis.call("SADD", KEYS[4], jobId)
|
|
361
|
+
|
|
362
|
+
-- Release unique key
|
|
363
|
+
if job.key then
|
|
364
|
+
redis.call("HDEL", KEYS[7], job.key)
|
|
365
|
+
end
|
|
366
|
+
|
|
367
|
+
return 2
|
|
368
|
+
`;
|
|
369
|
+
var FAIL_TIMED_OUT_SCRIPT = `
|
|
370
|
+
local now = tonumber(ARGV[1])
|
|
371
|
+
|
|
372
|
+
local timedOutJobs = redis.call("ZRANGEBYSCORE", KEYS[1], "0", tostring(now))
|
|
373
|
+
local failed = 0
|
|
374
|
+
|
|
375
|
+
for _, jobId in ipairs(timedOutJobs) do
|
|
376
|
+
local jobData = redis.call("HGET", KEYS[2], jobId)
|
|
377
|
+
if jobData then
|
|
378
|
+
local job = cjson.decode(jobData)
|
|
379
|
+
|
|
380
|
+
redis.call("ZREM", KEYS[1], jobId)
|
|
381
|
+
redis.call("LREM", KEYS[3], 0, jobId)
|
|
382
|
+
|
|
383
|
+
job.status = "failed"
|
|
384
|
+
job.completedAt = now
|
|
385
|
+
job.error = "Job timed out"
|
|
386
|
+
redis.call("HSET", KEYS[2], jobId, cjson.encode(job))
|
|
387
|
+
redis.call("SADD", KEYS[4], jobId)
|
|
388
|
+
|
|
389
|
+
-- Release unique key
|
|
390
|
+
if job.key then
|
|
391
|
+
redis.call("HDEL", KEYS[5], job.key)
|
|
392
|
+
end
|
|
393
|
+
|
|
394
|
+
failed = failed + 1
|
|
395
|
+
else
|
|
396
|
+
redis.call("ZREM", KEYS[1], jobId)
|
|
397
|
+
end
|
|
398
|
+
end
|
|
399
|
+
|
|
400
|
+
return failed
|
|
401
|
+
`;
|
|
402
|
+
var RECOVER_ACTIVE_JOBS_SCRIPT = `
|
|
403
|
+
local now = tonumber(ARGV[1])
|
|
404
|
+
local activeJobs = redis.call("LRANGE", KEYS[1], 0, -1)
|
|
405
|
+
local recovered = 0
|
|
406
|
+
|
|
407
|
+
for _, jobId in ipairs(activeJobs) do
|
|
408
|
+
local jobData = redis.call("HGET", KEYS[2], jobId)
|
|
409
|
+
if jobData then
|
|
410
|
+
local job = cjson.decode(jobData)
|
|
411
|
+
local startedAt = tonumber(job.startedAt) or 0
|
|
412
|
+
local timeout = tonumber(job.timeout) or 30000
|
|
413
|
+
|
|
414
|
+
if startedAt > 0 and (startedAt + timeout) < now then
|
|
415
|
+
redis.call("LREM", KEYS[1], 1, jobId)
|
|
416
|
+
redis.call("ZREM", KEYS[3], jobId)
|
|
417
|
+
job.error = "Recovered after worker failure"
|
|
418
|
+
|
|
419
|
+
if job.attempts <= job.maxRetries then
|
|
420
|
+
job.status = "waiting"
|
|
421
|
+
redis.call("HSET", KEYS[2], jobId, cjson.encode(job))
|
|
422
|
+
redis.call("LPUSH", KEYS[4], jobId)
|
|
423
|
+
elseif job.interval and job.interval > 0 then
|
|
424
|
+
job.status = "delayed"
|
|
425
|
+
job.attempts = 0
|
|
426
|
+
job.startedAt = cjson.null
|
|
427
|
+
job.scheduledAt = now + job.interval
|
|
428
|
+
redis.call("HSET", KEYS[2], jobId, cjson.encode(job))
|
|
429
|
+
redis.call("ZADD", KEYS[6], job.scheduledAt, jobId)
|
|
430
|
+
else
|
|
431
|
+
job.status = "failed"
|
|
432
|
+
job.completedAt = now
|
|
433
|
+
redis.call("HSET", KEYS[2], jobId, cjson.encode(job))
|
|
434
|
+
redis.call("SADD", KEYS[5], jobId)
|
|
435
|
+
|
|
436
|
+
-- Release unique key on permanent failure
|
|
437
|
+
if job.key then
|
|
438
|
+
redis.call("HDEL", KEYS[7], job.key)
|
|
439
|
+
end
|
|
440
|
+
end
|
|
441
|
+
recovered = recovered + 1
|
|
442
|
+
end
|
|
443
|
+
else
|
|
444
|
+
redis.call("LREM", KEYS[1], 1, jobId)
|
|
445
|
+
redis.call("ZREM", KEYS[3], jobId)
|
|
446
|
+
end
|
|
447
|
+
end
|
|
448
|
+
|
|
449
|
+
return recovered
|
|
450
|
+
`;
|
|
451
|
+
var LIST_JOBS_SCRIPT = `
|
|
452
|
+
local filterByTag = ARGV[1] == "1"
|
|
453
|
+
local filterByStatus = ARGV[2] == "1"
|
|
454
|
+
local statusFilter = ARGV[3]
|
|
455
|
+
local offset = tonumber(ARGV[4])
|
|
456
|
+
local limit = tonumber(ARGV[5])
|
|
457
|
+
|
|
458
|
+
local candidateIds
|
|
459
|
+
if filterByTag then
|
|
460
|
+
candidateIds = redis.call("SMEMBERS", KEYS[2])
|
|
461
|
+
else
|
|
462
|
+
candidateIds = redis.call("HKEYS", KEYS[1])
|
|
463
|
+
end
|
|
464
|
+
|
|
465
|
+
local matched = {}
|
|
466
|
+
for _, jobId in ipairs(candidateIds) do
|
|
467
|
+
local jobData = redis.call("HGET", KEYS[1], jobId)
|
|
468
|
+
if jobData then
|
|
469
|
+
if filterByStatus then
|
|
470
|
+
local job = cjson.decode(jobData)
|
|
471
|
+
if job.status == statusFilter then
|
|
472
|
+
matched[#matched + 1] = { id = jobId, data = jobData }
|
|
473
|
+
end
|
|
474
|
+
else
|
|
475
|
+
matched[#matched + 1] = { id = jobId, data = jobData }
|
|
476
|
+
end
|
|
477
|
+
end
|
|
478
|
+
end
|
|
479
|
+
|
|
480
|
+
table.sort(matched, function(a, b) return a.id < b.id end)
|
|
481
|
+
|
|
482
|
+
local total = #matched
|
|
483
|
+
local startIdx = offset + 1
|
|
484
|
+
local endIdx = math.min(startIdx + limit - 1, total)
|
|
485
|
+
|
|
486
|
+
local results = {}
|
|
487
|
+
for i = startIdx, endIdx do
|
|
488
|
+
results[#results + 1] = matched[i].data
|
|
489
|
+
end
|
|
490
|
+
|
|
491
|
+
if #results == 0 then
|
|
492
|
+
return { total }
|
|
493
|
+
end
|
|
494
|
+
return { total, unpack(results) }
|
|
495
|
+
`;
|
|
496
|
+
var CANCEL_JOB_SCRIPT = `
|
|
497
|
+
local jobId = ARGV[1]
|
|
498
|
+
local prefix = ARGV[2]
|
|
499
|
+
|
|
500
|
+
local jobData = redis.call("HGET", KEYS[1], jobId)
|
|
501
|
+
if not jobData then
|
|
502
|
+
return 0
|
|
503
|
+
end
|
|
504
|
+
|
|
505
|
+
local job = cjson.decode(jobData)
|
|
506
|
+
|
|
507
|
+
if job.status == "waiting" then
|
|
508
|
+
redis.call("LREM", KEYS[2], 0, jobId)
|
|
509
|
+
elseif job.status == "delayed" then
|
|
510
|
+
redis.call("ZREM", KEYS[3], jobId)
|
|
511
|
+
elseif job.status == "active" then
|
|
512
|
+
redis.call("LREM", KEYS[4], 0, jobId)
|
|
513
|
+
redis.call("ZREM", KEYS[5], jobId)
|
|
514
|
+
elseif job.status == "completed" then
|
|
515
|
+
redis.call("SREM", KEYS[6], jobId)
|
|
516
|
+
elseif job.status == "failed" then
|
|
517
|
+
redis.call("SREM", KEYS[7], jobId)
|
|
518
|
+
end
|
|
519
|
+
|
|
520
|
+
redis.call("HDEL", KEYS[1], jobId)
|
|
521
|
+
|
|
522
|
+
if job.tag then
|
|
523
|
+
redis.call("SREM", prefix .. ":tag:" .. job.tag, jobId)
|
|
524
|
+
end
|
|
525
|
+
|
|
526
|
+
-- Release unique key
|
|
527
|
+
if job.key then
|
|
528
|
+
redis.call("HDEL", KEYS[8], job.key)
|
|
529
|
+
end
|
|
530
|
+
|
|
531
|
+
return 1
|
|
532
|
+
`;
|
|
533
|
+
var PROMOTE_DELAYED_SCRIPT = `
|
|
534
|
+
local now = tonumber(ARGV[1])
|
|
535
|
+
local readyJobs = redis.call("ZRANGEBYSCORE", KEYS[1], "0", tostring(now))
|
|
536
|
+
local promoted = 0
|
|
537
|
+
|
|
538
|
+
for _, jobId in ipairs(readyJobs) do
|
|
539
|
+
local removed = redis.call("ZREM", KEYS[1], jobId)
|
|
540
|
+
if removed > 0 then
|
|
541
|
+
local jobData = redis.call("HGET", KEYS[3], jobId)
|
|
542
|
+
if jobData then
|
|
543
|
+
local job = cjson.decode(jobData)
|
|
544
|
+
job.status = "waiting"
|
|
545
|
+
redis.call("HSET", KEYS[3], jobId, cjson.encode(job))
|
|
546
|
+
redis.call("LPUSH", KEYS[2], jobId)
|
|
547
|
+
promoted = promoted + 1
|
|
548
|
+
end
|
|
549
|
+
end
|
|
550
|
+
end
|
|
551
|
+
|
|
552
|
+
return promoted
|
|
553
|
+
`;
|
|
554
|
+
var create3 = (config) => {
|
|
555
|
+
const { name, schema, prefix = "jobs" } = config;
|
|
556
|
+
const keyPrefix = `${prefix}:${name}`;
|
|
557
|
+
const keys = {
|
|
558
|
+
jobs: `${keyPrefix}:data`,
|
|
559
|
+
waiting: `${keyPrefix}:waiting`,
|
|
560
|
+
delayed: `${keyPrefix}:delayed`,
|
|
561
|
+
active: `${keyPrefix}:active`,
|
|
562
|
+
deadlines: `${keyPrefix}:deadlines`,
|
|
563
|
+
completed: `${keyPrefix}:completed`,
|
|
564
|
+
failed: `${keyPrefix}:failed`,
|
|
565
|
+
id: `${keyPrefix}:id`,
|
|
566
|
+
tags: `${keyPrefix}:tags`,
|
|
567
|
+
unique: `${keyPrefix}:unique`
|
|
568
|
+
};
|
|
569
|
+
const generateId = async () => {
|
|
570
|
+
const counter = await redis3.incr(keys.id);
|
|
571
|
+
const random = randomBytes2(4).toString("hex");
|
|
572
|
+
return `${counter}-${random}`;
|
|
573
|
+
};
|
|
574
|
+
const deserializeJob = (data) => {
|
|
575
|
+
let job;
|
|
576
|
+
try {
|
|
577
|
+
job = JSON.parse(data);
|
|
578
|
+
} catch (error) {
|
|
579
|
+
throw new JobsError("Invalid job payload in Redis", "INVALID_JOB_DATA");
|
|
580
|
+
}
|
|
581
|
+
if (job.invalidData) {
|
|
582
|
+
return job;
|
|
583
|
+
}
|
|
584
|
+
const result = schema.safeParse(job.data);
|
|
585
|
+
if (!result.success) {
|
|
586
|
+
throw new ValidationError(result.error.issues);
|
|
587
|
+
}
|
|
588
|
+
return job;
|
|
589
|
+
};
|
|
590
|
+
const markPoisonedJob = async (jobId, jobData, error) => {
|
|
591
|
+
const now = Date.now();
|
|
592
|
+
let parsed = null;
|
|
593
|
+
try {
|
|
594
|
+
parsed = JSON.parse(jobData);
|
|
595
|
+
} catch {
|
|
596
|
+
parsed = null;
|
|
597
|
+
}
|
|
598
|
+
const poisoned = parsed ? {
|
|
599
|
+
...parsed,
|
|
600
|
+
status: "failed",
|
|
601
|
+
completedAt: now,
|
|
602
|
+
error: error.message,
|
|
603
|
+
invalidData: true
|
|
604
|
+
} : {
|
|
605
|
+
id: jobId,
|
|
606
|
+
data: null,
|
|
607
|
+
status: "failed",
|
|
608
|
+
attempts: 1,
|
|
609
|
+
maxRetries: 0,
|
|
610
|
+
timeout: 0,
|
|
611
|
+
createdAt: now,
|
|
612
|
+
completedAt: now,
|
|
613
|
+
error: error.message,
|
|
614
|
+
invalidData: true
|
|
615
|
+
};
|
|
616
|
+
await redis3.send("LREM", [keys.active, "0", jobId]);
|
|
617
|
+
await redis3.send("ZREM", [keys.deadlines, jobId]);
|
|
618
|
+
await redis3.send("HSET", [keys.jobs, jobId, JSON.stringify(poisoned)]);
|
|
619
|
+
await redis3.send("SADD", [keys.failed, jobId]);
|
|
620
|
+
if (parsed?.key) {
|
|
621
|
+
await redis3.send("HDEL", [keys.unique, parsed.key]);
|
|
622
|
+
}
|
|
623
|
+
};
|
|
624
|
+
const send = async (data, options = {}) => {
|
|
625
|
+
const result = schema.safeParse(data);
|
|
626
|
+
if (!result.success) {
|
|
627
|
+
throw new ValidationError(result.error.issues);
|
|
628
|
+
}
|
|
629
|
+
const id = await generateId();
|
|
630
|
+
const now = Date.now();
|
|
631
|
+
const interval = "interval" in options ? options.interval : undefined;
|
|
632
|
+
const startImmediately = "startImmediately" in options ? options.startImmediately : undefined;
|
|
633
|
+
const tag = options.tag;
|
|
634
|
+
const key = options.key;
|
|
635
|
+
const backoff = options.backoff;
|
|
636
|
+
if (key) {
|
|
637
|
+
const added = await redis3.send("HSETNX", [keys.unique, key, id]);
|
|
638
|
+
if (added === 0) {
|
|
639
|
+
throw new DuplicateJobError(key);
|
|
640
|
+
}
|
|
641
|
+
}
|
|
642
|
+
const job = {
|
|
643
|
+
id,
|
|
644
|
+
data: result.data,
|
|
645
|
+
status: "waiting",
|
|
646
|
+
attempts: 0,
|
|
647
|
+
maxRetries: options.retries ?? DEFAULT_RETRIES,
|
|
648
|
+
timeout: options.timeout ?? DEFAULT_TIMEOUT,
|
|
649
|
+
backoff,
|
|
650
|
+
interval,
|
|
651
|
+
tag,
|
|
652
|
+
key,
|
|
653
|
+
createdAt: now
|
|
654
|
+
};
|
|
655
|
+
if ("delay" in options && options.delay !== undefined) {
|
|
656
|
+
job.status = "delayed";
|
|
657
|
+
job.scheduledAt = now + options.delay;
|
|
658
|
+
await redis3.hset(keys.jobs, id, JSON.stringify(job));
|
|
659
|
+
await redis3.send("ZADD", [keys.delayed, job.scheduledAt.toString(), id]);
|
|
660
|
+
} else if ("at" in options && options.at !== undefined) {
|
|
661
|
+
job.status = "delayed";
|
|
662
|
+
job.scheduledAt = options.at;
|
|
663
|
+
await redis3.hset(keys.jobs, id, JSON.stringify(job));
|
|
664
|
+
await redis3.send("ZADD", [keys.delayed, job.scheduledAt.toString(), id]);
|
|
665
|
+
} else if (interval !== undefined && !startImmediately) {
|
|
666
|
+
job.status = "delayed";
|
|
667
|
+
job.scheduledAt = now + interval;
|
|
668
|
+
await redis3.hset(keys.jobs, id, JSON.stringify(job));
|
|
669
|
+
await redis3.send("ZADD", [keys.delayed, job.scheduledAt.toString(), id]);
|
|
670
|
+
} else {
|
|
671
|
+
await redis3.hset(keys.jobs, id, JSON.stringify(job));
|
|
672
|
+
await redis3.lpush(keys.waiting, id);
|
|
673
|
+
}
|
|
674
|
+
if (tag) {
|
|
675
|
+
await redis3.sadd(keys.tags, tag);
|
|
676
|
+
await redis3.sadd(`${keyPrefix}:tag:${tag}`, id);
|
|
677
|
+
}
|
|
678
|
+
return job;
|
|
679
|
+
};
|
|
680
|
+
const process = (handler, options = {}) => {
|
|
681
|
+
const {
|
|
682
|
+
concurrency = DEFAULT_CONCURRENCY,
|
|
683
|
+
blockingTimeoutSecs,
|
|
684
|
+
pollInterval,
|
|
685
|
+
maintenanceIntervalMs,
|
|
686
|
+
onSuccess,
|
|
687
|
+
onError,
|
|
688
|
+
onFinally
|
|
689
|
+
} = options;
|
|
690
|
+
const blockingTimeoutSecsValue = blockingTimeoutSecs ?? Math.max(1, Math.ceil((pollInterval ?? DEFAULT_POLL_INTERVAL) / 1000));
|
|
691
|
+
const maintenanceIntervalMsValue = Math.max(10, maintenanceIntervalMs ?? pollInterval ?? DEFAULT_POLL_INTERVAL);
|
|
692
|
+
let running = true;
|
|
693
|
+
let activeCount = 0;
|
|
694
|
+
let drainResolve = null;
|
|
695
|
+
redis3.send("EVAL", [
|
|
696
|
+
RECOVER_ACTIVE_JOBS_SCRIPT,
|
|
697
|
+
"7",
|
|
698
|
+
keys.active,
|
|
699
|
+
keys.jobs,
|
|
700
|
+
keys.deadlines,
|
|
701
|
+
keys.waiting,
|
|
702
|
+
keys.failed,
|
|
703
|
+
keys.delayed,
|
|
704
|
+
keys.unique,
|
|
705
|
+
Date.now().toString()
|
|
706
|
+
]).catch((err) => {
|
|
707
|
+
console.error("Recovery of active jobs failed:", err);
|
|
708
|
+
});
|
|
709
|
+
const processLoop = async (client) => {
|
|
710
|
+
if (!client.connected) {
|
|
711
|
+
await client.connect();
|
|
712
|
+
}
|
|
713
|
+
while (running) {
|
|
714
|
+
try {
|
|
715
|
+
const jobId = await client.send("BRPOPLPUSH", [
|
|
716
|
+
keys.waiting,
|
|
717
|
+
keys.active,
|
|
718
|
+
blockingTimeoutSecsValue.toString()
|
|
719
|
+
]);
|
|
720
|
+
if (!jobId) {
|
|
721
|
+
continue;
|
|
722
|
+
}
|
|
723
|
+
const now = Date.now();
|
|
724
|
+
const claimResult = await redis3.send("EVAL", [
|
|
725
|
+
CLAIM_JOB_BY_ID_SCRIPT,
|
|
726
|
+
"4",
|
|
727
|
+
keys.jobs,
|
|
728
|
+
keys.active,
|
|
729
|
+
keys.deadlines,
|
|
730
|
+
keys.failed,
|
|
731
|
+
jobId,
|
|
732
|
+
now.toString()
|
|
733
|
+
]);
|
|
734
|
+
if (!claimResult) {
|
|
735
|
+
continue;
|
|
736
|
+
}
|
|
737
|
+
const [, jobData, claimStatus] = claimResult;
|
|
738
|
+
if (claimStatus === "invalid") {
|
|
739
|
+
continue;
|
|
740
|
+
}
|
|
741
|
+
let job;
|
|
742
|
+
try {
|
|
743
|
+
job = deserializeJob(jobData);
|
|
744
|
+
} catch (error) {
|
|
745
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
746
|
+
await markPoisonedJob(jobId, jobData, err);
|
|
747
|
+
continue;
|
|
748
|
+
}
|
|
749
|
+
activeCount++;
|
|
750
|
+
try {
|
|
751
|
+
await handler(job);
|
|
752
|
+
await redis3.send("EVAL", [
|
|
753
|
+
COMPLETE_JOB_SCRIPT,
|
|
754
|
+
"6",
|
|
755
|
+
keys.jobs,
|
|
756
|
+
keys.active,
|
|
757
|
+
keys.completed,
|
|
758
|
+
keys.deadlines,
|
|
759
|
+
keys.delayed,
|
|
760
|
+
keys.unique,
|
|
761
|
+
job.id,
|
|
762
|
+
Date.now().toString()
|
|
763
|
+
]);
|
|
764
|
+
if (onSuccess) {
|
|
765
|
+
try {
|
|
766
|
+
await onSuccess(job);
|
|
767
|
+
} catch (onSuccessError) {
|
|
768
|
+
console.error("onSuccess callback failed:", onSuccessError);
|
|
769
|
+
}
|
|
770
|
+
}
|
|
771
|
+
} catch (error) {
|
|
772
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
773
|
+
const result = await redis3.send("EVAL", [
|
|
774
|
+
FAIL_OR_RETRY_SCRIPT,
|
|
775
|
+
"7",
|
|
776
|
+
keys.jobs,
|
|
777
|
+
keys.active,
|
|
778
|
+
keys.waiting,
|
|
779
|
+
keys.failed,
|
|
780
|
+
keys.deadlines,
|
|
781
|
+
keys.delayed,
|
|
782
|
+
keys.unique,
|
|
783
|
+
job.id,
|
|
784
|
+
err.message,
|
|
785
|
+
Date.now().toString()
|
|
786
|
+
]);
|
|
787
|
+
if (onError && (result === 2 || result === 3)) {
|
|
788
|
+
try {
|
|
789
|
+
await onError(job, err);
|
|
790
|
+
} catch (onErrorError) {
|
|
791
|
+
console.error("onError callback failed:", onErrorError);
|
|
792
|
+
}
|
|
793
|
+
}
|
|
794
|
+
} finally {
|
|
795
|
+
activeCount--;
|
|
796
|
+
if (onFinally) {
|
|
797
|
+
try {
|
|
798
|
+
await onFinally(job);
|
|
799
|
+
} catch (onFinallyError) {
|
|
800
|
+
console.error("onFinally callback failed:", onFinallyError);
|
|
801
|
+
}
|
|
802
|
+
}
|
|
803
|
+
if (!running && activeCount === 0 && drainResolve) {
|
|
804
|
+
drainResolve();
|
|
805
|
+
}
|
|
806
|
+
}
|
|
807
|
+
} catch (error) {
|
|
808
|
+
if (!running)
|
|
809
|
+
break;
|
|
810
|
+
console.error("Worker error:", error);
|
|
811
|
+
await sleep2(100);
|
|
812
|
+
}
|
|
813
|
+
}
|
|
814
|
+
};
|
|
815
|
+
const workerPromises = [];
|
|
816
|
+
const workerClients = [];
|
|
817
|
+
for (let i = 0;i < concurrency; i++) {
|
|
818
|
+
const workerClient = new RedisClient;
|
|
819
|
+
workerClients.push(workerClient);
|
|
820
|
+
workerPromises.push(processLoop(workerClient));
|
|
821
|
+
}
|
|
822
|
+
const maintenanceLoop = async () => {
|
|
823
|
+
while (running) {
|
|
824
|
+
try {
|
|
825
|
+
await redis3.send("EVAL", [
|
|
826
|
+
PROMOTE_DELAYED_SCRIPT,
|
|
827
|
+
"3",
|
|
828
|
+
keys.delayed,
|
|
829
|
+
keys.waiting,
|
|
830
|
+
keys.jobs,
|
|
831
|
+
Date.now().toString()
|
|
832
|
+
]);
|
|
833
|
+
await redis3.send("EVAL", [
|
|
834
|
+
FAIL_TIMED_OUT_SCRIPT,
|
|
835
|
+
"5",
|
|
836
|
+
keys.deadlines,
|
|
837
|
+
keys.jobs,
|
|
838
|
+
keys.active,
|
|
839
|
+
keys.failed,
|
|
840
|
+
keys.unique,
|
|
841
|
+
Date.now().toString()
|
|
842
|
+
]);
|
|
843
|
+
} catch (error) {
|
|
844
|
+
if (!running)
|
|
845
|
+
break;
|
|
846
|
+
console.error("Worker error:", error);
|
|
847
|
+
}
|
|
848
|
+
await sleep2(maintenanceIntervalMsValue);
|
|
849
|
+
}
|
|
850
|
+
};
|
|
851
|
+
maintenanceLoop();
|
|
852
|
+
const safeClose = (client) => {
|
|
853
|
+
if (!client.connected)
|
|
854
|
+
return;
|
|
855
|
+
try {
|
|
856
|
+
client.close();
|
|
857
|
+
} catch (error) {
|
|
858
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
859
|
+
if (!("code" in err) || err.code !== "ERR_REDIS_CONNECTION_CLOSED") {
|
|
860
|
+
console.error("Failed to close worker client:", error);
|
|
861
|
+
}
|
|
862
|
+
}
|
|
863
|
+
};
|
|
864
|
+
return async () => {
|
|
865
|
+
running = false;
|
|
866
|
+
for (const client of workerClients) {
|
|
867
|
+
safeClose(client);
|
|
868
|
+
}
|
|
869
|
+
if (activeCount > 0) {
|
|
870
|
+
await new Promise((resolve) => {
|
|
871
|
+
drainResolve = resolve;
|
|
872
|
+
});
|
|
873
|
+
}
|
|
874
|
+
};
|
|
875
|
+
};
|
|
876
|
+
/**
 * List jobs with optional tag/status filtering and pagination.
 *
 * @param {{tag?: string, status?: string, offset?: number, limit?: number}} options
 * @returns {Promise<{total: number, jobs: object[]}>} total matches plus the
 *   requested page of deserialized jobs.
 */
const list = async (options = {}) => {
  const { tag, status, offset = 0, limit = 100 } = options;
  // The Lua script receives flags ("1"/"0") telling it which filters apply.
  const evalArgs = [
    LIST_JOBS_SCRIPT,
    "2",
    keys.jobs,
    tag ? `${keyPrefix}:tag:${tag}` : "",
    tag ? "1" : "0",
    status ? "1" : "0",
    status ?? "",
    String(offset),
    String(limit)
  ];
  const [total, ...serializedJobs] = await redis3.send("EVAL", evalArgs);
  return {
    total,
    jobs: serializedJobs.map((entry) => deserializeJob(entry))
  };
};
|
|
896
|
+
/**
 * Cancel a job by id. The Lua script atomically removes it from every state
 * structure (waiting, delayed, active, deadlines, completed, failed) and
 * releases its uniqueness key.
 *
 * @param {string} jobId - id of the job to cancel
 * @returns {Promise<boolean>} true when the job existed and was removed.
 */
const cancel = async (jobId) => {
  const cancelKeys = [
    keys.jobs,
    keys.waiting,
    keys.delayed,
    keys.active,
    keys.deadlines,
    keys.completed,
    keys.failed,
    keys.unique
  ];
  const removed = await redis3.send("EVAL", [
    CANCEL_JOB_SCRIPT,
    String(cancelKeys.length),
    ...cancelKeys,
    jobId,
    keyPrefix
  ]);
  return removed === 1;
};
|
|
913
|
+
return { send, process, list, cancel };
|
|
914
|
+
};
|
|
915
|
+
// Public jobs API: namespace exposing the job-queue factory.
var jobs = { create: create3 };
|
|
916
|
+
// src/queue.ts
// Bun built-ins: the shared redis connection, the RedisClient constructor
// (for dedicated blocking-pop worker connections), and a promise-based sleep.
var {redis: redis4, RedisClient: RedisClient2, sleep: sleep3 } = globalThis.Bun;
// Default poll interval in milliseconds, converted to a blocking-pop timeout
// when the caller supplies neither blockingTimeoutSecs nor pollInterval.
var DEFAULT_POLL_INTERVAL2 = 1000;
// Default number of concurrent worker loops per process() call.
var DEFAULT_CONCURRENCY2 = 1;
|
|
920
|
+
|
|
921
|
+
/**
 * Base error type for queue operations.
 * Carries a machine-readable `code` alongside the human-readable message so
 * callers can branch on the failure kind without string-matching messages.
 */
class QueueError extends Error {
  /** Machine-readable error code (e.g. "VALIDATION_ERROR"). */
  code;

  /**
   * @param {string} message - human-readable description of the failure
   * @param {string} code - machine-readable error code
   */
  constructor(message, code) {
    super(message);
    this.name = "QueueError";
    this.code = code;
  }
}
|
|
929
|
+
|
|
930
|
+
/**
 * Raised when a queue message fails schema validation, on send or receive.
 * Always uses code "VALIDATION_ERROR"; the individual schema issues are
 * exposed on `issues` for inspection.
 */
class QueueValidationError extends QueueError {
  /** Validation issues reported by the schema's safeParse. */
  issues;

  /**
   * @param {unknown[]} issues - the schema validation issues
   */
  constructor(issues) {
    super("Queue message validation failed", "VALIDATION_ERROR");
    this.name = "QueueValidationError";
    this.issues = issues;
  }
}
|
|
938
|
+
// Factory for a simple FIFO queue backed by a single Redis list.
// Producers LPUSH serialized messages; workers BRPOP them, so consumption
// is oldest-first. `schema` is expected to expose safeParse() returning
// { success, data | error.issues } (zod-style) — presumably a zod schema;
// TODO confirm against the package's type declarations.
var create4 = (config) => {
  const { name, schema, prefix = "queue" } = config;
  // Single Redis key holding the queue's list of serialized messages.
  const key = `${prefix}:${name}`;
  // Validate and enqueue one message. Throws QueueValidationError on
  // schema failure; stores the *parsed* (possibly transformed) data.
  const send = async (data) => {
    const result = schema.safeParse(data);
    if (!result.success) {
      throw new QueueValidationError(result.error.issues);
    }
    await redis4.lpush(key, JSON.stringify(result.data));
  };
  // Validate and enqueue many messages with a single variadic LPUSH.
  // All items are validated before anything is pushed, so a failure midway
  // enqueues nothing.
  const sendBatch = async (data) => {
    if (data.length === 0)
      return;
    const serialized = [];
    for (const item of data) {
      const result = schema.safeParse(item);
      if (!result.success) {
        throw new QueueValidationError(result.error.issues);
      }
      serialized.push(JSON.stringify(result.data));
    }
    await redis4.send("LPUSH", [key, ...serialized]);
  };
  // Start `concurrency` worker loops that each block-pop messages and feed
  // them to `handler`. Returns a stop function that halts the loops.
  const process = (handler, options = {}) => {
    const {
      concurrency = DEFAULT_CONCURRENCY2,
      blockingTimeoutSecs,
      pollInterval,
      onSuccess,
      onError
    } = options;
    // BRPOP timeout in whole seconds; derived from pollInterval (ms) when
    // not given explicitly, with a floor of 1 second.
    const blockingTimeoutSecsValue = blockingTimeoutSecs ?? Math.max(1, Math.ceil((pollInterval ?? DEFAULT_POLL_INTERVAL2) / 1000));
    let running = true;
    // One loop per dedicated client: BRPOP blocks the connection, so each
    // worker needs its own RedisClient.
    const processLoop = async (client) => {
      if (!client.connected) {
        await client.connect();
      }
      while (running) {
        try {
          const popResult = await client.send("BRPOP", [key, blockingTimeoutSecsValue.toString()]);
          // null means the blocking timeout elapsed with no message.
          if (!popResult) {
            continue;
          }
          // BRPOP returns [listKey, value]; only the value is needed.
          const [, raw] = popResult;
          // NOTE(review): a non-JSON payload makes JSON.parse throw into the
          // outer catch — the message is already popped and thus dropped,
          // logged as a generic worker error.
          const parsed = JSON.parse(raw);
          const parsedResult = schema.safeParse(parsed);
          if (!parsedResult.success) {
            // Schema-invalid messages are logged and dropped, not retried.
            console.error("Invalid message in queue:", parsedResult.error.issues);
            continue;
          }
          const data = parsedResult.data;
          try {
            await handler(data);
            if (onSuccess) {
              try {
                await onSuccess(data);
              } catch (callbackError) {
                // Callback failures never affect the worker loop.
                console.error("onSuccess callback failed:", callbackError);
              }
            }
          } catch (error) {
            const err = error instanceof Error ? error : new Error(String(error));
            if (onError) {
              try {
                await onError(data, err);
              } catch (callbackError) {
                console.error("onError callback failed:", callbackError);
              }
            }
          }
        } catch (error) {
          // During shutdown the closed client raises here; exit quietly.
          if (!running)
            break;
          console.error("Queue worker error:", error);
          // Brief backoff so a persistent connection error does not spin.
          await sleep3(100);
        }
      }
    };
    const workerClients = [];
    for (let i = 0;i < concurrency; i++) {
      const workerClient = new RedisClient2;
      workerClients.push(workerClient);
      // Fire-and-forget: processLoop handles its own errors internally.
      processLoop(workerClient);
    }
    // Stop function: flips the flag and closes every worker client, which
    // unblocks any in-flight BRPOP. Does not wait for running handlers.
    return () => {
      running = false;
      for (const client of workerClients) {
        if (!client.connected)
          continue;
        try {
          client.close();
        } catch (error) {
          const err = error instanceof Error ? error : new Error(String(error));
          // An already-closed connection is expected during shutdown.
          if (!("code" in err) || err.code !== "ERR_REDIS_CONNECTION_CLOSED") {
            console.error("Failed to close worker client:", error);
          }
        }
      }
    };
  };
  // Current number of messages waiting in the queue.
  const size = async () => {
    return await redis4.llen(key);
  };
  // Delete the queue's backing list, discarding all pending messages.
  const purge = async () => {
    await redis4.del(key);
  };
  return { send, sendBatch, process, size, purge };
};
|
|
1046
|
+
// Public queue API: namespace exposing the FIFO-queue factory.
var queue = { create: create4 };
// Package entry points: the four module namespaces plus the error classes
// consumers may need for instanceof checks.
export {
  ratelimit,
  queue,
  mutex,
  jobs,
  ValidationError,
  RateLimitError,
  QueueValidationError,
  QueueError,
  LockError,
  JobsError,
  DuplicateJobError
};
|