@valentinkolb/sync 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/publish.yml +72 -0
- package/CLAUDE.md +106 -0
- package/LICENSE +21 -0
- package/README.md +292 -0
- package/bun.lock +29 -0
- package/compose.test.yml +7 -0
- package/index.ts +18 -0
- package/package.json +21 -0
- package/src/jobs.ts +568 -0
- package/src/mutex.ts +203 -0
- package/src/ratelimit.ts +143 -0
- package/tests/jobs.test.ts +465 -0
- package/tests/mutex.test.ts +223 -0
- package/tests/preload.ts +2 -0
- package/tests/ratelimit.test.ts +119 -0
- package/tsconfig.json +31 -0

package/tests/jobs.test.ts
@@ -0,0 +1,465 @@

```ts
import { test, expect, beforeEach } from "bun:test";
import { redis } from "bun";
import { z } from "zod";
import { jobs, ValidationError } from "../index";

const testSchema = z.object({
  message: z.string(),
});

// Clean up Redis before each test
beforeEach(async () => {
  const keys = await redis.send("KEYS", ["jobs:test:*"]);
  if (Array.isArray(keys) && keys.length > 0) {
    await redis.send("DEL", keys as string[]);
  }
});

test("send creates a job with correct properties", async () => {
  const q = jobs.create({
    name: "test:send",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const job = await q.send({ message: "hello" });

  expect(job.id).toBeDefined();
  expect(job.data.message).toBe("hello");
  expect(job.status).toBe("waiting");
  expect(job.attempts).toBe(0);
  expect(job.maxRetries).toBe(0);
  expect(job.timeout).toBe(30000);
  expect(job.createdAt).toBeGreaterThan(0);
});

test("send validates data against schema", async () => {
  const q = jobs.create({
    name: "test:validate",
    schema: testSchema,
    prefix: "jobs:test",
  });

  try {
    // @ts-expect-error - intentionally invalid
    await q.send({ invalid: "data" });
    expect(true).toBe(false); // Should not reach
  } catch (e) {
    expect(e).toBeInstanceOf(ValidationError);
  }
});

test("send with delay creates delayed job", async () => {
  const q = jobs.create({
    name: "test:delay",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const job = await q.send({ message: "delayed" }, { delay: 5000 });

  expect(job.status).toBe("delayed");
  expect(job.scheduledAt).toBeGreaterThan(Date.now());
});

test("send with at creates scheduled job", async () => {
  const q = jobs.create({
    name: "test:at",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const scheduledTime = Date.now() + 10000;
  const job = await q.send({ message: "scheduled" }, { at: scheduledTime });

  expect(job.status).toBe("delayed");
  expect(job.scheduledAt).toBe(scheduledTime);
});

test("send with interval creates periodic job", async () => {
  const q = jobs.create({
    name: "test:interval",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const job = await q.send({ message: "periodic" }, { interval: 60000 });

  expect(job.status).toBe("delayed"); // Starts delayed
  expect(job.interval).toBe(60000);
  expect(job.scheduledAt).toBeGreaterThan(Date.now());
});

test("send with interval and startImmediately creates immediate periodic job", async () => {
  const q = jobs.create({
    name: "test:interval-immediate",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const job = await q.send({ message: "periodic" }, { interval: 60000, startImmediately: true });

  expect(job.status).toBe("waiting"); // Starts immediately
  expect(job.interval).toBe(60000);
});

test("send with retries sets maxRetries", async () => {
  const q = jobs.create({
    name: "test:retries",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const job = await q.send({ message: "retry" }, { retries: 3 });

  expect(job.maxRetries).toBe(3);
});

test("send with timeout sets timeout", async () => {
  const q = jobs.create({
    name: "test:timeout",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const job = await q.send({ message: "timeout" }, { timeout: 60000 });

  expect(job.timeout).toBe(60000);
});

test("process executes handler for jobs", async () => {
  const q = jobs.create({
    name: "test:process",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const processed: string[] = [];

  await q.send({ message: "job1" });
  await q.send({ message: "job2" });

  const stop = q.process(
    async (job) => {
      processed.push(job.data.message);
    },
    { pollInterval: 50 },
  );

  // Wait for processing
  await Bun.sleep(200);
  stop();

  expect(processed).toContain("job1");
  expect(processed).toContain("job2");
});

test("process calls onSuccess on successful job", async () => {
  const q = jobs.create({
    name: "test:onsuccess",
    schema: testSchema,
    prefix: "jobs:test",
  });

  let successCalled = false;
  let successJobId = "";

  await q.send({ message: "success" });

  const stop = q.process(
    async () => {
      // Success
    },
    {
      pollInterval: 50,
      onSuccess: (job) => {
        successCalled = true;
        successJobId = job.id;
      },
    },
  );

  await Bun.sleep(200);
  stop();

  expect(successCalled).toBe(true);
  expect(successJobId).toBeDefined();
});

test("process retries failed jobs", async () => {
  const q = jobs.create({
    name: "test:retry",
    schema: testSchema,
    prefix: "jobs:test",
  });

  let attempts = 0;

  await q.send({ message: "retry" }, { retries: 2 });

  const stop = q.process(
    async () => {
      attempts++;
      if (attempts < 3) {
        throw new Error("fail");
      }
    },
    { pollInterval: 50 },
  );

  await Bun.sleep(500);
  stop();

  expect(attempts).toBe(3); // 1 initial + 2 retries
});

test("process calls onError when job permanently fails", async () => {
  const q = jobs.create({
    name: "test:onerror",
    schema: testSchema,
    prefix: "jobs:test",
  });

  let errorCalled = false;
  let errorMessage = "";

  await q.send({ message: "fail" }, { retries: 0 });

  const stop = q.process(
    async () => {
      throw new Error("permanent failure");
    },
    {
      pollInterval: 50,
      onError: (job, error) => {
        errorCalled = true;
        errorMessage = error.message;
      },
    },
  );

  await Bun.sleep(200);
  stop();

  expect(errorCalled).toBe(true);
  expect(errorMessage).toBe("permanent failure");
});

test("process calls onFinally after every attempt", async () => {
  const q = jobs.create({
    name: "test:onfinally",
    schema: testSchema,
    prefix: "jobs:test",
  });

  let finallyCalls = 0;

  await q.send({ message: "finally" }, { retries: 1 });

  const stop = q.process(
    async () => {
      throw new Error("fail");
    },
    {
      pollInterval: 50,
      onFinally: () => {
        finallyCalls++;
      },
    },
  );

  await Bun.sleep(300);
  stop();

  expect(finallyCalls).toBe(2); // 1 initial + 1 retry
});

test("delayed jobs are promoted when ready", async () => {
  const q = jobs.create({
    name: "test:promote",
    schema: testSchema,
    prefix: "jobs:test",
  });

  let processed = false;

  await q.send({ message: "delayed" }, { delay: 100 });

  const stop = q.process(
    async () => {
      processed = true;
    },
    { pollInterval: 50 },
  );

  // Should not be processed yet
  await Bun.sleep(50);
  expect(processed).toBe(false);

  // Should be processed after delay
  await Bun.sleep(200);
  expect(processed).toBe(true);

  stop();
});

test("interval jobs are rescheduled after completion", async () => {
  const q = jobs.create({
    name: "test:interval-reschedule",
    schema: testSchema,
    prefix: "jobs:test",
  });

  let processCount = 0;

  await q.send({ message: "interval" }, { interval: 100, startImmediately: true });

  const stop = q.process(
    async () => {
      processCount++;
    },
    { pollInterval: 50 },
  );

  await Bun.sleep(350);
  stop();

  expect(processCount).toBeGreaterThanOrEqual(2);
});

test("interval jobs are rescheduled after failure", async () => {
  const q = jobs.create({
    name: "test:interval-fail",
    schema: testSchema,
    prefix: "jobs:test",
  });

  let processCount = 0;
  let errorCount = 0;

  await q.send({ message: "interval-fail" }, { interval: 100, startImmediately: true, retries: 0 });

  const stop = q.process(
    async () => {
      processCount++;
      throw new Error("always fail");
    },
    {
      pollInterval: 50,
      onError: () => {
        errorCount++;
      },
    },
  );

  await Bun.sleep(350);
  stop();

  expect(processCount).toBeGreaterThanOrEqual(2);
  expect(errorCount).toBeGreaterThanOrEqual(2);
});

test("concurrent processing with multiple workers", async () => {
  const q = jobs.create({
    name: "test:concurrent",
    schema: testSchema,
    prefix: "jobs:test",
  });

  const processed: string[] = [];
  const startTimes: number[] = [];

  // Send 5 jobs
  for (let i = 0; i < 5; i++) {
    await q.send({ message: `job${i}` });
  }

  const stop = q.process(
    async (job) => {
      startTimes.push(Date.now());
      await Bun.sleep(100); // Simulate work
      processed.push(job.data.message);
    },
    { concurrency: 3, pollInterval: 50 },
  );

  await Bun.sleep(500);
  stop();

  expect(processed.length).toBe(5);

  // Check that some jobs started concurrently (within 50ms of each other)
  const concurrent = startTimes.filter((t, i) => i > 0 && t - startTimes[i - 1] < 50);
  expect(concurrent.length).toBeGreaterThan(0);
});

test("job timeout causes hard fail", async () => {
  const q = jobs.create({
    name: "test:timeout-fail",
    schema: testSchema,
    prefix: "jobs:test",
  });

  await q.send({ message: "slow" }, { timeout: 100, retries: 0 });

  // Start a processor that will claim the job but not finish it
  let jobClaimed = false;
  const stop = q.process(
    async () => {
      jobClaimed = true;
      // Simulate a hanging job - sleep longer than timeout
      await Bun.sleep(500);
    },
    { pollInterval: 50 },
  );

  // Wait for job to be claimed
  await Bun.sleep(80);
  expect(jobClaimed).toBe(true);

  // Stop processor so we can start a new one that will check timeouts
  stop();

  // Wait for timeout to expire
  await Bun.sleep(100);

  // Start new processor to trigger timeout check
  let timeoutDetected = false;
  const stop2 = q.process(async () => {}, {
    pollInterval: 50,
  });

  // Wait for timeout check to run
  await Bun.sleep(150);
  stop2();

  // Verify job was marked as failed
  const failedCount = await redis.scard("jobs:test:test:timeout-fail:failed");
  expect(failedCount).toBe(1);
});

test("stop function stops processing", async () => {
  const q = jobs.create({
    name: "test:stop",
    schema: testSchema,
    prefix: "jobs:test",
  });

  let processCount = 0;

  const stop = q.process(
    async () => {
      processCount++;
    },
    { pollInterval: 50 },
  );

  stop();

  // Send jobs after stop
  await q.send({ message: "after-stop" });

  await Bun.sleep(200);

  expect(processCount).toBe(0);
});
```

package/tests/mutex.test.ts
@@ -0,0 +1,223 @@

```ts
import { test, expect, beforeEach } from "bun:test";
import { redis } from "bun";
import { mutex, LockError } from "../index";

// Clean up Redis before each test
beforeEach(async () => {
  const keys = await redis.send("KEYS", ["mutex:test:*"]);
  if (Array.isArray(keys) && keys.length > 0) {
    await redis.send("DEL", keys as string[]);
  }
});

test("acquires lock successfully", async () => {
  const m = mutex.create({ prefix: "mutex:test" });

  const lock = await m.acquire("resource:1");
  expect(lock).not.toBeNull();
  expect(lock!.resource).toBe("mutex:test:resource:1");
  expect(lock!.value).toHaveLength(32); // 16 bytes hex = 32 chars

  await m.release(lock!);
});

test("only one lock can be held at a time", async () => {
  const m = mutex.create({
    prefix: "mutex:test",
    retryCount: 0, // Don't retry
  });

  const lock1 = await m.acquire("resource:2");
  expect(lock1).not.toBeNull();

  const lock2 = await m.acquire("resource:2");
  expect(lock2).toBeNull();

  await m.release(lock1!);
});

test("lock is released and can be acquired again", async () => {
  const m = mutex.create({
    prefix: "mutex:test",
    retryCount: 0,
  });

  const lock1 = await m.acquire("resource:3");
  expect(lock1).not.toBeNull();

  await m.release(lock1!);

  const lock2 = await m.acquire("resource:3");
  expect(lock2).not.toBeNull();

  await m.release(lock2!);
});

test("lock expires after TTL", async () => {
  const m = mutex.create({
    prefix: "mutex:test",
    retryCount: 0,
    defaultTtl: 100, // 100ms TTL
  });

  const lock1 = await m.acquire("resource:4");
  expect(lock1).not.toBeNull();

  // Wait for lock to expire
  await Bun.sleep(150);

  // Should be able to acquire now
  const lock2 = await m.acquire("resource:4");
  expect(lock2).not.toBeNull();

  await m.release(lock2!);
});

test("withLock executes function and releases lock", async () => {
  const m = mutex.create({ prefix: "mutex:test" });

  let executed = false;
  const result = await m.withLock("resource:5", async () => {
    executed = true;
    return 42;
  });

  expect(executed).toBe(true);
  expect(result).toBe(42);

  // Lock should be released, can acquire again
  const lock = await m.acquire("resource:5");
  expect(lock).not.toBeNull();
  await m.release(lock!);
});

test("withLock releases lock on error", async () => {
  const m = mutex.create({ prefix: "mutex:test", retryCount: 0 });

  try {
    await m.withLock("resource:6", async () => {
      throw new Error("test error");
    });
  } catch (e) {
    expect((e as Error).message).toBe("test error");
  }

  // Lock should be released
  const lock = await m.acquire("resource:6");
  expect(lock).not.toBeNull();
  await m.release(lock!);
});

test("withLock returns null when lock cannot be acquired", async () => {
  const m = mutex.create({ prefix: "mutex:test", retryCount: 0 });

  // Hold the lock
  const lock1 = await m.acquire("resource:7");

  // Try withLock - should return null
  const result = await m.withLock("resource:7", async () => {
    return 42;
  });

  expect(result).toBeNull();

  await m.release(lock1!);
});

test("withLockOrThrow throws LockError when lock cannot be acquired", async () => {
  const m = mutex.create({ prefix: "mutex:test", retryCount: 0 });

  // Hold the lock
  const lock1 = await m.acquire("resource:8");

  try {
    await m.withLockOrThrow("resource:8", async () => {
      return 42;
    });
    expect(true).toBe(false); // Should not reach
  } catch (e) {
    expect(e).toBeInstanceOf(LockError);
    expect((e as LockError).resource).toBe("resource:8");
  }

  await m.release(lock1!);
});

test("extend prolongs lock TTL", async () => {
  const m = mutex.create({
    prefix: "mutex:test",
    retryCount: 0,
    defaultTtl: 100,
  });

  const lock = await m.acquire("resource:9");
  expect(lock).not.toBeNull();

  // Extend the lock
  const extended = await m.extend(lock!, 1000);
  expect(extended).toBe(true);
  expect(lock!.ttl).toBe(1000);

  // Wait past original TTL
  await Bun.sleep(150);

  // Lock should still be held (can't acquire)
  const lock2 = await m.acquire("resource:9");
  expect(lock2).toBeNull();

  await m.release(lock!);
});

test("extend fails if lock was lost", async () => {
  const m = mutex.create({
    prefix: "mutex:test",
    retryCount: 0,
    defaultTtl: 100,
  });

  const lock = await m.acquire("resource:10");
  expect(lock).not.toBeNull();

  // Wait for lock to expire
  await Bun.sleep(150);

  // Try to extend expired lock
  const extended = await m.extend(lock!, 1000);
  expect(extended).toBe(false);
});

test("different resources can be locked independently", async () => {
  const m = mutex.create({ prefix: "mutex:test", retryCount: 0 });

  const lockA = await m.acquire("resource:a");
  const lockB = await m.acquire("resource:b");

  expect(lockA).not.toBeNull();
  expect(lockB).not.toBeNull();

  await m.release(lockA!);
  await m.release(lockB!);
});

test("retries with delay when lock is held", async () => {
  const m = mutex.create({
    prefix: "mutex:test",
    retryCount: 5,
    retryDelay: 50,
    defaultTtl: 100,
  });

  // Acquire lock that will expire
  const lock1 = await m.acquire("resource:11");
  expect(lock1).not.toBeNull();

  // Try to acquire - should retry and eventually succeed
  const start = Date.now();
  const lock2 = await m.acquire("resource:11");
  const elapsed = Date.now() - start;

  expect(lock2).not.toBeNull();
  expect(elapsed).toBeGreaterThanOrEqual(50); // At least one retry

  await m.release(lock2!);
});
```

package/tests/preload.ts
ADDED