effect-distributed-lock 0.0.5 → 0.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +44 -6
- package/examples/concurrent.ts +111 -0
- package/examples/{index.ts → kitchen-sink.ts} +3 -1
- package/package.json +1 -1
- package/src/Backing.ts +21 -17
- package/src/DistributedSemaphore.ts +71 -16
- package/src/RedisBacking.ts +97 -7
- package/redis-semaphore/.codeclimate.yml +0 -5
- package/redis-semaphore/.fossa.yml +0 -14
- package/redis-semaphore/.github/dependabot.yml +0 -6
- package/redis-semaphore/.github/workflows/branches.yml +0 -39
- package/redis-semaphore/.github/workflows/pull-requests.yml +0 -35
- package/redis-semaphore/.mocharc.yaml +0 -6
- package/redis-semaphore/.prettierrc +0 -6
- package/redis-semaphore/.snyk +0 -4
- package/redis-semaphore/.yarnrc.yml +0 -2
- package/redis-semaphore/CHANGELOG.md +0 -70
- package/redis-semaphore/Dockerfile +0 -5
- package/redis-semaphore/LICENSE +0 -21
- package/redis-semaphore/README.md +0 -445
- package/redis-semaphore/docker-compose.yml +0 -31
- package/redis-semaphore/eslint.config.mjs +0 -73
- package/redis-semaphore/package.json +0 -79
- package/redis-semaphore/setup-redis-servers.sh +0 -2
- package/redis-semaphore/src/Lock.ts +0 -172
- package/redis-semaphore/src/RedisMultiSemaphore.ts +0 -56
- package/redis-semaphore/src/RedisMutex.ts +0 -45
- package/redis-semaphore/src/RedisSemaphore.ts +0 -49
- package/redis-semaphore/src/RedlockMultiSemaphore.ts +0 -56
- package/redis-semaphore/src/RedlockMutex.ts +0 -52
- package/redis-semaphore/src/RedlockSemaphore.ts +0 -49
- package/redis-semaphore/src/errors/LostLockError.ts +0 -1
- package/redis-semaphore/src/errors/TimeoutError.ts +0 -1
- package/redis-semaphore/src/index.ts +0 -23
- package/redis-semaphore/src/misc.ts +0 -12
- package/redis-semaphore/src/multiSemaphore/acquire/index.ts +0 -53
- package/redis-semaphore/src/multiSemaphore/acquire/lua.ts +0 -31
- package/redis-semaphore/src/multiSemaphore/refresh/index.ts +0 -32
- package/redis-semaphore/src/multiSemaphore/refresh/lua.ts +0 -31
- package/redis-semaphore/src/multiSemaphore/release/index.ts +0 -22
- package/redis-semaphore/src/multiSemaphore/release/lua.ts +0 -17
- package/redis-semaphore/src/mutex/acquire.ts +0 -42
- package/redis-semaphore/src/mutex/refresh.ts +0 -37
- package/redis-semaphore/src/mutex/release.ts +0 -30
- package/redis-semaphore/src/redlockMultiSemaphore/acquire.ts +0 -56
- package/redis-semaphore/src/redlockMultiSemaphore/refresh.ts +0 -68
- package/redis-semaphore/src/redlockMultiSemaphore/release.ts +0 -19
- package/redis-semaphore/src/redlockMutex/acquire.ts +0 -54
- package/redis-semaphore/src/redlockMutex/refresh.ts +0 -53
- package/redis-semaphore/src/redlockMutex/release.ts +0 -19
- package/redis-semaphore/src/redlockSemaphore/acquire.ts +0 -55
- package/redis-semaphore/src/redlockSemaphore/refresh.ts +0 -60
- package/redis-semaphore/src/redlockSemaphore/release.ts +0 -18
- package/redis-semaphore/src/semaphore/acquire/index.ts +0 -52
- package/redis-semaphore/src/semaphore/acquire/lua.ts +0 -25
- package/redis-semaphore/src/semaphore/refresh/index.ts +0 -31
- package/redis-semaphore/src/semaphore/refresh/lua.ts +0 -25
- package/redis-semaphore/src/semaphore/release.ts +0 -14
- package/redis-semaphore/src/types.ts +0 -63
- package/redis-semaphore/src/utils/createEval.ts +0 -45
- package/redis-semaphore/src/utils/index.ts +0 -13
- package/redis-semaphore/src/utils/redlock.ts +0 -7
- package/redis-semaphore/test/init.test.ts +0 -9
- package/redis-semaphore/test/redisClient.ts +0 -82
- package/redis-semaphore/test/setup.ts +0 -6
- package/redis-semaphore/test/shell.test.ts +0 -15
- package/redis-semaphore/test/shell.ts +0 -48
- package/redis-semaphore/test/src/Lock.test.ts +0 -37
- package/redis-semaphore/test/src/RedisMultiSemaphore.test.ts +0 -425
- package/redis-semaphore/test/src/RedisMutex.test.ts +0 -334
- package/redis-semaphore/test/src/RedisSemaphore.test.ts +0 -367
- package/redis-semaphore/test/src/RedlockMultiSemaphore.test.ts +0 -671
- package/redis-semaphore/test/src/RedlockMutex.test.ts +0 -328
- package/redis-semaphore/test/src/RedlockSemaphore.test.ts +0 -579
- package/redis-semaphore/test/src/index.test.ts +0 -22
- package/redis-semaphore/test/src/multiSemaphore/acquire/index.test.ts +0 -51
- package/redis-semaphore/test/src/multiSemaphore/acquire/internal.test.ts +0 -67
- package/redis-semaphore/test/src/multiSemaphore/refresh/index.test.ts +0 -52
- package/redis-semaphore/test/src/multiSemaphore/release/index.test.ts +0 -18
- package/redis-semaphore/test/src/mutex/acquire.test.ts +0 -78
- package/redis-semaphore/test/src/mutex/refresh.test.ts +0 -22
- package/redis-semaphore/test/src/mutex/release.test.ts +0 -17
- package/redis-semaphore/test/src/redlockMutex/acquire.test.ts +0 -90
- package/redis-semaphore/test/src/redlockMutex/refresh.test.ts +0 -27
- package/redis-semaphore/test/src/redlockMutex/release.test.ts +0 -17
- package/redis-semaphore/test/src/semaphore/acquire/index.test.ts +0 -49
- package/redis-semaphore/test/src/semaphore/acquire/internal.test.ts +0 -65
- package/redis-semaphore/test/src/semaphore/refresh/index.test.ts +0 -44
- package/redis-semaphore/test/src/semaphore/release.test.ts +0 -18
- package/redis-semaphore/test/src/utils/eval.test.ts +0 -22
- package/redis-semaphore/test/src/utils/index.test.ts +0 -19
- package/redis-semaphore/test/src/utils/redlock.test.ts +0 -31
- package/redis-semaphore/test/unhandledRejection.ts +0 -28
- package/redis-semaphore/tsconfig.build-commonjs.json +0 -9
- package/redis-semaphore/tsconfig.build-es.json +0 -9
- package/redis-semaphore/tsconfig.json +0 -11
- package/redis-semaphore/yarn.lock +0 -5338
package/README.md
CHANGED

@@ -12,6 +12,7 @@ It's like the built in `Effect.Semaphore`, but asynchronously distributed across
 - **Scope-based resource management** — permits are automatically released when the scope closes
 - **Automatic TTL refresh** — keeps permits alive while held, prevents deadlocks if holder crashes
 - **Pluggable backends** — ships with Redis (single-instance), easy to implement others
+- **Push-based waiting** — uses pub/sub for efficient notification when permits become available (optional, with polling fallback)
 - **Configurable retry policies** — control polling interval, TTL, and backing failure retry behavior
 - **Type-safe errors** — tagged errors for precise error handling
 
@@ -34,7 +35,7 @@ import Redis from "ioredis";
 import { DistributedSemaphore, RedisBacking } from "effect-distributed-lock";
 
 const redis = new Redis(process.env.REDIS_URL);
-const RedisLayer = RedisBacking.layer(redis);
+const RedisLayer = RedisBacking.layer(redis, { keyPrefix: "my-app:" });
 
 const program = Effect.gen(function* () {
   // Create a semaphore that allows 5 concurrent operations
@@ -216,7 +217,37 @@ import { RedisBacking } from "effect-distributed-lock";
 
 // Single Redis instance
 const redis = new Redis("redis://localhost:6379");
-const RedisLayer = RedisBacking.layer(redis,
+const RedisLayer = RedisBacking.layer(redis, {
+  keyPrefix: "my-prefix:",
+  pushBasedAcquireEnabled: true, // default: true
+});
+```
+
+### Configuration Options
+
+| Option | Type | Default | Description |
+| -------------------------- | ---------------- | ------------------ | ---------------------------------------------------- |
+| `keyPrefix` | `string` | `"semaphore:"` | Prefix for all Redis keys |
+| `pushBasedAcquireEnabled` | `boolean` | `true` | Use pub/sub for efficient waiting (see below) |
+| `pushStreamRetrySchedule` | `Schedule<void>` | `Schedule.forever` | Retry schedule for pub/sub stream errors |
+
+### Push-Based Acquisition
+
+By default, the Redis backing uses pub/sub to notify waiters when permits become available. This reduces latency and load on Redis compared to pure polling.
+
+When permits are released, a message is published to a channel. Waiters subscribe to this channel and immediately attempt to acquire when notified. The semaphore still falls back to polling as a safety net.
+
+**Trade-offs:**
+- ✅ Lower latency — waiters are notified immediately
+- ✅ Reduced Redis load — fewer polling requests
+- ⚠️ Extra connection — each waiting semaphore uses a subscriber connection
+
+To disable and use polling only:
+
+```typescript
+const RedisLayer = RedisBacking.layer(redis, {
+  pushBasedAcquireEnabled: false,
+});
 ```
 
 For multi-instance Redis deployments requiring Redlock, you'll need to implement a custom backing.
@@ -226,7 +257,7 @@ For multi-instance Redis deployments requiring Redlock, you'll need to implement
 Implement the `DistributedSemaphoreBacking` interface to use a different store:
 
 ```typescript
-import { Duration, Effect, Layer,
+import { Duration, Effect, Layer, Stream } from "effect";
 import { Backing, DistributedSemaphoreBacking } from "effect-distributed-lock";
 
 const MyCustomBacking = Layer.succeed(DistributedSemaphoreBacking, {
@@ -241,16 +272,23 @@ const MyCustomBacking = Layer.succeed(DistributedSemaphoreBacking, {
 
   getCount: (key, ttl) =>
     Effect.succeed(0), // Return number of permits currently held
+
+  // Optional: Enable push-based waiting
+  onPermitsReleased: (key) =>
+    Stream.never, // Stream that emits when permits MAY be available
 });
 ```
 
+The `onPermitsReleased` method is optional. If provided, the semaphore will use it for efficient push-based waiting instead of pure polling. The stream should emit whenever permits are released on the given key. Multiple waiters may race for permits after a notification, so `tryAcquire` is still called after each notification.
+
 ## How It Works
 
 1. **Acquire**: Atomically adds permits to a sorted set if there's room (Redis: Lua script with `ZADD`)
 2. **Keepalive**: A background fiber refreshes the TTL periodically by updating timestamps
-3. **Release**: Atomically removes permits
-4. **
-5. **
+3. **Release**: Atomically removes permits and publishes notification to waiters (Lua script with `ZREM` + `PUBLISH`)
+4. **Waiting**: Combines polling with pub/sub notifications — waiters are notified immediately when permits are released
+5. **Expiration**: Expired entries (based on TTL) are cleaned up on each operation
+6. **Crash safety**: If the holder crashes, permits expire and become available
 
 ## License
 
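The README hunks above introduce an options object as the second argument to `RedisBacking.layer` and new `DistributedSemaphore.make` options. A minimal sketch tying the two together, assuming the API exactly as shown in the README and example diffs; the key name `"nightly-report"` and prefix `"jobs:"` are made up for illustration:

```typescript
import { Duration, Effect, Schedule } from "effect";
import Redis from "ioredis";
import { DistributedSemaphore, RedisBacking } from "effect-distributed-lock";

const redis = new Redis(process.env.REDIS_URL ?? "redis://localhost:6379");

// Options object added in this release; pushBasedAcquireEnabled defaults to true.
const RedisLayer = RedisBacking.layer(redis, {
  keyPrefix: "jobs:",
  pushBasedAcquireEnabled: true,
});

const program = Effect.gen(function* () {
  // limit: 1 gives mutex behaviour, mirroring examples/concurrent.ts
  const lock = yield* DistributedSemaphore.make("nightly-report", {
    limit: 1,
    acquireRetryPolicy: Schedule.spaced(Duration.millis(500)).pipe(
      Schedule.asVoid
    ),
  });
  yield* lock.withPermits(1)(Effect.log("holding the distributed lock"));
});

Effect.runPromise(program.pipe(Effect.provide(RedisLayer)));
```
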
package/examples/concurrent.ts
ADDED

@@ -0,0 +1,111 @@
+/**
+ * Demonstrates concurrent effects competing for a distributed lock.
+ *
+ * This example runs two scenarios:
+ * 1. With push-based acquisition DISABLED (polling only)
+ * 2. With push-based acquisition ENABLED (pub/sub notifications)
+ *
+ * You'll see how push-based acquisition is faster because waiters are
+ * notified immediately when permits are released, rather than polling.
+ *
+ * Run with: bun run examples/concurrent.ts
+ * Requires REDIS_URL environment variable or local Redis at localhost:6379.
+ */
+import { Console, Duration, Effect, Schedule } from "effect";
+import Redis from "ioredis";
+import { DistributedSemaphore, RedisBacking } from "../src/index.ts";
+
+const redis = new Redis(process.env.REDIS_URL ?? "redis://localhost:6379");
+
+// Helper to create a task that competes for the lock
+const makeTask = (
+  id: number,
+  mutex: DistributedSemaphore.DistributedSemaphore
+) =>
+  Effect.gen(function* () {
+    yield* Console.log(`[Task ${id}] Starting, waiting for lock...`);
+    const startWait = Date.now();
+
+    yield* mutex.withPermits(1)(
+      Effect.gen(function* () {
+        const waitTime = Date.now() - startWait;
+        yield* Console.log(
+          `[Task ${id}] 🔒 Lock acquired! (waited ${waitTime}ms)`
+        );
+
+        // Simulate some work
+        yield* Effect.sleep(Duration.millis(200));
+
+        yield* Console.log(`[Task ${id}] 🔓 Releasing lock...`);
+      })
+    );
+
+    yield* Console.log(`[Task ${id}] Done`);
+  });
+
+// Run a scenario with the given configuration
+const runScenario = (name: string, pushEnabled: boolean) =>
+  Effect.gen(function* () {
+    yield* Console.log(`\n${"=".repeat(60)}`);
+    yield* Console.log(`${name}`);
+    yield* Console.log(`Push-based acquisition: ${pushEnabled ? "ON" : "OFF"}`);
+    yield* Console.log(`${"=".repeat(60)}\n`);
+
+    const startTime = Date.now();
+
+    // Create mutex with a unique key per scenario to avoid interference
+    const mutex = yield* DistributedSemaphore.make(
+      `concurrent-example-${pushEnabled ? "push" : "poll"}`,
+      {
+        acquireRetryPolicy: Schedule.spaced(Duration.millis(500)).pipe(
+          Schedule.asVoid
+        ),
+        limit: 1, // Mutex - only one holder at a time
+      }
+    );
+
+    // Run 3 tasks concurrently, all competing for the same lock
+    yield* Effect.all(
+      [makeTask(1, mutex), makeTask(2, mutex), makeTask(3, mutex)],
+      { concurrency: 3 }
+    );
+
+    const totalTime = Date.now() - startTime;
+    yield* Console.log(`\n⏱️ Total time: ${totalTime}ms\n`);
+  });
+
+// Run both scenarios
+const main = Effect.gen(function* () {
+  yield* Console.log("🚀 Distributed Lock Concurrency Demo");
+  yield* Console.log(
+    "Showing 3 concurrent tasks competing for a mutex (limit=1)"
+  );
+
+  // Run WITHOUT push (polling only)
+  const RedisLayerNoPush = RedisBacking.layer(redis, {
+    keyPrefix: "concurrent-demo:",
+    pushBasedAcquireEnabled: false,
+  });
+  yield* runScenario("Scenario 1: Polling Only", false).pipe(
+    Effect.provide(RedisLayerNoPush)
+  );
+
+  // Run WITH push (pub/sub notifications)
+  const RedisLayerWithPush = RedisBacking.layer(redis, {
+    keyPrefix: "concurrent-demo:",
+    pushBasedAcquireEnabled: true,
+  });
+  yield* runScenario("Scenario 2: Push-Based (Pub/Sub)", true).pipe(
+    Effect.provide(RedisLayerWithPush)
+  );
+
+  yield* Console.log("✅ Demo complete!");
+  yield* Console.log(
+    "Notice how push-based acquisition completes faster because"
+  );
+  yield* Console.log(
+    "waiters are notified immediately instead of waiting for the next poll.\n"
+  );
+}).pipe(Effect.ensuring(Effect.promise(() => redis.quit())));
+
+Effect.runPromise(main).catch(console.error);
@@ -12,7 +12,9 @@ import { DistributedSemaphore, RedisBacking } from "../src/index.ts";
|
|
|
12
12
|
const redis = new Redis(process.env.REDIS_URL ?? "redis://localhost:6379");
|
|
13
13
|
|
|
14
14
|
// Create the Redis backing layer
|
|
15
|
-
const RedisLayer = RedisBacking.layer(redis,
|
|
15
|
+
const RedisLayer = RedisBacking.layer(redis, {
|
|
16
|
+
keyPrefix: "example:",
|
|
17
|
+
});
|
|
16
18
|
|
|
17
19
|
// Example 1: Using withPermits for a critical section (mutex behavior)
|
|
18
20
|
const example1 = Effect.gen(function* () {
|
package/package.json
CHANGED
package/src/Backing.ts
CHANGED

@@ -1,4 +1,4 @@
-import { Context, Data, Duration, Effect } from "effect";
+import { Context, Data, Duration, Effect, Stream } from "effect";
 
 // =============================================================================
 // Errors
@@ -24,22 +24,12 @@ export class SemaphoreBackingError extends Data.TaggedError(
 
 /**
  * Low-level backing store interface for distributed semaphore operations.
- * Implementations handle the actual storage (Redis, etc.)
- *
- * The semaphore uses a sorted set model where:
- * - Each permit holder is stored with their acquisition timestamp as the score
- * - Expired entries are cleaned up automatically
- * - Multiple permits can be acquired atomically
 */
 export interface DistributedSemaphoreBacking {
   /**
    * Try to acquire `permits` from a semaphore with the given `limit`.
-   * Returns true if acquired, false if not enough permits available.
    *
-   *
-   * 1. Clean up expired entries (based on TTL)
-   * 2. Check if there's room: currentCount + permits <= limit
-   * 3. If so, add the permits with current timestamp
+   * @returns `true` if acquired, `false` if not enough permits available.
    */
   readonly tryAcquire: (
     key: string,
@@ -50,8 +40,9 @@ export interface DistributedSemaphoreBacking {
   ) => Effect.Effect<boolean, SemaphoreBackingError>;
 
   /**
-   * Release `permits` held by
-   *
+   * Release `permits` held by the given holder.
+   *
+   * @returns The number of permits actually released.
    */
   readonly release: (
     key: string,
@@ -60,8 +51,9 @@ export interface DistributedSemaphoreBacking {
   ) => Effect.Effect<number, SemaphoreBackingError>;
 
   /**
-   * Refresh the TTL on permits
-   *
+   * Refresh the TTL on permits held by this holder.
+   *
+   * @returns `true` if refreshed, `false` if permits were lost (e.g., expired).
    */
   readonly refresh: (
     key: string,
@@ -73,12 +65,24 @@ export interface DistributedSemaphoreBacking {
 
   /**
    * Get the number of permits currently held (in use).
-   * Available permits = limit - getCount().
    */
   readonly getCount: (
     key: string,
     ttl: Duration.Duration
   ) => Effect.Effect<number, SemaphoreBackingError>;
+
+  /**
+   * Optional: Stream of notifications when permits MAY be available.
+   *
+   * If provided, the semaphore layer uses this for efficient waiting instead
+   * of polling. The stream emits a signal whenever permits are released.
+   *
+   * Notes:
+   * - Multiple waiters may race for permits after a notification
+   * - The semaphore still calls `tryAcquire` after each notification
+   * - Implementations should handle reconnection internally (hence why the stream does not have an error type)
+   */
+  readonly onPermitsReleased?: (key: string) => Stream.Stream<void>;
 }
 
 export const DistributedSemaphoreBacking =
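The `onPermitsReleased` hook added to `DistributedSemaphoreBacking` above is the only new requirement for a backing that wants push-based waiting. A hedged sketch of what a non-Redis backing might return from it, using an in-process Effect `PubSub`; the `hubs`, `hubFor`, and `notifyReleased` helpers are hypothetical and not part of the package:

```typescript
import { Effect, PubSub, Stream } from "effect";

// Hypothetical per-key notification hub a custom backing could maintain.
const hubs = new Map<string, PubSub.PubSub<void>>();

const hubFor = (key: string): Effect.Effect<PubSub.PubSub<void>> =>
  Effect.suspend(() => {
    const existing = hubs.get(key);
    if (existing !== undefined) return Effect.succeed(existing);
    return PubSub.unbounded<void>().pipe(
      Effect.tap((hub) => Effect.sync(() => hubs.set(key, hub)))
    );
  });

// A backing's `release` would call this after removing permits.
const notifyReleased = (key: string): Effect.Effect<void> =>
  hubFor(key).pipe(
    Effect.flatMap((hub) => PubSub.publish(hub, void 0)),
    Effect.asVoid
  );

// Candidate implementation of the optional hook: a Stream<void> that emits
// whenever notifyReleased fires for the same key.
const onPermitsReleased = (key: string): Stream.Stream<void> =>
  Stream.unwrap(hubFor(key).pipe(Effect.map((hub) => Stream.fromPubSub(hub))));
```
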
package/src/DistributedSemaphore.ts
CHANGED

@@ -3,9 +3,11 @@ import {
   Duration,
   Effect,
   Fiber,
+  Function,
   Option,
   Schedule,
   Scope,
+  Stream,
 } from "effect";
 import {
   DistributedSemaphoreBacking,
@@ -376,23 +378,76 @@ export const make = (
       identifier,
       acquiredExternally: options?.acquiredExternally,
     };
-
-
-
-
-
-
-
-
-
-
-    Effect.
-
-
-
+
+    // We use a semaphore to ensure that only one acquire attempt is made at a time.
+    // With `withPermitsIfAvailable`, if both the poll-based and push-based attempts "trigger" at the same time,
+    // one will succeed and the other will simple be a no-op.
+    const acquireSemaphore = yield* Effect.makeSemaphore(1);
+
+    const pushBasedAcquireEnabled = backing.onPermitsReleased
+      ? true
+      : false;
+
+    const pollBasedAcquire = Effect.gen(function* () {
+      const maybeAcquired = yield* tryTake(permits, resolvedOptions).pipe(
+        // only apply the semaphore if push-based acquire is supported
+        pushBasedAcquireEnabled
+          ? Function.compose(
+              acquireSemaphore.withPermitsIfAvailable(1),
+              Effect.map(Option.flatten)
+            )
+          : Function.identity
+      );
+      if (Option.isNone(maybeAcquired)) {
+        return yield* new NotYetAcquiredError();
+      }
+      return maybeAcquired.value;
+    }).pipe(
+      Effect.retry({
+        while: (e) => e._tag === "NotYetAcquiredError",
+        schedule: acquireRetryPolicy,
+      }),
+      Effect.catchTag("NotYetAcquiredError", () =>
+        Effect.dieMessage(
+          "Invariant violated: `take` should never return `NotYetAcquiredError` " +
+            "since it should be caught by the retry which should retry forever until permits are acquired"
+        )
      )
-)
-
+    );
+
+    if (!pushBasedAcquireEnabled) {
+      return yield* pollBasedAcquire;
+    }
+
+    // Push-based acquire: run both poll-based and push-based acquire in parallel, and return the first one to complete
+    const pushBasedAcquire = backing.onPermitsReleased
+      ? Effect.gen(function* () {
+          if (!backing.onPermitsReleased) {
+            // SAFETY: We know that onPermitsReleased is provided because we checked it above
+            return yield* Effect.dieMessage(
+              "Invariant violated: `onPermitsReleased` is not provided"
+            );
+          }
+          return yield* backing.onPermitsReleased(key).pipe(
+            Stream.runFoldWhileEffect(
+              Option.none<
+                Fiber.Fiber<never, LockLostError | SemaphoreBackingError>
+              >(),
+              Option.isNone, // keep folding while we haven't acquired
+              () =>
+                tryTake(permits, resolvedOptions).pipe(
+                  acquireSemaphore.withPermitsIfAvailable(1),
+                  Effect.map(Option.flatten)
+                )
+            ),
+            Effect.map(Option.getOrThrow)
+          );
+        })
+      : Effect.never;
+
+    // first to succeed (acquire permits) wins
+    return yield* Effect.race(pollBasedAcquire, pushBasedAcquire);
+  });
 
 // Convenience: acquire permits, run effect, release when done
 const withPermits =
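The comment added above explains that the poll-based and push-based paths share a local one-permit semaphore so simultaneous triggers cannot double-acquire. A standalone toy illustration of that `withPermitsIfAvailable` guard (not package code; the labels are arbitrary):

```typescript
import { Effect, Option } from "effect";

const demo = Effect.gen(function* () {
  // One local permit shared by both attempts, as in the acquire logic above.
  const gate = yield* Effect.makeSemaphore(1);

  const attempt = (label: string) =>
    gate
      .withPermitsIfAvailable(1)(
        Effect.succeed(label).pipe(Effect.delay("10 millis"))
      )
      .pipe(
        Effect.map(
          Option.match({
            onNone: () => `${label}: gate busy, attempt is a no-op`,
            onSome: (winner) => `${winner}: ran inside the gate`,
          })
        )
      );

  // Both fire at the same time; exactly one runs, the other no-ops.
  const results = yield* Effect.all([attempt("poll"), attempt("push")], {
    concurrency: 2,
  });
  yield* Effect.log(results.join(" | "));
});

Effect.runPromise(demo);
```
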
package/src/RedisBacking.ts
CHANGED

@@ -1,4 +1,4 @@
-import { Duration, Effect, Layer } from "effect";
+import { Duration, Effect, Layer, Schedule, Stream } from "effect";
 import { Redis } from "ioredis";
 import {
   DistributedSemaphoreBacking,
@@ -54,26 +54,37 @@ end
 /**
  * Lua script for atomic release.
  *
- * Removes all permits held by this holder.
+ * Removes all permits held by this holder and optionally publishes a notification.
 *
 * Arguments:
 * - KEYS[1]: the semaphore key
+ * - KEYS[2]: the release notification channel
 * - ARGV[1]: permits to release
 * - ARGV[2]: holderId
+ * - ARGV[3]: shouldPublish (1 = publish, 0 = don't publish)
 *
 * Returns the number of permits released.
 */
 const RELEASE_SCRIPT = `
 local key = KEYS[1]
+local channel = KEYS[2]
 local permits = tonumber(ARGV[1])
 local holderId = ARGV[2]
+local shouldPublish = tonumber(ARGV[3]) == 1
 local args = {}
 
 for i = 0, permits - 1 do
   table.insert(args, holderId .. '_' .. i)
 end
 
-
+local released = redis.call('zrem', key, unpack(args))
+
+-- Notify waiters that permits may be available
+if released > 0 and shouldPublish then
+  redis.call('publish', channel, released)
+end
+
+return released
 `;
 
 /**
@@ -142,6 +153,33 @@ redis.call('zremrangebyscore', key, '-inf', expiredTimestamp)
 return redis.call('zcard', key)
 `;
 
+export interface RedisBackingOptions {
+  /**
+   * Prefix for all keys in Redis.
+   * @default "semaphore:"
+   */
+  readonly keyPrefix?: string;
+
+  /**
+   * Enable push-based acquisition using Redis pub/sub.
+   *
+   * When enabled, waiters subscribe to a channel and get notified immediately
+   * when permits are released, instead of polling. This reduces latency and
+   * load on Redis.
+   *
+   * Requires an additional Redis connection per waiting semaphore.
+   *
+   * @default true
+   */
+  readonly pushBasedAcquireEnabled?: boolean;
+
+  /**
+   * How often to retry the stream of notifications when permits are released.
+   * @default Schedule.forever
+   */
+  readonly pushStreamRetrySchedule?: Schedule.Schedule<void>;
+}
+
 /**
  * Create a Redis-backed distributed semaphore backing layer.
 *
@@ -151,13 +189,18 @@ return redis.call('zcard', key)
 * For multi-instance Redis, consider implementing a Redlock-based backing.
 *
 * @param redis - An ioredis client instance (single instance, not cluster)
- * @param
+ * @param options - Configuration options
 */
 export const layer = (
   redis: Redis,
-
+  options: RedisBackingOptions = {}
 ): Layer.Layer<DistributedSemaphoreBacking> => {
+  const keyPrefix = options.keyPrefix ?? "semaphore:";
+  const pushBasedAcquireEnabled = options.pushBasedAcquireEnabled ?? true;
+  const pushStreamRetrySchedule =
+    options.pushStreamRetrySchedule ?? Schedule.forever.pipe(Schedule.asVoid);
   const prefixKey = (key: string) => `${keyPrefix}${key}`;
+  const releaseChannel = (key: string) => `${keyPrefix}${key}:released`;
 
   const tryAcquire = (
     key: string,
@@ -194,10 +237,12 @@ export const layer = (
       try: async () => {
         const result = await redis.eval(
           RELEASE_SCRIPT,
-
+          2,
           prefixKey(key),
+          releaseChannel(key),
           permits.toString(),
-          holderId
+          holderId,
+          pushBasedAcquireEnabled ? "1" : "0"
         );
         return result as number;
       },
@@ -251,10 +296,55 @@ export const layer = (
          new SemaphoreBackingError({ operation: "getCount", cause }),
    });
 
+  // Stream that emits when permits are released on a given key.
+  // Uses Redis pub/sub with a dedicated subscriber connection.
+  const onPermitsReleased = (key: string): Stream.Stream<void> =>
+    Stream.asyncPush<void, SemaphoreBackingError>((emit) => {
+      const channel = releaseChannel(key);
+
+      return Effect.acquireRelease(
+        Effect.gen(function* () {
+          // Create a dedicated subscriber connection
+          const subscriber = redis.duplicate();
+
+          // Set up message handler before subscribing
+          const messageHandler = (ch: string, _message: string) => {
+            if (ch === channel) {
+              emit.single(void 0);
+            }
+          };
+          subscriber.on("message", messageHandler);
+
+          // Subscribe to the channel
+          yield* Effect.tryPromise({
+            try: () => subscriber.subscribe(channel),
+            catch: (cause) =>
+              new SemaphoreBackingError({ operation: "subscribe", cause }),
+          });
+
+          return { subscriber, messageHandler };
+        }),
+        ({ subscriber, messageHandler }) =>
+          Effect.promise(async () => {
+            subscriber.off("message", messageHandler);
+            await subscriber.unsubscribe(channel);
+            subscriber.disconnect();
+          })
+      );
+    }).pipe(
+      Stream.retry(pushStreamRetrySchedule),
+      Stream.catchTag("SemaphoreBackingError", () =>
+        Stream.dieMessage(
+          "Invariant violated: `onPermitsReleased` should never error because it should be retried forever"
+        )
+      )
+    );
+
   return Layer.succeed(DistributedSemaphoreBacking, {
     tryAcquire,
     release,
     refresh,
     getCount,
+    onPermitsReleased: pushBasedAcquireEnabled ? onPermitsReleased : undefined,
   });
 };
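The RELEASE_SCRIPT and `onPermitsReleased` changes above form one pub/sub round trip: release publishes the freed count to `<keyPrefix><key>:released`, and a duplicated subscriber connection turns each message into a retry signal. A plain ioredis sketch of that round trip, assuming the default `"semaphore:"` prefix; the key `"reports"` is made up:

```typescript
import Redis from "ioredis";

const redis = new Redis(process.env.REDIS_URL ?? "redis://localhost:6379");
const subscriber = redis.duplicate(); // dedicated connection, as in the backing
const channel = "semaphore:reports:released";

async function main(): Promise<void> {
  subscriber.on("message", (ch, released) => {
    if (ch === channel) {
      // In the backing this becomes one `void` emission on the
      // onPermitsReleased stream, prompting another tryAcquire.
      console.log(`permits released (${released}); retry acquisition now`);
    }
  });
  await subscriber.subscribe(channel);

  // Stand-in for what the Lua release script does after ZREM succeeds.
  await redis.publish(channel, "1");

  // Give the message a moment to arrive, then clean up.
  await new Promise((resolve) => setTimeout(resolve, 100));
  await subscriber.quit();
  await redis.quit();
}

main().catch(console.error);
```
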
package/redis-semaphore/.fossa.yml
DELETED

@@ -1,14 +0,0 @@
-# Generated by FOSSA CLI (https://github.com/fossas/fossa-cli)
-# Visit https://fossa.com to learn more
-
-version: 2
-cli:
-  server: https://app.fossa.com
-  fetcher: custom
-  project: git@github.com:swarthy/redis-semaphore.git
-analyze:
-  modules:
-    - name: .
-      type: npm
-      target: .
-      path: .
package/redis-semaphore/.github/workflows/branches.yml
DELETED

@@ -1,39 +0,0 @@
-name: CI (push)
-
-on:
-  push:
-    branches:
-      - master
-  workflow_dispatch:
-
-jobs:
-  integration-test:
-    runs-on: ubuntu-latest
-
-    strategy:
-      matrix:
-        node-version: [18.x, 20.x, 22.x]
-
-    env:
-      COVERALLS_REPO_TOKEN: '${{ secrets.COVERALLS_REPO_TOKEN }}'
-      COVERALLS_GIT_BRANCH: '${{ github.ref }}'
-
-    steps:
-      - uses: actions/checkout@v4
-      - name: Enable Corepack
-        run: corepack enable
-
-      - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v4
-        with:
-          node-version: ${{ matrix.node-version }}
-          cache: 'yarn'
-
-      - run: yarn install --immutable
-
-      - run: docker compose up -d redis1 redis2 redis3
-      - run: docker compose run waiter
-
-      - run: yarn build
-      - run: yarn lint
-      - run: yarn test-ci-with-coverage
package/redis-semaphore/.github/workflows/pull-requests.yml
DELETED

@@ -1,35 +0,0 @@
-name: CI (PR)
-
-on:
-  pull_request:
-    branches:
-      - master
-  workflow_dispatch:
-
-jobs:
-  integration-test:
-    runs-on: ubuntu-latest
-
-    strategy:
-      matrix:
-        node-version: [18.x, 20.x, 22.x]
-
-    steps:
-      - uses: actions/checkout@v4
-      - name: Enable Corepack
-        run: corepack enable
-
-      - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v4
-        with:
-          node-version: ${{ matrix.node-version }}
-          cache: 'yarn'
-
-      - run: yarn install --immutable
-
-      - run: docker compose up -d redis1 redis2 redis3
-      - run: docker compose run waiter
-
-      - run: yarn build
-      - run: yarn lint
-      - run: yarn test