effect-distributed-lock 0.0.4 → 0.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -12,6 +12,7 @@ It's like the built in `Effect.Semaphore`, but asynchronously distributed across
12
12
  - **Scope-based resource management** — permits are automatically released when the scope closes
13
13
  - **Automatic TTL refresh** — keeps permits alive while held, prevents deadlocks if holder crashes
14
14
  - **Pluggable backends** — ships with Redis (single-instance), easy to implement others
15
+ - **Push-based waiting** — uses pub/sub for efficient notification when permits become available (optional, with polling fallback)
15
16
  - **Configurable retry policies** — control polling interval, TTL, and backing failure retry behavior
16
17
  - **Type-safe errors** — tagged errors for precise error handling
17
18
 
@@ -34,7 +35,7 @@ import Redis from "ioredis";
34
35
  import { DistributedSemaphore, RedisBacking } from "effect-distributed-lock";
35
36
 
36
37
  const redis = new Redis(process.env.REDIS_URL);
37
- const RedisLayer = RedisBacking.layer(redis);
38
+ const RedisLayer = RedisBacking.layer(redis, { keyPrefix: "my-app:" });
38
39
 
39
40
  const program = Effect.gen(function* () {
40
41
  // Create a semaphore that allows 5 concurrent operations
@@ -112,6 +113,72 @@ yield* Effect.scoped(
112
113
 
113
114
  Both `take` and `tryTake` return the keepalive fiber that refreshes the permit TTL. Errors from the keepalive (losing permits or backing store failure) are propagated through the fiber.
114
115
 
116
+ ### Acquire Options
117
+
118
+ All acquire methods (`withPermits`, `withPermitsIfAvailable`, `take`, `tryTake`) accept an optional second parameter for advanced use cases:
119
+
120
+ ```typescript
121
+ yield* myEffect.pipe(sem.withPermits(1, { identifier: "my-custom-id" }));
122
+ ```
123
+
124
+ | Option | Type | Default | Description |
125
+ | ------------------- | --------- | -------------------- | ------------------------------------------------ |
126
+ | `identifier` | `string` | `crypto.randomUUID()` | Unique ID for this permit holder |
127
+ | `acquiredExternally` | `boolean` | `false` | Assume permits already held, use refresh to verify |
128
+
129
+ #### Custom Identifiers
130
+
131
+ By default, a random UUID is generated for each acquire. Override this for:
132
+ - **Debugging/observability**: Use meaningful identifiers to trace lock holders
133
+ - **Cross-process handoff**: Share identifiers between processes
134
+
135
+ ```typescript
136
+ // Custom identifier for debugging
137
+ yield* myEffect.pipe(sem.withPermits(1, { identifier: "worker-1-job-123" }));
138
+ ```
139
+
140
+ ⚠️ **Warning**: Identifiers must be unique across concurrent holders. Using the same identifier from different processes will cause them to be treated as the same holder.
141
+
142
+ #### Resuming After Crash (`acquiredExternally`)
143
+
144
+ Use `acquiredExternally: true` to resume ownership of permits that were acquired previously but not properly released (e.g., after a process crash). This uses `refresh` instead of `acquire` to verify ownership.
145
+
146
+ ```typescript
147
+ // Store identifier persistently before doing work
148
+ const identifier = crypto.randomUUID();
149
+ yield* saveToDatabase({ jobId, lockIdentifier: identifier });
150
+
151
+ yield* Effect.gen(function* () {
152
+ yield* doWork();
153
+ yield* deleteFromDatabase(jobId);
154
+ }).pipe(sem.withPermits(1, { identifier }));
155
+
156
+ // === Later, after crash recovery ===
157
+ const { lockIdentifier } = yield* loadFromDatabase(jobId);
158
+
159
+ // Check if we still hold the lock (TTL hasn't expired)
160
+ const result = yield* Effect.gen(function* () {
161
+ yield* resumeWork();
162
+ yield* deleteFromDatabase(jobId);
163
+ }).pipe(
164
+ sem.withPermitsIfAvailable(1, {
165
+ identifier: lockIdentifier,
166
+ acquiredExternally: true
167
+ })
168
+ );
169
+
170
+ if (Option.isNone(result)) {
171
+ // Lock expired, need to re-acquire normally
172
+ yield* restartWork().pipe(sem.withPermits(1));
173
+ }
174
+ ```
175
+
176
+ This is useful for:
177
+ - **Crash recovery**: Resume work if you crashed while holding permits
178
+ - **Process restart**: Check if your previous lock is still valid
179
+
180
+ ⚠️ **Unsafe**: If the identifier is wrong or the lock expired, `tryTake`/`withPermitsIfAvailable` return `None`, while `take`/`withPermits` keep retrying forever (waiting for permits that will never come).
181
+
115
182
  #### `currentCount` — Check held permits
116
183
 
117
184
  ```typescript
@@ -150,7 +217,37 @@ import { RedisBacking } from "effect-distributed-lock";
150
217
 
151
218
  // Single Redis instance
152
219
  const redis = new Redis("redis://localhost:6379");
153
- const RedisLayer = RedisBacking.layer(redis, "my-prefix:");
220
+ const RedisLayer = RedisBacking.layer(redis, {
221
+ keyPrefix: "my-prefix:",
222
+ pushBasedAcquireEnabled: true, // default: true
223
+ });
224
+ ```
225
+
226
+ ### Configuration Options
227
+
228
+ | Option | Type | Default | Description |
229
+ | -------------------------- | ---------------- | ------------------ | ---------------------------------------------------- |
230
+ | `keyPrefix` | `string` | `"semaphore:"` | Prefix for all Redis keys |
231
+ | `pushBasedAcquireEnabled` | `boolean` | `true` | Use pub/sub for efficient waiting (see below) |
232
+ | `pushStreamRetrySchedule` | `Schedule<void>` | `Schedule.forever` | Retry schedule for pub/sub stream errors |
233
+
234
+ ### Push-Based Acquisition
235
+
236
+ By default, the Redis backing uses pub/sub to notify waiters when permits become available. This reduces latency and load on Redis compared to pure polling.
237
+
238
+ When permits are released, a message is published to a channel. Waiters subscribe to this channel and immediately attempt to acquire when notified. The semaphore still falls back to polling as a safety net.
239
+
240
+ **Trade-offs:**
241
+ - ✅ Lower latency — waiters are notified immediately
242
+ - ✅ Reduced Redis load — fewer polling requests
243
+ - ⚠️ Extra connection — each waiting semaphore uses a subscriber connection
244
+
245
+ To disable and use polling only:
246
+
247
+ ```typescript
248
+ const RedisLayer = RedisBacking.layer(redis, {
249
+ pushBasedAcquireEnabled: false,
250
+ });
154
251
  ```
155
252
 
156
253
  For multi-instance Redis deployments requiring Redlock, you'll need to implement a custom backing.
@@ -160,7 +257,7 @@ For multi-instance Redis deployments requiring Redlock, you'll need to implement
160
257
  Implement the `DistributedSemaphoreBacking` interface to use a different store:
161
258
 
162
259
  ```typescript
163
- import { Duration, Effect, Layer, Option } from "effect";
260
+ import { Duration, Effect, Layer, Stream } from "effect";
164
261
  import { Backing, DistributedSemaphoreBacking } from "effect-distributed-lock";
165
262
 
166
263
  const MyCustomBacking = Layer.succeed(DistributedSemaphoreBacking, {
@@ -175,16 +272,23 @@ const MyCustomBacking = Layer.succeed(DistributedSemaphoreBacking, {
175
272
 
176
273
  getCount: (key, ttl) =>
177
274
  Effect.succeed(0), // Return number of permits currently held
275
+
276
+ // Optional: Enable push-based waiting
277
+ onPermitsReleased: (key) =>
278
+ Stream.never, // Stream that emits when permits MAY be available
178
279
  });
179
280
  ```
180
281
 
282
+ The `onPermitsReleased` method is optional. If provided, the semaphore will use it for efficient push-based waiting instead of pure polling. The stream should emit whenever permits are released on the given key. Multiple waiters may race for permits after a notification, so `tryAcquire` is still called after each notification.
283
+
181
284
  ## How It Works
182
285
 
183
286
  1. **Acquire**: Atomically adds permits to a sorted set if there's room (Redis: Lua script with `ZADD`)
184
287
  2. **Keepalive**: A background fiber refreshes the TTL periodically by updating timestamps
185
- 3. **Release**: Atomically removes permits from the sorted set (Lua script with `ZREM`)
186
- 4. **Expiration**: Expired entries (based on TTL) are cleaned up on each operation
187
- 5. **Crash safety**: If the holder crashes, permits expire and become available
288
+ 3. **Release**: Atomically removes permits and publishes notification to waiters (Lua script with `ZREM` + `PUBLISH`)
289
+ 4. **Waiting**: Combines polling with pub/sub notifications — waiters are notified immediately when permits are released
290
+ 5. **Expiration**: Expired entries (based on TTL) are cleaned up on each operation
291
+ 6. **Crash safety**: If the holder crashes, permits expire and become available
188
292
 
189
293
  ## License
190
294
 
package/examples/index.ts CHANGED
@@ -12,7 +12,9 @@ import { DistributedSemaphore, RedisBacking } from "../src/index.ts";
12
12
  const redis = new Redis(process.env.REDIS_URL ?? "redis://localhost:6379");
13
13
 
14
14
  // Create the Redis backing layer
15
- const RedisLayer = RedisBacking.layer(redis, "example:");
15
+ const RedisLayer = RedisBacking.layer(redis, {
16
+ keyPrefix: "example:",
17
+ });
16
18
 
17
19
  // Example 1: Using withPermits for a critical section (mutex behavior)
18
20
  const example1 = Effect.gen(function* () {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "effect-distributed-lock",
3
- "version": "0.0.4",
3
+ "version": "0.0.6",
4
4
  "description": "A distributed semaphore library for Effect with pluggable backends",
5
5
  "license": "MIT",
6
6
  "repository": {
package/src/Backing.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { Context, Data, Duration, Effect } from "effect";
1
+ import { Context, Data, Duration, Effect, Stream } from "effect";
2
2
 
3
3
  // =============================================================================
4
4
  // Errors
@@ -24,22 +24,12 @@ export class SemaphoreBackingError extends Data.TaggedError(
24
24
 
25
25
  /**
26
26
  * Low-level backing store interface for distributed semaphore operations.
27
- * Implementations handle the actual storage (Redis, etc.)
28
- *
29
- * The semaphore uses a sorted set model where:
30
- * - Each permit holder is stored with their acquisition timestamp as the score
31
- * - Expired entries are cleaned up automatically
32
- * - Multiple permits can be acquired atomically
33
27
  */
34
28
  export interface DistributedSemaphoreBacking {
35
29
  /**
36
30
  * Try to acquire `permits` from a semaphore with the given `limit`.
37
- * Returns true if acquired, false if not enough permits available.
38
31
  *
39
- * The implementation should:
40
- * 1. Clean up expired entries (based on TTL)
41
- * 2. Check if there's room: currentCount + permits <= limit
42
- * 3. If so, add the permits with current timestamp
32
+ * @returns `true` if acquired, `false` if not enough permits available.
43
33
  */
44
34
  readonly tryAcquire: (
45
35
  key: string,
@@ -50,8 +40,9 @@ export interface DistributedSemaphoreBacking {
50
40
  ) => Effect.Effect<boolean, SemaphoreBackingError>;
51
41
 
52
42
  /**
53
- * Release `permits` held by this holder.
54
- * Returns the number of permits actually released.
43
+ * Release `permits` held by the given holder.
44
+ *
45
+ * @returns The number of permits actually released.
55
46
  */
56
47
  readonly release: (
57
48
  key: string,
@@ -60,8 +51,9 @@ export interface DistributedSemaphoreBacking {
60
51
  ) => Effect.Effect<number, SemaphoreBackingError>;
61
52
 
62
53
  /**
63
- * Refresh the TTL on permits we hold.
64
- * Returns true if refreshed, false if permits were lost.
54
+ * Refresh the TTL on permits held by this holder.
55
+ *
56
+ * @returns `true` if refreshed, `false` if permits were lost (e.g., expired).
65
57
  */
66
58
  readonly refresh: (
67
59
  key: string,
@@ -73,12 +65,24 @@ export interface DistributedSemaphoreBacking {
73
65
 
74
66
  /**
75
67
  * Get the number of permits currently held (in use).
76
- * Available permits = limit - getCount().
77
68
  */
78
69
  readonly getCount: (
79
70
  key: string,
80
71
  ttl: Duration.Duration
81
72
  ) => Effect.Effect<number, SemaphoreBackingError>;
73
+
74
+ /**
75
+ * Optional: Stream of notifications when permits MAY be available.
76
+ *
77
+ * If provided, the semaphore layer uses this for efficient waiting instead
78
+ * of polling. The stream emits a signal whenever permits are released.
79
+ *
80
+ * Notes:
81
+ * - Multiple waiters may race for permits after a notification
82
+ * - The semaphore still calls `tryAcquire` after each notification
83
+ * - Implementations should handle reconnection internally (which is why the stream has no error type)
84
+ */
85
+ readonly onPermitsReleased?: (key: string) => Stream.Stream<void>;
82
86
  }
83
87
 
84
88
  export const DistributedSemaphoreBacking =
@@ -3,9 +3,11 @@ import {
3
3
  Duration,
4
4
  Effect,
5
5
  Fiber,
6
+ Function,
6
7
  Option,
7
8
  Schedule,
8
9
  Scope,
10
+ Stream,
9
11
  } from "effect";
10
12
  import {
11
13
  DistributedSemaphoreBacking,
@@ -40,9 +42,9 @@ export interface DistributedSemaphoreConfig {
40
42
 
41
43
  /**
42
44
  * How often to poll when waiting to acquire permits.
43
- * @default 100ms
45
+ * @default Schedule.spaced(Duration.millis(100))
44
46
  */
45
- readonly acquireRetryInterval?: Duration.DurationInput;
47
+ readonly acquireRetryPolicy?: Schedule.Schedule<void>;
46
48
 
47
49
  /**
48
50
  * Retry policy when a backing failure occurs.
@@ -50,16 +52,58 @@ export interface DistributedSemaphoreConfig {
50
52
  * - Trying to acquire permits
51
53
  * - Refreshing the TTL
52
54
  * - Releasing permits
55
+ * @default Schedule.recurs(3)
53
56
  */
54
57
  readonly backingFailureRetryPolicy?: Schedule.Schedule<void>;
55
58
  }
56
59
 
57
60
  const DEFAULT_LIMIT = 1;
58
61
  const DEFAULT_TTL = Duration.seconds(30);
59
- const DEFAULT_ACQUIRE_RETRY_INTERVAL = Duration.millis(100);
60
- const DEFAULT_FAILURE_RETRY_POLICY = Schedule.spaced(
61
- DEFAULT_ACQUIRE_RETRY_INTERVAL
62
- ).pipe(Schedule.asVoid);
62
+ const DEFAULT_ACQUIRE_RETRY_POLICY = Schedule.spaced(Duration.millis(100)).pipe(
63
+ Schedule.asVoid
64
+ );
65
+ const DEFAULT_FAILURE_RETRY_POLICY = Schedule.recurs(3).pipe(Schedule.asVoid);
66
+
67
+ // =============================================================================
68
+ // Acquire Options
69
+ // =============================================================================
70
+
71
+ /**
72
+ * Options for acquire operations (take, tryTake, withPermits, etc.)
73
+ */
74
+ export interface AcquireOptions {
75
+ /**
76
+ * Unique identifier for this permit holder.
77
+ *
78
+ * By default, a random UUID is generated per-acquire. Override this if you need:
79
+ * - Predictable identifiers for debugging/observability
80
+ * - Cross-process lock handoff (acquire in one process, release in another)
81
+ *
82
+ * ⚠️ **Warning**: Must be unique across concurrent holders, otherwise locks with the same
83
+ * identifier may be treated as the same holder.
84
+ *
85
+ * @default crypto.randomUUID()
86
+ */
87
+ readonly identifier?: string;
88
+
89
+ /**
90
+ * If true, assumes the permits were already acquired externally with the given identifier.
91
+ * Instead of acquiring, uses refresh to verify ownership.
92
+ *
93
+ * **Requires `identifier` to be provided.**
94
+ *
95
+ * This is useful for cross-process lock handoff:
96
+ * 1. Process A acquires permits with a known identifier
97
+ * 2. Process A passes the identifier to Process B (via message queue, etc.)
98
+ * 3. Process B calls take/withPermits with `{ identifier, acquiredExternally: true }`
99
+ * 4. Process B now owns the permits (refreshing and releasing)
100
+ *
101
+ * ⚠️ **Unsafe**: If the identifier is wrong or the lock expired, `tryTake`/`withPermitsIfAvailable`
+ * return `None`, while `take`/`withPermits` retry forever waiting for permits that will never come.
102
+ *
103
+ * @default false
104
+ */
105
+ readonly acquiredExternally?: boolean;
106
+ }
63
107
 
64
108
  // =============================================================================
65
109
  // Distributed Semaphore Interface
@@ -107,7 +151,8 @@ export interface DistributedSemaphore {
107
151
  * The permit TTL is refreshed automatically while the effect runs.
108
152
  */
109
153
  readonly withPermits: (
110
- permits: number
154
+ permits: number,
155
+ options?: AcquireOptions
111
156
  ) => <A, E, R>(
112
157
  effect: Effect.Effect<A, E, R>
113
158
  ) => Effect.Effect<A, E | LockLostError | SemaphoreBackingError, R>;
@@ -118,7 +163,8 @@ export interface DistributedSemaphore {
118
163
  * None if permits were not available.
119
164
  */
120
165
  readonly withPermitsIfAvailable: (
121
- permits: number
166
+ permits: number,
167
+ options?: AcquireOptions
122
168
  ) => <A, E, R>(
123
169
  effect: Effect.Effect<A, E, R>
124
170
  ) => Effect.Effect<
@@ -136,7 +182,8 @@ export interface DistributedSemaphore {
136
182
  * When the scope closes, the fiber is interrupted and permits are released.
137
183
  */
138
184
  readonly take: (
139
- permits: number
185
+ permits: number,
186
+ options?: AcquireOptions
140
187
  ) => Effect.Effect<
141
188
  Fiber.Fiber<never, LockLostError | SemaphoreBackingError>,
142
189
  LockLostError | SemaphoreBackingError,
@@ -152,7 +199,8 @@ export interface DistributedSemaphore {
152
199
  * When the scope closes, the fiber is interrupted and permits are released.
153
200
  */
154
201
  readonly tryTake: (
155
- permits: number
202
+ permits: number,
203
+ options?: AcquireOptions
156
204
  ) => Effect.Effect<
157
205
  Option.Option<Fiber.Fiber<never, LockLostError | SemaphoreBackingError>>,
158
206
  LockLostError | SemaphoreBackingError,
@@ -174,7 +222,7 @@ type FullyResolvedConfig = {
174
222
  limit: number;
175
223
  ttl: Duration.Duration;
176
224
  refreshInterval: Duration.Duration;
177
- acquireRetryInterval: Duration.Duration;
225
+ acquireRetryPolicy: Schedule.Schedule<void>;
178
226
  backingFailureRetryPolicy: Schedule.Schedule<void>;
179
227
  };
180
228
 
@@ -186,9 +234,9 @@ function fullyResolveConfig(
186
234
  const refreshInterval = config.refreshInterval
187
235
  ? Duration.decode(config.refreshInterval)
188
236
  : Duration.millis(Duration.toMillis(ttl) / 3);
189
- const acquireRetryInterval = config.acquireRetryInterval
190
- ? Duration.decode(config.acquireRetryInterval)
191
- : DEFAULT_ACQUIRE_RETRY_INTERVAL;
237
+ const acquireRetryPolicy = config.acquireRetryPolicy
238
+ ? config.acquireRetryPolicy
239
+ : DEFAULT_ACQUIRE_RETRY_POLICY;
192
240
  const backingFailureRetryPolicy = config.backingFailureRetryPolicy
193
241
  ? config.backingFailureRetryPolicy
194
242
  : DEFAULT_FAILURE_RETRY_POLICY;
@@ -197,7 +245,7 @@ function fullyResolveConfig(
197
245
  limit,
198
246
  ttl,
199
247
  refreshInterval,
200
- acquireRetryInterval,
248
+ acquireRetryPolicy,
201
249
  backingFailureRetryPolicy,
202
250
  };
203
251
  }
@@ -228,15 +276,12 @@ export const make = (
228
276
  Effect.gen(function* () {
229
277
  const backing = yield* DistributedSemaphoreBacking;
230
278
 
231
- // Generate unique holder ID for this instance
232
- const holderId = crypto.randomUUID();
233
-
234
279
  // Resolve config with defaults
235
280
  const {
236
281
  limit,
237
282
  ttl,
238
283
  refreshInterval,
239
- acquireRetryInterval,
284
+ acquireRetryPolicy,
240
285
  backingFailureRetryPolicy,
241
286
  } = fullyResolveConfig(config);
242
287
 
@@ -253,12 +298,13 @@ export const make = (
253
298
  // Keep the permits alive by refreshing TTL periodically.
254
299
  // This effect runs forever until interrupted (when scope closes).
255
300
  const keepAlive = (
301
+ identifier: string,
256
302
  permits: number
257
303
  ): Effect.Effect<never, SemaphoreBackingError | LockLostError, never> =>
258
304
  Effect.repeat(
259
305
  Effect.gen(function* () {
260
306
  const refreshed = yield* backing
261
- .refresh(key, holderId, ttl, limit, permits)
307
+ .refresh(key, identifier, ttl, limit, permits)
262
308
  .pipe(withBackingErrorRetry);
263
309
 
264
310
  if (!refreshed) {
@@ -276,27 +322,40 @@ export const make = (
276
322
 
277
323
  // Try to acquire permits immediately, returns Option
278
324
  const tryTake = (
279
- permits: number
325
+ permits: number,
326
+ options?: AcquireOptions
280
327
  ): Effect.Effect<
281
328
  Option.Option<Fiber.Fiber<never, LockLostError | SemaphoreBackingError>>,
282
329
  SemaphoreBackingError,
283
330
  Scope.Scope
284
331
  > =>
285
332
  Effect.gen(function* () {
286
- const acquired = yield* backing
287
- .tryAcquire(key, holderId, ttl, limit, permits)
288
- .pipe(withBackingErrorRetry);
333
+ // Generate identifier per-acquire if not provided
334
+ const identifier = options?.identifier ?? crypto.randomUUID();
335
+ const acquiredExternally = options?.acquiredExternally ?? false;
336
+
337
+ // If acquiredExternally, use refresh to verify ownership instead of acquire
338
+ const acquired = acquiredExternally
339
+ ? yield* backing
340
+ .refresh(key, identifier, ttl, limit, permits)
341
+ .pipe(withBackingErrorRetry)
342
+ : yield* backing
343
+ .tryAcquire(key, identifier, ttl, limit, permits)
344
+ .pipe(withBackingErrorRetry);
345
+
289
346
  if (!acquired) {
290
347
  return Option.none();
291
348
  }
292
349
 
293
350
  // Start keepalive fiber, tied to this scope
294
- const keepAliveFiber = yield* Effect.forkScoped(keepAlive(permits));
351
+ const keepAliveFiber = yield* Effect.forkScoped(
352
+ keepAlive(identifier, permits)
353
+ );
295
354
 
296
355
  // Add finalizer to release permits when scope closes
297
356
  yield* Effect.addFinalizer(() =>
298
357
  backing
299
- .release(key, holderId, permits)
358
+ .release(key, identifier, permits)
300
359
  .pipe(withBackingErrorRetry, Effect.ignore)
301
360
  );
302
361
 
@@ -305,40 +364,99 @@ export const make = (
305
364
 
306
365
  // Acquire permits with retry, returns fiber when acquired
307
366
  const take = (
308
- permits: number
367
+ permits: number,
368
+ options?: AcquireOptions
309
369
  ): Effect.Effect<
310
370
  Fiber.Fiber<never, LockLostError | SemaphoreBackingError>,
311
371
  SemaphoreBackingError,
312
372
  Scope.Scope
313
373
  > =>
314
374
  Effect.gen(function* () {
315
- const maybeAcquired = yield* tryTake(permits);
316
- if (Option.isNone(maybeAcquired)) {
317
- return yield* new NotYetAcquiredError();
318
- }
319
- return maybeAcquired.value;
320
- }).pipe(
321
- Effect.retry({
322
- while: (e) => e._tag === "NotYetAcquiredError",
323
- schedule: Schedule.spaced(acquireRetryInterval),
324
- }),
325
- Effect.catchTag("NotYetAcquiredError", () =>
326
- Effect.dieMessage(
327
- "Invariant violated: `take` should never return `NotYetAcquiredError` " +
328
- "since it should be caught by the retry which should retry forever until permits are acquired"
375
+ // Generate identifier once for all retry attempts (outside the retry loop)
376
+ const identifier = options?.identifier ?? crypto.randomUUID();
377
+ const resolvedOptions: AcquireOptions = {
378
+ identifier,
379
+ acquiredExternally: options?.acquiredExternally,
380
+ };
381
+
382
+ // We use a semaphore to ensure that only one acquire attempt is made at a time.
383
+ // With `withPermitsIfAvailable`, if both the poll-based and push-based attempts "trigger" at the same time,
384
+ // one will succeed and the other will simply be a no-op.
385
+ const acquireSemaphore = yield* Effect.makeSemaphore(1);
386
+
387
+ const pushBasedAcquireEnabled = backing.onPermitsReleased
388
+ ? true
389
+ : false;
390
+
391
+ const pollBasedAcquire = Effect.gen(function* () {
392
+ const maybeAcquired = yield* tryTake(permits, resolvedOptions).pipe(
393
+ // only apply the semaphore if push-based acquire is supported
394
+ pushBasedAcquireEnabled
395
+ ? Function.compose(
396
+ acquireSemaphore.withPermitsIfAvailable(1),
397
+ Effect.map(Option.flatten)
398
+ )
399
+ : Function.identity
400
+ );
401
+ if (Option.isNone(maybeAcquired)) {
402
+ return yield* new NotYetAcquiredError();
403
+ }
404
+ return maybeAcquired.value;
405
+ }).pipe(
406
+ Effect.retry({
407
+ while: (e) => e._tag === "NotYetAcquiredError",
408
+ schedule: acquireRetryPolicy,
409
+ }),
410
+ Effect.catchTag("NotYetAcquiredError", () =>
411
+ Effect.dieMessage(
412
+ "Invariant violated: `take` should never return `NotYetAcquiredError` " +
413
+ "since it should be caught by the retry which should retry forever until permits are acquired"
414
+ )
329
415
  )
330
- )
331
- );
416
+ );
417
+
418
+ if (!pushBasedAcquireEnabled) {
419
+ return yield* pollBasedAcquire;
420
+ }
421
+
422
+ // Push-based acquire: run both poll-based and push-based acquire in parallel, and return the first one to complete
423
+ const pushBasedAcquire = backing.onPermitsReleased
424
+ ? Effect.gen(function* () {
425
+ if (!backing.onPermitsReleased) {
426
+ // SAFETY: We know that onPermitsReleased is provided because we checked it above
427
+ return yield* Effect.dieMessage(
428
+ "Invariant violated: `onPermitsReleased` is not provided"
429
+ );
430
+ }
431
+ return yield* backing.onPermitsReleased(key).pipe(
432
+ Stream.runFoldWhileEffect(
433
+ Option.none<
434
+ Fiber.Fiber<never, LockLostError | SemaphoreBackingError>
435
+ >(),
436
+ Option.isNone, // keep folding while we haven't acquired
437
+ () =>
438
+ tryTake(permits, resolvedOptions).pipe(
439
+ acquireSemaphore.withPermitsIfAvailable(1),
440
+ Effect.map(Option.flatten)
441
+ )
442
+ ),
443
+ Effect.map(Option.getOrThrow)
444
+ );
445
+ })
446
+ : Effect.never;
447
+
448
+ return yield* Effect.race(pollBasedAcquire, pushBasedAcquire);
449
+ });
332
450
 
333
451
  // Convenience: acquire permits, run effect, release when done
334
452
  const withPermits =
335
- (permits: number) =>
453
+ (permits: number, options?: AcquireOptions) =>
336
454
  <A, E, R>(
337
455
  effect: Effect.Effect<A, E, R>
338
456
  ): Effect.Effect<A, E | LockLostError | SemaphoreBackingError, R> =>
339
457
  Effect.scoped(
340
458
  Effect.gen(function* () {
341
- const keepAliveFiber = yield* take(permits);
459
+ const keepAliveFiber = yield* take(permits, options);
342
460
 
343
461
  return yield* Effect.raceFirst(effect, Fiber.join(keepAliveFiber));
344
462
  })
@@ -346,7 +464,7 @@ export const make = (
346
464
 
347
465
  // Convenience: try to acquire permits, run effect if successful
348
466
  const withPermitsIfAvailable =
349
- (permits: number) =>
467
+ (permits: number, options?: AcquireOptions) =>
350
468
  <A, E, R>(
351
469
  effect: Effect.Effect<A, E, R>
352
470
  ): Effect.Effect<
@@ -356,7 +474,7 @@ export const make = (
356
474
  > =>
357
475
  Effect.scoped(
358
476
  Effect.gen(function* () {
359
- const maybeAcquired = yield* tryTake(permits);
477
+ const maybeAcquired = yield* tryTake(permits, options);
360
478
  if (Option.isNone(maybeAcquired)) {
361
479
  return Option.none();
362
480
  }
@@ -1,4 +1,4 @@
1
- import { Duration, Effect, Layer } from "effect";
1
+ import { Duration, Effect, Layer, Schedule, Stream } from "effect";
2
2
  import { Redis } from "ioredis";
3
3
  import {
4
4
  DistributedSemaphoreBacking,
@@ -54,26 +54,37 @@ end
54
54
  /**
55
55
  * Lua script for atomic release.
56
56
  *
57
- * Removes all permits held by this holder.
57
+ * Removes all permits held by this holder and optionally publishes a notification.
58
58
  *
59
59
  * Arguments:
60
60
  * - KEYS[1]: the semaphore key
61
+ * - KEYS[2]: the release notification channel
61
62
  * - ARGV[1]: permits to release
62
63
  * - ARGV[2]: holderId
64
+ * - ARGV[3]: shouldPublish (1 = publish, 0 = don't publish)
63
65
  *
64
66
  * Returns the number of permits released.
65
67
  */
66
68
  const RELEASE_SCRIPT = `
67
69
  local key = KEYS[1]
70
+ local channel = KEYS[2]
68
71
  local permits = tonumber(ARGV[1])
69
72
  local holderId = ARGV[2]
73
+ local shouldPublish = tonumber(ARGV[3]) == 1
70
74
  local args = {}
71
75
 
72
76
  for i = 0, permits - 1 do
73
77
  table.insert(args, holderId .. '_' .. i)
74
78
  end
75
79
 
76
- return redis.call('zrem', key, unpack(args))
80
+ local released = redis.call('zrem', key, unpack(args))
81
+
82
+ -- Notify waiters that permits may be available
83
+ if released > 0 and shouldPublish then
84
+ redis.call('publish', channel, released)
85
+ end
86
+
87
+ return released
77
88
  `;
78
89
 
79
90
  /**
@@ -142,6 +153,33 @@ redis.call('zremrangebyscore', key, '-inf', expiredTimestamp)
142
153
  return redis.call('zcard', key)
143
154
  `;
144
155
 
156
+ export interface RedisBackingOptions {
157
+ /**
158
+ * Prefix for all keys in Redis.
159
+ * @default "semaphore:"
160
+ */
161
+ readonly keyPrefix?: string;
162
+
163
+ /**
164
+ * Enable push-based acquisition using Redis pub/sub.
165
+ *
166
+ * When enabled, waiters subscribe to a channel and get notified immediately
167
+ * when permits are released, instead of polling. This reduces latency and
168
+ * load on Redis.
169
+ *
170
+ * Requires an additional Redis connection per waiting semaphore.
171
+ *
172
+ * @default true
173
+ */
174
+ readonly pushBasedAcquireEnabled?: boolean;
175
+
176
+ /**
177
+ * Retry schedule applied when the pub/sub notification stream fails (e.g. the subscriber connection drops).
178
+ * @default Schedule.forever
179
+ */
180
+ readonly pushStreamRetrySchedule?: Schedule.Schedule<void>;
181
+ }
182
+
145
183
  /**
146
184
  * Create a Redis-backed distributed semaphore backing layer.
147
185
  *
@@ -151,13 +189,18 @@ return redis.call('zcard', key)
151
189
  * For multi-instance Redis, consider implementing a Redlock-based backing.
152
190
  *
153
191
  * @param redis - An ioredis client instance (single instance, not cluster)
154
- * @param keyPrefix - Optional prefix for all keys (default: "dsem:")
192
+ * @param options - Configuration options
155
193
  */
156
194
  export const layer = (
157
195
  redis: Redis,
158
- keyPrefix = "dsem:"
196
+ options: RedisBackingOptions = {}
159
197
  ): Layer.Layer<DistributedSemaphoreBacking> => {
198
+ const keyPrefix = options.keyPrefix ?? "semaphore:";
199
+ const pushBasedAcquireEnabled = options.pushBasedAcquireEnabled ?? true;
200
+ const pushStreamRetrySchedule =
201
+ options.pushStreamRetrySchedule ?? Schedule.forever.pipe(Schedule.asVoid);
160
202
  const prefixKey = (key: string) => `${keyPrefix}${key}`;
203
+ const releaseChannel = (key: string) => `${keyPrefix}${key}:released`;
161
204
 
162
205
  const tryAcquire = (
163
206
  key: string,
@@ -194,10 +237,12 @@ export const layer = (
194
237
  try: async () => {
195
238
  const result = await redis.eval(
196
239
  RELEASE_SCRIPT,
197
- 1,
240
+ 2,
198
241
  prefixKey(key),
242
+ releaseChannel(key),
199
243
  permits.toString(),
200
- holderId
244
+ holderId,
245
+ pushBasedAcquireEnabled ? "1" : "0"
201
246
  );
202
247
  return result as number;
203
248
  },
@@ -251,10 +296,55 @@ export const layer = (
251
296
  new SemaphoreBackingError({ operation: "getCount", cause }),
252
297
  });
253
298
 
299
+ // Stream that emits when permits are released on a given key.
300
+ // Uses Redis pub/sub with a dedicated subscriber connection.
301
+ const onPermitsReleased = (key: string): Stream.Stream<void> =>
302
+ Stream.asyncPush<void, SemaphoreBackingError>((emit) => {
303
+ const channel = releaseChannel(key);
304
+
305
+ return Effect.acquireRelease(
306
+ Effect.gen(function* () {
307
+ // Create a dedicated subscriber connection
308
+ const subscriber = redis.duplicate();
309
+
310
+ // Set up message handler before subscribing
311
+ const messageHandler = (ch: string, _message: string) => {
312
+ if (ch === channel) {
313
+ emit.single(void 0);
314
+ }
315
+ };
316
+ subscriber.on("message", messageHandler);
317
+
318
+ // Subscribe to the channel
319
+ yield* Effect.tryPromise({
320
+ try: () => subscriber.subscribe(channel),
321
+ catch: (cause) =>
322
+ new SemaphoreBackingError({ operation: "subscribe", cause }),
323
+ });
324
+
325
+ return { subscriber, messageHandler };
326
+ }),
327
+ ({ subscriber, messageHandler }) =>
328
+ Effect.sync(() => {
329
+ subscriber.off("message", messageHandler);
330
+ subscriber.unsubscribe(channel);
331
+ subscriber.disconnect();
332
+ })
333
+ );
334
+ }).pipe(
335
+ Stream.retry(pushStreamRetrySchedule),
336
+ Stream.catchTag("SemaphoreBackingError", () =>
337
+ Stream.dieMessage(
338
+ "Invariant violated: `onPermitsReleased` should never error because it should be retried forever"
339
+ )
340
+ )
341
+ );
342
+
254
343
  return Layer.succeed(DistributedSemaphoreBacking, {
255
344
  tryAcquire,
256
345
  release,
257
346
  refresh,
258
347
  getCount,
348
+ onPermitsReleased: pushBasedAcquireEnabled ? onPermitsReleased : undefined,
259
349
  });
260
350
  };