counting_semaphore 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/workflows/lint.yml +23 -0
- data/.github/workflows/test.yml +34 -0
- data/.gitignore +56 -0
- data/.ruby-version +1 -0
- data/AGENTS.md +4 -0
- data/Gemfile +6 -0
- data/Gemfile.lock +76 -0
- data/LICENSE +21 -0
- data/README.md +88 -0
- data/Rakefile +12 -0
- data/lib/counting_semaphore/local_semaphore.rb +94 -0
- data/lib/counting_semaphore/null_logger.rb +32 -0
- data/lib/counting_semaphore/redis_semaphore.rb +381 -0
- data/lib/counting_semaphore/shared_semaphore.rb +381 -0
- data/lib/counting_semaphore/version.rb +3 -0
- data/lib/counting_semaphore.rb +19 -0
- data/test/counting_semaphore/local_semaphore_test.rb +304 -0
- data/test/counting_semaphore/redis_semaphore_test.rb +486 -0
- data/test/test_helper.rb +10 -0
- metadata +134 -0
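
For orientation before the file contents below: a consuming project needs only the top-level require, and `redis` is needed only for the Redis-backed class. A minimal Gemfile sketch, assuming the gem is available from a registry under the name and version above:

```ruby
# Gemfile (sketch; assumes the gem is published as "counting_semaphore")
gem "counting_semaphore", "~> 0.1.0"
gem "redis" # only required when using CountingSemaphore::RedisSemaphore

# In application code, `require "counting_semaphore"` sets up autoloads for
# LocalSemaphore, RedisSemaphore and NullLogger (see data/lib/counting_semaphore.rb below).
```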
data/lib/counting_semaphore/redis_semaphore.rb
@@ -0,0 +1,381 @@
+# A distributed counting semaphore that allows up to N concurrent operations across multiple processes.
+# Uses Redis for coordination and automatically handles lease expiration for crashed processes.
+# Uses Redis Lua scripts for atomic operations to prevent race conditions.
+require "digest"
+require "securerandom"
+
+module CountingSemaphore
+  class RedisSemaphore
+    # Custom exception for lease acquisition timeouts
+    class LeaseTimeout < StandardError
+      def initialize(token_count, timeout_seconds)
+        super("Failed to acquire #{token_count} tokens within #{timeout_seconds} seconds")
+      end
+    end
+
+    LEASE_EXPIRATION_SECONDS = 5
+
+    # Lua script for atomic lease acquisition
+    # Returns: [success, lease_key, current_usage]
+    # success: 1 if lease was acquired, 0 if no capacity
+    # lease_key: the key of the acquired lease (if successful)
+    # current_usage: current usage count after operation
+    GET_LEASE_SCRIPT = <<~LUA
+      local lease_key = KEYS[1]
+      local lease_set_key = KEYS[2]
+      local capacity = tonumber(ARGV[1])
+      local token_count = tonumber(ARGV[2])
+      local expiration_seconds = tonumber(ARGV[3])
+
+      -- Get all active leases from the set and calculate current usage
+      local lease_keys = redis.call('SMEMBERS', lease_set_key)
+      local current_usage = 0
+      local valid_leases = {}
+
+      for i, key in ipairs(lease_keys) do
+        local tokens = redis.call('GET', key)
+        if tokens then
+          local tokens_from_lease = tonumber(tokens)
+          if tokens_from_lease then
+            current_usage = current_usage + tokens_from_lease
+            table.insert(valid_leases, key)
+          else
+            -- Remove lease with invalid token count
+            redis.call('DEL', key)
+            redis.call('SREM', lease_set_key, key)
+          end
+        else
+          -- Lease key doesn't exist, remove from set
+          redis.call('SREM', lease_set_key, key)
+        end
+      end
+
+      -- Check if we have capacity
+      local available = capacity - current_usage
+      if available >= token_count then
+        -- Set lease with TTL (value is just the token count)
+        redis.call('SETEX', lease_key, expiration_seconds, token_count)
+        -- Add lease key to the set
+        redis.call('SADD', lease_set_key, lease_key)
+        -- Set TTL on the set (4x the lease TTL to ensure cleanup)
+        redis.call('EXPIRE', lease_set_key, expiration_seconds * 4)
+
+        return {1, lease_key, current_usage + token_count}
+      else
+        return {0, '', current_usage}
+      end
+    LUA
+
+    # Lua script for getting current usage
+    # Returns: current_usage (integer)
+    GET_USAGE_SCRIPT = <<~LUA
+      local lease_set_key = KEYS[1]
+      local expiration_seconds = tonumber(ARGV[1])
+
+      -- Get all active leases from the set and calculate current usage
+      local lease_keys = redis.call('SMEMBERS', lease_set_key)
+      local current_usage = 0
+      local has_valid_leases = false
+
+      for i, lease_key in ipairs(lease_keys) do
+        local tokens = redis.call('GET', lease_key)
+        if tokens then
+          local tokens_from_lease = tonumber(tokens)
+          if tokens_from_lease then
+            current_usage = current_usage + tokens_from_lease
+            has_valid_leases = true
+          else
+            -- Remove lease with invalid token count
+            redis.call('DEL', lease_key)
+            redis.call('SREM', lease_set_key, lease_key)
+          end
+        else
+          -- Lease key doesn't exist, remove from set
+          redis.call('SREM', lease_set_key, lease_key)
+        end
+      end
+
+      -- Refresh TTL on the set if there are valid leases (4x the lease TTL)
+      if has_valid_leases then
+        redis.call('EXPIRE', lease_set_key, expiration_seconds * 4)
+      end
+
+      return current_usage
+    LUA
+
+    # Lua script for atomic lease release and signal
+    # Returns: 1 (success)
+    RELEASE_LEASE_SCRIPT = <<~LUA
+      local lease_key = KEYS[1]
+      local queue_key = KEYS[2]
+      local lease_set_key = KEYS[3]
+      local token_count = tonumber(ARGV[1])
+      local max_signals = tonumber(ARGV[2])
+
+      -- Remove the lease
+      redis.call('DEL', lease_key)
+      -- Remove from the lease set
+      redis.call('SREM', lease_set_key, lease_key)
+
+      -- Signal waiting clients about the released tokens
+      redis.call('LPUSH', queue_key, 'tokens:' .. token_count)
+
+      -- Trim queue to prevent indefinite growth (atomic)
+      redis.call('LTRIM', queue_key, 0, max_signals - 1)
+
+      return 1
+    LUA
+
+    # Precomputed script SHAs
+    GET_LEASE_SCRIPT_SHA = Digest::SHA1.hexdigest(GET_LEASE_SCRIPT)
+    GET_USAGE_SCRIPT_SHA = Digest::SHA1.hexdigest(GET_USAGE_SCRIPT)
+    RELEASE_LEASE_SCRIPT_SHA = Digest::SHA1.hexdigest(RELEASE_LEASE_SCRIPT)
+
+    # @return [Integer]
+    attr_reader :capacity
+
+    # Initialize the semaphore with a maximum capacity and required namespace.
+    #
+    # @param capacity [Integer] Maximum number of concurrent operations allowed
+    # @param namespace [String] Required namespace for Redis keys
+    # @param redis [Redis, ConnectionPool] Optional Redis client or connection pool (defaults to new Redis instance)
+    # @param logger [Logger] the logger
+    # @raise [ArgumentError] if capacity is not positive
+    def initialize(capacity, namespace, redis: nil, logger: CountingSemaphore::NullLogger, lease_expiration_seconds: LEASE_EXPIRATION_SECONDS)
+      raise ArgumentError, "Capacity must be positive, got #{capacity}" unless capacity > 0
+
+      # Require Redis only when RedisSemaphore is used
+      require "redis" unless defined?(Redis)
+
+      @capacity = capacity
+      @redis_connection_pool = wrap_redis_client_with_pool(redis || Redis.new)
+      @namespace = namespace
+      @lease_expiration_seconds = lease_expiration_seconds
+      @logger = logger
+
+      # Scripts are precomputed and will be loaded on-demand if needed
+    end
+
+    # Null pool for bare Redis connections that don't need connection pooling
+    class NullPool
+      def initialize(redis_connection)
+        @redis_connection = redis_connection
+      end
+
+      def with(&block)
+        block.call(@redis_connection)
+      end
+    end
+
+    # Acquire a lease for the specified number of tokens and execute the block.
+    # Blocks until sufficient resources are available.
+    #
+    # @param token_count [Integer] Number of tokens to acquire
+    # @param timeout_seconds [Integer] Maximum time to wait for lease acquisition (default: 30 seconds)
+    # @yield The block to execute while holding the lease
+    # @return The result of the block
+    # @raise [ArgumentError] if token_count is negative or exceeds the semaphore capacity
+    # @raise [LeaseTimeout] if lease cannot be acquired within timeout
+    def with_lease(token_count, timeout_seconds: 30)
+      raise ArgumentError, "Token count must be non-negative, got #{token_count}" if token_count < 0
+      if token_count > @capacity
+        raise ArgumentError, "Cannot lease #{token_count} slots as we only allow #{@capacity}"
+      end
+
+      # Handle zero tokens case - no Redis coordination needed
+      return yield if token_count.zero?
+
+      lease_key = acquire_lease(token_count, timeout_seconds: timeout_seconds)
+      begin
+        @logger.debug "🚦Leased #{token_count} tokens with lease #{lease_key}"
+        yield
+      ensure
+        release_lease(lease_key, token_count)
+      end
+    end
+
+    # Get current usage and active leases for debugging
+    def debug_info
+      usage = get_current_usage
+      lease_set_key = "#{@namespace}:lease_set"
+      lease_keys = with_redis { |redis| redis.smembers(lease_set_key) }
+      active_leases = []
+
+      lease_keys.each do |lease_key|
+        tokens = with_redis { |redis| redis.get(lease_key) }
+        next unless tokens
+
+        active_leases << {
+          key: lease_key,
+          tokens: tokens.to_i
+        }
+      end
+
+      {
+        usage: usage,
+        capacity: @capacity,
+        available: @capacity - usage,
+        active_leases: active_leases
+      }
+    end
+
+    private
+
+    # Wraps a Redis client to support both ConnectionPool and bare Redis connections
+    # @param redis [Redis, ConnectionPool] The Redis client or connection pool
+    # @return [Object] A wrapper that supports the `with` method
+    def wrap_redis_client_with_pool(redis)
+      # If it's already a ConnectionPool, return it as-is
+      return redis if redis.respond_to?(:with)
+
+      # For bare Redis connections, wrap in a NullPool
+      NullPool.new(redis)
+    end
+
+    # Executes a block with a Redis connection from the pool
+    # @yield [redis] The Redis connection
+    # @return The result of the block
+    def with_redis(&block)
+      @redis_connection_pool.with(&block)
+    end
+
+    # Executes a Redis script with automatic fallback to EVAL on NOSCRIPT error
+    # @param script_type [Symbol] The type of script (:get_lease, :release_lease, :get_usage)
+    # @param keys [Array] Keys for the script
+    # @param argv [Array] Arguments for the script
+    # @return The result of the script execution
+    def execute_script(script_type, keys: [], argv: [])
+      script_sha, script_body = case script_type
+      when :get_lease then [GET_LEASE_SCRIPT_SHA, GET_LEASE_SCRIPT]
+      when :release_lease then [RELEASE_LEASE_SCRIPT_SHA, RELEASE_LEASE_SCRIPT]
+      when :get_usage then [GET_USAGE_SCRIPT_SHA, GET_USAGE_SCRIPT]
+      else raise ArgumentError, "Unknown script type: #{script_type}"
+      end
+
+      with_redis do |redis|
+        redis.evalsha(script_sha, keys: keys, argv: argv)
+      end
+    rescue Redis::CommandError => e
+      if e.message.include?("NOSCRIPT")
+        @logger.debug "🚦Script not found, using EVAL: #{e.message}"
+        # Fall back to EVAL with the script body
+        with_redis do |redis|
+          redis.eval(script_body, keys: keys, argv: argv)
+        end
+      else
+        raise
+      end
+    end
+
+    def acquire_lease(token_count, timeout_seconds: 30)
+      start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+
+      loop do
+        # Check if we've exceeded the timeout using monotonic time
+        elapsed_time = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time
+        if elapsed_time >= timeout_seconds
+          raise LeaseTimeout.new(token_count, timeout_seconds)
+        end
+
+        # Try optimistic acquisition first
+        lease_key = attempt_lease_acquisition(token_count)
+        return lease_key if lease_key
+
+        # If failed, wait for signals with timeout
+        lease_key = wait_for_tokens(token_count, timeout_seconds - elapsed_time)
+        return lease_key if lease_key
+      end
+    end
+
+    def wait_for_tokens(token_count, remaining_timeout)
+      # Ensure minimum timeout to prevent infinite blocking
+      # BLPOP with timeout 0 blocks forever, so we need at least a small positive timeout
+      minimum_timeout = 0.1
+      if remaining_timeout <= minimum_timeout
+        @logger.debug "🚦Remaining timeout (#{remaining_timeout}s) too small, not waiting"
+        return nil
+      end
+
+      # Block with timeout (longer than lease expiration to handle stale leases)
+      # But don't exceed the remaining timeout
+      timeout = [@lease_expiration_seconds + 2, remaining_timeout].min
+      @logger.debug "🚦Unable to lease #{token_count}, waiting for signals (timeout: #{timeout}s)"
+
+      with_redis { |redis| redis.blpop("#{@namespace}:waiting_queue", timeout: timeout.to_f) }
+
+      # Try to acquire after any signal or timeout
+      lease_key = attempt_lease_acquisition(token_count)
+      if lease_key
+        return lease_key
+      end
+
+      # If still can't acquire, return nil to continue the loop in acquire_lease
+      @logger.debug "🚦Still unable to lease #{token_count} after signal/timeout, continuing to wait"
+      nil
+    end
+
+    def attempt_lease_acquisition(token_count)
+      lease_id = generate_lease_id
+      lease_key = "#{@namespace}:leases:#{lease_id}"
+      lease_set_key = "#{@namespace}:lease_set"
+
+      # Use Lua script for atomic lease acquisition
+      result = execute_script(
+        :get_lease,
+        keys: [lease_key, lease_set_key],
+        argv: [
+          @capacity.to_s,
+          token_count.to_s,
+          @lease_expiration_seconds.to_s
+        ]
+      )
+
+      success, full_lease_key, current_usage = result
+
+      if success == 1
+        # Extract just the lease ID from the full key for return value
+        lease_id = full_lease_key.split(":").last
+        @logger.debug "🚦Acquired lease #{lease_id}, current usage: #{current_usage}/#{@capacity}"
+        lease_id
+      else
+        @logger.debug "🚦No capacity available, current usage: #{current_usage}/#{@capacity}"
+        nil
+      end
+    end
+
+    def release_lease(lease_key, token_count)
+      return if lease_key.nil?
+
+      full_lease_key = "#{@namespace}:leases:#{lease_key}"
+      queue_key = "#{@namespace}:waiting_queue"
+      lease_set_key = "#{@namespace}:lease_set"
+
+      # Use Lua script for atomic lease release and signal
+      execute_script(
+        :release_lease,
+        keys: [full_lease_key, queue_key, lease_set_key],
+        argv: [
+          token_count.to_s,
+          (@capacity * 2).to_s
+        ]
+      )
+
+      @logger.debug "🚦Returned #{token_count} leased tokens (lease: #{lease_key}) and signaled waiting clients"
+    end
+
+    def get_current_usage
+      lease_set_key = "#{@namespace}:lease_set"
+
+      # Use the dedicated usage script that calculates current usage
+      execute_script(
+        :get_usage,
+        keys: [lease_set_key],
+        argv: [@lease_expiration_seconds.to_s]
+      )
+    end
+
+    def generate_lease_id
+      SecureRandom.uuid
+    end
+  end
+end
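
To make the API above concrete, here is a minimal usage sketch. It assumes a Redis server reachable with the `redis` gem's default connection settings; the namespace `"deploys"`, the capacity, and `do_expensive_work` are illustrative, not part of the gem:

```ruby
require "redis"
require "counting_semaphore"

# Allow at most 5 tokens in flight across all processes sharing this namespace.
semaphore = CountingSemaphore::RedisSemaphore.new(5, "deploys", redis: Redis.new)

begin
  # Block until 2 tokens are free, run the block, then return the tokens.
  # Release happens in an ensure, so an exception in the block still frees them.
  semaphore.with_lease(2, timeout_seconds: 10) do
    do_expensive_work # hypothetical workload
  end
rescue CountingSemaphore::RedisSemaphore::LeaseTimeout => e
  warn e.message # "Failed to acquire 2 tokens within 10 seconds"
end

# Inspect state while debugging: {usage:, capacity:, available:, active_leases:}
p semaphore.debug_info
```

One design consequence visible in the source: leases are written with SETEX and never refreshed, so a block that outlives `lease_expiration_seconds` (default 5 seconds) stops counting against capacity; pick a TTL comfortably longer than the longest expected block.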
data/lib/counting_semaphore.rb
@@ -0,0 +1,19 @@
+require_relative "counting_semaphore/version"
+
+module CountingSemaphore
+  # Custom exception for lease acquisition timeouts
+  class LeaseTimeout < StandardError
+    attr_reader :semaphore, :token_count, :timeout_seconds
+
+    def initialize(token_count, timeout_seconds, semaphore = nil)
+      @token_count = token_count
+      @timeout_seconds = timeout_seconds
+      @semaphore = semaphore
+      super("Failed to acquire #{token_count} tokens within #{timeout_seconds} seconds")
+    end
+  end
+
+  autoload :LocalSemaphore, "counting_semaphore/local_semaphore"
+  autoload :RedisSemaphore, "counting_semaphore/redis_semaphore"
+  autoload :NullLogger, "counting_semaphore/null_logger"
+end
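
Since `with_lease` raises once the timeout elapses, callers typically wrap it. A small sketch of rescuing the module-level `LeaseTimeout` and using its readers; the capacity, timeout, and `work` are illustrative:

```ruby
require "counting_semaphore"

semaphore = CountingSemaphore::LocalSemaphore.new(1)

begin
  semaphore.with_lease(1, timeout_seconds: 0.5) { work } # `work` is hypothetical
rescue CountingSemaphore::LeaseTimeout => e
  # The exception exposes what was requested and, for LocalSemaphore,
  # which semaphore instance rejected the request.
  warn "needed #{e.token_count} tokens within #{e.timeout_seconds}s from #{e.semaphore.inspect}"
end
```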
data/test/counting_semaphore/local_semaphore_test.rb
@@ -0,0 +1,304 @@
+require "test_helper"
+require "counting_semaphore"
+
+class LocalSemaphoreTest < Minitest::Test
+  def test_initializes_with_correct_capacity
+    semaphore = CountingSemaphore::LocalSemaphore.new(5)
+    assert_equal 5, semaphore.instance_variable_get(:@capacity)
+  end
+
+  def test_capacity_attribute_returns_the_initialized_capacity
+    semaphore = CountingSemaphore::LocalSemaphore.new(10)
+    assert_equal 10, semaphore.capacity
+  end
+
+  def test_capacity_attribute_is_immutable
+    semaphore = CountingSemaphore::LocalSemaphore.new(7)
+    assert_equal 7, semaphore.capacity
+
+    # Verify that capacity cannot be modified directly
+    assert_raises(NoMethodError) do
+      semaphore.capacity = 5
+    end
+  end
+
+  def test_raises_error_for_negative_capacity
+    assert_raises(ArgumentError, "Capacity must be positive, got -3") do
+      CountingSemaphore::LocalSemaphore.new(-3)
+    end
+  end
+
+  def test_raises_error_for_zero_capacity
+    assert_raises(ArgumentError, "Capacity must be positive, got 0") do
+      CountingSemaphore::LocalSemaphore.new(0)
+    end
+  end
+
+  def test_allows_operations_within_capacity
+    semaphore = CountingSemaphore::LocalSemaphore.new(2)
+    result = nil
+
+    semaphore.with_lease(1) do
+      result = "success"
+    end
+
+    assert_equal "success", result
+  end
+
+  def test_blocks_when_capacity_exceeded
+    semaphore = CountingSemaphore::LocalSemaphore.new(1)
+    start_time = Time.now
+    completed = false
+
+    # Start a thread that will hold the semaphore
+    thread1 = Thread.new do
+      semaphore.with_lease(1) do
+        sleep(0.1) # Hold the semaphore briefly
+        "thread1"
+      end
+    end
+
+    # Start another thread that should block
+    thread2 = Thread.new do
+      semaphore.with_lease(1) do
+        completed = true
+        "thread2"
+      end
+    end
+
+    thread1.join
+    thread2.join
+
+    # The second thread should have completed after the first released
+    assert completed
+    assert (Time.now - start_time) >= 0.1
+  end
+
+  def test_raises_error_when_requesting_more_tokens_than_capacity
+    semaphore = CountingSemaphore::LocalSemaphore.new(2)
+
+    assert_raises(ArgumentError) do
+      semaphore.with_lease(3) do
+        "should not reach here"
+      end
+    end
+  end
+
+  def test_raises_error_for_negative_token_count
+    semaphore = CountingSemaphore::LocalSemaphore.new(2)
+
+    assert_raises(ArgumentError, "Token count must be non-negative, got -1") do
+      semaphore.with_lease(-1) do
+        "should not reach here"
+      end
+    end
+  end
+
+  def test_allows_zero_token_count
+    semaphore = CountingSemaphore::LocalSemaphore.new(2)
+    result = nil
+
+    semaphore.with_lease(0) do
+      result = "success"
+    end
+
+    assert_equal "success", result
+  end
+
+  def test_properly_releases_tokens_after_block_completion
+    semaphore = CountingSemaphore::LocalSemaphore.new(1)
+    results = []
+
+    # First operation should succeed immediately
+    semaphore.with_lease(1) do
+      results << "first"
+    end
+
+    # Second operation should also succeed immediately (tokens were released)
+    semaphore.with_lease(1) do
+      results << "second"
+    end
+
+    assert_equal ["first", "second"], results
+  end
+
+  def test_handles_exceptions_and_still_releases_tokens
+    semaphore = CountingSemaphore::LocalSemaphore.new(1)
+    results = []
+
+    # First operation succeeds
+    semaphore.with_lease(1) do
+      results << "first"
+    end
+
+    # Second operation should fail but still release tokens
+    assert_raises(RuntimeError) do
+      semaphore.with_lease(1) do
+        results << "second"
+        raise "test error"
+      end
+    end
+
+    # Third operation should succeed (tokens were released despite exception)
+    semaphore.with_lease(1) do
+      results << "third"
+    end
+
+    assert_equal ["first", "second", "third"], results
+  end
+
+  def test_supports_multiple_tokens_per_lease
+    semaphore = CountingSemaphore::LocalSemaphore.new(3)
+    results = []
+
+    # Should be able to lease 2 tokens at once
+    semaphore.with_lease(2) do
+      results << "two_tokens"
+    end
+
+    # Should be able to lease 1 more token
+    semaphore.with_lease(1) do
+      results << "one_token"
+    end
+
+    assert_equal ["two_tokens", "one_token"], results
+  end
+
+  def test_with_lease_accepts_timeout_parameter
+    semaphore = CountingSemaphore::LocalSemaphore.new(1)
+
+    # Test that timeout parameter is accepted
+    result = semaphore.with_lease(1, timeout_seconds: 5) do
+      "success"
+    end
+
+    assert_equal "success", result
+  end
+
+  def test_with_lease_raises_timeout_error_when_timeout_is_exceeded
+    semaphore = CountingSemaphore::LocalSemaphore.new(1)
+
+    # Fill the semaphore
+    semaphore.with_lease(1) do
+      # Try to acquire another token with a very short timeout
+      assert_raises(CountingSemaphore::LeaseTimeout) do
+        semaphore.with_lease(1, timeout_seconds: 0.1) do
+          "should not reach here"
+        end
+      end
+    end
+  end
+
+  def test_lease_timeout_includes_semaphore_reference
+    semaphore = CountingSemaphore::LocalSemaphore.new(1)
+    exception = nil
+
+    # Fill the semaphore
+    semaphore.with_lease(1) do
+      # Try to acquire another token with a very short timeout
+
+      semaphore.with_lease(1, timeout_seconds: 0.1) do
+        "should not reach here"
+      end
+    rescue CountingSemaphore::LeaseTimeout => e
+      exception = e
+    end
+
+    refute_nil exception
+    assert_equal semaphore, exception.semaphore
+    assert_equal 1, exception.token_count
+    assert_equal 0.1, exception.timeout_seconds
+  end
+
+  def test_with_lease_uses_default_token_count_of_1
+    semaphore = CountingSemaphore::LocalSemaphore.new(2)
+    result = nil
+
+    # Should work with default token count (1)
+    semaphore.with_lease do
+      result = "success"
+    end
+
+    assert_equal "success", result
+  end
+
+  def test_with_lease_default_blocks_when_capacity_exceeded
+    semaphore = CountingSemaphore::LocalSemaphore.new(1)
+    start_time = Time.now
+    completed = false
+
+    # Start a thread that will hold the semaphore
+    thread1 = Thread.new do
+      semaphore.with_lease do # Uses default token count of 1
+        sleep(0.1) # Hold the semaphore briefly
+        "thread1"
+      end
+    end
+
+    # Start another thread that should block
+    thread2 = Thread.new do
+      semaphore.with_lease do # Uses default token count of 1
+        completed = true
+        "thread2"
+      end
+    end
+
+    thread1.join
+    thread2.join
+
+    # The second thread should have completed after the first released
+    assert completed
+    assert (Time.now - start_time) >= 0.1
+  end
+
+  def test_currently_leased_returns_zero_initially
+    semaphore = CountingSemaphore::LocalSemaphore.new(5)
+    assert_equal 0, semaphore.currently_leased
+  end
+
+  def test_currently_leased_increases_during_lease
+    semaphore = CountingSemaphore::LocalSemaphore.new(5)
+    usage_during_lease = nil
+
+    semaphore.with_lease(2) do
+      usage_during_lease = semaphore.currently_leased
+    end
+
+    assert_equal 2, usage_during_lease
+    assert_equal 0, semaphore.currently_leased
+  end
+
+  def test_currently_leased_returns_to_zero_after_lease
+    semaphore = CountingSemaphore::LocalSemaphore.new(3)
+
+    semaphore.with_lease(2) do
+      assert_equal 2, semaphore.currently_leased
+    end
+
+    assert_equal 0, semaphore.currently_leased
+  end
+
+  def test_currently_leased_with_multiple_concurrent_leases
+    semaphore = CountingSemaphore::LocalSemaphore.new(5)
+    usage_values = []
+    mutex = Mutex.new
+
+    # Start multiple threads that will hold leases
+    threads = []
+    3.times do |i|
+      threads << Thread.new do
+        semaphore.with_lease(1) do
+          mutex.synchronize { usage_values << semaphore.currently_leased }
+          sleep(0.1) # Hold the lease briefly
+        end
+      end
+    end
+
+    threads.each(&:join)
+
+    # Should have seen usage values of 1, 2, and 3 (or some combination)
+    assert usage_values.any? { |usage| usage >= 1 }
+    assert usage_values.any? { |usage| usage <= 3 }
+    assert_equal 0, semaphore.currently_leased
+  end
+end
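
The blocking tests above reduce to a handoff pattern that also works as a standalone sketch; the capacity and sleep duration here are illustrative:

```ruby
require "counting_semaphore"

semaphore = CountingSemaphore::LocalSemaphore.new(1)

holder = Thread.new do
  semaphore.with_lease do # default token count of 1
    sleep 0.1             # keep the only token busy
  end
end

waiter = Thread.new do
  semaphore.with_lease do # blocks until the holder releases
    puts "acquired after holder released"
  end
end

[holder, waiter].each(&:join)
puts semaphore.currently_leased # => 0, all tokens returned
```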