redis-single-file 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec +3 -0
- data/.rubocop.yml +43 -0
- data/CHANGELOG.md +5 -0
- data/LICENSE.txt +21 -0
- data/README.md +200 -0
- data/Rakefile +12 -0
- data/benchmark.rb +41 -0
- data/lib/redis_single_file/configuration.rb +64 -0
- data/lib/redis_single_file/semaphore.rb +153 -0
- data/lib/redis_single_file/version.rb +5 -0
- data/lib/redis_single_file.rb +34 -0
- data/sig/redis_single_file.rbs +4 -0
- data/test.rb +54 -0
- metadata +69 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: b6bf43bb784d2365093662b00e3ebc71844803636a685c03d5d8b6d31922e188
  data.tar.gz: 273fc20ad00f05d559c19423bb8ab267bcd63d0c3b3b103049e342e9bba14630
SHA512:
  metadata.gz: 1542f984beb11a1024fc21e05f9e3e68954d95c05ad51208582db251d80d385bff609ba6bf836694996d6b0c0200f5836d9dc827b2e8f9c0b0167fba23bcd8f6
  data.tar.gz: c5ee9aba31008d6708bed29864b8deb8b783668c2f7c9d11013035cc741701afb9759f457bb92f5b7c1a03238be3beeae1ec007949408df8618b9a8c0e93433a
data/.rspec
ADDED
data/.rubocop.yml
ADDED
@@ -0,0 +1,43 @@
require:
  - rubocop-rake
  - rubocop-rspec
  - rubocop-factory_bot

AllCops:
  NewCops: enable
  TargetRubyVersion: 3.2
  Exclude:
    - 'test.rb'
    - 'vendor/bundle/**/*'

Metrics/MethodLength:
  Enabled: false

Metrics/AbcSize:
  Enabled: false

# TODO: revisit this exclusion - (zero?)
Style/NumericPredicate:
  Exclude:
    - lib/redis_single_file/semaphore.rb

#
# rspec stuff
#

# Prefer have_received for setting message expectations.
# Setup redis_mock as a spy using allow or instance_spy.
RSpec/MessageSpies:
  Enabled: false

# Prefer allow over expect when configuring a response.
RSpec/StubbedMock:
  Enabled: false

# Example has too many expectations [3/1].
RSpec/MultipleExpectations:
  Max: 6

# Example has too many lines. [8/5]
RSpec/ExampleLength:
  Max: 10
data/CHANGELOG.md
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2025 LifeBCE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,200 @@
[](https://github.com/lifeBCE/redis-single-file/actions/workflows/build.yml)
[](https://github.com/lifeBCE/redis-single-file/actions/workflows/rspec.yml)
[](https://github.com/lifeBCE/redis-single-file/actions/workflows/codeql.yml)
[](https://github.com/lifeBCE/redis-single-file/actions/workflows/rubocop.yml)
[](https://github.com/lifeBCE/redis-single-file/actions/workflows/benchmark.yml)

# Redis Single File - Distributed Execution Synchronization

Redis single file is a queue-based implementation of a remote/shared semaphore
for distributed execution synchronization. A distributed semaphore may be useful
for synchronizing execution across numerous instances or between the application
and background job workers.

## Installation

Add this line to your application's Gemfile:

```ruby
gem 'redis-single-file'
```

And then execute:

    $ bundle

Or install it yourself as:

    $ gem install redis-single-file

## Configuration

Configure redis single file via its configuration object.

```ruby
RedisSingleFile.configuration do |config|
  # config.host = 'localhost'
  # config.port = '6379'
  # config.name = 'default'
  # config.expire_in = 300
end
```

## Usage Examples

#### Default lock name and infinite blocking
```ruby
semaphore = RedisSingleFile.new
semaphore.synchronize do
  # synchronized logic defined here...
end
```

#### Named locks can provide exclusive synchronization
```ruby
semaphore = RedisSingleFile.new(name: :user_cache_update)
semaphore.synchronize do
  # synchronized logic defined here...
end
```

#### Prevent deadlocks by providing a timeout
```ruby
semaphore = RedisSingleFile.new(name: :s3_file_upload)
semaphore.synchronize(timeout: 15) do
  # synchronized logic defined here...
end
```

#### Use your own redis client instance
```ruby
redis = Redis.new(...)
semaphore = RedisSingleFile.new(redis:)
semaphore.synchronize do
  # synchronized logic defined here...
end
```

## Documentation

### Distributed Queue Design

The redis `blpop` command will attempt to pop (delete and return) a value from
a queue but will block when no values are present in the queue. A timeout can
be provided to prevent deadlock situations.

To unblock (unlock) an instance, add/push an item to the queue. This is done
one at a time to control the serialization of the distributed execution. Redis
selects the instance waiting the longest each time a new token is added.

### Auto Expiration

All redis keys are expired and automatically removed after a certain period
but will be recreated again on the next use. Each new client should face one
of two scenarios when entering synchronization.

1. The mutex key is not set, causing the client to create the keys and prime
   the queue with its first token, unlocking it for the first execution.

2. The mutex key is already set, so the client will skip the priming and enter
   directly into the queue where it should immediately find a token left by
   the last client upon completion or block waiting for the current client to
   finish execution.

### Considerations over redlock approach

[Redlock](https://github.com/leandromoreira/redlock-rb) is the current standard and the official approach [suggested by redis themselves](https://redis.io/docs/latest/develop/use/patterns/distributed-locks/) but the design does have some complexities/drawbacks that some may wish to avoid. The following is a list of pros and cons of redis single file over redlock.

<details>
<summary><code>Pro:</code> Multi-master redis node configuration not required</summary>
<br />
<blockquote>
The redlock design requires a multi-master redis node setup where each node is completely independent of the others (no replication). This would be uncommon in most standard application deployment environments, so a separate redis setup would be required just for the distributed lock management.
<br /><br />
Redis single file will work with your existing redis configuration, so there is no need to maintain a separate redis setup for the application of distributed semaphores.
</blockquote>
</details>

<details>
<summary><code>Pro:</code> No polling or waiting logic needed as redis does all the blocking</summary>
<br />
<blockquote>
The redlock design requires the client to enter into a polling loop, repeatedly checking for the ability to execute its logic. This approach is less efficient and requires quite a bit more logic to accomplish, which also makes it more prone to error.
<br /><br />
Redis single file pushes much of this responsibility off to redis itself with the use of the <code>blpop</code> command. Redis will block on that call when no item is present in the queue and will allocate tokens to competing clients waiting their turn on a first-come, first-served basis.
</blockquote>
</details>

<details>
<summary><code>Pro:</code> Replication lag is not a concern with <code>blpop</code></summary>
<br />
<blockquote>
The redlock design requires a multi-master setup given it utilizes read operations that could be delegated to a read replica in a standard clustered redis deployment. Redis replication is handled in an async manner, so replication lag can hinder distributed synchronization when using read operations against a cluster utilizing replication.
<br /><br />
Redis single file is not susceptible to this limitation given that <code>blpop</code> is a write operation, meaning it will always be handled by the master node, eliminating concerns over replication lag.
</blockquote>
</details>

<details>
<summary><code>Con:</code> Redis cluster failover could disrupt currently queued clients</summary>
<br />
<blockquote>
Redis single file does attempt to recognize a connection failure and rejoin the queue when one is detected, but there is still a small chance that a cluster failover could cause issues for already queued clients.
<br /><br />
Redlock is not susceptible to this given its multi-master deployment and absence of read replicas, so cluster failover (and recovery) is not a concern.
</blockquote>
</details>

## Run Tests

    $ bundle exec rspec

```spec
Finished in 0.00818 seconds (files took 0.09999 seconds to load)
22 examples, 0 failures
```

## Benchmark

    $ bundle exec ruby benchmark.rb

```ruby
ruby 3.2.0 (2022-12-25 revision a528908271) [arm64-darwin22]
Warming up --------------------------------------
         synchronize   434.000 i/100ms
        synchronize!   434.000 i/100ms
      threaded (10x)    29.000 i/100ms
        forked (10x)     8.000 i/100ms
Calculating -------------------------------------
         synchronize      4.329k (± 1.9%) i/s  (230.98 μs/i) -  21.700k in 5.014460s
        synchronize!      4.352k (± 0.3%) i/s  (229.79 μs/i) -  22.134k in 5.086272s
      threaded (10x)    249.794 (±28.4%) i/s     (4.00 ms/i) -   1.073k in 5.058461s
        forked (10x)     56.588 (± 3.5%) i/s    (17.67 ms/i) - 288.000 in 5.097885s

Comparison:
        synchronize!:     4351.8 i/s
         synchronize:     4329.4 i/s - same-ish: difference falls within error
      threaded (10x):      249.8 i/s - 17.42x slower
        forked (10x):       56.6 i/s - 76.90x slower
```

## Disclaimer

> [!WARNING]
> Make sure you understand the limitations and reliability inherent in this implementation prior to using it in a production environment. No guarantees are made. Use at your own risk!

## Inspiration

Inspiration for this gem was taken from a number of existing projects. It would be beneficial for anyone interested to take a look at all three.

1. [Redlock](https://github.com/leandromoreira/redlock-rb)
2. [redis-semaphore](https://github.com/dv/redis-semaphore)
3. [redis-mutex](https://github.com/kenn/redis-mutex)

## Contributing

1. [Fork it](https://github.com/lifeBCE/redis-single-file/fork)
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create a new Pull Request
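The queue design and auto-expiration behavior described in the README above can be sketched with plain redis-rb calls. This is an illustrative sketch only, not code shipped in the gem; the key names, expiry, and timeout values are made up for the example.

```ruby
# Illustrative sketch of the README's queue design -- not part of the gem.
# Assumes a reachable redis server and the redis-rb client.
require 'redis'

redis     = Redis.new(host: 'localhost', port: 6379)
queue_key = 'demo/queue' # hypothetical key name for this sketch
mutex_key = 'demo/mutex' # hypothetical key name for this sketch

# First client primes the queue with a single token (the unlocked state).
unless redis.getset(mutex_key, 'demo')
  redis.del(queue_key)
  redis.lpush(queue_key, '1')
end

# Every client blocks here until a token is available; timeout: 0 blocks
# indefinitely, a positive timeout returns nil when it expires.
if redis.blpop(queue_key, timeout: 15)
  begin
    puts 'holding the lock' # synchronized work happens here
  ensure
    # Cycle the queue for the next waiter and let the keys expire when idle.
    redis.lpush(queue_key, '1') if redis.llen(queue_key).zero?
    redis.expire(mutex_key, 300)
    redis.expire(queue_key, 300)
  end
end
```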
data/Rakefile
ADDED
data/benchmark.rb
ADDED
@@ -0,0 +1,41 @@
# frozen_string_literal: true

require 'benchmark/ips'
require 'redis_single_file'

scenario_1_semaphore = RedisSingleFile.new(name: :scenario1)
scenario_2_semaphore = RedisSingleFile.new(name: :scenario2)

Benchmark.ips do |x|
  x.report('synchronize') do
    scenario_1_semaphore.synchronize { nil }
  end

  x.report('synchronize!') do
    scenario_2_semaphore.synchronize! { nil }
  end

  x.report('threaded (10x)') do
    threads = 10.times.map do
      Thread.new do
        scenario_3_semaphore = RedisSingleFile.new(name: :scenario3)
        scenario_3_semaphore.synchronize { nil }
      end
    end

    threads.each { _1.join(0.05) } while threads.any?(&:alive?)
  end

  x.report('forked (10x)') do
    10.times.each do
      fork do
        scenario_4_semaphore = RedisSingleFile.new(name: :scenario4)
        scenario_4_semaphore.synchronize { nil }
      end
    end

    Process.waitall
  end

  x.compare!
end
data/lib/redis_single_file/configuration.rb
ADDED
@@ -0,0 +1,64 @@
# frozen_string_literal: true

module RedisSingleFile
  #
  # This class provides the ability to configure redis single file.
  #
  # @author lifeBCE
  #
  # @example RedisSingleFile configuration
  #   RedisSingleFile.configuration do |config|
  #     config.host = 'localhost'
  #     config.port = '6379'
  #     config.name = 'default'
  #     config.expire_in = 300
  #   end
  #
  # @return [self] the configuration instance
  class Configuration
    include Singleton

    # configuration defaults when not provided
    DEFAULT_HOST = 'localhost'
    DEFAULT_PORT = '6379'
    DEFAULT_NAME = 'default'
    DEFAULT_EXPIRE_IN = 300 # 5 mins
    DEFAULT_MUTEX_KEY = 'RedisSingleFile/Mutex/%s'
    DEFAULT_QUEUE_KEY = 'RedisSingleFile/Queue/%s'

    # class delegation methods to singleton instance
    #
    # Example:
    #   Configuration.host => Configuration.instance.host
    #   Configuration.port => Configuration.instance.port
    #
    class << self
      %i[host port name expire_in mutex_key queue_key].each do |attr|
        define_method(attr) { instance.send(attr) }
      end
    end

    # writers used in config block to set new values
    attr_writer :host, :port, :name, :expire_in

    # @return [String] redis server hostname value
    def host = @host || DEFAULT_HOST

    # @return [String] redis server port value
    def port = @port || DEFAULT_PORT

    # @return [String] default queue name when omitted
    def name = @name || DEFAULT_NAME

    # @return [String] redis keys expiration value
    def expire_in = @expire_in || DEFAULT_EXPIRE_IN

    # @note This attr is not configurable
    # @return [String] synchronization mutex key name
    def mutex_key = @mutex_key || DEFAULT_MUTEX_KEY

    # @note This attr is not configurable
    # @return [String] synchronization queue key name
    def queue_key = @queue_key || DEFAULT_QUEUE_KEY
  end
end
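A short usage sketch for the configuration singleton above (illustrative only, not part of the gem's files): reads fall through to the defaults until the configuration block assigns new values, and the class-level readers delegate to the singleton instance. The hostname and port below are assumptions for the example.

```ruby
require 'redis_single_file'

# Before configuration, reads fall through to the defaults.
RedisSingleFile::Configuration.host      # => "localhost"
RedisSingleFile::Configuration.expire_in # => 300

# The configuration block yields the singleton instance, so the class-level
# readers defined via define_method pick up the new values afterwards.
RedisSingleFile.configuration do |config|
  config.host = 'redis.internal.example' # assumed hostname for illustration
  config.port = '6380'                   # assumed port for illustration
end

RedisSingleFile::Configuration.host # => "redis.internal.example"
RedisSingleFile::Configuration.port # => "6380"
```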
data/lib/redis_single_file/semaphore.rb
ADDED
@@ -0,0 +1,153 @@
# frozen_string_literal: true

module RedisSingleFile
  #
  # This class acts as the main synchronization engine for distributed logic
  # execution by utilizing the redis blpop command to facilitate a distributed
  # synchronous queue.
  #
  # @author lifeBCE
  #
  # @attr redis [...] redis client instance
  # @attr name [String] custom sync queue name
  # @attr host [String] host for redis server
  # @attr port [String] port for redis server
  #
  # @example Default lock name and infinite blocking
  #   semaphore = RedisSingleFile::Semaphore.new
  #   semaphore.synchronize do
  #     # synchronized logic defined here...
  #   end
  #
  # @example Named locks can provide exclusive synchronization
  #   semaphore = RedisSingleFile::Semaphore.new(name: :user_cache_update)
  #   semaphore.synchronize do
  #     # synchronized logic defined here...
  #   end
  #
  # @example Prevent deadlocks by providing a timeout
  #   semaphore = RedisSingleFile::Semaphore.new(name: :s3_file_upload)
  #   semaphore.synchronize(timeout: 15) do
  #     # synchronized logic defined here...
  #   end
  #
  # @example Use your own redis client instance
  #   redis = Redis.new(...)
  #   semaphore = RedisSingleFile::Semaphore.new(redis:)
  #   semaphore.synchronize do
  #     # synchronized logic defined here...
  #   end
  #
  # @return [self] the semaphore instance
  class Semaphore
    #
    # @note redis:
    #   Any more advanced configuration than host and port should be applied
    #   to an instance outside of redis single file and passed in via this
    #   attribute.
    #
    # @note name:
    #   Distributed semaphores are coordinated by name. Each client that wishes
    #   to synchronize a particular block should do so under the same name.
    #
    # @note host:
    #   Each synchronized execution can be done on a different redis server
    #   than globally configured. Passing a value for this attribute will
    #   redirect to that host.
    #
    # @note port:
    #   Each synchronized execution can be done on a different redis port
    #   than globally configured. Passing a value for this attribute will
    #   redirect to that port.
    #
    # @return [self] semaphore instance
    def initialize(
      redis: nil,               # provide your own redis instance
      name: Configuration.name, # designate queue name per session
      host: Configuration.host, # designate redis host per session
      port: Configuration.port  # designate redis port per session
    )
      @redis = redis || Redis.new(host:, port:)

      @mutex_val = name
      @mutex_key = format(Configuration.mutex_key, @mutex_val)
      @queue_key = format(Configuration.queue_key, @mutex_val)
    end

    # Queues up client and waits for turn to execute. Returns nil
    # when queue wait time expires.
    #
    # @param timeout [Integer] seconds for client to wait in queue
    # @yieldreturn [...] response from synchronized block execution
    # @return [nil] redis blpop timeout
    def synchronize(timeout: 0, &)
      synchronize!(timeout:, &)
    rescue QueueTimeout => _e
      nil
    end

    # Queues up client and waits for turn to execute. Raises exception
    # when queue wait time expires.
    #
    # @param timeout [Integer] seconds for blpop to wait in queue
    # @yieldreturn [...] response from synchronized block execution
    # @raise [QueueTimeout] redis blpop timeout
    def synchronize!(timeout: 0)
      return unless block_given?

      with_retry_protection do
        prime_queue unless redis.getset(mutex_key, mutex_val)
        raise QueueTimeout unless redis.blpop(queue_key, timeout:)

        redis.multi do
          redis.persist(mutex_key) # unexpire during execution
          redis.persist(queue_key) # unexpire during execution
        end
      end

      yield
    ensure
      # always cycle the queue when exiting
      unlock_queue if block_given?
    end

    private #===================================================================

    attr_reader :redis, :mutex_key, :mutex_val, :queue_key

    def expire_in
      @expire_in ||= Configuration.expire_in
    end

    def prime_queue
      with_retry_protection do
        redis.multi do
          redis.del(queue_key)        # remove existing queue
          redis.lpush(queue_key, '1') # create and prime new queue
        end
      end
    end

    def unlock_queue
      with_retry_protection do
        redis.multi do
          # queue next client execution
          redis.lpush(queue_key, '1') if redis.llen(queue_key) == 0
          redis.expire(mutex_key, expire_in) # set expiration for auto removal
          redis.expire(queue_key, expire_in) # set expiration for auto removal
        end
      end
    end

    def with_retry_protection
      yield if block_given?
    rescue Redis::ConnectionError => _e
      retry_count ||= 0
      retry_count += 1

      # retry 5 times over 15 seconds then give up
      sleep(retry_count) && retry if retry_count < 6
      raise # re-raise after all retries exhausted
    end
  end
end
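The two public methods above differ only in how a queue timeout surfaces. A minimal sketch of that difference (illustrative usage, assuming a reachable redis server; the lock name is hypothetical):

```ruby
require 'redis_single_file'

semaphore = RedisSingleFile::Semaphore.new(name: :timeout_demo) # hypothetical name

# synchronize! raises QueueTimeout when blpop gives up waiting for a token.
begin
  semaphore.synchronize!(timeout: 1) { :work }
rescue RedisSingleFile::QueueTimeout
  warn 'gave up waiting for the lock'
end

# synchronize wraps the same call and swallows the exception, returning nil.
result = semaphore.synchronize(timeout: 1) { :work }
result # => :work when the lock was acquired, nil if the wait timed out
```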
data/lib/redis_single_file.rb
ADDED
@@ -0,0 +1,34 @@
# frozen_string_literal: true

require 'redis'
require 'singleton'

require_relative 'redis_single_file/version'
require_relative 'redis_single_file/configuration'
require_relative 'redis_single_file/semaphore'

#
# RedisSingleFile - Distributed Execution Synchronization
#
# Redis single file is a queue-based implementation of a remote/shared
# semaphore for distributed execution synchronization. A distributed
# semaphore may be useful for synchronizing execution across numerous
# instances or between the application and background job workers.
#
# @author lifeBCE
#
module RedisSingleFile
  # alias semaphore as mutex
  Mutex = Semaphore

  # internal blpop timeout exception class
  QueueTimeout = Class.new(StandardError)

  class << self
    def configuration
      yield Configuration.instance if block_given?
    end

    def new(...) = Semaphore.new(...)
  end
end
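The module above wires up two equivalent entry points. A brief sketch (illustrative only; the lock name is hypothetical):

```ruby
require 'redis_single_file'

# RedisSingleFile.new(...) simply delegates to Semaphore.new(...)
semaphore = RedisSingleFile.new(name: :reports) # :reports is a hypothetical name

# Mutex is an alias for Semaphore, as used by test.rb below.
mutex = RedisSingleFile::Mutex.new(name: :reports)

semaphore.class # => RedisSingleFile::Semaphore
mutex.class     # => RedisSingleFile::Semaphore
```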
data/test.rb
ADDED
@@ -0,0 +1,54 @@
#!/usr/bin/env ruby

require 'pry'
require 'redis_single_file'

RUN_ID = 'same-same' #SecureRandom.uuid

ITERATIONS = (ARGV[0] || 10).to_i
WORK_LOAD = (ARGV[1] || 1).to_i
TIMEOUT = ITERATIONS * WORK_LOAD

#semaphore = RedisSingleFile::Mutex.new(name: RUN_ID)
#semaphore.synchronize!(timeout: 10) do
#  puts "Hello World!"
#  sleep 1
#end

#exit

#10.times.map do
#  fork do
#    semaphore = RedisSingleFile::Mutex.new(name: RUN_ID)
#    semaphore.synchronize!(timeout: TIMEOUT) do
#      puts "Hello World!"
#      sleep WORK_LOAD
#    end
#  end
#
#  sleep 0.05
#end
#
#Process.waitall

# exit


#while true do
threads = ITERATIONS.times.map do
  thread = Thread.new do
    semaphore = RedisSingleFile::Mutex.new(name: RUN_ID)
    semaphore.synchronize(timeout: TIMEOUT) do
      puts "Hello World!"
      sleep WORK_LOAD
    end
  end

  # sleep 0.05
  thread
end

while threads.any?(&:alive?) do
  threads.each { _1.join(0.5) }
end
#end
metadata
ADDED
@@ -0,0 +1,69 @@
--- !ruby/object:Gem::Specification
name: redis-single-file
version: !ruby/object:Gem::Version
  version: 0.1.1
platform: ruby
authors:
- LifeBCE
bindir: exe
cert_chain: []
date: 2025-02-04 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: redis
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 5.3.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 5.3.0
description: Synchronize execution across numerous instances.
email:
- eric06@gmail.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- ".rspec"
- ".rubocop.yml"
- CHANGELOG.md
- LICENSE.txt
- README.md
- Rakefile
- benchmark.rb
- lib/redis_single_file.rb
- lib/redis_single_file/configuration.rb
- lib/redis_single_file/semaphore.rb
- lib/redis_single_file/version.rb
- sig/redis_single_file.rbs
- test.rb
homepage: https://github.com/lifeBCE/redis-single-file
licenses:
- MIT
metadata:
  homepage_uri: https://github.com/lifeBCE/redis-single-file
  rubygems_mfa_required: 'true'
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: 3.2.0
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubygems_version: 3.6.3
specification_version: 4
summary: Distributed semaphore implementation with redis.
test_files: []