redis-em-mutex 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README.rdoc +179 -0
- data/Rakefile +35 -0
- data/lib/redis-em-mutex.rb +5 -0
- data/lib/redis/em-mutex.rb +555 -0
- data/redis-em-mutex.gemspec +28 -0
- data/spec/redis-em-mutex-features.rb +33 -0
- data/spec/redis-em-mutex-semaphores.rb +337 -0
- metadata +126 -0
data/README.rdoc
ADDED
@@ -0,0 +1,179 @@
|
|
1
|
+
= redis-em-mutex
|
2
|
+
|
3
|
+
Author:: Rafał Michalski (mailto:rafal@yeondir.com)
|
4
|
+
|
5
|
+
* http://github.com/royaltm/redis-em-mutex
|
6
|
+
|
7
|
+
== DESCRIPTION
|
8
|
+
|
9
|
+
*redis-em-mutex* is the cross server-process-fiber EventMachine + Redis based semaphore.
|
10
|
+
|
11
|
+
== FEATURES
|
12
|
+
|
13
|
+
* only for EventMachine
|
14
|
+
* no CPU-intensive sleep/polling while waiting for lock to become available
|
15
|
+
* fibers waiting for the lock are signalled via Redis channel as soon as the lock
|
16
|
+
has been released (~< 1 ms)
|
17
|
+
* multi-locks (all-or-nothing) locking (to prevent possible deadlocks when
|
18
|
+
multiple semaphores are required to be locked at once)
|
19
|
+
* best served with EM-Synchrony (uses EM::Synchrony::ConnectionPool internally)
|
20
|
+
* fiber-safe
|
21
|
+
* deadlock detection (only trivial cases: locking twice the same resource from the same fiber)
|
22
|
+
* mandatory lock expiration (with refreshing)
|
23
|
+
|
24
|
+
== BUGS/LIMITATIONS
|
25
|
+
|
26
|
+
* only for EventMachine
|
27
|
+
* NOT thread-safe
|
28
|
+
* locking order between concurrent processes is undetermined (no FIFO)
|
29
|
+
* it's not nifty, rather somewhat complicated
|
30
|
+
|
31
|
+
== REQUIREMENTS
|
32
|
+
|
33
|
+
* ruby >= 1.9 (tested: 1.9.3-p194, 1.9.2-p320, 1.9.1-p378)
|
34
|
+
* http://github.com/redis/redis-rb >= 3.0.1
|
35
|
+
* http://rubyeventmachine.com >= 0.12.10
|
36
|
+
* (optional) http://github.com/igrigorik/em-synchrony
|
37
|
+
|
38
|
+
== INSTALL
|
39
|
+
|
40
|
+
$ [sudo] gem install redis-em-mutex
|
41
|
+
|
42
|
+
==== Gemfile
|
43
|
+
|
44
|
+
gem "redis-em-mutex", "~> 0.1.0"
|
45
|
+
|
46
|
+
==== Github
|
47
|
+
|
48
|
+
git clone git://github.com/royaltm/redis-em-mutex.git
|
49
|
+
|
50
|
+
== USAGE
|
51
|
+
|
52
|
+
require 'em-synchrony'
|
53
|
+
require 'redis-em-mutex'
|
54
|
+
|
55
|
+
Redis::EM::Mutex.setup(size: 10, url: 'redis:///1', expire: 600)
|
56
|
+
|
57
|
+
# or
|
58
|
+
|
59
|
+
Redis::EM::Mutex.setup do |opts|
|
60
|
+
opts.size = 10
|
61
|
+
opts.url = 'redis:///1'
|
62
|
+
...
|
63
|
+
end
|
64
|
+
|
65
|
+
|
66
|
+
EM.synchrony do
|
67
|
+
Redis::EM::Mutex.synchronize('resource.lock') do
|
68
|
+
... do something with resource
|
69
|
+
end
|
70
|
+
|
71
|
+
# or
|
72
|
+
|
73
|
+
mutex = Redis::EM::Mutex.new('resource.lock')
|
74
|
+
mutex.synchronize do
|
75
|
+
... do something with resource
|
76
|
+
end
|
77
|
+
|
78
|
+
# or
|
79
|
+
|
80
|
+
begin
|
81
|
+
mutex.lock
|
82
|
+
... do something with resource
|
83
|
+
ensure
|
84
|
+
mutex.unlock
|
85
|
+
end
|
86
|
+
|
87
|
+
...
|
88
|
+
|
89
|
+
Redis::EM::Mutex.stop_watcher
|
90
|
+
EM.stop
|
91
|
+
end
|
92
|
+
|
93
|
+
=== Namespaces
|
94
|
+
|
95
|
+
Redis::EM::Mutex.setup(ns: 'my_namespace', ....)
|
96
|
+
|
97
|
+
# or multiple namespaces:
|
98
|
+
|
99
|
+
ns = Redis::EM::Mutex::NS.new('my_namespace')
|
100
|
+
|
101
|
+
EM.synchrony do
|
102
|
+
ns.synchronize('foo') do
|
103
|
+
.... do something with foo and bar
|
104
|
+
end
|
105
|
+
...
|
106
|
+
EM.stop
|
107
|
+
end
|
108
|
+
|
109
|
+
=== Multi-locks
|
110
|
+
|
111
|
+
EM.synchrony do
|
112
|
+
Redis::EM::Mutex.synchronize('foo', 'bar', 'baz') do
|
113
|
+
.... do something with foo, bar and baz
|
114
|
+
end
|
115
|
+
...
|
116
|
+
EM.stop
|
117
|
+
end
|
118
|
+
|
119
|
+
=== Locking options
|
120
|
+
|
121
|
+
EM.synchrony do
|
122
|
+
begin
|
123
|
+
Redis::EM::Mutex.synchronize('foo', 'bar', block: 0.25) do
|
124
|
+
.... do something with foo and bar
|
125
|
+
end
|
126
|
+
rescue Redis::EM::Mutex::MutexTimeout
|
127
|
+
... locking timed out
|
128
|
+
end
|
129
|
+
|
130
|
+
Redis::EM::Mutex.synchronize('foo', 'bar', expire: 60) do |mutex|
|
131
|
+
.... do something with foo and bar in less than 60 seconds
|
132
|
+
if mutex.refresh(120)
|
133
|
+
# now we have additional 120 seconds until lock expires
|
134
|
+
else
|
135
|
+
# too late
|
136
|
+
end
|
137
|
+
end
|
138
|
+
|
139
|
+
...
|
140
|
+
EM.stop
|
141
|
+
end
|
142
|
+
|
143
|
+
=== Advanced
|
144
|
+
|
145
|
+
mutex = Redis::EM::Mutex.new('resource1', 'resource2', expire: 60)
|
146
|
+
|
147
|
+
EM.synchrony do
|
148
|
+
mutex.lock
|
149
|
+
|
150
|
+
EM.fork_reactor do
|
151
|
+
Fiber.new do
|
152
|
+
mutex.locked? # true
|
153
|
+
mutex.owned? # false
|
154
|
+
mutex.synchronize do
|
155
|
+
mutex.locked? # true
|
156
|
+
mutex.owned? # true
|
157
|
+
|
158
|
+
....
|
159
|
+
end
|
160
|
+
...
|
161
|
+
Redis::EM::Mutex.stop_watcher
|
162
|
+
EM.stop
|
163
|
+
end.resume
|
164
|
+
end
|
165
|
+
|
166
|
+
mutex.locked? # true
|
167
|
+
mutex.owned? # true
|
168
|
+
|
169
|
+
mutex.unlock
|
170
|
+
mutex.owned? # false
|
171
|
+
|
172
|
+
...
|
173
|
+
Redis::EM::Mutex.stop_watcher
|
174
|
+
EM.stop
|
175
|
+
end
|
176
|
+
|
177
|
+
== LICENCE
|
178
|
+
|
179
|
+
The MIT License - Copyright (c) 2012 Rafał Michalski
|
data/Rakefile
ADDED
@@ -0,0 +1,35 @@
|
|
1
|
+
$:.unshift "lib"
|
2
|
+
|
3
|
+
task :default => [:test]
|
4
|
+
|
5
|
+
$gem_name = "redis-em-mutex"
|
6
|
+
|
7
|
+
desc "Run spec tests"
|
8
|
+
task :test do
|
9
|
+
sh "rspec spec/*.rb"
|
10
|
+
end
|
11
|
+
|
12
|
+
desc "Build the gem"
|
13
|
+
task :gem do
|
14
|
+
sh "gem build #$gem_name.gemspec"
|
15
|
+
end
|
16
|
+
|
17
|
+
desc "Install the library at local machine"
|
18
|
+
task :install => :gem do
|
19
|
+
sh "gem install #$gem_name -l"
|
20
|
+
end
|
21
|
+
|
22
|
+
desc "Uninstall the library from local machine"
|
23
|
+
task :uninstall do
|
24
|
+
sh "gem uninstall #$gem_name"
|
25
|
+
end
|
26
|
+
|
27
|
+
desc "Clean"
|
28
|
+
task :clean do
|
29
|
+
sh "rm #$gem_name*.gem"
|
30
|
+
end
|
31
|
+
|
32
|
+
desc "Documentation"
|
33
|
+
task :doc do
|
34
|
+
sh "rdoc --encoding=UTF-8 --title=#$gem_name --main=README.rdoc README.rdoc lib/*.rb lib/*/*.rb"
|
35
|
+
end
|
@@ -0,0 +1,555 @@
|
|
1
|
+
# -*- coding: UTF-8 -*-
|
2
|
+
# require 'digest'
|
3
|
+
# require 'base64'
|
4
|
+
require 'ostruct'
|
5
|
+
require 'securerandom'
|
6
|
+
require 'redis/connection/synchrony' unless defined? Redis::Connection::Synchrony
|
7
|
+
require 'redis'
|
8
|
+
|
9
|
+
class Redis
|
10
|
+
module EM
|
11
|
+
# Cross Machine-Process-Fiber EventMachine/Redis based semaphore.
|
12
|
+
#
|
13
|
+
# WARNING:
|
14
|
+
#
|
15
|
+
# Methods of this class are NOT thread-safe.
|
16
|
+
# They are machine/process/fiber-safe.
|
17
|
+
# All method calls must be invoked only from EventMachine's reactor thread.
|
18
|
+
#
|
19
|
+
# - The terms "lock" and "semaphore" used in documentation are synonyms.
|
20
|
+
# - The term "owner" denotes a Ruby Fiber in some Process on some Machine.
|
21
|
+
#
|
22
|
+
class Mutex
|
23
|
+
VERSION = '0.1.0'
|
24
|
+
module Errors
|
25
|
+
class MutexError < RuntimeError; end
|
26
|
+
class MutexTimeout < MutexError; end
|
27
|
+
end
|
28
|
+
|
29
|
+
include Errors
|
30
|
+
extend Errors
|
31
|
+
|
32
|
+
@@connection_pool_class = nil
|
33
|
+
@@connection_retry_max = 10
|
34
|
+
@@default_expire = 3600*24
|
35
|
+
AUTO_NAME_SEED = '__@'
|
36
|
+
SIGNAL_QUEUE_CHANNEL = "::#{self.name}::"
|
37
|
+
@@name_index = AUTO_NAME_SEED
|
38
|
+
@@redis_pool = nil
|
39
|
+
@@redis_watcher = nil
|
40
|
+
@@watching = false
|
41
|
+
@@watcher_subscribed = false
|
42
|
+
@@signal_queue = Hash.new {|h,k| h[k] = []}
|
43
|
+
@@ns = nil
|
44
|
+
@@uuid = nil
|
45
|
+
|
46
|
+
attr_accessor :expire_timeout, :block_timeout
|
47
|
+
attr_reader :names, :ns
|
48
|
+
alias_method :namespace, :ns
|
49
|
+
|
50
|
+
class NS
|
51
|
+
attr_reader :ns
|
52
|
+
alias_method :namespace, :ns
|
53
|
+
# Creates a new namespace (Mutex factory).
|
54
|
+
#
|
55
|
+
# - ns = namespace
|
56
|
+
# - opts = options hash:
|
57
|
+
# - :block - default block timeout
|
58
|
+
# - :expire - default expire timeout
|
59
|
+
def initialize(ns, opts = {})
|
60
|
+
@ns = ns
|
61
|
+
@opts = (opts || {}).merge(:ns => ns)
|
62
|
+
end
|
63
|
+
|
64
|
+
# Creates a namespaced cross machine/process/fiber semaphore.
|
65
|
+
#
|
66
|
+
# for arguments see: Redis::EM::Mutex.new
|
67
|
+
def new(*args)
|
68
|
+
if args.last.kind_of?(Hash)
|
69
|
+
args[-1] = @opts.merge(args.last)
|
70
|
+
else
|
71
|
+
args.push @opts
|
72
|
+
end
|
73
|
+
Redis::EM::Mutex.new(*args)
|
74
|
+
end
|
75
|
+
|
76
|
+
# Attempts to grab the lock and waits if it isn’t available.
|
77
|
+
#
|
78
|
+
# See: Redis::EM::Mutex.lock
|
79
|
+
def lock(*args)
|
80
|
+
mutex = new(*args)
|
81
|
+
mutex if mutex.lock
|
82
|
+
end
|
83
|
+
|
84
|
+
# Executes block of code protected with namespaced semaphore.
|
85
|
+
#
|
86
|
+
# See: Redis::EM::Mutex.synchronize
|
87
|
+
def synchronize(*args, &block)
|
88
|
+
new(*args).synchronize(&block)
|
89
|
+
end
|
90
|
+
end
|
91
|
+
|
92
|
+
# Creates a new cross machine/process/fiber semaphore
|
93
|
+
#
|
94
|
+
# Redis::EM::Mutex.new(*names, opts = {})
|
95
|
+
#
|
96
|
+
# - *names = lock identifiers - if none they are auto generated
|
97
|
+
# - opts = options hash:
|
98
|
+
# - :name - same as *names (in case *names arguments were omitted)
|
99
|
+
# - :block - default block timeout
|
100
|
+
# - :expire - default expire timeout (see: Mutex#lock and Mutex#try_lock)
|
101
|
+
# - :ns - local namespace (otherwise global namespace is used)
|
102
|
+
def initialize(*args)
|
103
|
+
raise MutexError, "call #{self.class}::setup first" unless @@redis_pool
|
104
|
+
|
105
|
+
opts = args.last.kind_of?(Hash) ? args.pop : {}
|
106
|
+
|
107
|
+
@names = args
|
108
|
+
@names = Array(opts[:name] || "#{@@name_index.succ!}.lock") if @names.empty?
|
109
|
+
raise MutexError, "semaphore names must not be empty" if @names.empty?
|
110
|
+
@multi = !@names.one?
|
111
|
+
@ns = opts[:ns] || @@ns
|
112
|
+
# Build namespaced Redis key names: interpolate each lock name `n` into "<ns>:<name>".
# (The original interpolated the undefined ivar `#@n`, collapsing every key to "<ns>:".)
@ns_names = @ns ? @names.map {|n| "#@ns:#{n}" } : @names
|
113
|
+
@expire_timeout = opts[:expire]
|
114
|
+
@block_timeout = opts[:block]
|
115
|
+
@locked_id = nil
|
116
|
+
end
|
117
|
+
|
118
|
+
# Returns `true` if this semaphore (at least one of locked `names`) is currently being held by some owner.
|
119
|
+
def locked?
|
120
|
+
if @multi
|
121
|
+
@@redis_pool.multi do |multi|
|
122
|
+
@ns_names.each {|n| multi.exists n}
|
123
|
+
end.any?
|
124
|
+
else
|
125
|
+
@@redis_pool.exists @ns_names.first
|
126
|
+
end
|
127
|
+
end
|
128
|
+
|
129
|
+
# Returns `true` if this semaphore (all the locked `names`) is currently being held by calling fiber.
|
130
|
+
def owned?
|
131
|
+
!!if @locked_id
|
132
|
+
lock_full_ident = owner_ident(@locked_id)
|
133
|
+
@@redis_pool.mget(*@ns_names).all? {|v| v == lock_full_ident}
|
134
|
+
end
|
135
|
+
end
|
136
|
+
|
137
|
+
# Attempts to obtain the lock and returns immediately.
|
138
|
+
# Returns `true` if the lock was granted.
|
139
|
+
# Use Mutex#expire_timeout= to set custom lock expiration time in seconds.
|
140
|
+
# Otherwise global Mutex.default_expire is used.
|
141
|
+
#
|
142
|
+
# This method does not lock expired semaphores.
|
143
|
+
# Use Mutex#lock with block_timeout = 0 to obtain expired lock without blocking.
|
144
|
+
def try_lock
|
145
|
+
lock_id = (Time.now + (@expire_timeout.to_f.nonzero? || @@default_expire)).to_f.to_s
|
146
|
+
!!if @multi
|
147
|
+
lock_full_ident = owner_ident(lock_id)
|
148
|
+
if @@redis_pool.msetnx(*@ns_names.map {|k| [k, lock_full_ident]}.flatten)
|
149
|
+
@locked_id = lock_id
|
150
|
+
end
|
151
|
+
elsif @@redis_pool.setnx(@ns_names.first, owner_ident(lock_id))
|
152
|
+
@locked_id = lock_id
|
153
|
+
end
|
154
|
+
end
|
155
|
+
|
156
|
+
# Refreshes lock expiration timeout.
|
157
|
+
# Returns true if refresh was successful or false if mutex was not locked or has already expired.
|
158
|
+
def refresh(expire_timeout=nil)
|
159
|
+
ret = false
|
160
|
+
if @locked_id
|
161
|
+
new_lock_id = (Time.now + (expire_timeout.to_f.nonzero? || @expire_timeout.to_f.nonzero? || @@default_expire)).to_f.to_s
|
162
|
+
new_lock_full_ident = owner_ident(new_lock_id)
|
163
|
+
lock_full_ident = owner_ident(@locked_id)
|
164
|
+
@@redis_pool.execute(false) do |r|
|
165
|
+
r.watch(*@ns_names) do
|
166
|
+
if r.mget(*@ns_names).all? {|v| v == lock_full_ident}
|
167
|
+
ret = !!r.multi do |multi|
|
168
|
+
multi.mset(*@ns_names.map {|k| [k, new_lock_full_ident]}.flatten)
|
169
|
+
end
|
170
|
+
@locked_id = new_lock_id if ret
|
171
|
+
else
|
172
|
+
r.unwatch
|
173
|
+
end
|
174
|
+
end
|
175
|
+
end
|
176
|
+
end
|
177
|
+
ret
|
178
|
+
end
|
179
|
+
|
180
|
+
# Releases the lock unconditionally.
|
181
|
+
# If semaphore wasn’t locked by the current owner it is silently ignored.
|
182
|
+
# Returns self.
|
183
|
+
def unlock
|
184
|
+
if @locked_id
|
185
|
+
lock_full_ident = owner_ident(@locked_id)
|
186
|
+
@@redis_pool.execute(false) do |r|
|
187
|
+
r.watch(*@ns_names) do
|
188
|
+
if r.mget(*@ns_names).all? {|v| v == lock_full_ident}
|
189
|
+
r.multi do |multi|
|
190
|
+
multi.del(*@ns_names)
|
191
|
+
multi.publish SIGNAL_QUEUE_CHANNEL, Marshal.dump(@ns_names)
|
192
|
+
end
|
193
|
+
else
|
194
|
+
r.unwatch
|
195
|
+
end
|
196
|
+
end
|
197
|
+
end
|
198
|
+
end
|
199
|
+
self
|
200
|
+
end
|
201
|
+
|
202
|
+
# Attempts to grab the lock and waits if it isn’t available.
|
203
|
+
# Raises MutexError if mutex was locked by the current owner.
|
204
|
+
# Returns `true` if lock was successfully obtained.
|
205
|
+
# Returns `false` if lock wasn't available within `block_timeout` seconds.
|
206
|
+
#
|
207
|
+
# If `block_timeout` is `nil` or omitted this method uses Mutex#block_timeout.
|
208
|
+
# If also Mutex#block_timeout is nil this method returns only after lock
|
209
|
+
# has been granted.
|
210
|
+
#
|
211
|
+
# Use Mutex#expire_timeout= to set lock expiration timeout.
|
212
|
+
# Otherwise global Mutex.default_expire is used.
|
213
|
+
def lock(block_timeout = nil)
|
214
|
+
block_timeout||= @block_timeout
|
215
|
+
names = @ns_names
|
216
|
+
timer = fiber = nil
|
217
|
+
try_again = false
|
218
|
+
handler = proc do
|
219
|
+
try_again = true
|
220
|
+
::EM.next_tick { fiber.resume if fiber } if fiber
|
221
|
+
end
|
222
|
+
queues = names.map {|n| @@signal_queue[n] << handler }
|
223
|
+
ident_match = owner_ident
|
224
|
+
until try_lock
|
225
|
+
Mutex.start_watcher unless @@watching == $$
|
226
|
+
start_time = Time.now.to_f
|
227
|
+
expire_time = nil
|
228
|
+
@@redis_pool.execute(false) do |r|
|
229
|
+
r.watch(*names) do
|
230
|
+
expired_names = names.zip(r.mget(*names)).map do |name, lock_value|
|
231
|
+
if lock_value
|
232
|
+
owner, exp_id = lock_value.split ' '
|
233
|
+
exp_time = exp_id.to_f
|
234
|
+
expire_time = exp_time if expire_time.nil? || exp_time < expire_time
|
235
|
+
raise MutexError, "deadlock; recursive locking #{owner}" if owner == ident_match
|
236
|
+
if exp_time < start_time
|
237
|
+
name
|
238
|
+
end
|
239
|
+
end
|
240
|
+
end
|
241
|
+
if expire_time && expire_time < start_time
|
242
|
+
r.multi do |multi|
|
243
|
+
expired_names = expired_names.compact
|
244
|
+
multi.del(*expired_names)
|
245
|
+
multi.publish SIGNAL_QUEUE_CHANNEL, Marshal.dump(expired_names)
|
246
|
+
end
|
247
|
+
else
|
248
|
+
r.unwatch
|
249
|
+
end
|
250
|
+
end
|
251
|
+
end
|
252
|
+
timeout = expire_time.to_f - start_time
|
253
|
+
timeout = block_timeout if block_timeout && block_timeout < timeout
|
254
|
+
|
255
|
+
if !try_again && timeout > 0
|
256
|
+
timer = ::EM::Timer.new(timeout) do
|
257
|
+
timer = nil
|
258
|
+
::EM.next_tick { fiber.resume if fiber } if fiber
|
259
|
+
end
|
260
|
+
fiber = Fiber.current
|
261
|
+
Fiber.yield
|
262
|
+
fiber = nil
|
263
|
+
end
|
264
|
+
finish_time = Time.now.to_f
|
265
|
+
if try_again || finish_time > expire_time
|
266
|
+
block_timeout-= finish_time - start_time if block_timeout
|
267
|
+
try_again = false
|
268
|
+
else
|
269
|
+
return false
|
270
|
+
end
|
271
|
+
end
|
272
|
+
true
|
273
|
+
ensure
|
274
|
+
timer.cancel if timer
|
275
|
+
timer = nil
|
276
|
+
queues.each {|q| q.delete handler }
|
277
|
+
names.each {|n| @@signal_queue.delete(n) if @@signal_queue[n].empty? }
|
278
|
+
@@signal_queue.inspect
|
279
|
+
end
|
280
|
+
|
281
|
+
# Execute block of code protected with semaphore.
|
282
|
+
# Returns result of code block.
|
283
|
+
#
|
284
|
+
# If `block_timeout` or Mutex#block_timeout is set and
|
285
|
+
# lock isn't obtained within `block_timeout` seconds this method raises
|
286
|
+
# MutexTimeout.
|
287
|
+
def synchronize(block_timeout = nil)
|
288
|
+
if lock(block_timeout)
|
289
|
+
begin
|
290
|
+
yield self
|
291
|
+
ensure
|
292
|
+
unlock
|
293
|
+
end
|
294
|
+
else
|
295
|
+
raise MutexTimeout
|
296
|
+
end
|
297
|
+
end
|
298
|
+
|
299
|
+
class << self
|
300
|
+
def ns; @@ns; end
|
301
|
+
def ns=(namespace); @@ns = namespace; end
|
302
|
+
alias_method :namespace, :ns
|
303
|
+
alias_method :'namespace=', :'ns='
|
304
|
+
|
305
|
+
# Default value of expiration timeout in seconds.
|
306
|
+
def default_expire; @@default_expire; end
|
307
|
+
|
308
|
+
# Assigns default value of expiration timeout in seconds.
|
309
|
+
# Must be > 0.
|
310
|
+
def default_expire=(value); @@default_expire=value.to_f.abs; end
|
311
|
+
|
312
|
+
# Setup redis database and other defaults
|
313
|
+
# MUST BE called once before any semaphore is created.
|
314
|
+
#
|
315
|
+
# opts = options Hash:
|
316
|
+
#
|
317
|
+
# global options:
|
318
|
+
#
|
319
|
+
# - :connection_pool_class - default is ::EM::Synchrony::ConnectionPool
|
320
|
+
# - :expire - sets global Mutex.default_expire
|
321
|
+
# - :ns - sets global Mutex.namespace
|
322
|
+
# - :reconnect_max - maximum num. of attempts to re-establish
|
323
|
+
# connection to redis server;
|
324
|
+
# default is 10; set to 0 to disable re-connecting;
|
325
|
+
# set to -1 to attempt forever
|
326
|
+
#
|
327
|
+
# redis connection options:
|
328
|
+
#
|
329
|
+
# - :size - redis connection pool size
|
330
|
+
#
|
331
|
+
# passed directly to Redis.new:
|
332
|
+
#
|
333
|
+
# - :url - redis server url
|
334
|
+
#
|
335
|
+
# or
|
336
|
+
#
|
337
|
+
# - :scheme - "redis" or "unix"
|
338
|
+
# - :host - redis host
|
339
|
+
# - :port - redis port
|
340
|
+
# - :password - redis password
|
341
|
+
# - :db - redis database number
|
342
|
+
# - :path - redis unix-socket path
|
343
|
+
#
|
344
|
+
# or
|
345
|
+
#
|
346
|
+
# - :redis - initialized ConnectionPool of Redis clients.
|
347
|
+
def setup(opts = {})
|
348
|
+
stop_watcher
|
349
|
+
opts = OpenStruct.new(opts)
|
350
|
+
yield opts if block_given?
|
351
|
+
@@connection_pool_class = opts.connection_pool_class if opts.connection_pool_class.kind_of?(Class)
|
352
|
+
@redis_options = redis_options = {:driver => :synchrony}
|
353
|
+
redis_updater = proc do |redis|
|
354
|
+
redis_options.update({
|
355
|
+
:scheme => redis.scheme,
|
356
|
+
:host => redis.host,
|
357
|
+
:port => redis.port,
|
358
|
+
:password => redis.password,
|
359
|
+
:db => redis.db,
|
360
|
+
:path => redis.path
|
361
|
+
}.reject {|_k, v| v.nil?})
|
362
|
+
end
|
363
|
+
if (redis = opts.redis) && !opts.url
|
364
|
+
redis_updater.call redis
|
365
|
+
elsif opts.url
|
366
|
+
redis_options[:url] = opts.url
|
367
|
+
end
|
368
|
+
redis_updater.call opts
|
369
|
+
namespace = opts.ns
|
370
|
+
pool_size = (opts.size.to_i.nonzero? || 1).abs
|
371
|
+
self.default_expire = opts.expire if opts.expire
|
372
|
+
@@connection_retry_max = opts.reconnect_max.to_i if opts.reconnect_max
|
373
|
+
@@ns = namespace if namespace
|
374
|
+
# generate machine uuid
|
375
|
+
# todo: should probably use NIC ethernet address or uuid gem
|
376
|
+
# dhash = ::Digest::SHA1.new
|
377
|
+
# rnd = Random.new
|
378
|
+
# 256.times { dhash.update [rnd.rand(0x100000000)].pack "N" }
|
379
|
+
# digest = dhash.digest
|
380
|
+
# dsize, doffs = digest.bytesize.divmod 6
|
381
|
+
# @@uuid = Base64.encode64(digest[rnd.rand(doffs + 1), dsize * 6]).chomp
|
382
|
+
@@uuid = SecureRandom.uuid
|
383
|
+
|
384
|
+
unless (@@redis_pool = redis)
|
385
|
+
unless @@connection_pool_class
|
386
|
+
begin
|
387
|
+
require 'em-synchrony/connection_pool' unless defined?(::EM::Synchrony::ConnectionPool)
|
388
|
+
rescue LoadError
|
389
|
+
raise ":connection_pool_class required; could not fall back to EM::Synchrony::ConnectionPool - gem install em-synchrony"
|
390
|
+
end
|
391
|
+
@@connection_pool_class = ::EM::Synchrony::ConnectionPool
|
392
|
+
end
|
393
|
+
@@redis_pool = @@connection_pool_class.new(size: pool_size) do
|
394
|
+
Redis.new redis_options
|
395
|
+
end
|
396
|
+
end
|
397
|
+
@@redis_watcher = Redis.new redis_options
|
398
|
+
start_watcher if ::EM.reactor_running?
|
399
|
+
end
|
400
|
+
|
401
|
+
# resets Mutex's automatic name generator
|
402
|
+
def reset_autoname
|
403
|
+
@@name_index = AUTO_NAME_SEED
|
404
|
+
end
|
405
|
+
|
406
|
+
def wakeup_queue_all
|
407
|
+
@@signal_queue.each_value do |queue|
|
408
|
+
queue.each {|h| h.call }
|
409
|
+
end
|
410
|
+
end
|
411
|
+
|
412
|
+
# Initializes the "unlock" channel watcher. Its called by Mutex.setup
|
413
|
+
# internally. Should not be used under normal circumstances.
|
414
|
+
# If EventMachine is to be re-started (or after EM.fork_reactor) this method may be used instead of
|
415
|
+
# Mutex.setup for "lightweight" startup procedure.
|
416
|
+
def start_watcher
|
417
|
+
# Inside `class << self`, `self` is the Mutex class; `self.class` would render "Class".
raise MutexError, "call #{self}::setup first" unless @@redis_watcher
|
418
|
+
return if @@watching == $$
|
419
|
+
if @@watching
|
420
|
+
@@redis_watcher = Redis.new @redis_options
|
421
|
+
@@signal_queue.clear
|
422
|
+
end
|
423
|
+
@@watching = $$
|
424
|
+
retries = 0
|
425
|
+
Fiber.new do
|
426
|
+
begin
|
427
|
+
@@redis_watcher.subscribe(SIGNAL_QUEUE_CHANNEL) do |on|
|
428
|
+
on.subscribe do |channel,|
|
429
|
+
if channel == SIGNAL_QUEUE_CHANNEL
|
430
|
+
@@watcher_subscribed = true
|
431
|
+
retries = 0
|
432
|
+
wakeup_queue_all
|
433
|
+
end
|
434
|
+
end
|
435
|
+
on.message do |channel, message|
|
436
|
+
if channel == SIGNAL_QUEUE_CHANNEL
|
437
|
+
handlers = {}
|
438
|
+
Marshal.load(message).each do |name|
|
439
|
+
handlers[@@signal_queue[name].first] = true if @@signal_queue.key?(name)
|
440
|
+
end
|
441
|
+
handlers.keys.each do |handler|
|
442
|
+
handler.call if handler
|
443
|
+
end
|
444
|
+
end
|
445
|
+
end
|
446
|
+
on.unsubscribe do |channel,|
|
447
|
+
@@watcher_subscribed = false if channel == SIGNAL_QUEUE_CHANNEL
|
448
|
+
end
|
449
|
+
end
|
450
|
+
break
|
451
|
+
rescue Redis::BaseConnectionError, EventMachine::ConnectionError => e
|
452
|
+
@@watcher_subscribed = false
|
453
|
+
warn e.message
|
454
|
+
retries+= 1
|
455
|
+
if retries > @@connection_retry_max && @@connection_retry_max >= 0
|
456
|
+
@@watching = false
|
457
|
+
else
|
458
|
+
sleep retries > 1 ? 1 : 0.1
|
459
|
+
end
|
460
|
+
end while @@watching == $$
|
461
|
+
end.resume
|
462
|
+
until @@watcher_subscribed
|
463
|
+
raise MutexError, "Can not establish watcher channel connection!" unless @@watching == $$
|
464
|
+
fiber = Fiber.current
|
465
|
+
::EM.next_tick { fiber.resume }
|
466
|
+
Fiber.yield
|
467
|
+
end
|
468
|
+
end
|
469
|
+
|
470
|
+
def sleep(seconds)
|
471
|
+
fiber = Fiber.current
|
472
|
+
# Resume the current fiber after `seconds` (the parameter; `secs` was a NameError).
::EM::Timer.new(seconds) { fiber.resume }
|
473
|
+
Fiber.yield
|
474
|
+
end
|
475
|
+
|
476
|
+
# Stops the watcher of the "unlock" channel.
|
477
|
+
# It should be called before stopping EventMachine otherwise
|
478
|
+
# EM might wait forever for channel connection to be closed.
|
479
|
+
#
|
480
|
+
# Raises MutexError if there are still some fibers waiting for a lock.
|
481
|
+
# Pass `true` to forcefully stop it. This might instead cause
|
482
|
+
# MutexError to be raised in waiting fibers.
|
483
|
+
def stop_watcher(force = false)
|
484
|
+
return unless @@watching == $$
|
485
|
+
@@watching = false
|
486
|
+
# Inside `class << self`, `self` is the Mutex class; `self.class` would render "Class".
raise MutexError, "call #{self}::setup first" unless @@redis_watcher
|
487
|
+
unless @@signal_queue.empty? || force
|
488
|
+
raise MutexError, "can't stop: active signal queue handlers"
|
489
|
+
end
|
490
|
+
if @@watcher_subscribed
|
491
|
+
@@redis_watcher.unsubscribe SIGNAL_QUEUE_CHANNEL
|
492
|
+
while @@watcher_subscribed
|
493
|
+
fiber = Fiber.current
|
494
|
+
::EM.next_tick { fiber.resume }
|
495
|
+
Fiber.yield
|
496
|
+
end
|
497
|
+
end
|
498
|
+
end
|
499
|
+
|
500
|
+
# Remove all current Machine/Process locks.
|
501
|
+
# Since there is no lock tracking mechanism, it might not be implemented easily.
|
502
|
+
# If the need arises then it probably should be implemented.
|
503
|
+
def sweep
|
504
|
+
raise NotImplementedError
|
505
|
+
end
|
506
|
+
|
507
|
+
# Attempts to grab the lock and waits if it isn’t available.
|
508
|
+
# Raises MutexError if mutex was locked by the current owner.
|
509
|
+
# Returns instance of Redis::EM::Mutex if lock was successfully obtained.
|
510
|
+
# Returns `nil` if lock wasn't available within `:block` seconds.
|
511
|
+
#
|
512
|
+
# Redis::EM::Mutex.lock(*names, opts = {})
|
513
|
+
#
|
514
|
+
# - *names = lock identifiers - if none they are auto generated
|
515
|
+
# - opts = options hash:
|
516
|
+
# - :name - same as name (in case *names arguments were omitted)
|
517
|
+
# - :block - block timeout
|
518
|
+
# - :expire - expire timeout (see: Mutex#lock and Mutex#try_lock)
|
519
|
+
# - :ns - namespace (otherwise global namespace is used)
|
520
|
+
def lock(*args)
|
521
|
+
mutex = new(*args)
|
522
|
+
mutex if mutex.lock
|
523
|
+
end
|
524
|
+
# Execute block of code protected with named semaphore.
|
525
|
+
# Returns result of code block.
|
526
|
+
#
|
527
|
+
# Redis::EM::Mutex.synchronize(*names, opts = {}, &block)
|
528
|
+
#
|
529
|
+
# - *names = lock identifiers - if none they are auto generated
|
530
|
+
# - opts = options hash:
|
531
|
+
# - :name - same as name (in case *names arguments were omitted)
|
532
|
+
# - :block - block timeout
|
533
|
+
# - :expire - expire timeout (see: Mutex#lock and Mutex#try_lock)
|
534
|
+
# - :ns - namespace (otherwise global namespace is used)
|
535
|
+
#
|
536
|
+
# If `:block` is set and lock isn't obtained within `:block` seconds this method raises
|
537
|
+
# MutexTimeout.
|
538
|
+
def synchronize(*args, &block)
|
539
|
+
new(*args).synchronize(&block)
|
540
|
+
end
|
541
|
+
end
|
542
|
+
|
543
|
+
private
|
544
|
+
|
545
|
+
def owner_ident(lock_id = nil)
|
546
|
+
if lock_id
|
547
|
+
"#@@uuid$#$$@#{Fiber.current.__id__} #{lock_id}"
|
548
|
+
else
|
549
|
+
"#@@uuid$#$$@#{Fiber.current.__id__}"
|
550
|
+
end
|
551
|
+
end
|
552
|
+
|
553
|
+
end
|
554
|
+
end
|
555
|
+
end
|
@@ -0,0 +1,28 @@
|
|
1
|
+
$:.unshift "lib"
|
2
|
+
require 'redis/em-mutex'
|
3
|
+
|
4
|
+
Gem::Specification.new do |s|
|
5
|
+
s.name = "redis-em-mutex"
|
6
|
+
s.version = Redis::EM::Mutex::VERSION
|
7
|
+
s.required_ruby_version = ">= 1.9.1"
|
8
|
+
s.date = "#{Time.now.strftime("%Y-%m-%d")}"
|
9
|
+
s.summary = "Cross server-process-fiber EventMachine + Redis based semaphore"
|
10
|
+
s.email = "rafal@yeondir.com"
|
11
|
+
s.homepage = "http://github.com/royaltm/redis-em-mutex"
|
12
|
+
s.require_path = "lib"
|
13
|
+
s.description = "Cross server-process-fiber EventMachine + Redis based semaphore with many features"
|
14
|
+
s.authors = ["Rafal Michalski"]
|
15
|
+
s.files = `git ls-files`.split("\n") - ['.gitignore']
|
16
|
+
s.test_files = Dir.glob("spec/**/*")
|
17
|
+
s.rdoc_options << "--title" << "redis-em-mutex" <<
|
18
|
+
"--main" << "README.rdoc"
|
19
|
+
s.has_rdoc = true
|
20
|
+
s.extra_rdoc_files = ["README.rdoc"]
|
21
|
+
s.requirements << "Redis server"
|
22
|
+
s.add_runtime_dependency "redis", ">= 3.0.0"
|
23
|
+
s.add_runtime_dependency "hiredis", "~> 0.4.5"
|
24
|
+
s.add_runtime_dependency "eventmachine", ">= 0.12.10"
|
25
|
+
s.add_development_dependency "rspec", "~> 2.8.0"
|
26
|
+
s.add_development_dependency "eventmachine", ">= 1.0.0.beta.1"
|
27
|
+
s.add_development_dependency "em-synchrony", "~> 1.0.0"
|
28
|
+
end
|
@@ -0,0 +1,33 @@
|
|
1
|
+
$:.unshift "lib"
|
2
|
+
require 'em-synchrony'
|
3
|
+
require 'redis-em-mutex'
|
4
|
+
|
5
|
+
describe Redis::EM::Mutex do
|
6
|
+
|
7
|
+
it "should raise MutexError while redis server not found on setup" do
|
8
|
+
expect {
|
9
|
+
described_class.setup(host: 'abcdefghijklmnopqrstuvwxyz', reconnect_max: 0)
|
10
|
+
}.to raise_error(described_class::MutexError, /Can not establish watcher channel connection!/)
|
11
|
+
|
12
|
+
expect {
|
13
|
+
described_class.setup(host: '255.255.255.255', reconnect_max: 0)
|
14
|
+
}.to raise_error(described_class::MutexError, /Can not establish watcher channel connection!/)
|
15
|
+
|
16
|
+
expect {
|
17
|
+
described_class.setup(port: 65535, reconnect_max: 0)
|
18
|
+
}.to raise_error(described_class::MutexError, /Can not establish watcher channel connection!/)
|
19
|
+
end
|
20
|
+
|
21
|
+
around(:each) do |testcase|
|
22
|
+
@after_em_stop = nil
|
23
|
+
::EM.synchrony do
|
24
|
+
begin
|
25
|
+
testcase.call
|
26
|
+
ensure
|
27
|
+
::EM.stop
|
28
|
+
end
|
29
|
+
end
|
30
|
+
@after_em_stop.call if @after_em_stop
|
31
|
+
end
|
32
|
+
|
33
|
+
end
|
@@ -0,0 +1,337 @@
|
|
1
|
+
$:.unshift "lib"
|
2
|
+
require 'securerandom'
|
3
|
+
require 'em-synchrony'
|
4
|
+
require 'em-synchrony/fiber_iterator'
|
5
|
+
require 'redis-em-mutex'
|
6
|
+
|
7
|
+
describe Redis::EM::Mutex do
|
8
|
+
|
9
|
+
it "should lock and prevent locking on the same semaphore" do
|
10
|
+
begin
|
11
|
+
described_class.new(@lock_names.first).owned?.should be_false
|
12
|
+
mutex = described_class.lock(@lock_names.first)
|
13
|
+
mutex.names.should eq [@lock_names.first]
|
14
|
+
mutex.locked?.should be_true
|
15
|
+
mutex.owned?.should be_true
|
16
|
+
mutex.should be_an_instance_of described_class
|
17
|
+
described_class.new(@lock_names.first).try_lock.should be_false
|
18
|
+
expect {
|
19
|
+
mutex.lock
|
20
|
+
}.to raise_error(Redis::EM::Mutex::MutexError, /deadlock; recursive locking/)
|
21
|
+
mutex.unlock.should be_an_instance_of described_class
|
22
|
+
mutex.locked?.should be_false
|
23
|
+
mutex.owned?.should be_false
|
24
|
+
mutex.try_lock.should be_true
|
25
|
+
ensure
|
26
|
+
mutex.unlock if mutex
|
27
|
+
end
|
28
|
+
end
|
29
|
+
|
30
|
+
it "should lock and prevent locking on the same multiple semaphores" do
|
31
|
+
begin
|
32
|
+
mutex = described_class.lock(*@lock_names)
|
33
|
+
mutex.names.should eq @lock_names
|
34
|
+
mutex.locked?.should be_true
|
35
|
+
mutex.owned?.should be_true
|
36
|
+
mutex.should be_an_instance_of described_class
|
37
|
+
described_class.new(*@lock_names).try_lock.should be_false
|
38
|
+
@lock_names.each do |name|
|
39
|
+
described_class.new(name).try_lock.should be_false
|
40
|
+
end
|
41
|
+
mutex.try_lock.should be_false
|
42
|
+
expect {
|
43
|
+
mutex.lock
|
44
|
+
}.to raise_error(Redis::EM::Mutex::MutexError, /deadlock; recursive locking/)
|
45
|
+
@lock_names.each do |name|
|
46
|
+
expect {
|
47
|
+
described_class.new(name).lock
|
48
|
+
}.to raise_error(Redis::EM::Mutex::MutexError, /deadlock; recursive locking/)
|
49
|
+
end
|
50
|
+
mutex.unlock.should be_an_instance_of described_class
|
51
|
+
mutex.locked?.should be_false
|
52
|
+
mutex.owned?.should be_false
|
53
|
+
mutex.try_lock.should be_true
|
54
|
+
ensure
|
55
|
+
mutex.unlock if mutex
|
56
|
+
end
|
57
|
+
end
|
58
|
+
|
59
|
+
it "should lock and prevent other fibers to lock on the same semaphore" do
|
60
|
+
begin
|
61
|
+
mutex = described_class.lock(@lock_names.first)
|
62
|
+
mutex.should be_an_instance_of described_class
|
63
|
+
mutex.owned?.should be_true
|
64
|
+
locked = true
|
65
|
+
::EM::Synchrony.next_tick do
|
66
|
+
mutex.try_lock.should be false
|
67
|
+
mutex.owned?.should be_false
|
68
|
+
start = Time.now
|
69
|
+
mutex.synchronize do
|
70
|
+
(Time.now - start).should be_within(0.01).of(0.26)
|
71
|
+
locked.should be false
|
72
|
+
locked = nil
|
73
|
+
end
|
74
|
+
end
|
75
|
+
::EM::Synchrony.sleep 0.25
|
76
|
+
locked = false
|
77
|
+
mutex.owned?.should be_true
|
78
|
+
mutex.unlock.should be_an_instance_of described_class
|
79
|
+
mutex.owned?.should be_false
|
80
|
+
::EM::Synchrony.sleep 0.1
|
81
|
+
locked.should be_nil
|
82
|
+
ensure
|
83
|
+
mutex.unlock if mutex
|
84
|
+
end
|
85
|
+
end
|
86
|
+
|
87
|
+
it "should lock and prevent other fibers to lock on the same multiple semaphores" do
|
88
|
+
begin
|
89
|
+
mutex = described_class.lock(*@lock_names)
|
90
|
+
mutex.should be_an_instance_of described_class
|
91
|
+
mutex.owned?.should be_true
|
92
|
+
locked = true
|
93
|
+
::EM::Synchrony.next_tick do
|
94
|
+
locked.should be true
|
95
|
+
mutex.try_lock.should be false
|
96
|
+
mutex.owned?.should be_false
|
97
|
+
start = Time.now
|
98
|
+
mutex.synchronize do
|
99
|
+
mutex.owned?.should be_true
|
100
|
+
(Time.now - start).should be_within(0.01).of(0.26)
|
101
|
+
locked.should be false
|
102
|
+
end
|
103
|
+
mutex.owned?.should be_false
|
104
|
+
::EM::Synchrony.sleep 0.1
|
105
|
+
start = Time.now
|
106
|
+
::EM::Synchrony::FiberIterator.new(@lock_names, @lock_names.length).each do |name|
|
107
|
+
locked.should be true
|
108
|
+
described_class.new(name).synchronize do
|
109
|
+
(Time.now - start).should be_within(0.01).of(0.26)
|
110
|
+
locked.should be_an_instance_of Fixnum
|
111
|
+
locked-= 1
|
112
|
+
end
|
113
|
+
end
|
114
|
+
end
|
115
|
+
::EM::Synchrony.sleep 0.25
|
116
|
+
locked = false
|
117
|
+
mutex.owned?.should be_true
|
118
|
+
mutex.unlock.should be_an_instance_of described_class
|
119
|
+
mutex.owned?.should be_false
|
120
|
+
::EM::Synchrony.sleep 0.1
|
121
|
+
|
122
|
+
locked = true
|
123
|
+
mutex.lock.should be true
|
124
|
+
::EM::Synchrony.sleep 0.25
|
125
|
+
locked = 10
|
126
|
+
mutex.unlock.should be_an_instance_of described_class
|
127
|
+
::EM::Synchrony.sleep 0.1
|
128
|
+
locked.should eq 0
|
129
|
+
ensure
|
130
|
+
mutex.unlock if mutex
|
131
|
+
end
|
132
|
+
end
|
133
|
+
|
134
|
+
it "should lock and prevent other fibers to lock on the same semaphore with block timeout" do
|
135
|
+
begin
|
136
|
+
mutex = described_class.lock(*@lock_names)
|
137
|
+
mutex.should be_an_instance_of described_class
|
138
|
+
mutex.owned?.should be_true
|
139
|
+
locked = true
|
140
|
+
::EM::Synchrony.next_tick do
|
141
|
+
start = Time.now
|
142
|
+
mutex.lock(0.25).should be false
|
143
|
+
mutex.owned?.should be_false
|
144
|
+
(Time.now - start).should be_within(0.01).of(0.26)
|
145
|
+
locked.should be true
|
146
|
+
locked = nil
|
147
|
+
end
|
148
|
+
::EM::Synchrony.sleep 0.26
|
149
|
+
locked.should be_nil
|
150
|
+
locked = false
|
151
|
+
mutex.locked?.should be_true
|
152
|
+
mutex.owned?.should be_true
|
153
|
+
mutex.unlock.should be_an_instance_of described_class
|
154
|
+
mutex.locked?.should be_false
|
155
|
+
mutex.owned?.should be_false
|
156
|
+
ensure
|
157
|
+
mutex.unlock if mutex
|
158
|
+
end
|
159
|
+
end
|
160
|
+
|
161
|
+
it "should lock and expire while other fiber lock on the same semaphore with block timeout" do
|
162
|
+
begin
|
163
|
+
mutex = described_class.lock(*@lock_names, expire: 0.2499999)
|
164
|
+
mutex.expire_timeout.should eq 0.2499999
|
165
|
+
mutex.should be_an_instance_of described_class
|
166
|
+
mutex.owned?.should be_true
|
167
|
+
locked = true
|
168
|
+
::EM::Synchrony.next_tick do
|
169
|
+
mutex.owned?.should be_false
|
170
|
+
start = Time.now
|
171
|
+
mutex.lock(0.25).should be true
|
172
|
+
(Time.now - start).should be_within(0.011).of(0.26)
|
173
|
+
locked.should be true
|
174
|
+
locked = nil
|
175
|
+
mutex.locked?.should be_true
|
176
|
+
mutex.owned?.should be_true
|
177
|
+
::EM::Synchrony.sleep 0.2
|
178
|
+
locked.should be_false
|
179
|
+
mutex.unlock.should be_an_instance_of described_class
|
180
|
+
mutex.owned?.should be_false
|
181
|
+
mutex.locked?.should be_false
|
182
|
+
end
|
183
|
+
::EM::Synchrony.sleep 0.26
|
184
|
+
locked.should be_nil
|
185
|
+
locked = false
|
186
|
+
mutex.locked?.should be_true
|
187
|
+
mutex.owned?.should be_false
|
188
|
+
mutex.unlock.should be_an_instance_of described_class
|
189
|
+
mutex.locked?.should be_true
|
190
|
+
mutex.owned?.should be_false
|
191
|
+
::EM::Synchrony.sleep 0.2
|
192
|
+
ensure
|
193
|
+
mutex.unlock if mutex
|
194
|
+
end
|
195
|
+
end
|
196
|
+
|
197
|
+
it "should lock and prevent (with refresh) other fibers to lock on the same semaphore with block timeout" do
|
198
|
+
begin
|
199
|
+
mutex = described_class.lock(*@lock_names, expire: 0.11)
|
200
|
+
mutex.should be_an_instance_of described_class
|
201
|
+
mutex.owned?.should be_true
|
202
|
+
locked = true
|
203
|
+
::EM::Synchrony.next_tick do
|
204
|
+
start = Time.now
|
205
|
+
mutex.lock(0.3).should be false
|
206
|
+
mutex.owned?.should be_false
|
207
|
+
(Time.now - start).should be_within(0.01).of(0.31)
|
208
|
+
locked.should be true
|
209
|
+
locked = nil
|
210
|
+
end
|
211
|
+
::EM::Synchrony.sleep 0.08
|
212
|
+
mutex.owned?.should be_true
|
213
|
+
mutex.refresh
|
214
|
+
::EM::Synchrony.sleep 0.08
|
215
|
+
mutex.owned?.should be_true
|
216
|
+
mutex.refresh(0.5)
|
217
|
+
::EM::Synchrony.sleep 0.15
|
218
|
+
locked.should be_nil
|
219
|
+
locked = false
|
220
|
+
mutex.locked?.should be_true
|
221
|
+
mutex.owned?.should be_true
|
222
|
+
mutex.unlock.should be_an_instance_of described_class
|
223
|
+
mutex.locked?.should be_false
|
224
|
+
mutex.owned?.should be_false
|
225
|
+
ensure
|
226
|
+
mutex.unlock if mutex
|
227
|
+
end
|
228
|
+
end
|
229
|
+
|
230
|
+
it "should lock some resource and play with it safely" do
|
231
|
+
mutex = described_class.new(*@lock_names)
|
232
|
+
play_name = SecureRandom.random_bytes
|
233
|
+
result = []
|
234
|
+
::EM::Synchrony::FiberIterator.new((0..9).to_a, 10).each do |i|
|
235
|
+
was_locked = false
|
236
|
+
redis = Redis.new @redis_options
|
237
|
+
mutex.owned?.should be_false
|
238
|
+
mutex.synchronize do
|
239
|
+
mutex.owned?.should be_true
|
240
|
+
was_locked = true
|
241
|
+
redis.setnx(play_name, i).should be_true
|
242
|
+
::EM::Synchrony.sleep 0.1
|
243
|
+
redis.get(play_name).should eq i.to_s
|
244
|
+
redis.del(play_name).should eq 1
|
245
|
+
end
|
246
|
+
was_locked.should be_true
|
247
|
+
mutex.owned?.should be_false
|
248
|
+
result << i
|
249
|
+
end
|
250
|
+
mutex.locked?.should be_false
|
251
|
+
result.sort.should eq (0..9).to_a
|
252
|
+
end
|
253
|
+
|
254
|
+
it "should lock and the other fiber should acquire lock as soon as possible" do
|
255
|
+
mutex = described_class.lock(*@lock_names)
|
256
|
+
mutex.should be_an_instance_of described_class
|
257
|
+
time = nil
|
258
|
+
EM::Synchrony.next_tick do
|
259
|
+
time.should be_nil
|
260
|
+
was_locked = false
|
261
|
+
mutex.synchronize do
|
262
|
+
time.should be_an_instance_of Time
|
263
|
+
(Time.now - time).should be < 0.0009
|
264
|
+
was_locked = true
|
265
|
+
end
|
266
|
+
was_locked.should be_true
|
267
|
+
end
|
268
|
+
EM::Synchrony.sleep 0.1
|
269
|
+
mutex.owned?.should be_true
|
270
|
+
mutex.unlock.should be_an_instance_of described_class
|
271
|
+
time = Time.now
|
272
|
+
mutex.owned?.should be_false
|
273
|
+
EM::Synchrony.sleep 0.1
|
274
|
+
end
|
275
|
+
|
276
|
+
it "should lock and the other process should acquire lock as soon as possible" do
|
277
|
+
mutex = described_class.lock(*@lock_names)
|
278
|
+
mutex.should be_an_instance_of described_class
|
279
|
+
time_key1 = SecureRandom.random_bytes
|
280
|
+
time_key2 = SecureRandom.random_bytes
|
281
|
+
::EM.fork_reactor do
|
282
|
+
Fiber.new do
|
283
|
+
begin
|
284
|
+
redis = Redis.new @redis_options
|
285
|
+
redis.set time_key1, Time.now.to_f.to_s
|
286
|
+
mutex.synchronize do
|
287
|
+
redis.set time_key2, Time.now.to_f.to_s
|
288
|
+
end
|
289
|
+
described_class.stop_watcher(false)
|
290
|
+
# rescue => e
|
291
|
+
# warn e.inspect
|
292
|
+
ensure
|
293
|
+
EM.stop
|
294
|
+
end
|
295
|
+
end.resume
|
296
|
+
end
|
297
|
+
EM::Synchrony.sleep 0.25
|
298
|
+
mutex.owned?.should be_true
|
299
|
+
mutex.unlock.should be_an_instance_of described_class
|
300
|
+
time = Time.now.to_f
|
301
|
+
mutex.owned?.should be_false
|
302
|
+
EM::Synchrony.sleep 0.25
|
303
|
+
redis = Redis.new @redis_options
|
304
|
+
t1, t2 = redis.mget(time_key1, time_key2)
|
305
|
+
t1.should be_an_instance_of String
|
306
|
+
t1.to_f.should be < time - 0.25
|
307
|
+
t2.should be_an_instance_of String
|
308
|
+
t2.to_f.should be > time
|
309
|
+
t2.to_f.should be_within(0.001).of(time)
|
310
|
+
redis.del(time_key1, time_key2)
|
311
|
+
end
|
312
|
+
|
313
|
+
around(:each) do |testcase|
|
314
|
+
@after_em_stop = nil
|
315
|
+
::EM.synchrony do
|
316
|
+
begin
|
317
|
+
testcase.call
|
318
|
+
ensure
|
319
|
+
described_class.stop_watcher(false)
|
320
|
+
::EM.stop
|
321
|
+
end
|
322
|
+
end
|
323
|
+
@after_em_stop.call if @after_em_stop
|
324
|
+
end
|
325
|
+
|
326
|
+
before(:all) do
|
327
|
+
@redis_options = {}
|
328
|
+
described_class.setup @redis_options.merge(size: 11)
|
329
|
+
@lock_names = 10.times.map {
|
330
|
+
SecureRandom.random_bytes
|
331
|
+
}
|
332
|
+
end
|
333
|
+
|
334
|
+
after(:all) do
|
335
|
+
# @lock_names
|
336
|
+
end
|
337
|
+
end
|
metadata
ADDED
@@ -0,0 +1,126 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: redis-em-mutex
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: 0.1.0
|
5
|
+
prerelease:
|
6
|
+
platform: ruby
|
7
|
+
authors:
|
8
|
+
- Rafal Michalski
|
9
|
+
autorequire:
|
10
|
+
bindir: bin
|
11
|
+
cert_chain: []
|
12
|
+
date: 2012-09-12 00:00:00.000000000 Z
|
13
|
+
dependencies:
|
14
|
+
- !ruby/object:Gem::Dependency
|
15
|
+
name: redis
|
16
|
+
requirement: &204598440 !ruby/object:Gem::Requirement
|
17
|
+
none: false
|
18
|
+
requirements:
|
19
|
+
- - ! '>='
|
20
|
+
- !ruby/object:Gem::Version
|
21
|
+
version: 3.0.0
|
22
|
+
type: :runtime
|
23
|
+
prerelease: false
|
24
|
+
version_requirements: *204598440
|
25
|
+
- !ruby/object:Gem::Dependency
|
26
|
+
name: hiredis
|
27
|
+
requirement: &204597980 !ruby/object:Gem::Requirement
|
28
|
+
none: false
|
29
|
+
requirements:
|
30
|
+
- - ~>
|
31
|
+
- !ruby/object:Gem::Version
|
32
|
+
version: 0.4.5
|
33
|
+
type: :runtime
|
34
|
+
prerelease: false
|
35
|
+
version_requirements: *204597980
|
36
|
+
- !ruby/object:Gem::Dependency
|
37
|
+
name: eventmachine
|
38
|
+
requirement: &204597520 !ruby/object:Gem::Requirement
|
39
|
+
none: false
|
40
|
+
requirements:
|
41
|
+
- - ! '>='
|
42
|
+
- !ruby/object:Gem::Version
|
43
|
+
version: 0.12.10
|
44
|
+
type: :runtime
|
45
|
+
prerelease: false
|
46
|
+
version_requirements: *204597520
|
47
|
+
- !ruby/object:Gem::Dependency
|
48
|
+
name: rspec
|
49
|
+
requirement: &204597060 !ruby/object:Gem::Requirement
|
50
|
+
none: false
|
51
|
+
requirements:
|
52
|
+
- - ~>
|
53
|
+
- !ruby/object:Gem::Version
|
54
|
+
version: 2.8.0
|
55
|
+
type: :development
|
56
|
+
prerelease: false
|
57
|
+
version_requirements: *204597060
|
58
|
+
- !ruby/object:Gem::Dependency
|
59
|
+
name: eventmachine
|
60
|
+
requirement: &204596600 !ruby/object:Gem::Requirement
|
61
|
+
none: false
|
62
|
+
requirements:
|
63
|
+
- - ! '>='
|
64
|
+
- !ruby/object:Gem::Version
|
65
|
+
version: 1.0.0.beta.1
|
66
|
+
type: :development
|
67
|
+
prerelease: false
|
68
|
+
version_requirements: *204596600
|
69
|
+
- !ruby/object:Gem::Dependency
|
70
|
+
name: em-synchrony
|
71
|
+
requirement: &204596140 !ruby/object:Gem::Requirement
|
72
|
+
none: false
|
73
|
+
requirements:
|
74
|
+
- - ~>
|
75
|
+
- !ruby/object:Gem::Version
|
76
|
+
version: 1.0.0
|
77
|
+
type: :development
|
78
|
+
prerelease: false
|
79
|
+
version_requirements: *204596140
|
80
|
+
description: Cross server-process-fiber EventMachine + Redis based semaphore with
|
81
|
+
many features
|
82
|
+
email: rafal@yeondir.com
|
83
|
+
executables: []
|
84
|
+
extensions: []
|
85
|
+
extra_rdoc_files:
|
86
|
+
- README.rdoc
|
87
|
+
files:
|
88
|
+
- README.rdoc
|
89
|
+
- Rakefile
|
90
|
+
- lib/redis-em-mutex.rb
|
91
|
+
- lib/redis/em-mutex.rb
|
92
|
+
- redis-em-mutex.gemspec
|
93
|
+
- spec/redis-em-mutex-features.rb
|
94
|
+
- spec/redis-em-mutex-semaphores.rb
|
95
|
+
homepage: http://github.com/royaltm/redis-em-mutex
|
96
|
+
licenses: []
|
97
|
+
post_install_message:
|
98
|
+
rdoc_options:
|
99
|
+
- --title
|
100
|
+
- redis-em-mutex
|
101
|
+
- --main
|
102
|
+
- README.rdoc
|
103
|
+
require_paths:
|
104
|
+
- lib
|
105
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
106
|
+
none: false
|
107
|
+
requirements:
|
108
|
+
- - ! '>='
|
109
|
+
- !ruby/object:Gem::Version
|
110
|
+
version: 1.9.1
|
111
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
112
|
+
none: false
|
113
|
+
requirements:
|
114
|
+
- - ! '>='
|
115
|
+
- !ruby/object:Gem::Version
|
116
|
+
version: '0'
|
117
|
+
requirements:
|
118
|
+
- Redis server
|
119
|
+
rubyforge_project:
|
120
|
+
rubygems_version: 1.8.17
|
121
|
+
signing_key:
|
122
|
+
specification_version: 3
|
123
|
+
summary: Cross server-process-fiber EventMachine + Redis based semaphore
|
124
|
+
test_files:
|
125
|
+
- spec/redis-em-mutex-semaphores.rb
|
126
|
+
- spec/redis-em-mutex-features.rb
|