redis-em-mutex 0.2.3 → 0.3.0
- data/BENCHMARK.md +134 -0
- data/{HISTORY.rdoc → HISTORY.md} +10 -1
- data/README.md +89 -5
- data/Rakefile +23 -4
- data/benchmark_mutex.rb +99 -0
- data/lib/redis/em-connection-pool.rb +88 -0
- data/lib/redis/em-mutex/ns.rb +1 -1
- data/lib/redis/em-mutex/pure_handler.rb +245 -0
- data/lib/redis/em-mutex/script_handler.rb +414 -0
- data/lib/redis/em-mutex/version.rb +1 -1
- data/lib/redis/em-mutex.rb +79 -248
- data/redis-em-mutex.gemspec +2 -2
- data/spec/redis-em-mutex-condition.rb +3 -3
- data/spec/redis-em-mutex-features.rb +67 -5
- data/spec/redis-em-mutex-semaphores.rb +9 -4
- metadata +11 -5
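The heart of this release is the split of the locking logic into two interchangeable handlers, shown in full below: `PureHandlerMixin` (plain Redis commands with WATCH/MULTI transactions) and `ScriptHandlerMixin` (server-side Lua scripts, which require Redis 2.6 or later). The mutex API itself is unchanged; a minimal usage sketch, assuming the `setup`/`synchronize` interface from the gem's README (the `handler:` option and its values are an assumption based on this diff — check README.md of this release for the exact names):

```ruby
require 'em-synchrony'
require 'redis-em-mutex'

EM.synchrony do
  # handler: picks one of the two mixins below (assumed option name)
  Redis::EM::Mutex.setup(size: 10, url: 'redis://localhost/0', handler: :pure)

  Redis::EM::Mutex.synchronize('my-resource') do
    # critical section, exclusive across processes, fibers and machines
  end

  EM.stop
end
```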
data/lib/redis/em-mutex/pure_handler.rb (new file, +245 lines):

```ruby
class Redis
  module EM
    class Mutex
      module PureHandlerMixin
        include Mutex::Errors

        def self.can_refresh_expired?; true end

        private

        def post_init(opts)
          @locked_owner_id = @locked_id = nil
          if (owner = opts[:owner])
            self.define_singleton_method(:owner_ident) do |lock_id = nil|
              if lock_id
                "#{uuid}$#$$@#{owner} #{lock_id}"
              else
                "#{uuid}$#$$@#{owner}"
              end
            end
            opts = nil
          end
        end

        public

        # Returns `true` if this semaphore (at least one of the locked `names`) is currently being held by some owner.
        def locked?
          if @multi
            redis_pool.multi do |multi|
              @ns_names.each {|n| multi.exists n}
            end.any?
          else
            redis_pool.exists @ns_names.first
          end
        end

        # Returns `true` if this semaphore (all of the locked `names`) is currently being held by the calling owner.
        # This is the method you should use to check whether a lock is still held and valid.
        def owned?
          !!if @locked_id && owner_ident(@locked_id) == (lock_full_ident = @locked_owner_id)
            redis_pool.mget(*@ns_names).all? {|v| v == lock_full_ident}
          end
        end

        # Returns `true` when the semaphore is being held and has already expired.
        # Returns `false` when the semaphore is still locked and valid,
        # or `nil` if the semaphore wasn't locked by the current owner.
        #
        # The check is performed only on the Mutex object instance and should only be used as a hint.
        # For reliable lock status information use #refresh or #owned? instead.
        def expired?
          Time.now.to_f > @locked_id.to_f if @locked_id && owner_ident(@locked_id) == @locked_owner_id
        end

        # Returns the number of seconds left until the semaphore expires.
        # A value less than 0 means that the semaphore has expired and could be grabbed
        # by some other owner.
        # Returns `nil` if the semaphore wasn't locked by the current owner.
        #
        # The check is performed only on the Mutex object instance and should only be used as a hint.
        # For reliable lock status information use #refresh or #owned? instead.
        def expires_in
          @locked_id.to_f - Time.now.to_f if @locked_id && owner_ident(@locked_id) == @locked_owner_id
        end

        # Returns the local time at which the semaphore will expire or has expired.
        # Returns `nil` if the semaphore wasn't locked by the current owner.
        #
        # The check is performed only on the Mutex object instance and should only be used as a hint.
        # For reliable lock status information use #refresh or #owned? instead.
        def expires_at
          Time.at(@locked_id.to_f) if @locked_id && owner_ident(@locked_id) == @locked_owner_id
        end

        # Returns the timestamp at which the semaphore will expire or has expired.
        # Returns `nil` if the semaphore wasn't locked by the current owner.
        #
        # The check is performed only on the Mutex object instance and should only be used as a hint.
        # For reliable lock status information use #refresh or #owned? instead.
        def expiration_timestamp
          @locked_id.to_f if @locked_id && owner_ident(@locked_id) == @locked_owner_id
        end

        # This method is for internal use only.
        #
        # Attempts to obtain the lock and returns immediately.
        # Returns `true` if the lock was granted.
        # Use Mutex#expire_timeout= to set the lock expiration time in seconds.
        # Otherwise the global Mutex.default_expire is used.
        #
        # This method doesn't capture expired semaphores in the "pure" implementation
        # and therefore it should NEVER be used under normal circumstances.
        # Use Mutex#lock with block_timeout = 0 to obtain an expired lock without blocking.
        def try_lock
          lock_id = (Time.now + expire_timeout).to_f.to_s
          lock_full_ident = owner_ident(lock_id)
          !!if @multi
            if redis_pool.msetnx(*@ns_names.map {|k| [k, lock_full_ident]}.flatten)
              @locked_id = lock_id
              @locked_owner_id = lock_full_ident
            end
          elsif redis_pool.setnx(@ns_names.first, lock_full_ident)
            @locked_id = lock_id
            @locked_owner_id = lock_full_ident
          end
        end
```
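`try_lock` above works around SETNX/MSETNX not accepting a TTL: the expiration timestamp is encoded into the lock value itself (`"uuid$pid@owner expire_ts"`), and stale values are detected and reaped later inside `lock`. The same idea in isolation, using the plain `redis` gem (the key and owner names are illustrative):

```ruby
require 'redis' # plain redis client, independent of this gem

redis  = Redis.new
owner  = 'worker-1'
expire = (Time.now + 10).to_f.to_s # expiration travels inside the value
taken  = redis.setnx('lock.demo', "#{owner} #{expire}")

unless taken
  # key exists: split the value back into owner and expiration timestamp
  holder, exp = redis.get('lock.demo').to_s.split(' ')
  puts "held by #{holder}, stale: #{exp.to_f < Time.now.to_f}"
end
```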
pure_handler.rb (continued):

```ruby
        # Refreshes the lock expiration timeout.
        # Returns `true` if the refresh was successful.
        # Returns `false` if the semaphore wasn't locked, or if it was locked but has expired
        # and now has a new owner.
        def refresh(expire_timeout=nil)
          ret = false
          if @locked_id && owner_ident(@locked_id) == (lock_full_ident = @locked_owner_id)
            new_lock_id = (Time.now + (expire_timeout.to_f.nonzero? || self.expire_timeout)).to_f.to_s
            new_lock_full_ident = owner_ident(new_lock_id)
            redis_pool.watch(*@ns_names) do |r|
              if r.mget(*@ns_names).all? {|v| v == lock_full_ident}
                if r.multi {|m| m.mset(*@ns_names.map {|k| [k, new_lock_full_ident]}.flatten)}
                  @locked_id = new_lock_id
                  @locked_owner_id = new_lock_full_ident
                  ret = true
                end
              else
                r.unwatch
              end
            end
          end
          ret
        end

        # Releases the lock. Returns self on success.
        # Returns `false` if the semaphore wasn't locked, or if it was locked but has expired
        # and now has a new owner.
        # When unlocking a semaphore with multiple names this method returns self only when all
        # of the names have been unlocked successfully.
        def unlock!
          ret = false
          if (locked_id = @locked_id) && owner_ident(@locked_id) == (lock_full_ident = @locked_owner_id)
            @locked_owner_id = @locked_id = nil
            redis_pool.watch(*@ns_names) do |r|
              if r.mget(*@ns_names).all? {|v| v == lock_full_ident}
                ret = !!r.multi do |multi|
                  multi.del(*@ns_names)
                  multi.publish SIGNAL_QUEUE_CHANNEL, @marsh_names if Time.now.to_f < locked_id.to_f
                end
              else
                r.unwatch
              end
            end
          end
          ret && self
        end

        # Attempts to grab the lock and waits if it isn't available.
        # Raises MutexError if the mutex was already locked by the current owner.
        # Returns `true` if the lock was successfully obtained.
        # Returns `false` if the lock wasn't available within `block_timeout` seconds.
        #
        # If `block_timeout` is `nil` or omitted, this method uses Mutex#block_timeout.
        # If Mutex#block_timeout is also `nil`, this method returns only after the lock
        # has been granted.
        #
        # Use Mutex#expire_timeout= to set the lock expiration timeout.
        # Otherwise the global Mutex.default_expire is used.
        def lock(block_timeout = nil)
          block_timeout ||= self.block_timeout
          names = @ns_names
          timer = fiber = nil
          try_again = false
          sig_proc = proc do
            try_again = true
            ::EM.next_tick { fiber.resume if fiber } if fiber
          end
          begin
            Mutex.start_watcher unless watching?
            queues = names.map {|n| signal_queue[n] << sig_proc }
            ident_match = owner_ident
            until try_lock
              start_time = Time.now.to_f
              expire_time = nil
              redis_pool.watch(*names) do |r|
                expired_names = names.zip(r.mget(*names)).map do |name, lock_value|
                  if lock_value
                    owner, exp_id = lock_value.split ' '
                    exp_time = exp_id.to_f
                    expire_time = exp_time if expire_time.nil? || exp_time < expire_time
                    if exp_time < start_time
                      name
                    elsif owner == ident_match
                      raise MutexError, "deadlock; recursive locking #{owner}"
                    end
                  end
                end
                if expire_time && expire_time < start_time
                  r.multi do |multi|
                    expired_names = expired_names.compact
                    multi.del(*expired_names)
                  end
                else
                  r.unwatch
                end
              end
              timeout = (expire_time = expire_time.to_f) - start_time
              timeout = block_timeout if block_timeout && block_timeout < timeout

              if !try_again && timeout > 0
                timer = ::EM::Timer.new(timeout) do
                  timer = nil
                  ::EM.next_tick { fiber.resume if fiber } if fiber
                end
                fiber = Fiber.current
                Fiber.yield
                fiber = nil
              end
              finish_time = Time.now.to_f
              if try_again || finish_time > expire_time
                block_timeout -= finish_time - start_time if block_timeout
                try_again = false
              else
                return false
              end
            end
            true
          ensure
            timer.cancel if timer
            timer = nil
            queues.each {|q| q.delete sig_proc }
            names.each {|n| signal_queue.delete(n) if signal_queue[n].empty? }
          end
        end

        def owner_ident(lock_id = nil)
          if lock_id
            "#{uuid}$#$$@#{Fiber.current.__id__} #{lock_id}"
          else
            "#{uuid}$#$$@#{Fiber.current.__id__}"
          end
        end

      end
    end
  end
end
```
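Since any lock can expire in the middle of a critical section, a long-running owner should periodically call `refresh` and treat `false` as a lost lock, as the comments above recommend (`#refresh` or `#owned?` for reliable status). A sketch against the instance API defined above; `'long-job'`, `work_remaining?` and `do_some_work` are illustrative, and the `expire:` option name is assumed from the gem's README:

```ruby
mutex = Redis::EM::Mutex.new('long-job', expire: 30)
if mutex.lock(0.5) # wait at most half a second for the lock
  begin
    while work_remaining?            # hypothetical predicate
      do_some_work                   # hypothetical step, well under 30s
      # extend ownership by another expire_timeout; false means the lock
      # expired meanwhile and may already belong to someone else
      raise 'lock lost' unless mutex.refresh
    end
  ensure
    mutex.unlock!
  end
end
```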
data/lib/redis/em-mutex/script_handler.rb (new file, +414 lines):

```ruby
require 'digest'

class Redis
  module EM
    class Mutex
      module ScriptHandlerMixin
        include Mutex::Errors

        def self.can_refresh_expired?; false end

        private

        def post_init(opts)
          @locked_owner_id = @lock_expire = nil
          @scripts = if @multi
            Scripts::MULTI
          else
            Scripts::SINGLE
          end
          @eval_try_lock,
          @eval_lock,
          @eval_unlock,
          @eval_refresh,
          @eval_is_locked = @scripts.keys
          if (owner = opts[:owner])
            self.define_singleton_method(:owner_ident) do
              "#{uuid}$#$$@#{owner}"
            end
            opts = nil
          end
        end

        NOSCRIPT = 'NOSCRIPT'.freeze

        def eval_safe(sha1, keys, args = nil)
          redis_pool.evalsha(sha1, keys, args)
        rescue CommandError => e
          if e.message.start_with? NOSCRIPT
            redis_pool.script :load, @scripts[sha1]
            retry
          else
            raise
          end
        end
```
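`eval_safe` above is the usual EVALSHA-first pattern: address the script by its SHA-1 digest and lazily issue SCRIPT LOAD only when the server replies with a NOSCRIPT error. The same pattern reduced to the plain `redis` gem, outside EventMachine (the script body is illustrative):

```ruby
require 'digest'
require 'redis' # plain redis client, independent of this gem

SCRIPT = "return redis.call('time')[1]" # illustrative: server unix time
SHA1   = Digest::SHA1.hexdigest(SCRIPT)

def eval_cached(redis, keys = [], argv = [])
  redis.evalsha(SHA1, keys: keys, argv: argv)
rescue Redis::CommandError => e
  raise unless e.message.start_with?('NOSCRIPT')
  redis.script(:load, SCRIPT) # populate the server's script cache
  retry                       # the evalsha now succeeds
end

puts eval_cached(Redis.new) # => e.g. "1670000000"
```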
script_handler.rb (continued):

```ruby
        public

        def owner_ident
          "#{uuid}$#$$@#{Fiber.current.__id__}"
        end

        # Returns `true` if this semaphore (at least one of the locked `names`) is currently being held by some owner.
        def locked?
          if (sha1 = @eval_is_locked)
            1 == eval_safe(sha1, @ns_names)
          else
            redis_pool.exists @ns_names.first
          end
        end

        # Returns `true` if this semaphore (all of the locked `names`) is currently being held by the calling owner.
        # This is the method you should use to check whether a lock is still held and valid.
        def owned?
          !!if owner_ident == (lock_full_ident = @locked_owner_id)
            redis_pool.mget(*@ns_names).all? {|v| v == lock_full_ident}
          end
        end

        # Returns `true` when the semaphore is being held and has already expired.
        # Returns `false` when the semaphore is still locked and valid,
        # or `nil` if the semaphore wasn't locked by the current owner.
        #
        # The check is performed only on the Mutex object instance and should only be used as a hint.
        # For reliable lock status information use #refresh or #owned? instead.
        def expired?
          Time.now.to_f > @lock_expire if @lock_expire && owner_ident == @locked_owner_id
        end

        # Returns the number of seconds left until the semaphore expires.
        # A value less than 0 means that the semaphore has expired and could be grabbed
        # by some other owner.
        # Returns `nil` if the semaphore wasn't locked by the current owner.
        #
        # The check is performed only on the Mutex object instance and should only be used as a hint.
        # For reliable lock status information use #refresh or #owned? instead.
        def expires_in
          @lock_expire.to_f - Time.now.to_f if @lock_expire && owner_ident == @locked_owner_id
        end

        # Returns the local time at which the semaphore will expire or has expired.
        # Returns `nil` if the semaphore wasn't locked by the current owner.
        #
        # The check is performed only on the Mutex object instance and should only be used as a hint.
        # For reliable lock status information use #refresh or #owned? instead.
        def expires_at
          Time.at(@lock_expire) if @lock_expire && owner_ident == @locked_owner_id
        end

        # Returns the timestamp at which the semaphore will expire or has expired.
        # Returns `nil` if the semaphore wasn't locked by the current owner.
        #
        # The check is performed only on the Mutex object instance and should only be used as a hint.
        # For reliable lock status information use #refresh or #owned? instead.
        def expiration_timestamp
          @lock_expire if @lock_expire && owner_ident == @locked_owner_id
        end

        # Attempts to obtain the lock and returns immediately.
        # Returns `true` if the lock was granted.
        # Use Mutex#expire_timeout= to set the lock expiration time in seconds.
        # Otherwise the global Mutex.default_expire is used.
        #
        # This method captures expired semaphores only in the "script" implementation
        # and therefore it should NEVER be used under normal circumstances.
        # Use Mutex#lock with block_timeout = 0 to obtain an expired lock without blocking.
        def try_lock
          lock_expire = (Time.now + expire_timeout).to_f
          lock_full_ident = owner_ident
          !!if 1 == eval_safe(@eval_try_lock, @ns_names, [lock_full_ident, (lock_expire*1000.0).to_i])
            @lock_expire = lock_expire
            @locked_owner_id = lock_full_ident
          end
        end

        # Refreshes the lock expiration timeout.
        # Returns `true` if the refresh was successful.
        # Returns `false` if the semaphore wasn't locked, or if it was locked but has expired
        # and now has a new owner.
        def refresh(expire_timeout=nil)
          if @lock_expire && owner_ident == (lock_full_ident = @locked_owner_id)
            lock_expire = (Time.now + (expire_timeout.to_f.nonzero? || self.expire_timeout)).to_f
            case eval_safe(@eval_refresh, @ns_names, [lock_full_ident, (lock_expire*1000.0).to_i])
            when 1
              @lock_expire = lock_expire
              return true
            end
          end
          return false
        end

        # Releases the lock. Returns self on success.
        # Returns `false` if the semaphore wasn't locked, or if it was locked but has expired
        # and now has a new owner.
        # When unlocking a semaphore with multiple names this method returns self only when all
        # of the names have been unlocked successfully.
        def unlock!
          if (lock_expire = @lock_expire) && owner_ident == (lock_full_ident = @locked_owner_id)
            @locked_owner_id = @lock_expire = nil
            removed = eval_safe(@eval_unlock, @ns_names, [lock_full_ident, SIGNAL_QUEUE_CHANNEL, @marsh_names])
          end
          return removed == @ns_names.length && self
        end

        # Attempts to grab the lock and waits if it isn't available.
        # Raises MutexError if the mutex was already locked by the current owner.
        # Returns `true` if the lock was successfully obtained.
        # Returns `false` if the lock wasn't available within `block_timeout` seconds.
        #
        # If `block_timeout` is `nil` or omitted, this method uses Mutex#block_timeout.
        # If Mutex#block_timeout is also `nil`, this method returns only after the lock
        # has been granted.
        #
        # Use Mutex#expire_timeout= to set the lock expiration timeout.
        # Otherwise the global Mutex.default_expire is used.
        def lock(block_timeout = nil)
          block_timeout ||= self.block_timeout
          names = @ns_names
          timer = fiber = nil
          try_again = false
          sig_proc = proc do
            try_again = true
            ::EM.next_tick { fiber.resume if fiber } if fiber
          end
          begin
            Mutex.start_watcher unless watching?
            queues = names.map {|n| signal_queue[n] << sig_proc }
            ident_match = owner_ident
            loop do
              start_time = Time.now.to_f
              case timeout = eval_safe(@eval_lock, @ns_names, [ident_match,
                  ((lock_expire = (Time.now + expire_timeout).to_f)*1000.0).to_i])
              when 'OK'
                @locked_owner_id = ident_match
                @lock_expire = lock_expire
                break
              when 'DD'
                raise MutexError, "deadlock; recursive locking #{ident_match}"
              else
                expire_time = start_time + (timeout/=1000.0)
                timeout = block_timeout if block_timeout && block_timeout < timeout
                if !try_again && timeout > 0
                  timer = ::EM::Timer.new(timeout) do
                    timer = nil
                    ::EM.next_tick { fiber.resume if fiber } if fiber
                  end
                  fiber = Fiber.current
                  Fiber.yield
                  fiber = nil
                end
                finish_time = Time.now.to_f
                if try_again || finish_time > expire_time
                  block_timeout -= finish_time - start_time if block_timeout
                  try_again = false
                else
                  return false
                end
              end
            end
            true
          ensure
            timer.cancel if timer
            timer = nil
            queues.each {|q| q.delete sig_proc }
            names.each {|n| signal_queue.delete(n) if signal_queue[n].empty? }
          end
        end
```
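The `case` in `lock` above consumes a small protocol spoken by the Lua scripts that follow: `'OK'` means the lock was taken, `'DD'` means the caller already holds it (recursive locking would deadlock), and any other reply is the remaining TTL in milliseconds, used as the wake-up hint. The single-key script can be exercised by hand with the plain `redis` gem (`'demo'` and the owner strings are illustrative):

```ruby
require 'redis'
require 'redis-em-mutex' # for the script constants defined below

redis  = Redis.new
script = Redis::EM::Mutex::ScriptHandlerMixin::Scripts::LOCK_SINGLE
exp_ms = ((Time.now.to_f + 5) * 1000).to_i # absolute deadline in ms

p redis.eval(script, keys: ['demo'], argv: ['owner-a', exp_ms]) # => "OK"
p redis.eval(script, keys: ['demo'], argv: ['owner-a', exp_ms]) # => "DD"
p redis.eval(script, keys: ['demo'], argv: ['owner-b', exp_ms]) # => ~4990 (ms left)
```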
script_handler.rb (continued):

```ruby
        module Scripts
          # * try_lock multi: *keys, lock_id, msexpire_at
          # * > 1
          # * > 0
          TRY_LOCK_MULTI = <<-EOL
            local size=#KEYS
            local lock=ARGV[1]
            local exp=tonumber(ARGV[2])
            local args={}
            for i=1,size do
              args[#args+1]=KEYS[i]
              args[#args+1]=lock
            end
            if 1==redis.call('msetnx',unpack(args)) then
              for i=1,size do
                redis.call('pexpireat',KEYS[i],exp)
              end
              return 1
            end
            return 0
          EOL

          # * lock multi: *keys, lock_id, msexpire_at
          # * > OK
          # * > DD (deadlock)
          # * > milliseconds ttl wait
          LOCK_MULTI = <<-EOL
            local size=#KEYS
            local lock=ARGV[1]
            local exp=tonumber(ARGV[2])
            local args={}
            for i=1,size do
              args[#args+1]=KEYS[i]
              args[#args+1]=lock
            end
            if 1==redis.call('msetnx',unpack(args)) then
              for i=1,size do
                redis.call('pexpireat',KEYS[i],exp)
              end
              return 'OK'
            end
            local res=redis.call('mget',unpack(KEYS))
            for i=1,size do
              if res[i]==lock then
                return 'DD'
              end
            end
            exp=nil
            for i=1,size do
              res=redis.call('pttl',KEYS[i])
              if not exp or res<exp then
                exp=res
              end
            end
            return exp
          EOL

          # * unlock multi: *keys, lock_id, pub_channel, pub_message
          # * > number of keys unlocked
          UNLOCK_MULTI = <<-EOL
            local size=#KEYS
            local lock=ARGV[1]
            local args={}
            local res=redis.call('mget',unpack(KEYS))
            for i=1,size do
              if res[i]==lock then
                args[#args+1]=KEYS[i]
              end
            end
            if #args>0 then
              redis.call('del',unpack(args))
              redis.call('publish',ARGV[2],ARGV[3])
            end
            return #args
          EOL

          # * refresh multi: *keys, lock_id, msexpire_at
          # * > 1
          # * > 0
          REFRESH_MULTI = <<-EOL
            local size=#KEYS
            local lock=ARGV[1]
            local exp=tonumber(ARGV[2])
            local args={}
            local res=redis.call('mget',unpack(KEYS))
            for i=1,size do
              if res[i]==lock then
                args[#args+1]=KEYS[i]
              end
            end
            if #args==size then
              for i=1,size do
                if 0==redis.call('pexpireat',args[i],exp) then
                  redis.call('del',unpack(args))
                  return 0
                end
              end
              return 1
            elseif #args>0 then
              redis.call('del',unpack(args))
            end
            return 0
          EOL

          # * locked? multi: *keys
          # * > 1
          # * > 0
          IS_LOCKED_MULTI = <<-EOL
            for i=1,#KEYS do
              if 1==redis.call('exists',KEYS[i]) then
                return 1
              end
            end
            return 0
          EOL

          # * try_lock single: key, lock_id, msexpire_at
          # * > 1
          # * > 0
          TRY_LOCK_SINGLE = <<-EOL
            local key=KEYS[1]
            local lock=ARGV[1]
            if 1==redis.call('setnx',key,lock) then
              return redis.call('pexpireat',key,tonumber(ARGV[2]))
            end
            return 0
          EOL

          # * lock single: key, lock_id, msexpire_at
          # * > OK
          # * > DD (deadlock)
          # * > milliseconds ttl wait
          LOCK_SINGLE = <<-EOL
            local key=KEYS[1]
            local lock=ARGV[1]
            if 1==redis.call('setnx',key,lock) then
              redis.call('pexpireat',key,tonumber(ARGV[2]))
              return 'OK'
            end
            if lock==redis.call('get',key) then
              return 'DD'
            end
            return redis.call('pttl',key)
          EOL

          # * unlock single: key, lock_id, pub_channel, pub_message
          # * > 1
          # * > 0
          UNLOCK_SINGLE = <<-EOL
            local key=KEYS[1]
            local res=redis.call('get',key)
            if res==ARGV[1] then
              if 1==redis.call('del',key) then
                redis.call('publish',ARGV[2],ARGV[3])
                return 1
              end
            end
            return 0
          EOL

          # * refresh single: key, lock_id, msexpire_at
          # * > 1
          # * > 0
          REFRESH_SINGLE = <<-EOL
            local key=KEYS[1]
            local res=redis.call('get',key)
            if res==ARGV[1] then
              return redis.call('pexpireat',key,tonumber(ARGV[2]))
            end
            return 0
          EOL
        end
        Scripts.constants.each do |name|
          Scripts.const_get(name).tap do |script|
            script.replace script.split("\n").map(&:strip).join(" ").freeze
            Scripts.const_set "#{name}_SHA1", Digest::SHA1.hexdigest(script)
          end
        end
        module Scripts
          MULTI = {
            TRY_LOCK_MULTI_SHA1  => TRY_LOCK_MULTI,
            LOCK_MULTI_SHA1      => LOCK_MULTI,
            UNLOCK_MULTI_SHA1    => UNLOCK_MULTI,
            REFRESH_MULTI_SHA1   => REFRESH_MULTI,
            IS_LOCKED_MULTI_SHA1 => IS_LOCKED_MULTI
          }
          SINGLE = {
            TRY_LOCK_SINGLE_SHA1 => TRY_LOCK_SINGLE,
            LOCK_SINGLE_SHA1     => LOCK_SINGLE,
            UNLOCK_SINGLE_SHA1   => UNLOCK_SINGLE,
            REFRESH_SINGLE_SHA1  => REFRESH_SINGLE,
          }
        end

      end
    end
  end
end
```
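The closing `Scripts.constants.each` block works because a script's EVALSHA handle is nothing more than the SHA-1 of its exact source text: after the whitespace-flattening `replace`, the client can precompute every digest locally and never needs the value returned by SCRIPT LOAD. A quick check with the plain `redis` gem:

```ruby
require 'digest'
require 'redis'

script = 'return 1'
# SCRIPT LOAD returns the same hex digest we can compute locally,
# which is what makes client-side SHA-1 precomputation safe.
puts Redis.new.script(:load, script) == Digest::SHA1.hexdigest(script) # => true
```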