redis-em-mutex 0.3.0 → 0.3.1
- data/BENCHMARK.md +73 -57
- data/HISTORY.md +4 -0
- data/LICENCE +19 -0
- data/README.md +78 -33
- data/Rakefile +10 -2
- data/lib/redis/em-mutex/script_handler.rb +17 -19
- data/lib/redis/em-mutex/version.rb +1 -1
- data/test/bench.rb +158 -0
- data/test/stress.rb +112 -0
- metadata +5 -3
- data/benchmark_mutex.rb +0 -99
data/BENCHMARK.md CHANGED

@@ -1,20 +1,20 @@
 BENCHMARK
 =========
 
-To measure the performance of {Redis::EM::Mutex} I've wrote a simple script called `
+To measure the performance of {Redis::EM::Mutex} I've wrote a simple script called `test/bench.rb`.
 The script is included in respository.
 
 Below are the results of running tests against the following versions:
 
 - redis-em-mutex v0.1.2
 - redis-em-mutex v0.2.3
-- redis-em-mutex v0.3.
-- redis-em-mutex v0.3.
+- redis-em-mutex v0.3.1 - "pure" handler
+- redis-em-mutex v0.3.1 - "script" handler
 
 To run theese tests type:
 
 ```sh
-cp
+cp test/bench.rb /tmp/benchmark_mutex.rb
 
 git reset --hard v0.1.2
 ruby /tmp/benchmark_mutex.rb
@@ -22,9 +22,9 @@ ruby /tmp/benchmark_mutex.rb
 git reset --hard v0.2.3
 ruby /tmp/benchmark_mutex.rb
 
-git reset --hard v0.3.
-REDIS_EM_MUTEX_HANDLER=pure ruby
-REDIS_EM_MUTEX_HANDLER=script ruby
+git reset --hard v0.3.1
+REDIS_EM_MUTEX_HANDLER=pure ruby test/bench.rb
+REDIS_EM_MUTEX_HANDLER=script ruby test/bench.rb
 ```
 
 Here are the results of running those tests on Quad Core Xeon machine
@@ -41,38 +41,38 @@ Lock/unlock 1000 times using 10 concurrent fibers.
 Version: 0.1.2, handler: N/A
 lock/unlock 1000 times with concurrency: 10
                   user     system      total        real
-keys: 1
-keys: 2
-keys: 3
-keys: 5
-keys:10
+keys: 1/ 1    0.500000   0.240000   0.740000 (  0.987120)
+keys: 2/ 3    0.700000   0.260000   0.960000 (  1.179436)
+keys: 3/ 5    0.890000   0.340000   1.230000 (  1.336847)
+keys: 5/ 9    1.010000   0.540000   1.550000 (  1.610321)
+keys:10/19    1.480000   0.520000   2.000000 (  2.120616)
 
 Version: 0.2.3, handler: N/A
 lock/unlock 1000 times with concurrency: 10
                   user     system      total        real
-keys: 1
-keys: 2
-keys: 3
-keys: 5
-keys:10
+keys: 1/ 1    0.550000   0.270000   0.820000 (  0.861067)
+keys: 2/ 3    0.960000   0.460000   1.420000 (  1.724032)
+keys: 3/ 5    1.590000   0.510000   2.100000 (  2.223966)
+keys: 5/ 9    2.660000   0.940000   3.600000 (  3.784084)
+keys:10/19    5.430000   1.850000   7.280000 (  8.406377)
 
-Version: 0.3.
+Version: 0.3.1, handler: Redis::EM::Mutex::PureHandlerMixin
 lock/unlock 1000 times with concurrency: 10
                   user     system      total        real
-keys: 1
-keys: 2
-keys: 3
-keys: 5
-keys:10
+keys: 1/ 1    0.620000   0.210000   0.830000 (  0.869947)
+keys: 2/ 3    0.680000   0.310000   0.990000 (  1.044803)
+keys: 3/ 5    0.890000   0.340000   1.230000 (  1.267044)
+keys: 5/ 9    1.190000   0.370000   1.560000 (  1.576557)
+keys:10/19    1.580000   0.490000   2.070000 (  2.123451)
 
-Version: 0.3.
+Version: 0.3.1, handler: Redis::EM::Mutex::ScriptHandlerMixin
 lock/unlock 1000 times with concurrency: 10
                   user     system      total        real
-keys: 1
-keys: 2
-keys: 3
-keys: 5
-keys:10
+keys: 1/ 1    0.270000   0.060000   0.330000 (  0.530289)
+keys: 2/ 3    0.360000   0.070000   0.430000 (  0.664696)
+keys: 3/ 5    0.430000   0.070000   0.500000 (  0.803888)
+keys: 5/ 9    0.450000   0.160000   0.610000 (  1.040182)
+keys:10/19    0.710000   0.130000   0.840000 (  1.767735)
 ```
 
 Test 2
@@ -98,37 +98,53 @@ during that period.
 ```
 Version: 0.1.2, handler: N/A
 lock/write/incr/read/del/unlock in 5 seconds + cooldown period:
-
-keys: 1
-keys: 2
-keys: 3
-keys: 5
-keys:10
+                  user     system      total        real
+keys: 1/ 1  3134  2.120000   1.260000   3.380000 (  5.123046)
+keys: 2/ 3  2678  2.350000   1.050000   3.400000 (  5.134010)
+keys: 3/ 5  2566  2.410000   1.300000   3.710000 (  5.157308)
+keys: 5/ 9  2256  3.260000   1.200000   4.460000 (  5.209614)
+keys:10/19  1693  3.250000   1.050000   4.300000 (  5.230359)
 
 Version: 0.2.3, handler: N/A
 lock/write/incr/read/del/unlock in 5 seconds + cooldown period:
-
-keys: 1
-keys: 2
-keys: 3
-keys: 5
-keys:10
-
-Version: 0.3.
+                  user     system      total        real
+keys: 1/ 1  3199  1.870000   1.120000   2.990000 (  5.114284)
+keys: 2/ 3  2271  2.680000   1.130000   3.810000 (  5.221153)
+keys: 3/ 5  1627  2.980000   1.120000   4.100000 (  5.289593)
+keys: 5/ 9  1094  2.980000   1.260000   4.240000 (  5.401877)
+keys:10/19   630  3.420000   1.140000   4.560000 (  5.862919)
+
+Version: 0.3.1, handler: Redis::EM::Mutex::PureHandlerMixin
 lock/write/incr/read/del/unlock in 5 seconds + cooldown period:
-
-keys: 1
-keys: 2
-keys: 3
-keys: 5
-keys:10
-
-Version: 0.3.
+                  user     system      total        real
+keys: 1/ 1  3086  2.450000   1.190000   3.640000 (  5.128574)
+keys: 2/ 3  2556  2.540000   1.100000   3.640000 (  5.148499)
+keys: 3/ 5  2423  2.490000   1.150000   3.640000 (  5.175866)
+keys: 5/ 9  1997  2.980000   1.110000   4.090000 (  5.218399)
+keys:10/19  1715  3.180000   1.130000   4.310000 (  5.232533)
+
+Version: 0.3.1, handler: Redis::EM::Mutex::ScriptHandlerMixin
 lock/write/incr/read/del/unlock in 5 seconds + cooldown period:
-
-keys: 1
-keys: 2
-keys: 3
-keys: 5
-keys:10
+                  user     system      total        real
+keys: 1/ 1  4679  2.380000   0.850000   3.230000 (  5.073898)
+keys: 2/ 3  4410  2.250000   0.930000   3.180000 (  5.101776)
+keys: 3/ 5  3428  1.950000   0.730000   2.680000 (  5.111283)
+keys: 5/ 9  3279  2.050000   0.660000   2.710000 (  5.203372)
+keys:10/19  2285  1.690000   0.400000   2.090000 (  5.163491)
+```
+
+Stress test
+===========
+
+You may also want to try this tool: `test/stress.rb`.
+
+Written originally by [mlanett](https://github.com/mlanett/redis-lock/blob/master/test/stress.rb).
+
+```
+Usage: test/stress.rb --forks F --tries T --sleep S
+    -f, --forks FORKS                How many processes to fork
+    -t, --tries TRIES                How many attempts each process should try
+    -s, --sleep SLEEP                How long processes should sleep/work
+    -k, --keys KEYS                  How many keys a process should run through
+    -h, --help                       Display this usage summary
 ```
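For a quick smoke test of the same operation without checking out the old tags, the loop below is a stripped-down sketch of what `test/bench.rb` does in Test 1: a handful of fibers repeatedly locking and unlocking one multi-key mutex under EM-Synchrony. The key names, namespace and counts are made up for illustration; the handler is still picked through `REDIS_EM_MUTEX_HANDLER` exactly as in the commands above.

```ruby
require 'benchmark'
require 'em-synchrony'
require 'em-synchrony/fiber_iterator'
require 'redis-em-mutex'

EM.synchrony do
  # pool size matches the number of concurrent fibers, as in test/bench.rb
  Redis::EM::Mutex.setup(driver: :synchrony, ns: '__Benchmark') {|opts| opts.size = 10 }

  count = 0
  elapsed = Benchmark.realtime do
    # 10 fibers share 1000 lock/unlock rounds on the same pair of keys
    EM::Synchrony::FiberIterator.new((1..1000).to_a, 10).each do
      Redis::EM::Mutex.synchronize('bench.a', 'bench.b') { count += 1 }
    end
  end
  puts "lock/unlock #{count} times in %.3fs" % elapsed

  Redis::EM::Mutex.stop_watcher(true)
  EM.stop
end
```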
data/HISTORY.md CHANGED
data/LICENCE ADDED

@@ -0,0 +1,19 @@
+Copyright (c) 2013 by Rafal Michalski (rafal@yeondir.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
data/README.md CHANGED

@@ -8,31 +8,35 @@ Author: Rafał Michalski (mailto:rafal@yeondir.com)
 DESCRIPTION
 -----------
 
-__redis-em-mutex__ is the cross server
+__redis-em-mutex__ is the cross server/process/fiber|owner EventMachine + Redis based semaphore.
 
 FEATURES
 --------
 
-*
-*
-
-
+* EventMachine reactor based
+* carefully designed, well thought out locking pattern
+  (NOT the flawed SETNX/GET/GETSET one from redis documentation page)
+* no CPU-intensive sleep/polling while waiting for lock to become available;
+  fibers waiting for the lock are signalled via Redis channel as soon as the lock
+  is released (~< 1 ms)
 * alternative fast "script" handler (server-side LUA script based - redis-server 2.6.x)
 * multi-locks (all-or-nothing) locking (to prevent possible deadlocks when
   multiple semaphores are required to be locked at once)
 * fiber-safe
 * deadlock detection (only trivial cases: locking twice the same resource from the same owner)
-* mandatory lock expiration (with refreshing)
+* mandatory lock lifetime expiration (with refreshing)
 * macro-style definitions (Mutex::Macro mixin)
 * compatible with Synchrony::Thread::ConditionVariable
 * extendable (beyond fibers) mutex ownership
+* redis HA achievable with [redis-sentinel](http://redis.io/topics/sentinel) and [redis-sentinel](https://github.com/flyerhzm/redis-sentinel) gem.
 
 BUGS/LIMITATIONS
 ----------------
 
 * only for EventMachine
-* NOT thread-safe
-* locking order between concurrent processes is undetermined (no FIFO)
+* NOT thread-safe (not meant to be)
+* locking order between concurrent processes is undetermined (no FIFO between processes)
+  however during {file:BENCHMARK.md BENCHMARKING} no starvation effect was observed.
 * it's not nifty, rather somewhat complicated
 
 REQUIREMENTS
@@ -42,6 +46,8 @@ REQUIREMENTS
 * http://github.com/redis/redis-rb ~> 3.0.2
 * http://rubyeventmachine.com ~> 1.0.0
 * (optional) http://github.com/igrigorik/em-synchrony
+  But due to the redis/synchrony dependency em-synchrony will always be bundled
+  and required.
 
 INSTALL
 -------
@@ -53,7 +59,7 @@ $ [sudo] gem install redis-em-mutex
 #### Gemfile
 
 ```ruby
-gem "redis-em-mutex", "~> 0.3.
+gem "redis-em-mutex", "~> 0.3.1"
 ```
 
 #### Github
@@ -70,7 +76,7 @@ UPGRADING
 To upgrade redis-em-mutex on production from 0.2.x to 0.3.x you must make sure the correct handler has been
 selected. See more on HANDLERS below.
 
-The "pure" and "script" handlers are
+The "pure" and "script" handlers are incompatible. Two different handlers must not utilize the same semaphore-key space.
 
 Because only the "pure" handler is compatible with redis-em-mutex <= 0.2.x, when upgrading live production make sure to add
 `handler: :pure` option to `Redis::EM::Mutex.setup` or set the environment variable on production app servers:
@@ -79,6 +85,7 @@ Because only the "pure" handler is compatible with redis-em-mutex <= 0.2.x, when
 REDIS_EM_MUTEX_HANDLER=pure
 export REDIS_EM_MUTEX_HANDLER
 ```
+
 Upgrading from "pure" to "script" handler requires that all "pure" handler locks __MUST BE DELETED__ from redis-server beforehand.
 Neglecting that will result in possible deadlocks. The "script" handler assumes that the lock expiration process is handled
 by redis-server's PEXPIREAT feature. The "pure" handler does not set timeouts on keys. It handles expiration differently.
@@ -130,14 +137,14 @@ USAGE
 
 ### Handlers
 
-There are 2 different mutex implementations since version 0.3.
+There are 2 different mutex implementations since version 0.3.
 
 * The "pure" classic handler utilizes redis optimistic transaction commands (watch/multi).
   This handler works with redis-server 2.4.x and later.
 * The new "script" handler takes advantage of fast atomic server side operations written in LUA.
   Therefore the "script" handler is compatible only with redis-server 2.6.x and later.
 
-__IMPORTANT__: The "pure" and "script" implementations are
+__IMPORTANT__: The "pure" and "script" implementations are incompatible. The values that each handler stores in semaphore keys have different meaning to them.
 You can not operate on the same set of keys using both handlers from e.g. different applications or application versions.
 See UPGRADING for more info on this.
 
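As the UPGRADING and Handlers sections above state, the handler can also be forced programmatically with the `handler:` option to `Redis::EM::Mutex.setup`. A minimal sketch, assuming `:script` is accepted the same way the documented `:pure` is (mirroring the values of `REDIS_EM_MUTEX_HANDLER`); the namespace and key name are purely illustrative:

```ruby
require 'redis-em-mutex'

EM.synchrony do
  # Pin the implementation instead of relying on auto-detection;
  # use :pure when the key space is shared with redis-em-mutex <= 0.2.x deployments.
  Redis::EM::Mutex.setup(handler: :script, ns: 'MyApp')
  puts Redis::EM::Mutex.handler   # e.g. Redis::EM::Mutex::ScriptHandlerMixin

  Redis::EM::Mutex.synchronize('some.shared.resource') do
    # critical section
  end

  Redis::EM::Mutex.stop_watcher(true)
  EM.stop
end
```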
@@ -183,18 +190,17 @@ To detect feature of the current handler:
 ### Namespaces
 
 ```ruby
-Redis::EM::Mutex.setup(ns: '
+Redis::EM::Mutex.setup(ns: 'Tudor')
 
 # or multiple namespaces:
 
-ns = Redis::EM::Mutex::NS.new('
+ns = Redis::EM::Mutex::NS.new('Tudor')
 
 EM.synchrony do
-  ns.synchronize('
-    # .... do something with
+  ns.synchronize('Boscogne') do
+    # .... do something special with Tudor:Boscogne
   end
 
-  # ...
   EM.stop
 end
 ```
@@ -215,7 +221,7 @@ The classic deadlock example scenario with multiple resources:
 ```ruby
 EM.synchrony do
   Redis::EM::Mutex.synchronize('foo', 'bar', 'baz') do
-    # .... do something with foo, bar and baz
+    # .... do something special with foo, bar and baz
   end
 
   # ...
@@ -251,7 +257,7 @@ The classic deadlock example scenario with multiple resources:
 
 ### Macro-style definition
 
-
+Idea of macro-style definition was borrowed from http://github.com/kenn/redis-mutex.
 Redis::EM::Mutex::Macro is a mixin which protects selected instance methods of a class with a mutex.
 The locking scope will be Mutex global namespace + class name + method name.
 
@@ -396,7 +402,7 @@ their locked status in parent process will be preserved.
 
 
 ```ruby
-mutex = Redis::EM::Mutex.new('
+mutex = Redis::EM::Mutex.new('pirkaff', 'roshinu', expire: 60)
 
 EM.synchrony do
   mutex.lock
@@ -432,25 +438,64 @@ their locked status in parent process will be preserved.
 
 #### Redis factory
 
-Want to use some non-standard redis options or customized
-
+Want to use some non-standard redis options or customized redis client?
+`redis_factory` option to the rescue.
+
+High Availability example setup with redis-sentinel:
 
 ```ruby
+gem 'redis-sentinel', '~> 1.1.4'
+require 'redis-em-mutex'
 require 'redis-sentinel'
+Redis::Client.class_eval do
+  define_method(:sleep) {|n| EM::Synchrony.sleep(n) }
+end
 
-
-
-
-
-
-
-
-
+REDIS_OPTS = {password: 'fight or die', db: 1}
+SENTINEL_OPTS = {
+  master_name: "femto",
+  sentinels: [{host: "wyald", port: 26379}, {host: "zodd", port: 26379}],
+  failover_reconnect_timeout: 30
+}
+
+Redis::EM::Mutex.setup(REDIS_OPTS) do |config|
+  config.size = 5                 # redis pool size
+  config.reconnect_max = :forever # reconnect watcher forever
+  config.redis_factory = proc do |opts|
+    Redis.new opts.merge SENTINEL_OPTS
+  end
 end
 ```
 
-
-
+ADVOCACY
+--------
 
-
+Interesting (not eventmachine oriented) ruby-redis-mutex implementations:
+
+* [mlanett/redis-lock](https://github.com/mlanett/redis-lock)
+  Robust, well thought out and nice to use as it simply adds lock/unlock
+  commands to Redis.
+  Similar concept of locking/unlocking pattern (compared to the "pure" handler)
+  though it uses two redis keys for keeping owner and lifetime expiration separately.
+  "pure" handler stores both in one key, so less redis operations are involved.
+  Blocked lock failure is handled by sleep/polling which involves more cpu load
+  on ruby and redis. You may actually see it by running `time test/stress.rb`
+  tool on both implementations and compare user/sys load.
+
+* [dv/redis-semaphore](https://github.com/dv/redis-semaphore)
+  Very promising experiment. Utilizes BLPOP to provide real FIFO queue of
+  lock acquiring processes. In this way it doesn't need polling nor other means
+  of signaling that the lock is available to those in waiting queue.
+  This one could be used with EM straight out without any patching.
+
+  IMHO the solution has two drawbacks:
+
+  - no lifetime expiration or other means of protection from failure of a lock owner process;
+    still they are trying hard to implement it [now](https://github.com/dv/redis-semaphore/pull/5)
+    and I hope they will succeed.
+
+  - the redis keys are used in an inversed manner: the lack of a key means that the lock is gone.
+    On the contrary, when the lock is being released, the key is created and kept.
+    This is not a problem when you have some static set of keys. However it might be a problem
+    when you need to use lock keys based on random resources and you would need to implement
+    some garbage collector to prevent redis from eating to much memory.
data/Rakefile CHANGED

@@ -4,23 +4,25 @@ task :default => [:test]
 
 $gem_name = "redis-em-mutex"
 
-desc "Run spec tests"
 namespace :test do
 
-  task :all => [:
+  task :all => [:pure, :script]
 
+  desc "Run specs against auto-detected handler"
   task :auto do
     Dir["spec/#{$gem_name}-*.rb"].each do |spec|
      sh({'REDIS_EM_MUTEX_HANDLER' => nil}, "rspec #{spec}")
     end
   end
 
+  desc "Run specs against pure handler"
   task :pure do
     Dir["spec/#{$gem_name}-*.rb"].each do |spec|
      sh({'REDIS_EM_MUTEX_HANDLER' => 'pure'}, "rspec #{spec}")
     end
   end
 
+  desc "Run specs against script handler"
   task :script do
     Dir["spec/#{$gem_name}-*.rb"].each do |spec|
      sh({'REDIS_EM_MUTEX_HANDLER' => 'script'}, "rspec #{spec}")
@@ -28,8 +30,14 @@ namespace :test do
   end
 end
 
+desc "Run all specs"
 task :test => [:'test:all']
 
+desc "Run stress test WARNING: flushes database on redis-server"
+task :stress do
+  sh "test/stress.rb"
+end
+
 desc "Build the gem"
 task :gem do
   sh "gem build #$gem_name.gemspec"
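With the task descriptions added above, the new targets show up in `rake -T`. The invocations below follow directly from the Rakefile, assuming they are run from the project root; note that the `:stress` task appears to be defined at the top level, outside the `test` namespace, and its description warns that it flushes the redis-server database:

```sh
rake -T           # list tasks together with the new descriptions
rake test:pure    # run specs against the "pure" handler
rake test:script  # run specs against the "script" handler
rake test         # both of the above, via test:all
rake stress       # WARNING: flushes the database on the local redis-server
```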
data/lib/redis/em-mutex/script_handler.rb CHANGED

@@ -123,18 +123,16 @@ class Redis
 
       # Refreshes lock expiration timeout.
       # Returns `true` if refresh was successfull.
-      # Returns `false` if the semaphore wasn't locked or when it was locked but it has expired
-      # and now it's got a new owner.
+      # Returns `false` if the semaphore wasn't locked or when it was locked but it has expired.
       def refresh(expire_timeout=nil)
         if @lock_expire && owner_ident == (lock_full_ident = @locked_owner_id)
           lock_expire = (Time.now + (expire_timeout.to_f.nonzero? || self.expire_timeout)).to_f
-
-          when 1
+          !!if 1 == eval_safe(@eval_refresh, @ns_names, [lock_full_ident, (lock_expire*1000.0).to_i])
             @lock_expire = lock_expire
-            return true
           end
+        else
+          false
         end
-        return false
       end
 
       # Releases the lock. Returns self on success.
@@ -176,7 +174,7 @@ class Redis
       ident_match = owner_ident
       loop do
         start_time = Time.now.to_f
-        case timeout = eval_safe(@eval_lock,
+        case timeout = eval_safe(@eval_lock, names, [ident_match,
           ((lock_expire = (Time.now + expire_timeout).to_f)*1000.0).to_i])
         when 'OK'
           @locked_owner_id = ident_match
@@ -256,19 +254,18 @@ class Redis
           return 'OK'
         end
         local res=redis.call('mget',unpack(KEYS))
-        for i=1,size do
-          if res[i]==lock then
-            return 'DD'
-          end
-        end
         exp=nil
-        for i
-
-
-
+        for i, v in next, res do
+          if v==lock then
+            return 'DD'
+          elseif v then
+            v=redis.call('pttl',KEYS[i])
+            if not exp or v<exp then
+              exp=v
+            end
           end
         end
-        return exp
+        return exp or -1
       EOL
 
       # * unlock multiple *keys, lock_id, pub_channel, pub_message
@@ -284,8 +281,9 @@ class Redis
         end
         end
         if #args>0 then
-          redis.call('del',unpack(args))
-
+          if redis.call('del',unpack(args)) > 0 then
+            redis.call('publish',ARGV[2],ARGV[3])
+          end
         end
         return #args
       EOL
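On the caller's side the reworked `refresh` behaves the same way as before: `true` extends the lock's lifetime, `false` means the lock is no longer held. A minimal sketch of the documented lock/refresh pattern; the namespace, key name and timings below are illustrative and not part of the diff:

```ruby
require 'redis-em-mutex'

EM.synchrony do
  Redis::EM::Mutex.setup(ns: 'Jobs')

  mutex = Redis::EM::Mutex.new('long.import', expire: 60)
  mutex.synchronize do
    # ... first chunk of a long-running job ...
    if mutex.refresh
      # still the owner: the expiration was pushed forward by another `expire` period
    else
      # the lock has already expired; stop touching the shared resource
    end
  end

  Redis::EM::Mutex.stop_watcher(true)
  EM.stop
end
```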
data/test/bench.rb ADDED

@@ -0,0 +1,158 @@
+#!/usr/bin/env ruby
+require 'bundler/setup'
+require 'securerandom'
+require 'benchmark'
+require 'minitest/unit'
+
+include Benchmark
+include MiniTest::Assertions
+
+REDIS_OPTIONS = {}
+
+TEST_KEY = '__TEST__'
+
+# lock and unlock 1000 times
+def test1(iterator, synchronize, counter, concurrency = 10)
+  iterator.call((1..1000).to_a, concurrency) do
+    synchronize.call { counter.call }
+  end
+  assert_equal(counter.call(0), 1000)
+end
+
+# lock, set, incr, read, del, unlock, sleep as many times as possible in 5 seconds
+# the cooldown period will be included in total time
+def test2(iterator, synchronize, sleeper, counter, redis)
+  finish_at = Time.now + 5.0
+  iterator.call((1..100).map {|i| i/100000.0+0.001}.shuffle, 100) do |i|
+    while Time.now - finish_at < 0
+      sleeper.call(i)
+      synchronize.call do
+        # print "."
+        value = rand(1000000000000000000)
+        redis.set(TEST_KEY, value)
+        redis.incr(TEST_KEY)
+        assert_equal redis.get(TEST_KEY).to_i, value+1
+        redis.del(TEST_KEY)
+        counter.call
+      end
+    end
+  end
+end
+
+
+def test_all(iterator, synchronize, sleeper, counter, concurrency = 10, keysets = [1,2,3,5,10])
+  puts "lock/unlock 1000 times with concurrency: #{concurrency}"
+  Benchmark.benchmark(CAPTION, 15, FORMAT) do |x|
+    keysets.each do |n|
+      counter.call -counter.call(0)
+      x.report("keys:%2d/%2d " % [n, n*2-1]) { test1(iterator, synchronize[n], counter, concurrency) }
+      sleeper.call 1
+    end
+  end
+
+  puts
+  puts "lock/write/incr/read/del/unlock in 5 seconds + cooldown period:"
+  Benchmark.benchmark(CAPTION, 15, FORMAT) do |x|
+    redis = Redis.new REDIS_OPTIONS
+    keysets.each do |n|
+      counter.call -counter.call(0)
+      x.report("keys:%2d/%2d" % [n, n*2-1]) {
+        test2(iterator, synchronize[n], sleeper, counter, redis)
+        print "\b\b\b\b\b%5d" % counter.call(0)
+      }
+      sleeper.call 1
+    end
+  end
+end
+
+$:.unshift "lib"
+gem 'redis', '~>3.0.2'
+require 'em-synchrony'
+require 'em-synchrony/fiber_iterator'
+require 'redis-em-mutex'
+
+REDIS_OPTIONS.replace(driver: :synchrony)
+MUTEX_OPTIONS = {
+  expire: 10000,
+  ns: '__Benchmark',
+}
+
+RMutex = Redis::EM::Mutex
+EM.synchrony do
+  concurrency = 10
+  RMutex.setup(REDIS_OPTIONS.merge(MUTEX_OPTIONS)) {|opts| opts.size = concurrency}
+  if RMutex.respond_to? :handler
+    puts "Version: #{RMutex::VERSION}, handler: #{RMutex.handler}"
+  else
+    puts "Version: #{RMutex::VERSION}, handler: N/A"
+  end
+  counter = 0
+  test_all(
+    proc do |iter, concurrency, &blk|
+      EM::Synchrony::FiberIterator.new(iter, concurrency).each(&blk)
+    end,
+    proc do |n|
+      m = n*2-1
+      keys = m.times.map { SecureRandom.random_bytes + '.lck' }
+      proc do |&blk|
+        RMutex.synchronize(*keys.sample(n), &blk)
+      end
+    end,
+    EM::Synchrony.method(:sleep),
+    proc do |incr=1|
+      counter+=incr
+    end,
+    concurrency)
+  RMutex.stop_watcher(true)
+  EM.stop
+end
+
+# #gem 'mlanett-redis-lock', require: 'redis-lock'
+# $:.unshift "../redis-lock/lib"
+# require 'hiredis'
+# require 'redis'
+# require 'redis-lock'
+
+# REDIS_OPTIONS.replace(driver: :hiredis)
+
+# class ThreadIterator
+#   def initialize(iter, concurrency)
+#     @iter = iter
+#     @concurrency = concurrency
+#     @threads = []
+#     @mutex = ::Mutex.new
+#   end
+
+#   def each(&blk)
+#     @threads = @concurrency.times.map do
+#       Thread.new do
+#         while value = @mutex.synchronize { @iter.shift }
+#           blk.call value
+#         end
+#       end
+#     end
+#     @threads.each {|t| t.join}
+#   end
+# end
+
+# concurrency = 10
+# RMutex = Redis
+# counter = 0
+# test_all(
+#   proc do |iter, concurrency, &blk|
+#     ThreadIterator.new(iter, concurrency).each(&blk)
+#   end,
+#   proc do |keys|
+#     mutex = RMutex.new REDIS_OPTIONS
+#     opts = {sleep: 100, acquire: 21, life: 1}
+#     proc do |&blk|
+#       mutex.lock(keys[0], opts, &blk)
+#     end
+#   end,
+#   Kernel.method(:sleep),
+#   proc do |incr=1|
+#     print "\r#{counter}"
+#     counter+=incr
+#   end,
+#   concurrency,
+#   [1])
data/test/stress.rb ADDED

@@ -0,0 +1,112 @@
+#!/usr/bin/env ruby
+# Author: Mark Lanett - https://github.com/mlanett
+# Origin: https://github.com/mlanett/redis-lock/blob/master/test/stress.rb
+# Adapted for redis-em-mutex by: royaltm
+
+require "bundler/setup"     # set up gem paths
+require "redis"
+require "redis-em-mutex"    # load this gem
+require "optparse"
+require "ostruct"
+
+options = OpenStruct.new({
+  forks: 30,
+  tries: 10,
+  sleep: 2,
+  keys: 5
+})
+
+TEST_REDIS = { url: "redis://127.0.0.1:6379/1", driver: :synchrony }
+RMutex = ::Redis::EM::Mutex
+
+OptionParser.new do |opts|
+  opts.banner = "Usage: #{__FILE__} --forks F --tries T --sleep S"
+  opts.on( "-f", "--forks FORKS", "How many processes to fork" ) { |i| options.forks = i.to_i }
+  opts.on( "-t", "--tries TRIES", "How many attempts each process should try" ) { |i| options.tries = i.to_i }
+  opts.on( "-s", "--sleep SLEEP", "How long processes should sleep/work" ) { |i| options.sleep = i.to_i }
+  opts.on( "-k", "--keys KEYS", "How many keys a process should run through" ) { |i| options.keys = i.to_i }
+  opts.on( "-h", "--help", "Display this usage summary" ) { puts opts; exit }
+end.parse!
+
+class Runner
+
+  attr :options
+
+  def initialize( options )
+    @options = options
+  end
+
+  def redis
+    @redis ||= ::Redis.connect(TEST_REDIS)
+  end
+
+  def test( key, time )
+    RMutex.synchronize( key, block: time, expire: time*2 ) do
+      val1 = rand(65536)
+      redis.set( "#{key}:widget", val1 )
+      ::EM::Synchrony.sleep( time )
+      val2 = redis.get("#{key}:widget").to_i
+      expect( val1, val2 )
+    end
+    true
+  rescue => x
+    # STDERR.puts "Failed due to #{x.inspect}"
+    false
+  end
+
+  def run
+    keys  = Hash[ (0...options.keys).map { |i| [ i, "key:#{i}" ] } ] # i => key:i
+    fails = Hash[ (0...options.keys).map { |i| [ i, 0 ] } ]          # i => 0
+    stats = OpenStruct.new( ok: 0, fails: 0 )
+    while keys.size > 0 do
+      i = keys.keys.sample
+      if test( keys[i], (options.sleep) ) then
+        keys.delete(i)
+        stats.ok += 1
+      else
+        fails[i] += 1
+        stats.fails += 1
+        if fails[i] >= options.tries then
+          keys.delete(i)
+        end
+      end
+    end
+    puts "[#{Process.pid}] Complete; Ok: #{stats.ok}, Failures: #{stats.fails}"
+  end
+
+  def launch
+    EM.fork_reactor do
+      Fiber.new do
+        GC.copy_on_write_friendly = true if ( GC.copy_on_write_friendly? rescue false )
+        run
+        EM.stop
+      end.resume
+    end
+  end
+
+  def expect( val1, val2 )
+    if val1 != val2 then
+      STDERR.puts "[#{Process.pid}] Value mismatch"
+      Kernel.abort
+    end
+  end
+
+end
+
+# main
+
+puts "[#{Process.pid}] Starting with #{options.inspect}"
+
+RMutex.setup(TEST_REDIS)
+
+EM.synchrony do
+  redis = ::Redis.connect(TEST_REDIS)
+  redis.flushdb            # clean before run
+  redis.client.disconnect  # don't keep when forking
+
+  options.forks.times do
+    Runner.new( options ).launch
+  end
+  Process.waitall
+  EM.stop
end
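The defaults above are 30 forks, 10 tries per key, a 2-second hold and 5 keys, and the script flushes database 1 on the local redis-server before it starts. A lighter run, with values picked arbitrarily, would look like this:

```sh
ruby test/stress.rb --forks 10 --tries 5 --sleep 1 --keys 3
```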
metadata CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: redis-em-mutex
 version: !ruby/object:Gem::Version
-  version: 0.3.
+  version: 0.3.1
 prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2013-02-
+date: 2013-02-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis
@@ -118,9 +118,9 @@ extra_rdoc_files:
 files:
 - BENCHMARK.md
 - HISTORY.md
+- LICENCE
 - README.md
 - Rakefile
-- benchmark_mutex.rb
 - lib/redis-em-mutex.rb
 - lib/redis/em-connection-pool.rb
 - lib/redis/em-mutex.rb
@@ -136,6 +136,8 @@ files:
 - spec/redis-em-mutex-namespaces.rb
 - spec/redis-em-mutex-owners.rb
 - spec/redis-em-mutex-semaphores.rb
+- test/bench.rb
+- test/stress.rb
 homepage: http://github.com/royaltm/redis-em-mutex
 licenses: []
 post_install_message:
data/benchmark_mutex.rb DELETED

@@ -1,99 +0,0 @@
-$:.unshift "lib"
-gem 'redis', '~>3.0.2'
-require 'securerandom'
-require 'benchmark'
-require 'em-synchrony'
-require 'em-synchrony/fiber_iterator'
-require 'redis-em-mutex'
-
-RMutex = Redis::EM::Mutex
-include Benchmark
-
-MUTEX_OPTIONS = {
-  expire: 10000,
-  ns: '__Benchmark',
-}
-
-TEST_KEY = '__TEST__'
-
-def assert(condition)
-  raise "Assertion failed: #{__FILE__}:#{__LINE__}" unless condition
-end
-
-# lock and unlock 1000 times
-def test1(keys, concurrency = 10)
-  count = 0
-  mutex = RMutex.new(*keys)
-  EM::Synchrony::FiberIterator.new((1..1000).to_a, concurrency).each do |i|
-    mutex.synchronize { count+=1 }
-  end
-  assert(count == 1000)
-end
-
-# lock, set, incr, read, del, unlock, sleep as many times as possible in 5 seconds
-# the cooldown period will be included in total time
-def test2(keys, redis)
-  running = true
-  count = 0
-  playing = 0
-  mutex = RMutex.new(*keys)
-  f = Fiber.current
-  (1..100).map {|i| i/100000.0+0.001}.shuffle.each do |i|
-    EM::Synchrony.next_tick do
-      while running
-        playing+=1
-        EM::Synchrony.sleep(i)
-        mutex.synchronize do
-          # print "."
-          value = rand(1000000000000000000)
-          redis.set(TEST_KEY, value)
-          redis.incr(TEST_KEY)
-          assert redis.get(TEST_KEY).to_i == value+1
-          redis.del(TEST_KEY)
-          count += 1
-        end
-        playing-=1
-      end
-    end
-  end
-  EM::Synchrony.add_timer(5) do
-    running = false
-    # print "0"
-    EM::Synchrony.sleep(0.001) while playing > 0
-    EM.next_tick { f.resume }
-  end
-  Fiber.yield
-  print '%5d' % count
-end
-
-EM.synchrony do
-  concurrency = 10
-  RMutex.setup(MUTEX_OPTIONS) {|opts| opts.size = concurrency}
-  if RMutex.respond_to? :handler
-    puts "Version: #{RMutex::VERSION}, handler: #{RMutex.handler}"
-  else
-    puts "Version: #{RMutex::VERSION}, handler: N/A"
-  end
-
-  puts "lock/unlock 1000 times with concurrency: #{concurrency}"
-  Benchmark.benchmark(CAPTION, 7, FORMAT) do |x|
-    [1,2,3,5,10].each do |n|
-      keys = n.times.map { SecureRandom.random_bytes + '.lck' }
-      x.report("keys:%2d " % n) { test1(keys, concurrency) }
-      EM::Synchrony.sleep(1)
-    end
-  end
-
-  puts
-  puts "lock/write/incr/read/del/unlock in 5 seconds + cooldown period:"
-  Benchmark.benchmark(CAPTION, 8, FORMAT) do |x|
-    redis = Redis.new
-    [1,2,3,5,10].each do |n|
-      keys = n.times.map { SecureRandom.random_bytes + '.lck' }
-      x.report("keys:%2d " % n) { test2(keys, redis) }
-      EM::Synchrony.sleep(1)
-    end
-  end
-  RMutex.stop_watcher(true)
-  EM.stop
-end