qmore 0.5.3 → 0.6.0
- checksums.yaml +4 -4
- data/.travis.yml +3 -0
- data/CHANGELOG +8 -0
- data/lib/qmore/job_reserver.rb +35 -14
- data/lib/qmore/version.rb +1 -1
- data/qmore.gemspec +1 -0
- data/spec/job_reserver_spec.rb +37 -16
- data/spec/{redis-test.conf → redis/qless01-test.conf} +10 -88
- data/spec/redis/qless02-test.conf +234 -0
- data/spec/spec_helper.rb +12 -13
- metadata +21 -6
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 81dfd049f15b6991030f758016c3b47f4590a591
+  data.tar.gz: e940b3230a609ae3d40c732d91ca017ca23d7dac
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4a34b907609e0c0aad6502255b847cadd2e1ac1576a77db9bb8dee1c2fc2a04015c9b0d9fee7a887b12274aa258fe396b6d9109799f0439b1603507b08dcf4ca
+  data.tar.gz: e897e91ffa701441f469f509a67006506cb968e3728d18974c89397e46a7987fe0aa9a0c08bc672e88e1514706e75103e2d7fa260a69f087ec4752b545d0ba55
data/.travis.yml
CHANGED
data/CHANGELOG
CHANGED
@@ -1,3 +1,11 @@
+0.6.0 (02/25/2014)
+------------------
+
+Merge pull request #2 from backupify/sharded-qmore <a1d051b> [Matt Conway]
+add mri 2.1.0 to test targets <4b8f74c> [Matt Conway]
+enable redis for travis <4aafd15> [Matt Conway]
+qmore multiple client support <d1c11f6> [james-lawrence]
+
 0.5.3 (11/12/2013)
 ------------------
 
data/lib/qmore/job_reserver.rb
CHANGED
@@ -1,16 +1,26 @@
 module Qmore
   class JobReserver
     include Qmore::Attributes
+    # define queues for Qless worker to invoke.
     attr_reader :queues
+    attr_reader :clients
 
     def initialize(queues)
       @queues = queues
+      # Pull the regex off of the Qless::Queue#name, we want to keep the same interface
+      # that Qless reservers use.
+      @regexes = queues.collect(&:name).uniq
+      @clients = {}
+      queues.each do |q|
+        @clients[q.client] ||= []
+        @clients[q.client] << q.name
+      end
     end
 
     def description
-      @description ||= @
+      @description ||= @regexes.join(', ') + " (qmore)"
     end
-
+
     def prep_for_work!
       # nothing here on purpose
     end
@@ -20,22 +30,33 @@ module Qmore
         job = q.pop
         return job if job
       end
-
+
       nil
     end
-
+
     private
-
+
     def realize_queues
-
-
-
-
-
-
+      realized_queues = []
+
+      self.clients.each do |client, regexes|
+        # Cache the queues so we don't make multiple calls.
+        actual_queues = client.queues
+
+        # Grab all the actual queue names from the client.
+        queue_names = actual_queues.counts.collect {|h| h['name'] }
+
+        # Match the queue names against the regexes provided.
+        matched_names = expand_queues(regexes, queue_names)
+
+        # Prioritize the queues.
+        prioritized_names = prioritize_queues(get_priority_buckets, matched_names)
+
+        # add the matched queues to the resulting list.
+        realized_queues.concat(prioritized_names.collect {|name| actual_queues[name] })
+      end
+
      realized_queues
     end
-
   end
-
-end
+end
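The job_reserver.rb change above is the heart of this release: the reserver now records which Qless client each queue came from (clients), keeps only the name patterns (@regexes), and expands those patterns per client inside realize_queues. A minimal usage sketch, assembled from the specs further down (the two ports match the bundled test configs; the "*" pattern is illustrative):

    require 'qmore'

    # Two independent qless servers.
    qless1 = Qless::Client.new(:redis => Redis.connect(:port => 6379))
    qless2 = Qless::Client.new(:redis => Redis.connect(:port => 6380))

    # One reserver spanning both clients; each "*" is expanded against that
    # client's actual queues when realize_queues runs.
    reserver = Qmore::JobReserver.new([qless1.queues["*"], qless2.queues["*"]])
    reserver.description  # => "* (qmore)"

    worker = Qless::Worker.new(reserver, :run_as_single_process => true)
    worker.work(0)        # drains matching queues on both servers, then exits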
data/lib/qmore/version.rb
CHANGED
data/qmore.gemspec
CHANGED
@@ -26,6 +26,7 @@ Gem::Specification.new do |s|
   s.add_development_dependency('rake')
   s.add_development_dependency('rspec')
   s.add_development_dependency('rack-test')
+  s.add_development_dependency('pry')
   # Needed for correct ordering when passing hash params to rack-test
   s.add_development_dependency('orderedhash')
 
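The new pry dependency is purely a debugging aid; spec_helper.rb below requires it so a breakpoint can be dropped into any spec. For illustration only (this snippet is not part of the diff):

    require 'pry'

    it "can work on multiple queues" do
      binding.pry  # pauses the spec here and opens an interactive session
    end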
data/spec/job_reserver_spec.rb
CHANGED
@@ -2,40 +2,61 @@ require "spec_helper"
 
 describe "JobReserver" do
   include Qmore::Attributes
-
+
   before(:each) do
     Qmore.client.redis.flushall
   end
-
+
+  context "multiple qless server environment" do
+    it "can reserve jobs from regex queue names on multiple clients" do
+      qless1 = Qless::Client.new(:redis => Redis.connect(:port => 6379))
+      qless2 = Qless::Client.new(:redis => Redis.connect(:port => 6380))
+      queue_a = qless1.queues["a"]
+      queue_b = qless2.queues["b"]
+      queue_a.put(SomeJob, [])
+      queue_b.put(SomeJob, [])
+
+      queue_a.length.should == 1
+      queue_b.length.should == 1
+
+      reserver = Qmore::JobReserver.new([qless1.queues["*"], qless2.queues["*"]])
+      worker = Qless::Worker.new(reserver, :run_as_single_process => true)
+      worker.work(0)
+
+      queue_a.length.should == 0
+      queue_b.length.should == 0
+    end
+  end
+
   context "basic qless behavior still works" do
 
     it "can reserve from multiple queues" do
-      high_queue = Qmore.client.queues['high']
-      critical_queue = Qmore.client.queues['critical']
+      high_queue = Qmore.client.queues['high']
+      critical_queue = Qmore.client.queues['critical']
 
       high_queue.put(SomeJob, [])
       critical_queue.put(SomeJob, [])
-
+
       reserver = Qmore::JobReserver.new([critical_queue, high_queue])
       reserver.reserve.queue.name.should == 'critical'
       reserver.reserve.queue.name.should == 'high'
     end
 
     it "can work on multiple queues" do
-      high_queue = Qmore.client.queues['high']
-      critical_queue = Qmore.client.queues['critical']
+      high_queue = Qmore.client.queues['high']
+      critical_queue = Qmore.client.queues['critical']
       high_queue.put(SomeJob, [])
       critical_queue.put(SomeJob, [])
 
       high_queue.length.should == 1
       critical_queue.length.should == 1
-
+
       reserver = Qmore::JobReserver.new([critical_queue, high_queue])
-
+
       worker = Qless::Worker.new(reserver,
                                  :run_as_single_process => true)
       worker.work(0)
-
+
       high_queue.length.should == 0
       critical_queue.length.should == 0
     end
@@ -43,7 +64,7 @@ describe "JobReserver" do
   it "can work on all queues" do
     queues = []
     ['high', 'critical', 'blahblah'].each do |q|
-      queue = Qmore.client.queues[q]
+      queue = Qmore.client.queues[q]
       queue.put(SomeJob, [])
       queue.length.should == 1
       queues << queue
@@ -53,7 +74,7 @@ describe "JobReserver" do
     worker = Qless::Worker.new(reserver,
                                :run_as_single_process => true)
     worker.work(0)
-
+
     queues.each do |q|
       q.length.should == 0
     end
@@ -63,18 +84,18 @@ describe "JobReserver" do
     set_priority_buckets [{'pattern' => 'foo*', 'fairly' => false},
                           {'pattern' => 'default', 'fairly' => false},
                           {'pattern' => 'bar', 'fairly' => true}]
-
-
+
+
     queues = []
     ['other', 'blah', 'foobie', 'bar', 'foo'].each do |q|
-      queue = Qmore.client.queues[q]
+      queue = Qmore.client.queues[q]
       queue.put(SomeJob, [])
       queue.length.should == 1
       queues << queue
     end
 
     reserver = Qmore::JobReserver.new([Qmore.client.queues['*'], Qmore.client.queues['!blah']])
-
+
     reserver.reserve.queue.name.should == 'foo'
     reserver.reserve.queue.name.should == 'foobie'
     reserver.reserve.queue.name.should == 'other'
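The last spec above pins down the priority semantics the reserver inherits from Qmore::Attributes: buckets drain in the order they are declared, 'default' stands for every queue not claimed by another pattern, a leading '!' appears to exclude a queue from an earlier match, and 'fairly' randomizes order within a bucket. Restated compactly with the same names the spec uses:

    set_priority_buckets [{'pattern' => 'foo*',    'fairly' => false},  # drained first
                          {'pattern' => 'default', 'fairly' => false},  # then everything else
                          {'pattern' => 'bar',     'fairly' => true}]   # last, in random order

    # '!blah' drops the 'blah' queue from the '*' match, so with jobs queued on
    # other/blah/foobie/bar/foo the reserve order begins 'foo', 'foobie', 'other'.
    reserver = Qmore::JobReserver.new([Qmore.client.queues['*'], Qmore.client.queues['!blah']])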
data/spec/{redis-test.conf → redis/qless01-test.conf}
RENAMED
@@ -18,7 +18,7 @@ daemonize yes
 
 # When running daemonized, Redis writes a pid file in ./tmp/run/redis.pid by
 # default. You can specify a custom pid file location here.
-pidfile ./
+pidfile ./qless01-test.pid
 
 # Accept connections on the specified port, default is 6379
 port 6379
@@ -42,7 +42,7 @@ loglevel verbose
 # Specify the log file name. Also 'stdout' can be used to force
 # Redis to log on the standard output. Note that if you use standard
 # output for logging but daemonize, logs will be sent to /dev/null
-logfile ./
+logfile ./qless01-test-server.log
 
 # Set the number of databases. The default database is DB 0, you can select
 # a different one on a per-connection basis using SELECT <dbid> where
@@ -76,15 +76,15 @@ save 60 10000
 rdbcompression yes
 
 # The filename where to dump the DB
-dbfilename dump.rdb
+dbfilename ./qless01-test-dump.rdb
 
 # The working directory.
 #
 # The DB will be written inside this directory, with the filename specified
 # above using the 'dbfilename' configuration directive.
-#
+#
 # Also the Append Only File will be created inside this directory.
-#
+#
 # Note that you must specify a directory here, not a file name.
 dir .
 
@@ -112,7 +112,7 @@ dir .
 #
 # This should stay commented out for backward compatibility and because most
 # people do not need auth (e.g. they run their own servers).
-#
+#
 # Warning: since Redis is pretty fast an outside user can try up to
 # 150k passwords per second against a good box. This means that you should
 # use a very strong password otherwise it will be very easy to break.
@@ -172,7 +172,7 @@ appendonly no
 # appendfilename appendonly.aof
 
 # The fsync() call tells the Operating System to actually write data on disk
-# instead to wait for more data in the output buffer. Some OS will really flush
+# instead to wait for more data in the output buffer. Some OS will really flush
 # data on disk, some other OS will just try to do it ASAP.
 #
 # Redis supports three different modes:
@@ -195,91 +195,13 @@ appendonly no
 appendfsync everysec
 # appendfsync no
 
-################################ VIRTUAL MEMORY ###############################
-
-# Virtual Memory allows Redis to work with datasets bigger than the actual
-# amount of RAM needed to hold the whole dataset in memory.
-# In order to do so very used keys are taken in memory while the other keys
-# are swapped into a swap file, similarly to what operating systems do
-# with memory pages.
-#
-# To enable VM just set 'vm-enabled' to yes, and set the following three
-# VM parameters accordingly to your needs.
-
-vm-enabled no
-# vm-enabled yes
-
-# This is the path of the Redis swap file. As you can guess, swap files
-# can't be shared by different Redis instances, so make sure to use a swap
-# file for every redis process you are running. Redis will complain if the
-# swap file is already in use.
-#
-# The best kind of storage for the Redis swap file (that's accessed at random)
-# is a Solid State Disk (SSD).
-#
-# *** WARNING *** if you are using a shared hosting the default of putting
-# the swap file under /tmp is not secure. Create a dir with access granted
-# only to Redis user and configure Redis to create the swap file there.
-vm-swap-file ./tmp/redis.swap
-
-# vm-max-memory configures the VM to use at max the specified amount of
-# RAM. Everything that deos not fit will be swapped on disk *if* possible, that
-# is, if there is still enough contiguous space in the swap file.
-#
-# With vm-max-memory 0 the system will swap everything it can. Not a good
-# default, just specify the max amount of RAM you can in bytes, but it's
-# better to leave some margin. For instance specify an amount of RAM
-# that's more or less between 60 and 80% of your free RAM.
-vm-max-memory 0
-
-# Redis swap files is split into pages. An object can be saved using multiple
-# contiguous pages, but pages can't be shared between different objects.
-# So if your page is too big, small objects swapped out on disk will waste
-# a lot of space. If you page is too small, there is less space in the swap
-# file (assuming you configured the same number of total swap file pages).
-#
-# If you use a lot of small objects, use a page size of 64 or 32 bytes.
-# If you use a lot of big objects, use a bigger page size.
-# If unsure, use the default :)
-vm-page-size 32
-
-# Number of total memory pages in the swap file.
-# Given that the page table (a bitmap of free/used pages) is taken in memory,
-# every 8 pages on disk will consume 1 byte of RAM.
-#
-# The total swap size is vm-page-size * vm-pages
-#
-# With the default of 32-bytes memory pages and 134217728 pages Redis will
-# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
-#
-# It's better to use the smallest acceptable value for your application,
-# but the default is large in order to work in most conditions.
-vm-pages 134217728
-
-# Max number of VM I/O threads running at the same time.
-# This threads are used to read/write data from/to swap file, since they
-# also encode and decode objects from disk to memory or the reverse, a bigger
-# number of threads can help with big objects even if they can't help with
-# I/O itself as the physical device may not be able to couple with many
-# reads/writes operations at the same time.
-#
-# The special value of 0 turn off threaded I/O and enables the blocking
-# Virtual Memory implementation.
-vm-max-threads 4
-
 ############################### ADVANCED CONFIG ###############################
-
-# Glue small output buffers together in order to send small replies in a
-# single TCP packet. Uses a bit more CPU but most of the times it is a win
-# in terms of number of queries per second. Use 'yes' if unsure.
-glueoutputbuf yes
-
 # Hashes are encoded in a special way (much more memory efficient) when they
 # have at max a given numer of elements, and the biggest element does not
 # exceed a given threshold. You can configure this limits with the following
 # configuration directives.
-hash-max-zipmap-entries 64
-hash-max-zipmap-value 512
+hash-max-ziplist-entries 64
+hash-max-ziplist-value 512
 
 # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
 # order to help rehashing the main Redis hash table (the one mapping top-level
@@ -288,7 +210,7 @@ hash-max-zipmap-value 512
 # that is rhashing, the more rehashing "steps" are performed, so if the
 # server is idle the rehashing is never complete and some more memory is used
 # by the hash table.
-#
+#
 # The default is to use this millisecond 10 times every second in order to
 # active rehashing the main dictionaries, freeing memory when possible.
 #
data/spec/redis/qless02-test.conf
ADDED
@@ -0,0 +1,234 @@
+# Redis configuration file example
+
+# Note on units: when memory size is needed, it is possible to specifiy
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in ./tmp/run/redis.pid when daemonized.
+daemonize yes
+
+# When running daemonized, Redis writes a pid file in ./tmp/run/redis.pid by
+# default. You can specify a custom pid file location here.
+pidfile ./qless02-test.pid
+
+# Accept connections on the specified port, default is 6379
+port 6380
+
+# If you want you can bind a single interface, if the bind option is not
+# specified all the interfaces will listen for incoming connections.
+#
+bind 127.0.0.1
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 300
+
+# Set server verbosity to 'debug'
+# it can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel verbose
+
+# Specify the log file name. Also 'stdout' can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ./qless02-test-server.log
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+################################ SNAPSHOTTING #################################
+#
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+#
+# Note: you can disable saving at all commenting all the "save" lines.
+
+save 900 1
+save 300 10
+save 60 10000
+
+# Compress string objects using LZF when dump .rdb databases?
+# For default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# The filename where to dump the DB
+dbfilename ./qless02-test-dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# Also the Append Only File will be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir .
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. Note that the configuration is local to the slave
+# so for example it is possible to configure the slave to save the DB with a
+# different interval, or to listen to another port, and so on.
+#
+# slaveof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the slave to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the slave request.
+#
+# masterauth <master-password>
+
+################################## SECURITY ###################################
+
+# Require clients to issue AUTH <PASSWORD> before processing any other
+# commands. This might be useful in environments in which you do not trust
+# others with access to the host running redis-server.
+#
+# This should stay commented out for backward compatibility and because most
+# people do not need auth (e.g. they run their own servers).
+#
+# Warning: since Redis is pretty fast an outside user can try up to
+# 150k passwords per second against a good box. This means that you should
+# use a very strong password otherwise it will be very easy to break.
+#
+# requirepass foobared
+
+################################### LIMITS ####################################
+
+# Set the max number of connected clients at the same time. By default there
+# is no limit, and it's up to the number of file descriptors the Redis process
+# is able to open. The special value '0' means no limits.
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# maxclients 128
+
+# Don't use more memory than the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys with an
+# EXPIRE set. It will try to start freeing keys that are going to expire
+# in little time and preserve keys with a longer time to live.
+# Redis will also try to remove objects from free lists if possible.
+#
+# If all this fails, Redis will start to reply with errors to commands
+# that will use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to most read-only commands like GET.
+#
+# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
+# 'state' server or cache, not as a real DB. When Redis is used as a real
+# database the memory usage will grow over the weeks, it will be obvious if
+# it is going to use too much memory in the long run, and you'll have the time
+# to upgrade. With maxmemory after the limit is reached you'll start to get
+# errors for write operations, and this may even lead to DB inconsistency.
+#
+# maxmemory <bytes>
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. If you can live
+# with the idea that the latest records will be lost if something like a crash
+# happens this is the preferred way to run Redis. If instead you care a lot
+# about your data and don't want to that a single record can get lost you should
+# enable the append only mode: when this mode is enabled Redis will append
+# every write operation received in the file appendonly.aof. This file will
+# be read on startup in order to rebuild the full dataset in memory.
+#
+# Note that you can have both the async dumps and the append only file if you
+# like (you have to comment the "save" statements above to disable the dumps).
+# Still if append only mode is enabled Redis will load the data from the
+# log file at startup ignoring the dump.rdb file.
+#
+# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
+# log file in background when it gets too big.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+# appendfilename appendonly.aof
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead to wait for more data in the output buffer. Some OS will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log . Slow, Safest.
+# everysec: fsync only if one second passed since the last fsync. Compromise.
+#
+# The default is "everysec" that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+############################### ADVANCED CONFIG ###############################
+# Hashes are encoded in a special way (much more memory efficient) when they
+# have at max a given numer of elements, and the biggest element does not
+# exceed a given threshold. You can configure this limits with the following
+# configuration directives.
+hash-max-ziplist-entries 64
+hash-max-ziplist-value 512
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into an hash table
+# that is rhashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# active rehashing the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply form time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all redis server but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
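The new qless02-test.conf is a near-copy of qless01-test.conf; only the per-instance values differ, which is what lets the spec helper boot both servers side by side:

    # qless01-test.conf                  # qless02-test.conf
    port 6379                            port 6380
    pidfile ./qless01-test.pid           pidfile ./qless02-test.pid
    logfile ./qless01-test-server.log    logfile ./qless02-test-server.log
    dbfilename ./qless01-test-dump.rdb   dbfilename ./qless02-test-dump.rdb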
data/spec/spec_helper.rb
CHANGED
@@ -1,32 +1,31 @@
 require 'rspec'
 require 'coveralls'
+require 'pry'
 Coveralls.wear!
 
 require 'qmore'
 
 # No need to start redis when running in Travis
 unless ENV['CI']
+  redis_configs_directory = File.join(File.dirname(File.expand_path(__FILE__)), "redis")
+  redis_configs = Dir.entries(redis_configs_directory).select{|f| !File.directory? f}
 
-
-
-  rescue Errno::ECONNREFUSED
-    spec_dir = File.dirname(File.expand_path(__FILE__))
-    REDIS_CMD = "redis-server #{spec_dir}/redis-test.conf"
-
+  redis_configs.each do |config|
+    redis_cmd = "redis-server #{redis_configs_directory}/#{config}"
     puts "Starting redis for testing at localhost..."
-    puts `cd #{
-
+    puts `cd #{redis_configs_directory}; #{redis_cmd}`
+
     # Schedule the redis server for shutdown when tests are all finished.
     at_exit do
+      redis_instance_name = config.chomp(".conf")
       puts 'Stopping redis'
-      pid = File.read("#{
+      pid = File.read("#{redis_configs_directory}/#{redis_instance_name}.pid").to_i rescue nil
       system ("kill -9 #{pid}") if pid.to_i != 0
-      File.delete("#{
-      File.delete("#{
-      File.delete("#{
+      File.delete("#{redis_configs_directory}/#{redis_instance_name}.pid") rescue nil
+      File.delete("#{redis_configs_directory}/#{redis_instance_name}-server.log") rescue nil
+      File.delete("#{redis_configs_directory}/#{redis_instance_name}-dump.rdb") rescue nil
     end
   end
-
 end
 
 def dump_redis
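The rewritten helper boots one redis-server per config file found in spec/redis/ and tears each one down at exit. The cleanup relies entirely on a naming convention that the shipped configs follow; spelled out for one of them:

    config = "qless01-test.conf"                 # one entry from spec/redis/
    redis_instance_name = config.chomp(".conf")  # => "qless01-test"

    # Files removed at exit, matching the paths the conf file itself sets:
    #   ./qless01-test.pid         (pidfile)
    #   ./qless01-test-server.log  (logfile)
    #   ./qless01-test-dump.rdb    (dbfilename)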
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: qmore
 version: !ruby/object:Gem::Version
-  version: 0.5.3
+  version: 0.6.0
 platform: ruby
 authors:
 - Matt Conway
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2014-02-25 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: qless
@@ -80,6 +80,20 @@ dependencies:
     - - '>='
     - !ruby/object:Gem::Version
       version: '0'
+- !ruby/object:Gem::Dependency
+  name: pry
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+    - !ruby/object:Gem::Version
+      version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+    - !ruby/object:Gem::Version
+      version: '0'
 - !ruby/object:Gem::Dependency
   name: orderedhash
   requirement: !ruby/object:Gem::Requirement
@@ -123,7 +137,8 @@ files:
 - qmore.gemspec
 - spec/attributes_spec.rb
 - spec/job_reserver_spec.rb
-- spec/redis-test.conf
+- spec/redis/qless01-test.conf
+- spec/redis/qless02-test.conf
 - spec/server_spec.rb
 - spec/spec_helper.rb
 homepage: ''
@@ -145,14 +160,14 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project: qmore
-rubygems_version: 2.
+rubygems_version: 2.1.11
 signing_key:
 specification_version: 4
 summary: A qless plugin that gives more control over how queues are processed
 test_files:
 - spec/attributes_spec.rb
 - spec/job_reserver_spec.rb
-- spec/redis-test.conf
+- spec/redis/qless01-test.conf
+- spec/redis/qless02-test.conf
 - spec/server_spec.rb
 - spec/spec_helper.rb
-has_rdoc: