gru 0.0.2 → 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +7 -4
- data/lib/gru.rb +5 -5
- data/lib/gru/adapters/redis_adapter.rb +69 -34
- data/lib/gru/configuration.rb +26 -0
- data/lib/gru/version.rb +1 -1
- data/lib/gru/worker_manager.rb +6 -19
- data/spec/gru/adapters/redis_adapter_spec.rb +99 -68
- data/spec/gru/worker_manager_spec.rb +3 -7
- metadata +19 -18
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f6abb6b68a1f26073ec9bfaa5ed2a858042db2b7
+  data.tar.gz: 4f43b5ef56312af91b6b2a675a55071817ef60a9
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: dbc6158473909d36783371a088deaa48caf89eef61e8dd51b9222b24b11a04cc44ea8e6e52007aacc5b0bf172e753fb18295c117d6cf095714590ed3a5b2b029
+  data.tar.gz: 34990eb2f962112c7fffa308d149c6ce2183c5412cbcf957efc652d68e049e97ea483679e9a4930d361593b82a5361b32b81c68d8991e9717eca509831cab6ec
data/README.md
CHANGED
@@ -33,10 +33,13 @@ Or install it yourself as:
   end
 end

-
-
-
-
+settings = {
+  host_maximums: { 'test_worker' => 5 },
+  cluster_maximums: { 'test_worker' => 10 },
+  rebalance_flag: true
+}
+
+manager = Gru.create(settings)
 logger = Logger.new(STDOUT)

 loop do
data/lib/gru.rb
CHANGED
@@ -1,12 +1,12 @@
 require 'gru/version'
 require 'gru/worker_manager'
-require 'gru/
-require 'gru/adapters/redis_adapter'
+require 'gru/configuration'

 module Gru
-  def self.
-
-    manager.
+  def self.create(settings)
+    configuration = Gru::Configuration.new(settings)
+    manager = WorkerManager.new(configuration.adapter)
+    manager.register_workers
     manager
   end
 end
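Note: together with the README change above, Gru.create(settings) is now the single public entry point; it builds a Gru::Configuration, wires the Redis adapter into a WorkerManager, and registers the worker counts before returning the manager. A minimal usage sketch follows (the worker names, counts, and Redis options are illustrative examples, not values shipped with the gem):

  require 'gru'

  settings = {
    host_maximums:    { 'test_worker' => 5 },   # per-host ceiling
    cluster_maximums: { 'test_worker' => 10 },  # cluster-wide ceiling
    rebalance_flag:   true,                     # spread workers across hosts
    environment_name: 'production',             # optional, defaults to 'default'
    cluster_name:     'minions',                # optional, defaults to 'default'
    redis_config:     { host: 'localhost', port: 6379 }  # passed through to Redis.new
  }

  manager = Gru.create(settings)

  loop do
    manager.provision_workers.each { |worker, count| puts "start #{count} x #{worker}" }
    manager.expire_workers.each    { |worker, count| puts "stop #{count.abs} x #{worker}" }
    sleep 5
  end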
data/lib/gru/adapters/redis_adapter.rb
CHANGED
@@ -5,44 +5,43 @@ module Gru
     class RedisAdapter
       attr_reader :client

-      def initialize(client,
+      def initialize(client,settings)
         @client = client
-        @
+        @settings = settings
       end

-      def
-
-
-
-
+      def set_worker_counts
+        set_rebalance_flag(@settings.rebalance_flag)
+        register_workers(@settings.host_maximums)
+        set_max_worker_counts(@settings.host_maximums)
+        register_global_workers(@settings.cluster_maximums)
+        set_max_global_worker_counts(@settings.cluster_maximums)
       end

-      def provision_workers
+      def provision_workers
         available = {}
         workers = max_host_workers
         workers.each do |worker, count|
-
-
-
-          i += 1 if reserve_worker(worker)
+          available[worker] = with_worker_counts(worker,count) do |total|
+            if reserve_worker?(worker)
+              total += 1 if reserve_worker(worker)
             end
+            total
           end
-          available[worker] = i
         end
         available
       end

-      def expire_workers
+      def expire_workers
         removable = {}
         workers = max_host_workers
         workers.each do |worker, count|
-
-
-
-          i -= 1 if expire_worker(worker)
+          removable[worker] = with_worker_counts(worker,count) do |total|
+            if expire_worker?(worker)
+              total -= 1 if expire_worker(worker)
             end
+            total
           end
-          removable[worker] = i
         end
         removable
       end
@@ -51,9 +50,15 @@ module Gru
         workers = max_host_workers
         workers.keys.each do |worker|
           host_running_count = local_running_count(worker)
+          running_count = host_running_count
+          global_running_count = host_running_count
           host_running_count.times do
-
-
+            if global_running_count > 0
+              global_running_count = send_message(:hincrby, global_workers_running_key,worker,-1)
+            end
+            if running_count > 0
+              running_count = send_message(:hincrby, host_workers_running_key,worker,-1)
+            end
           end
         end
         send_message(:del, host_workers_running_key)
@@ -75,23 +80,41 @@ module Gru
       end

       def set_max_global_worker_counts(workers)
-        workers
+        reset_removed_global_worker_counts(workers)
+        workers.each_pair{|worker,count| set_max_global_worker_count(worker,count) }
+      end
+
+      def set_rebalance_flag(rebalance)
+        send_message(:set,"#{gru_key}:rebalance",rebalance)
       end

       def register_worker(worker,count)
-        send_message(:
+        send_message(:hsetnx,host_workers_running_key,worker,count)
       end

       def register_global_worker(worker,count)
-        send_message(:
+        send_message(:hsetnx,global_workers_running_key,worker,count)
       end

       def set_max_worker_count(worker,count)
-        send_message(:hset,
+        send_message(:hset,host_max_worker_key,worker,count)
       end

       def set_max_global_worker_count(worker,count)
-        send_message(:hset,
+        send_message(:hset,global_max_worker_key,worker,count)
+      end
+
+      def reset_removed_global_worker_counts(workers)
+        global_max = max_host_workers
+        global_max.each_pair do |worker, count|
+          set_max_global_worker_count(worker,0) unless workers[worker]
+        end
+      end
+
+      def with_worker_counts(worker,count,&block)
+        Integer(count).times.reduce(0) do |total|
+          block.call(total)
+        end
       end

       def reserve_worker(worker)
@@ -103,7 +126,7 @@ module Gru
       end

       def adjust_workers(worker,amount)
-        lock_key = "
+        lock_key = "#{gru_key}:#{worker}"
         if send_message(:setnx,lock_key,Time.now.to_i)
           send_message(:hincrby,host_workers_running_key,worker,amount)
           send_message(:hincrby,global_workers_running_key,worker,amount)
@@ -117,10 +140,14 @@ module Gru
         send_message(:hgetall,host_max_worker_key)
       end

-      def
+      def max_global_workers
+        send_message(:hgetall,global_max_worker_key)
+      end
+
+      def reserve_worker?(worker)
         host_running,global_running,host_max,global_max = worker_counts(worker)
         result = false
-        if
+        if rebalance_cluster?
           result = host_running.to_i < max_workers_per_host(global_max,host_max)
         else
           result = host_running.to_i < host_max.to_i
@@ -128,10 +155,10 @@ module Gru
         result && global_running.to_i < global_max.to_i
       end

-      def expire_worker?(worker
+      def expire_worker?(worker)
         host_running,global_running,host_max,global_max = worker_counts(worker)
         result = false
-        if
+        if rebalance_cluster?
           result = host_running.to_i > max_workers_per_host(global_max,host_max)
         else
           result = host_running.to_i > host_max.to_i
@@ -153,7 +180,7 @@ module Gru
       end

       def gru_host_count
-        send_message(:keys,"
+        send_message(:keys,"#{gru_key}:*:workers_running").count - 1
       end

       def max_workers_per_host(global_worker_max_count,host_max)
@@ -162,6 +189,10 @@ module Gru
         rebalance_count <= host_max.to_i && host_count > 1 ? rebalance_count : host_max.to_i
       end

+      def rebalance_cluster?
+        send_message(:get,"#{gru_key}:rebalance") == "true"
+      end
+
       def host_max_worker_key
         "#{host_key}:max_workers"
       end
@@ -179,11 +210,15 @@ module Gru
       end

       def global_key
-        "
+        "#{gru_key}:global"
       end

       def host_key
-        "
+        "#{gru_key}:#{hostname}"
+      end
+
+      def gru_key
+        "GRU:#{@settings.environment_name}:#{@settings.cluster_name}"
       end

       def hostname
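For orientation, every key the adapter touches is now namespaced under gru_key, i.e. "GRU:<environment_name>:<cluster_name>". A sketch of the resulting key layout for a hypothetical environment 'production', cluster 'minions', host 'web1', and worker 'test_worker' (the key shapes follow the *_key methods and the specs below; the concrete names are examples):

  # Redis hashes, one field per worker name
  "GRU:production:minions:web1:max_workers"        # per-host maximums (hset)
  "GRU:production:minions:web1:workers_running"    # per-host running counts (hsetnx / hincrby)
  "GRU:production:minions:global:max_workers"      # cluster-wide maximums
  "GRU:production:minions:global:workers_running"  # cluster-wide running counts
  # Plain keys
  "GRU:production:minions:rebalance"               # "true"/"false", read by rebalance_cluster?
  "GRU:production:minions:test_worker"             # per-worker setnx lock used by adjust_workers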
data/lib/gru/configuration.rb
ADDED
@@ -0,0 +1,26 @@
+require 'gru/adapters'
+require 'gru/adapters/redis_adapter'
+
+module Gru
+  class Configuration
+    attr_reader :cluster_maximums, :host_maximums, :rebalance_flag, :adapter, :cluster_name, :environment_name
+    def initialize(settings)
+      @host_maximums = settings.delete(:host_maximums) || settings.delete(:cluster_maximums)
+      @cluster_maximums = settings.delete(:cluster_maximums) || @host_maximums
+      @rebalance_flag = settings.delete(:rebalance_flag) || false
+      @cluster_name = settings.delete(:cluster_name) || 'default'
+      @environment_name = settings.delete(:environment_name) || 'default'
+      client = initialize_client(settings.delete(:redis_config))
+      @adapter = Gru::Adapters::RedisAdapter.new(client,self)
+      if @cluster_maximums.nil?
+        raise ArgumentError "Need at least a cluster configuration"
+      end
+    end
+
+    private
+
+    def initialize_client(config=nil)
+      Redis.new(config || {})
+    end
+  end
+end
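A short sketch of how the defaults above resolve when only cluster_maximums is supplied (return values inferred from the constructor just above; Redis.new does not connect until the adapter issues its first command):

  config = Gru::Configuration.new(cluster_maximums: { 'test_worker' => 3 })

  config.host_maximums    # => { 'test_worker' => 3 }, falls back to cluster_maximums
  config.cluster_maximums # => { 'test_worker' => 3 }
  config.rebalance_flag   # => false
  config.environment_name # => 'default'
  config.cluster_name     # => 'default'
  config.adapter          # => Gru::Adapters::RedisAdapter backed by Redis.new({})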
data/lib/gru/version.rb
CHANGED
data/lib/gru/worker_manager.rb
CHANGED
@@ -1,34 +1,21 @@
 module Gru
   class WorkerManager
-    attr_reader :
+    attr_reader :adapter

-    def
-      redis = Redis.new(settings)
-      adapter = Gru::Adapters::RedisAdapter.new(redis)
-      new(adapter,workers)
-    end
-
-    def self.with_redis_connection(client,workers,global_config=nil,balanced=false)
-      adapter = Gru::Adapters::RedisAdapter.new(client,global_config)
-      new(adapter,workers,balanced)
-    end
-
-    def initialize(adapter,workers,balanced=false)
+    def initialize(adapter)
       @adapter = adapter
-      @workers = workers
-      @balanced = balanced
     end

-    def
-      @adapter.
+    def register_workers
+      @adapter.set_worker_counts
     end

     def provision_workers
-      @adapter.provision_workers
+      @adapter.provision_workers
     end

     def expire_workers
-      @adapter.expire_workers
+      @adapter.expire_workers
     end

     def adjust_workers
data/spec/gru/adapters/redis_adapter_spec.rb
CHANGED
@@ -1,19 +1,31 @@
 require 'rspec'
 require_relative '../../../lib/gru'
+require 'pry'

 describe Gru::Adapters::RedisAdapter do
   before(:each) do
     allow(Socket).to receive(:gethostname).and_return(hostname)
+    allow_any_instance_of(Gru::Configuration).to receive(:initialize_client).and_return(client)
   end

   let(:hostname) { 'foo' }
   let(:client) { double('client') }
+  let(:config) {
+    Gru::Configuration.new({
+      cluster_maximums: { 'test_worker' => 3 },
+      environment_name: 'environment',
+      cluster_name: 'cluster'
+    })
+  }

   let(:adapter) {
-    Gru::Adapters::RedisAdapter.new(client)
+    Gru::Adapters::RedisAdapter.new(client,config)
+  }
+
+  let(:gru_key) {
+    "GRU:#{config.environment_name}:#{config.cluster_name}"
   }

-  let(:workers) { { 'test_worker' => 3 } }

   context "initialization" do
     it "has a client" do
@@ -22,24 +34,28 @@ describe Gru::Adapters::RedisAdapter do
   end

   context "processing workers" do
-
     it "determines the host key" do
-      expect(adapter.send(:host_key)).to eq("
+      expect(adapter.send(:host_key)).to eq("#{gru_key}:#{hostname}")
     end

     it "registers workers" do
-      expect(client).to receive(:
-      adapter.send(:register_workers,
+      expect(client).to receive(:hsetnx).with("#{gru_key}:#{hostname}:workers_running",'test_worker',0)
+      adapter.send(:register_workers,config.cluster_maximums)
     end

     it "sets worker counts" do
-      expect(client).to receive(:hset).with("
-      adapter.send(:set_max_worker_counts,
+      expect(client).to receive(:hset).with("#{gru_key}:#{hostname}:max_workers",'test_worker',3)
+      adapter.send(:set_max_worker_counts,config.cluster_maximums)
     end

     it "sets global worker counts" do
-      expect(client).to receive(:
-
+      expect(client).to receive(:hgetall).with("#{gru_key}:foo:max_workers").and_return({
+        'test_worker' => 2,
+        'foo_worker' => 5
+      })
+      expect(client).to receive(:hset).with("#{gru_key}:global:max_workers",'foo_worker',0)
+      expect(client).to receive(:hset).with("#{gru_key}:global:max_workers",'test_worker',3)
+      adapter.send(:set_max_global_worker_counts,config.cluster_maximums)
     end

   end
@@ -47,7 +63,7 @@ describe Gru::Adapters::RedisAdapter do
   context "Determining Available Workers" do

     it "gets all workers from redis" do
-      expect(client).to receive(:hgetall).with("
+      expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:max_workers").and_return({
        'test_worker' => 3
      })
      adapter.send(:max_host_workers)
@@ -55,23 +71,24 @@ describe Gru::Adapters::RedisAdapter do

   context "Provisioning workers with same local and global max" do
     before(:each) do
-      expect(client).to receive(:hgetall).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
+      expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:max_workers").and_return(config.cluster_maximums)
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(1).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(1).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(1).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(1).times
     end

     it "returns workers with 0 existing workers" do
       expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([0,0,3,3])
       expect(client).to receive(:setnx).exactly(3).times.and_return(true)
-      expect(client).to receive(:del).with("
-      expect(client).to receive(:
-      expect(client).to receive(:hincrby).with("
-      expect(client).to receive(:
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
+      expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(3).times
+      expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',1).exactly(3).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:#{hostname}:workers_running",'test_worker',1).exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(2).times
       available_workers = adapter.provision_workers
       expect(available_workers).to eq({'test_worker' => 3})
     end
@@ -79,53 +96,58 @@ describe Gru::Adapters::RedisAdapter do
     it "returns workers when max local and global counts have not been reached" do
       expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([1,1,3,3])
       expect(client).to receive(:setnx).exactly(3).times.and_return(true)
-      expect(client).to receive(:del).with("
-      expect(client).to receive(:
-      expect(client).to receive(:hincrby).with("
-      expect(client).to receive(:
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
+      expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(3).times
+      expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',1).exactly(3).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:#{hostname}:workers_running",'test_worker',1).exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(2).times
       available_workers = adapter.provision_workers
       expect(available_workers).to eq({'test_worker' => 3})
     end

     it "does not return workers if max global count has been reached" do
       expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([0,3,3,3])
-      expect(client).to receive(:
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
+      expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(2).times
       available_workers = adapter.provision_workers
       expect(available_workers).to eq({'test_worker' => 0})
     end

     it "doesn't return workers if max local count has been reached" do
       expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([3,4,3,6])
-      expect(client).to receive(:
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
+      expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(2).times
       reserved_workers = adapter.provision_workers
       expect(reserved_workers).to eq({'test_worker' => 0})
     end

     it "doesn't return workers if global max is 0" do
       expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([0,0,3,0])
-      expect(client).to receive(:
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
+      expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(2).times
       available_workers = adapter.provision_workers
       expect(available_workers).to eq({'test_worker' => 0})
     end

     it "doesn't provision workers if local max is 0" do
       expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([0,1,0,3])
-      expect(client).to receive(:
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
+      expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(2).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(2).times
       available_workers = adapter.provision_workers
       expect(available_workers).to eq({'test_worker' => 0})
     end
@@ -138,28 +160,29 @@ describe Gru::Adapters::RedisAdapter do
     }

     before(:each) do
-      expect(client).to receive(:hgetall).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
+      expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:max_workers").and_return(workers)
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(1).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(1).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(1).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(1).times
+      expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(1).times
     end

     it "removes workers when local maximum has been exceeded" do
       expect(client).to receive(:multi).exactly(1).times.and_yield(client).and_return([3,3,1,3])
       expect(client).to receive(:setnx).exactly(1).times.and_return(true)
-      expect(client).to receive(:del).with("
-      expect(client).to receive(:hincrby).with("
-      expect(client).to receive(:hincrby).with("
+      expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(1).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',-1).exactly(1).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:#{hostname}:workers_running",'test_worker',-1).exactly(1).times
       expect(adapter.expire_workers).to eq({'test_worker' => -1})
     end

     it "removes workers when global maximum has been exceeded" do
       expect(client).to receive(:multi).exactly(1).times.and_yield(client).and_return([3,3,3,1])
       expect(client).to receive(:setnx).exactly(1).times.and_return(true)
-      expect(client).to receive(:del).with("
-      expect(client).to receive(:hincrby).with("
-      expect(client).to receive(:hincrby).with("
+      expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(1).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',-1).exactly(1).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:#{hostname}:workers_running",'test_worker',-1).exactly(1).times
       expect(adapter.expire_workers).to eq({'test_worker' => -1})
     end

@@ -177,22 +200,30 @@ describe Gru::Adapters::RedisAdapter do
   context "Rebalancing workers" do

     before(:each) do
-
     end
+    let(:config) {
+      Gru::Configuration.new({
+        cluster_maximums: { 'test_worker' => 3 },
+        environment_name: 'environment',
+        cluster_name: 'cluster',
+        rebalance_flag: true
+      })
+    }

     it "reduces load when workers are added" do
       expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([2,4,3,5])
-      expect(client).to receive(:hgetall).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:hget).with("
-      expect(client).to receive(:keys).with("
+      expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:max_workers").and_return(config.cluster_maximums)
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(3).times
+      expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(3).times
+      expect(client).to receive(:keys).with("#{gru_key}:*:workers_running").exactly(3).times.and_return(['foo'])
       expect(client).to receive(:setnx).exactly(3).times.and_return(true)
-      expect(client).to receive(:hincrby).with("
-      expect(client).to receive(:hincrby).with("
-      expect(client).to receive(:del).with("
-
+      expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',1).exactly(3).times
+      expect(client).to receive(:hincrby).with("#{gru_key}:#{hostname}:workers_running",'test_worker',1).exactly(3).times
+      expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(3).times
+      expect(client).to receive(:get).with("#{gru_key}:rebalance").and_return("true").exactly(3).times
+      adapter.provision_workers

     end

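Reading note for the stubs above: the four-element arrays returned by the stubbed multi calls line up with the destructuring in the adapter's reserve_worker? and expire_worker?, i.e. [host_running, global_running, host_max, global_max]. For example:

  [0, 0, 3, 3]  # nothing running, host max 3, global max 3  -> worker reserved
  [0, 3, 3, 3]  # global maximum already reached              -> not reserved
  [3, 3, 1, 3]  # host running above the host maximum of 1   -> worker expired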
data/spec/gru/worker_manager_spec.rb
CHANGED
@@ -7,7 +7,7 @@ describe Gru::WorkerManager do
   }

   let(:manager) {
-    Gru::WorkerManager.new(adapter
+    Gru::WorkerManager.new(adapter)
   }

   let(:workers) {
@@ -15,10 +15,6 @@ describe Gru::WorkerManager do
   }

   context "When initialized" do
-    it "has workers" do
-      expect(manager.workers).not_to be_nil
-    end
-
     it "has an adapter instance" do
       expect(manager.adapter).not_to be_nil
     end
@@ -26,8 +22,8 @@ describe Gru::WorkerManager do

   context "Creating Worker Queues" do
     it "Creates workers" do
-      expect(adapter).to receive(:
-      manager.
+      expect(adapter).to receive(:set_worker_counts).and_return(true)
+      manager.register_workers
     end
   end

metadata
CHANGED
@@ -1,97 +1,97 @@
 --- !ruby/object:Gem::Specification
 name: gru
 version: !ruby/object:Gem::Version
-  version: 0.0.
+  version: 0.0.3
 platform: ruby
 authors:
 - Jeffrey Gillis
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-07-
+date: 2015-07-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - '>'
       - !ruby/object:Gem::Version
         version: '0.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - '>'
       - !ruby/object:Gem::Version
         version: '0.0'
 - !ruby/object:Gem::Dependency
   name: bundler
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - ~>
      - !ruby/object:Gem::Version
        version: '1.7'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - ~>
      - !ruby/object:Gem::Version
        version: '1.7'
 - !ruby/object:Gem::Dependency
   name: rake
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - ~>
      - !ruby/object:Gem::Version
        version: '10.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - ~>
      - !ruby/object:Gem::Version
        version: '10.0'
 - !ruby/object:Gem::Dependency
   name: rspec
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - ~>
      - !ruby/object:Gem::Version
        version: 3.1.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - ~>
      - !ruby/object:Gem::Version
        version: 3.1.0
 - !ruby/object:Gem::Dependency
   name: pry
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - '>'
      - !ruby/object:Gem::Version
        version: '0.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - '>'
      - !ruby/object:Gem::Version
        version: '0.0'
 - !ruby/object:Gem::Dependency
   name: awesome_print
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - '>'
      - !ruby/object:Gem::Version
        version: '0.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - '>'
      - !ruby/object:Gem::Version
        version: '0.0'
 description: This is a worker/minion manager using different atomic data stores.
@@ -101,7 +101,7 @@ executables: []
 extensions: []
 extra_rdoc_files: []
 files:
--
+- .gitignore
 - Gemfile
 - LICENSE.txt
 - README.md
@@ -110,6 +110,7 @@ files:
 - lib/gru.rb
 - lib/gru/adapters.rb
 - lib/gru/adapters/redis_adapter.rb
+- lib/gru/configuration.rb
 - lib/gru/version.rb
 - lib/gru/worker_manager.rb
 - spec/gru/adapters/redis_adapter_spec.rb
@@ -124,17 +125,17 @@ require_paths:
 - lib
 required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
-  - -
+  - - '>='
    - !ruby/object:Gem::Version
      version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - -
+  - - '>='
    - !ruby/object:Gem::Version
      version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.4.
+rubygems_version: 2.4.4
 signing_key:
 specification_version: 4
 summary: An atomic worker/minion manager.