gru 0.0.10 → 0.1.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 13495796cf42eb14f84201efe3ff4701a3a4b2e5
-  data.tar.gz: d965fb4d6e6003907e3b4253a6741c0d93549b1e
+  metadata.gz: 3568199a19c30ad45c3e4888884901d19ebf8c2c
+  data.tar.gz: 987f4860c98601be7dca13be6dcd4ec590f84881
 SHA512:
-  metadata.gz: faa6fec2b08740761f6a436ad5fba23586cd4ead30a3c7f45814b66180e2cfc4de738a834c2a1eab51b5710fa79c701b146937448c759f8f5a7ee871fccfa9e9
-  data.tar.gz: b369f35ebec7200cfae1ff6b02fb7f079a05f440648b575245bf8eb6f7a0982a06668b44872c466c0d5d5312d8976547234a7869a0ccb4685ce4b184044b2f96
+  metadata.gz: 386d66bc3221a042fc314ff088dfec031a68def5245873dfb3ba99af9a8567a49096ac9e6f8d97ef038ea077ee8ac429c5e62b09803afb2a9d270b5436940238
+  data.tar.gz: a2ffe6817185cd9c44fd3aaa0968b17bf49a6e27ba004dcaca2007d5998f4f8a6aeffe9e3fa03b73f601ba7d60f6be2e8f7a773a9783a236d36778342e2fe038
lib/gru/adapters/redis_adapter.rb CHANGED
@@ -5,9 +5,9 @@ module Gru
     class RedisAdapter
       attr_reader :client

-      def initialize(client,settings)
-        @client = client
+      def initialize(settings)
         @settings = settings
+        @client = initialize_client(settings.client_settings)
       end

       def set_worker_counts
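The adapter now owns its Redis connection: initialize takes only the configuration and builds the client from its client_settings via initialize_client (added at the bottom of this file). A minimal sketch of the new construction path with a made-up settings hash; the host/port values are illustrative, not gem defaults:

require 'gru'

# Hypothetical settings; :client_settings is handed straight to Redis.new
# by the adapter's initialize_client, so any redis-rb option hash works.
settings = {
  cluster_maximums: { 'test_worker' => '3' },
  client_settings:  { host: 'localhost', port: 6379 }
}

config  = Gru::Configuration.new(settings)
adapter = Gru::Adapters::RedisAdapter.new(config)  # no client argument anymore
adapter.client                                     # => Redis instance built from :client_settings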
@@ -18,6 +18,7 @@ module Gru
         set_max_worker_counts(@settings.host_maximums)
         register_global_workers(@settings.cluster_maximums)
         set_max_global_worker_counts(@settings.cluster_maximums)
+        update_heartbeat if manage_heartbeat?
       end

       def provision_workers
@@ -51,32 +52,31 @@ module Gru
       def release_workers
         workers = max_host_workers
         workers.keys.each do |worker|
-          host_running_count = local_running_count(worker)
-          running_count = host_running_count
-          global_running_count = host_running_count
-          host_running_count.times do
-            if global_running_count > 0
-              global_running_count = send_message(:hincrby, global_workers_running_key,worker,-1)
-            end
-            if running_count > 0
-              running_count = send_message(:hincrby, host_workers_running_key,worker,-1)
-            end
+          host_count = local_running_count(worker)
+          global_count = host_count
+          host_count.times do
+            global_count = send_message(:hincrby, global_workers_running_key, worker, -1) if global_count > 0
+            host_count = send_message(:hincrby, host_workers_running_key,worker,-1) if host_count > 0
           end
         end
         send_message(:del, host_workers_running_key)
         send_message(:del, host_max_worker_key)
+        send_message(:hdel, heartbeat_key, hostname)
       end

-      def release_presumed_dead_workers
-        presumed_dead_cluster_members.each_pair do |hostname,timestamp|
+      def release_presumed_dead_worker_hosts
+        return false unless manage_heartbeat?
+        update_heartbeat
+        presumed_dead_worker_hosts.each_pair do |hostname,timestamp|
           lock_key = "#{gru_key}:removing_dead_host:#{hostname}"
           if send_message(:setnx,lock_key,Time.now.to_i)
-            remove_dead_host_workers_from_counts(hostname)
+            remove_worker_host(hostname)
+            send_message(:hdel,heartbeat_key,hostname)
             send_message(:del,lock_key)
             return true
           end
-          false
         end
+        false
       end

       private
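Dead-host cleanup is guarded by a throwaway SETNX lock so that only one surviving host performs it. A rough sketch of that pattern with plain redis-rb, outside of Gru; the key name follows the adapter's lock_key format but is otherwise made up:

require 'redis'

redis    = Redis.new
lock_key = 'GRU:default:default:removing_dead_host:some-host'

# The first caller wins the SETNX and does the cleanup; everyone else
# sees false and skips this host.
if redis.setnx(lock_key, Time.now.to_i)
  # ... decrement the dead host's worker counts here ...
  redis.del(lock_key) # drop the lock once the counts are cleaned up
end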
@@ -122,17 +122,24 @@ module Gru
         send_message(:hset,global_max_worker_key,worker,count)
       end

-      def remove_dead_host_workers_from_counts(hostname)
-        lock_key = "#{gru_key}:#{hostname}"
-        if send_message(:setnx,lock_key,Time.now.to_i)
-          workers_running_on_dead_host = send_message(:hgetall, "#{gru_key}:#{hostname}:workers_running")
-          workers_running_on_dead_host.each_pair do |worker_name, count|
-            send_message(:hincrby,"#{gru_key}:#{hostname}:workers_running",worker_name,Integer(count)*-1)
-            send_message(:hincrby,global_workers_running_key,worker_name,Integer(count)*-1)
+      def manage_heartbeat?
+        @settings.manage_worker_heartbeats
+      end
+
+      def update_heartbeat
+        send_message(:hset,heartbeat_key,hostname,Time.now.to_i)
+      end
+
+      def remove_worker_host(hostname)
+        workers = send_message(:hgetall, "#{gru_key}:#{hostname}:workers_running")
+        workers.each_pair do |worker_name, count|
+          local_count, global_count = Integer(count), Integer(count)
+          Integer(count).times do
+            local_count = send_message(:hincrby,"#{gru_key}:#{hostname}:workers_running",worker_name,-1) if local_count > 0
+            global_count = send_message(:hincrby,global_workers_running_key,worker_name,-1) if global_count > 0
           end
-          send_message(:del,lock_key)
-          send_message(:hdel,resque_cluster_pings_key,hostname)
         end
+        send_message(:del,"#{gru_key}:#{hostname}:workers_running")
       end

       def reset_removed_global_worker_counts(workers)
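All heartbeats for a cluster live in one Redis hash, keyed by hostname with an epoch timestamp as the value; this is what update_heartbeat writes and workers_with_heartbeats reads back. A small illustration against a bare Redis client; the hostname is invented and the key uses the default environment and cluster names:

require 'redis'

redis         = Redis.new
heartbeat_key = 'GRU:default:default:heartbeats' # "#{gru_key}:heartbeats"

redis.hset(heartbeat_key, 'worker-host-01', Time.now.to_i)
redis.hgetall(heartbeat_key)
# => { "worker-host-01" => "<epoch seconds, returned as a string>" }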
@@ -175,8 +182,8 @@ module Gru
         send_message(:hgetall,global_max_worker_key)
       end

-      def resque_cluster_members
-        send_message(:hgetall, resque_cluster_pings_key)
+      def workers_with_heartbeats
+        send_message(:hgetall, heartbeat_key)
       end

       def reserve_worker?(worker)
@@ -215,8 +222,8 @@ module Gru
         counts
       end

-      def presumed_dead_cluster_members
-        resque_cluster_members.select{ |hostname, timestamp| Time.parse(timestamp).to_i + presume_host_dead_after < Time.now.to_i}
+      def presumed_dead_worker_hosts
+        workers_with_heartbeats.select{ |hostname, timestamp| timestamp.to_i + presume_host_dead_after < Time.now.to_i}
       end

       def local_running_count(worker)
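Because the heartbeat values are plain epoch seconds, the dead-host test becomes integer arithmetic instead of the old Time.parse on resque-cluster pings. A small illustration of the select condition; the 120-second threshold is an arbitrary example, not a gem default:

presume_host_dead_after = 120 # seconds, from the :presume_host_dead_after setting

heartbeats = {
  'live-host' => (Time.now.to_i - 30).to_s,  # beat 30 seconds ago
  'dead-host' => (Time.now.to_i - 600).to_s  # beat 10 minutes ago
}

heartbeats.select { |hostname, timestamp|
  timestamp.to_i + presume_host_dead_after < Time.now.to_i
}
# => { "dead-host" => "..." }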
@@ -270,12 +277,12 @@ module Gru
         "#{gru_key}:#{hostname}"
       end

-      def gru_key
-        "GRU:#{@settings.environment_name}:#{@settings.cluster_name}"
+      def heartbeat_key
+        "#{gru_key}:heartbeats"
       end

-      def resque_cluster_pings_key
-        "resque:cluster:#{@settings.cluster_name}:#{@settings.environment_name}:pings"
+      def gru_key
+        "GRU:#{@settings.environment_name}:#{@settings.cluster_name}"
       end

       def hostname
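With the resque-cluster ping key gone, every key now hangs off the GRU namespace. For an environment named production and a cluster named scrapers (illustrative names), the keys built by these helpers would look like:

# gru_key        => "GRU:production:scrapers"
# heartbeat_key  => "GRU:production:scrapers:heartbeats"
# host key       => "GRU:production:scrapers:<hostname>"
# dead-host lock => "GRU:production:scrapers:removing_dead_host:<hostname>"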
@@ -286,6 +293,9 @@ module Gru
         @client.send(action,*args)
       end

+      def initialize_client(config=nil)
+        Redis.new(config || {})
+      end
     end
   end
 end
lib/gru/configuration.rb CHANGED
@@ -3,25 +3,20 @@ require 'gru/adapters/redis_adapter'

 module Gru
   class Configuration
-    attr_reader :cluster_maximums, :host_maximums, :rebalance_flag, :adapter, :cluster_name, :environment_name, :presume_host_dead_after
+    attr_reader :cluster_maximums, :host_maximums, :rebalance_flag, :adapter, :cluster_name, :environment_name, :presume_host_dead_after, :client_settings, :manage_worker_heartbeats
     def initialize(settings)
       @host_maximums = settings.delete(:host_maximums) || settings.delete(:cluster_maximums)
       @cluster_maximums = settings.delete(:cluster_maximums) || @host_maximums
       @rebalance_flag = settings.delete(:rebalance_flag) || false
       @cluster_name = settings.delete(:cluster_name) || 'default'
       @environment_name = settings.delete(:environment_name) || 'default'
-      client = initialize_client(settings.delete(:client_settings))
       @presume_host_dead_after = settings.delete(:presume_host_dead_after)
-      @adapter = Gru::Adapters::RedisAdapter.new(client,self)
+      @client_settings = settings.delete(:client_settings)
+      @manage_worker_heartbeats = settings.delete(:manage_worker_heartbeats) || false
+      @adapter = Gru::Adapters::RedisAdapter.new(self)
       if @cluster_maximums.nil?
-        raise ArgumentError "Need at least a cluster configuration"
+        raise ArgumentError, "Need at least a cluster configuration"
       end
     end
-
-    private
-
-    def initialize_client(config=nil)
-      Redis.new(config || {})
-    end
   end
 end
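Configuration no longer builds a Redis client itself; it just records :client_settings and the new :manage_worker_heartbeats flag and hands itself to the adapter. A hedged sketch of a caller using the new options through Gru.create (as the integration spec below does); names and numbers are illustrative:

require 'gru'

manager = Gru.create(
  environment_name:         'production',                      # illustrative
  cluster_name:             'scrapers',                        # illustrative
  cluster_maximums:         { 'test_worker' => '3' },
  client_settings:          { host: 'localhost', port: 6379 }, # passed through to Redis.new
  manage_worker_heartbeats: true,                              # opt in to heartbeat tracking
  presume_host_dead_after:  120                                # seconds before a silent host is reaped
)

manager.provision_workers # => { 'test_worker' => 3 } on the first host, per the integration spec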
lib/gru/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Gru
-  VERSION = "0.0.10"
+  VERSION = "0.1.0"
 end
lib/gru/worker_manager.rb CHANGED
@@ -7,7 +7,7 @@ module Gru
     end

     def expire_dead_cluster_members
-      @adapter.release_presumed_dead_workers
+      @adapter.release_presumed_dead_worker_hosts
     end

     def register_workers
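Per the worker_manager spec change further down, adjust_workers drives provisioning, expiring and dead-host release in one call, so a host process mostly just polls it. A rough usage loop, not taken from the gem's docs; the interval and worker names are arbitrary:

require 'gru'

manager = Gru.create(
  cluster_maximums:         { 'test_worker' => '2' },
  manage_worker_heartbeats: true,
  presume_host_dead_after:  120
)

loop do
  deltas = manager.adjust_workers # e.g. { 'test_worker' => -1 }
  deltas.each do |worker, count|
    # start `count` workers when positive, stop `-count` when negative
  end
  sleep 10
end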
spec/gru/adapters/redis_adapter_spec.rb CHANGED
@@ -5,7 +5,7 @@ require 'pry'
 describe Gru::Adapters::RedisAdapter do
   before(:each) do
     allow(Socket).to receive(:gethostname).and_return(hostname)
-    allow_any_instance_of(Gru::Configuration).to receive(:initialize_client).and_return(client)
+    allow_any_instance_of(Gru::Adapters::RedisAdapter).to receive(:initialize_client).and_return(client)
   end

   let(:hostname) { 'foo' }
@@ -19,14 +19,13 @@ describe Gru::Adapters::RedisAdapter do
   }

   let(:adapter) {
-    Gru::Adapters::RedisAdapter.new(client,config)
+    Gru::Adapters::RedisAdapter.new(config)
   }

   let(:gru_key) {
     "GRU:#{config.environment_name}:#{config.cluster_name}"
   }

-
   context "initialization" do
     it "has a client" do
       expect(adapter.client).to eq(client)
@@ -227,7 +226,6 @@ describe Gru::Adapters::RedisAdapter do
       expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(3).times
       expect(client).to receive(:get).with("#{gru_key}:rebalance").and_return("true").exactly(3).times
       adapter.provision_workers
-
     end

     it "increases load when workers are removed" do
spec/gru/gru_integration_spec.rb ADDED
@@ -0,0 +1,171 @@
+require './lib/gru.rb'
+require 'socket'
+require 'digest'
+require 'redis'
+require 'pry'
+
+
+# This requires a redis server instance running on localhost
+# un-pend to run with actual redis-server
+xdescribe Gru do
+
+  after {
+    client.flushdb
+  }
+
+  let(:settings) {
+    {
+      cluster_maximums: {
+        'test_worker' => '3'
+      }
+    }
+  }
+
+  let(:client) {
+    Redis.new
+  }
+
+  let(:hostname) {
+    Socket.gethostname
+  }
+
+  let(:host_key) {
+    "GRU:default:default:#{hostname}"
+  }
+
+  let(:global_key) {
+    "GRU:default:default:global"
+  }
+
+  context "configuration" do
+    it "sets configuration in redis" do
+      manager = Gru.create(settings.clone)
+      expect(client.hgetall("#{host_key}:max_workers")).to eq(settings[:cluster_maximums])
+      expect(manager.provision_workers).to eq({ 'test_worker' => 3 })
+      expect(client.hgetall("#{host_key}:workers_running")).to eq(settings[:cluster_maximums])
+    end
+  end
+
+  context "adjusting worker counts" do
+    it "adjusts worker counts based on changing global counts" do
+      manager = Gru.create(settings.clone)
+      expect(manager.provision_workers).to eq({ 'test_worker' => 3 })
+      client.hset("#{global_key}:max_workers", 'test_worker', 1)
+      expect(manager.adjust_workers).to eq({ 'test_worker' => -2})
+      expect(manager.adjust_workers).to eq({ 'test_worker' => 0})
+      expect(client.hget("#{global_key}:workers_running", 'test_worker')).to eq("1")
+      client.hset("#{global_key}:max_workers", 'test_worker', 3)
+      expect(manager.adjust_workers).to eq({ 'test_worker' => 2})
+      expect(client.hget("#{global_key}:workers_running", 'test_worker')).to eq("3")
+    end
+
+    it "adjusts worker counts based on changing local counts" do
+      manager = Gru.create(settings.clone)
+      expect(manager.provision_workers).to eq({ 'test_worker' => 3 })
+      client.hset("#{host_key}:max_workers", 'test_worker', 1)
+      expect(manager.adjust_workers).to eq({ 'test_worker' => -1})
+      expect(manager.adjust_workers).to eq({ 'test_worker' => -1})
+      expect(manager.adjust_workers).to eq({ 'test_worker' => 0})
+      expect(client.hget("#{global_key}:workers_running", 'test_worker')).to eq("1")
+      expect(client.hget("#{host_key}:workers_running", 'test_worker')).to eq("1")
+    end
+
+    it "does not exceed local maximum counts" do
+      manager = Gru.create(settings.clone)
+      expect(manager.provision_workers).to eq({ 'test_worker' => 3 })
+      client.hset("#{host_key}:max_workers", 'test_worker', 2)
+      expect(manager.adjust_workers).to eq({ 'test_worker' => -1})
+      expect(manager.adjust_workers).to eq({ 'test_worker' => 0})
+      client.hset("#{global_key}:max_workers", 'test_worker', 4)
+      expect(manager.adjust_workers).to eq({ 'test_worker' => 0})
+      expect(client.hget("#{global_key}:workers_running", 'test_worker')).to eq("2")
+      expect(client.hget("#{host_key}:workers_running", 'test_worker')).to eq("2")
+    end
+
+    it "does not exceed global maximum counts" do
+      manager = Gru.create(settings.clone)
+      expect(manager.provision_workers).to eq({ 'test_worker' => 3 })
+      client.hset("#{host_key}:max_workers", 'test_worker', 0)
+      expect(manager.adjust_workers).to eq({ 'test_worker' => 0})
+      client.hset("#{global_key}:max_workers", 'test_worker', 3)
+      expect(manager.adjust_workers).to eq({ 'test_worker' => 0})
+      client.hset("#{host_key}:max_workers", 'test_worker', 4)
+      expect(manager.adjust_workers).to eq({ 'test_worker' => 0})
+      expect(client.hget("#{global_key}:workers_running", 'test_worker')).to eq("3")
+      expect(client.hget("#{host_key}:workers_running", 'test_worker')).to eq("3")
+    end
+  end
+
+  context "multiple workers" do
+    let(:settings) {
+      {
+        rebalance_flag: true,
+        manage_worker_heartbeats: true,
+        cluster_maximums: {
+          'test_worker' => '6'
+        }
+      }
+    }
+
+    let(:test_client) {
+      Redis.new
+    }
+
+    let(:adapter1) {
+      adapter1 = Gru::Adapters::RedisAdapter.new(Gru::Configuration.new(settings.clone))
+      allow(adapter1).to receive(:hostname).and_return('test1')
+      adapter1
+    }
+
+    let(:adapter2) {
+      adapter2 = Gru::Adapters::RedisAdapter.new(Gru::Configuration.new(settings.clone))
+      allow(adapter2).to receive(:hostname).and_return('test2')
+      adapter2
+    }
+
+    let(:adapter3) {
+      adapter3 = Gru::Adapters::RedisAdapter.new(Gru::Configuration.new(settings.clone))
+      allow(adapter3).to receive(:hostname).and_return('test3')
+      adapter3
+    }
+
+    it "adjusts workers when new hosts are added" do
+      test1 = Gru::WorkerManager.new(adapter1)
+      test2 = Gru::WorkerManager.new(adapter2)
+      test3 = Gru::WorkerManager.new(adapter3)
+      test1.register_workers
+      test2.register_workers
+      expect(test_client.hget('GRU:default:default:test1:workers_running', 'test_worker')).to eq('0')
+      expect(test1.adjust_workers).to eq( { 'test_worker' => 3 })
+      expect(test2.adjust_workers).to eq( { 'test_worker' => 3 })
+      test3.register_workers
+      expect(test1.adjust_workers).to eq( { 'test_worker' => -1 })
+      expect(test1.adjust_workers).to eq( { 'test_worker' => 0 })
+      expect(test3.adjust_workers).to eq( { 'test_worker' => 1 })
+      expect(test3.adjust_workers).to eq( { 'test_worker' => 0 })
+      expect(test2.adjust_workers).to eq( { 'test_worker' => -1 })
+      expect(test2.adjust_workers).to eq( { 'test_worker' => 0 })
+      expect(test3.adjust_workers).to eq( { 'test_worker' => 1 })
+      expect(test3.adjust_workers).to eq( { 'test_worker' => 0 })
+      expect(test1.adjust_workers).to eq( { 'test_worker' => 0 })
+      expect(test2.adjust_workers).to eq( { 'test_worker' => 0 })
+    end
+
+    it "recovers lost worker counts" do
+      test1 = Gru::WorkerManager.new(adapter1)
+      test2 = Gru::WorkerManager.new(adapter2)
+      test3 = Gru::WorkerManager.new(adapter3)
+      test1.register_workers
+      test2.register_workers
+      expect(test1.adjust_workers).to eq( { 'test_worker' => 3 })
+      expect(test2.adjust_workers).to eq( { 'test_worker' => 3 })
+      test_client.hset("GRU:default:default:heartbeats", 'test1', Time.now - 300)
+      expect(test2.adjust_workers).to eq( { 'test_worker' => 3 })
+      test3.register_workers
+      expect(test3.adjust_workers).to eq( { 'test_worker' => 0 })
+      expect(test2.adjust_workers).to eq( { 'test_worker' => -3 })
+      test_client.hset("GRU:default:default:heartbeats", 'test3', Time.now - 300)
+      expect(test2.adjust_workers).to eq( { 'test_worker' => 3 })
+    end
+  end
+end
spec/gru/worker_manager_spec.rb CHANGED
@@ -31,7 +31,7 @@ describe Gru::WorkerManager do
   it "determines new workers to create" do
     expect(adapter).to receive(:provision_workers).and_return({})
     expect(adapter).to receive(:expire_workers).and_return({})
-    expect(adapter).to receive(:release_presumed_dead_workers)
+    expect(adapter).to receive(:release_presumed_dead_worker_hosts)
     manager.adjust_workers
   end

metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: gru
 version: !ruby/object:Gem::Version
-  version: 0.0.10
+  version: 0.1.0
 platform: ruby
 authors:
 - Jeffrey Gillis
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-03-18 00:00:00.000000000 Z
+date: 2016-03-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis
@@ -114,6 +114,7 @@ files:
 - lib/gru/version.rb
 - lib/gru/worker_manager.rb
 - spec/gru/adapters/redis_adapter_spec.rb
+- spec/gru/gru_integration_spec.rb
 - spec/gru/worker_manager_spec.rb
 homepage: ''
 licenses:
@@ -141,4 +142,5 @@ specification_version: 4
 summary: An atomic worker/minion manager.
 test_files:
 - spec/gru/adapters/redis_adapter_spec.rb
+- spec/gru/gru_integration_spec.rb
 - spec/gru/worker_manager_spec.rb