gru 0.1.2 → 0.1.3

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: c8569b8dac503cf08983a463bb293a277f09f301
- data.tar.gz: 45da184cdbe70af5c0f871d65a9d4f60de2555f6
+ metadata.gz: 15b0d3917dc6d07575874b7f0a87cbc5b109214c
+ data.tar.gz: 1d164bcef80f5b1f7e6a3594de72e6715847a85a
  SHA512:
- metadata.gz: ad6bc8c502243e5f9f8ce86dfe6275758f4eff9ad55f562d070ad99484782f58d1bb49730413d3314e42c5dd92b8fb6f4950a9c290fb97d1168692fd168a264a
- data.tar.gz: 21d48ee74d9fc7bda158b7f2485bbb97ba35b6e9401bbc2a306070a802262f8ae7a78c65df7fdb0d552d6e013cdffcedd95989cd61719e59b10c31cdab60fea8
+ metadata.gz: 45547c39e99a83b4c30c5e8e50aee8a47e24b6bfc120f4ca245ad6ef7df872ab54991a032716d2c57467b15531d53c6fe1fe9e0f8e36103c92fab236175b5366
+ data.tar.gz: d942a5d16235720661c81f3af3ab841300b55f7dd0d54732fd0c308d9da4bd18743f11535bb595fa0dfe8c15ba2ca74fd0fd74d0f55ef2404a9d519851488ea3
@@ -18,6 +18,7 @@ module Gru
  set_max_worker_counts(@settings.host_maximums)
  register_global_workers(@settings.cluster_maximums)
  set_max_global_worker_counts(@settings.cluster_maximums)
+ remove_stale_worker_entries
  update_heartbeat if manage_heartbeat?
  end
 
@@ -79,6 +80,13 @@ module Gru
  false
  end
 
+ def remove_stale_worker_entries
+ stale_keys = max_global_workers.keys - @settings.cluster_maximums.keys
+ stale_keys.each do |key|
+ send_message(:hdel, global_max_worker_key, key)
+ end
+ end
+
  private
 
  def register_workers(workers)
@@ -164,8 +172,8 @@ module Gru
  end
 
  def adjust_workers(worker,amount)
- lock_key = "#{gru_key}:#{worker}"
- if send_message(:setnx,lock_key,Time.now.to_i)
+ lock_key = "#{gru_key}:locks:#{worker}"
+ if send_message(:set,lock_key,Time.now.to_i, { nx: true, px: 10000 })
  send_message(:hincrby,host_workers_running_key,worker,amount)
  send_message(:hincrby,global_workers_running_key,worker,amount)
  send_message(:del,lock_key)
@@ -1,3 +1,3 @@
  module Gru
- VERSION = "0.1.2"
+ VERSION = "0.1.3"
  end
@@ -81,8 +81,8 @@ describe Gru::Adapters::RedisAdapter do
  expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([0,-1,3,3])
  expect(client).to receive(:hset).with("#{gru_key}:global:workers_running",'test_worker', 0)
  expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(1).times.and_return(0)
- expect(client).to receive(:setnx).exactly(3).times.and_return(true)
- expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(3).times
+ expect(client).to receive(:set).exactly(3).times.and_return(true)
+ expect(client).to receive(:del).with("#{gru_key}:locks:test_worker").exactly(3).times
  expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times
  expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:workers_running").exactly(3).times.and_return({'test_worker' => '1'})
  expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',1).exactly(3).times
@@ -97,8 +97,8 @@ describe Gru::Adapters::RedisAdapter do
 
  it "returns workers when max local and global counts have not been reached" do
  expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([1,1,3,3])
- expect(client).to receive(:setnx).exactly(3).times.and_return(true)
- expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(3).times
+ expect(client).to receive(:set).exactly(3).times.and_return(true)
+ expect(client).to receive(:del).with("#{gru_key}:locks:test_worker").exactly(3).times
  expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times
  expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:workers_running").exactly(3).times.and_return({'test_worker' => '1'})
  expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',1).exactly(3).times
@@ -173,8 +173,8 @@ describe Gru::Adapters::RedisAdapter do
 
  it "removes workers when local maximum has been exceeded" do
  expect(client).to receive(:multi).exactly(1).times.and_yield(client).and_return([3,3,1,3])
- expect(client).to receive(:setnx).exactly(1).times.and_return(true)
- expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(1).times
+ expect(client).to receive(:set).exactly(1).times.and_return(true)
+ expect(client).to receive(:del).with("#{gru_key}:locks:test_worker").exactly(1).times
  expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',-1).exactly(1).times
  expect(client).to receive(:hincrby).with("#{gru_key}:#{hostname}:workers_running",'test_worker',-1).exactly(1).times
  expect(adapter.expire_workers).to eq({'test_worker' => -1})
@@ -182,8 +182,8 @@ describe Gru::Adapters::RedisAdapter do
 
  it "removes workers when global maximum has been exceeded" do
  expect(client).to receive(:multi).exactly(1).times.and_yield(client).and_return([3,3,3,1])
- expect(client).to receive(:setnx).exactly(1).times.and_return(true)
- expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(1).times
+ expect(client).to receive(:set).exactly(1).times.and_return(true)
+ expect(client).to receive(:del).with("#{gru_key}:locks:test_worker").exactly(1).times
  expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',-1).exactly(1).times
  expect(client).to receive(:hincrby).with("#{gru_key}:#{hostname}:workers_running",'test_worker',-1).exactly(1).times
  expect(adapter.expire_workers).to eq({'test_worker' => -1})
@@ -225,10 +225,10 @@ describe Gru::Adapters::RedisAdapter do
  expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(3).times
  expect(client).to receive(:keys).with("#{gru_key}:*:workers_running").exactly(3).times.and_return(['foo'])
  expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:workers_running").exactly(3).times.and_return({'test_worker' => '1'})
- expect(client).to receive(:setnx).exactly(3).times.and_return(true)
+ expect(client).to receive(:set).exactly(3).times.and_return(true)
  expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',1).exactly(3).times
  expect(client).to receive(:hincrby).with("#{gru_key}:#{hostname}:workers_running",'test_worker',1).exactly(3).times
- expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(3).times
+ expect(client).to receive(:del).with("#{gru_key}:locks:test_worker").exactly(3).times
  expect(client).to receive(:get).with("#{gru_key}:rebalance").and_return("true").exactly(3).times
  adapter.provision_workers
  end
@@ -250,12 +250,12 @@ describe Gru::Adapters::RedisAdapter do
  it "provisions workers if local proc max hasn't been reached" do
  expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([9,20,20,30], [10,20,20,30])
  expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:max_workers").and_return(config.cluster_maximums)
- expect(client).to receive(:setnx).exactly(1).times.and_return(true)
+ expect(client).to receive(:set).exactly(1).times.and_return(true)
  expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:workers_running").exactly(3).times.and_return(
  {'test_worker' => '9'}, {'test_worker' => '10'}, {'test_worker' => '10'})
  expect(client).to receive(:hincrby).with("#{gru_key}:foo:workers_running",'test_worker',1).exactly(1).times
  expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',1).exactly(1).times
- expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(1).times
+ expect(client).to receive(:del).with("#{gru_key}:locks:test_worker").exactly(1).times
  expect(client).to receive(:keys).with("#{gru_key}:*:workers_running").exactly(3).times.and_return(["test1","test2"])
  expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times.and_return("true")
  expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(3).times
@@ -485,4 +485,34 @@ xdescribe Gru do
  })
  end
  end
+
+ context "Remove Stale Workers" do
+ let(:new_settings) {
+ {
+ cluster_maximums: {
+ 'foo_worker' => '3'
+ }
+ }
+ }
+
+ let(:adapter1) {
+ adapter1 = Gru::Adapters::RedisAdapter.new(Gru::Configuration.new(settings.clone))
+ allow(adapter1).to receive(:hostname).and_return('test1')
+ adapter1
+ }
+
+ let(:adapter2) {
+ adapter2 = Gru::Adapters::RedisAdapter.new(Gru::Configuration.new(new_settings.clone))
+ allow(adapter2).to receive(:hostname).and_return('test2')
+ adapter2
+ }
+
+ it "removes stale worker keys" do
+ test1 = Gru::WorkerManager.new(adapter1)
+ test2 = Gru::WorkerManager.new(adapter2)
+ test1.register_workers
+ test2.register_workers
+ expect(client.hgetall("GRU:default:default:global:max_workers").keys).to eq(['foo_worker'])
+ end
+ end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: gru
  version: !ruby/object:Gem::Version
- version: 0.1.2
+ version: 0.1.3
  platform: ruby
  authors:
  - Jeffrey Gillis
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-04-22 00:00:00.000000000 Z
+ date: 2016-07-18 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: redis