gru 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 3568199a19c30ad45c3e4888884901d19ebf8c2c
-  data.tar.gz: 987f4860c98601be7dca13be6dcd4ec590f84881
+  metadata.gz: 89ae1e251f9db9637786d27b5abeb7de7fd0bef7
+  data.tar.gz: 0dbd72c564a5622ec09abd483d408c7c851e072b
 SHA512:
-  metadata.gz: 386d66bc3221a042fc314ff088dfec031a68def5245873dfb3ba99af9a8567a49096ac9e6f8d97ef038ea077ee8ac429c5e62b09803afb2a9d270b5436940238
-  data.tar.gz: a2ffe6817185cd9c44fd3aaa0968b17bf49a6e27ba004dcaca2007d5998f4f8a6aeffe9e3fa03b73f601ba7d60f6be2e8f7a773a9783a236d36778342e2fe038
+  metadata.gz: b3740b2ee29bc4f65968eedd061d5f6fb506a24152088329e7297ecc519b67f8f1f044b03fe515a185987a09691b7b0bf564a9fa0cc2801adca8072df8fd2796
+  data.tar.gz: f360078d5e374287902e5ce48d78cd6015dd7a83bf986b993ad5a771f2dda556cc64335848f48f81e2dbfe800b3f15f2af04255b013b5e9ff5efc30e4774384c
@@ -190,7 +190,8 @@ module Gru
   host_running,global_running,host_max,global_max = worker_counts(worker)
   result = false
   if rebalance_cluster?
-    result = host_running.to_i < max_workers_per_host(global_max,host_max)
+    result = host_running.to_i < max_workers_per_host(global_max,host_max) &&
+      host_running.to_i < @settings.max_worker_processes_per_host
   else
     result = host_running.to_i < host_max.to_i
   end
@@ -201,7 +202,8 @@ module Gru
   host_running,global_running,host_max,global_max = worker_counts(worker)
   result = false
   if rebalance_cluster?
-    result = host_running.to_i > max_workers_per_host(global_max,host_max)
+    result = host_running.to_i > max_workers_per_host(global_max,host_max) ||
+      host_running.to_i > @settings.max_worker_processes_per_host
   else
     result = host_running.to_i > host_max.to_i
   end
@@ -3,7 +3,8 @@ require 'gru/adapters/redis_adapter'
 
 module Gru
   class Configuration
-    attr_reader :cluster_maximums, :host_maximums, :rebalance_flag, :adapter, :cluster_name, :environment_name, :presume_host_dead_after, :client_settings, :manage_worker_heartbeats
+    attr_reader :cluster_maximums, :host_maximums, :rebalance_flag, :adapter, :cluster_name,
+      :environment_name, :presume_host_dead_after, :client_settings, :manage_worker_heartbeats, :max_worker_processes_per_host
     def initialize(settings)
       @host_maximums = settings.delete(:host_maximums) || settings.delete(:cluster_maximums)
       @cluster_maximums = settings.delete(:cluster_maximums) || @host_maximums
@@ -13,6 +14,7 @@ module Gru
       @presume_host_dead_after = settings.delete(:presume_host_dead_after)
       @client_settings = settings.delete(:client_settings)
       @manage_worker_heartbeats = settings.delete(:manage_worker_heartbeats) || false
+      @max_worker_processes_per_host = settings.delete(:max_workers_per_host) || 30
       @adapter = Gru::Adapters::RedisAdapter.new(self)
       if @cluster_maximums.nil?
         raise ArgumentError, "Need at least a cluster configuration"
@@ -1,3 +1,3 @@
 module Gru
-  VERSION = "0.1.0"
+  VERSION = "0.1.1"
 end
@@ -152,7 +152,6 @@ describe Gru::Adapters::RedisAdapter do
   available_workers = adapter.provision_workers
   expect(available_workers).to eq({'test_worker' => 0})
   end
-
   end
   end
 
@@ -207,8 +206,9 @@ describe Gru::Adapters::RedisAdapter do
   Gru::Configuration.new({
     cluster_maximums: { 'test_worker' => 3 },
     environment_name: 'environment',
-    cluster_name: 'cluster',
-    rebalance_flag: true
+    max_workers_per_host: 10,
+    rebalance_flag: true,
+    cluster_name: 'cluster'
   })
 }
 
@@ -228,6 +228,36 @@ describe Gru::Adapters::RedisAdapter do
   adapter.provision_workers
   end
 
+  it "doesn't provision workers if local proc max has been reached" do
+    expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([10,20,20,30])
+    expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:max_workers").and_return(config.cluster_maximums)
+    expect(client).to receive(:keys).with("#{gru_key}:*:workers_running").exactly(3).times.and_return(["test1","test2"])
+    expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times.and_return("true")
+    expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(3).times
+    expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(3).times
+    expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(3).times
+    expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(3).times
+    available_workers = adapter.provision_workers
+    expect(available_workers).to eq({'test_worker' => 0})
+  end
+
+  it "provisions workers if local proc max hasn't been reached" do
+    expect(client).to receive(:multi).exactly(3).times.and_yield(client).and_return([9,20,20,30], [10,20,20,30])
+    expect(client).to receive(:hgetall).with("#{gru_key}:#{hostname}:max_workers").and_return(config.cluster_maximums)
+    expect(client).to receive(:setnx).exactly(1).times.and_return(true)
+    expect(client).to receive(:hincrby).with("#{gru_key}:foo:workers_running",'test_worker',1).exactly(1).times
+    expect(client).to receive(:hincrby).with("#{gru_key}:global:workers_running",'test_worker',1).exactly(1).times
+    expect(client).to receive(:del).with("#{gru_key}:test_worker").exactly(1).times
+    expect(client).to receive(:keys).with("#{gru_key}:*:workers_running").exactly(3).times.and_return(["test1","test2"])
+    expect(client).to receive(:get).with("#{gru_key}:rebalance").exactly(3).times.and_return("true")
+    expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:max_workers",'test_worker').exactly(3).times
+    expect(client).to receive(:hget).with("#{gru_key}:global:max_workers",'test_worker').exactly(3).times
+    expect(client).to receive(:hget).with("#{gru_key}:#{hostname}:workers_running",'test_worker').exactly(3).times
+    expect(client).to receive(:hget).with("#{gru_key}:global:workers_running",'test_worker').exactly(3).times
+    available_workers = adapter.provision_workers
+    expect(available_workers).to eq({'test_worker' => 1})
+  end
+
   it "increases load when workers are removed" do
 
   end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: gru
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.1.1
 platform: ruby
 authors:
 - Jeffrey Gillis
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-03-28 00:00:00.000000000 Z
+date: 2016-04-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis