nexia_worker_roulette 0.2.2 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 485a631bf7929866af1715af3649ce0ef5abcd49
-   data.tar.gz: 562bdf170332fe368447e8c2dac02ec9a6fbabd8
+   metadata.gz: 53d36e15910ce2db5cd63152496ee34dfa5c9b0f
+   data.tar.gz: 28376e7730e8e6fbe960f53a9936897307bc1bc8
  SHA512:
-   metadata.gz: 24ad87c7f75d708465ae59e0844f9157feb850bf8c631da32deb47f3f6d480ae80c7a2848a539acf3d25263602ec9c6126418fb31f58a55eedd11f225f2e4a27
-   data.tar.gz: d40ea3cdb9da663ae62d5413a7445b14daf5170f4c42353f77609c569c9351fb4b7e6442074e83a8c64240ef830c4c906f817b2e1bb22186d6713dd13daf0f2a
+   metadata.gz: aef9594153c398471a13a261e673a442d085c670c3782a17c673112ec4ffbfe55cf309fbfce5e746af8a66bbdb8ed629e71ca836d1fb160f6cc9b49b1088761e
+   data.tar.gz: 79cfc9c99a374508c56b2783d180f170671723530eccea31f6de1a8bd8dc7e19160e6b87dd1708b8eb3b68e96e03f32df40d9efa7d8052ca6e5a79ee21e0ec8f
@@ -0,0 +1,18 @@
+ require_relative "queue_metric_tracker"
+
+ module WorkerRoulette
+   class BatchSize
+     include ::QueueMetricTracker
+
+     def track(sender, work_orders, _remaining)
+       return unless enabled?
+
+       batch_size = work_orders.length
+       return if batch_size == 0
+
+       if value = calculate_stats(:batch_size, batch_size)
+         tracker_send(message("batch_size", channel(sender), value))
+       end
+     end
+   end
+ end
@@ -0,0 +1,9 @@
+ class String
+   def underscore
+     self.gsub(/::/, '/').
+       gsub(/([A-Z]+)([A-Z][a-z])/,'\1_\2').
+       gsub(/([a-z\d])([A-Z])/,'\1_\2').
+       tr("-", "_").
+       downcase
+   end
+ end
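
The patch above adds an ActiveSupport-style `String#underscore` so a tracker class name can be mapped to its config key (see `enabled?` in the queue metric tracker further down). A quick sketch of the outputs it produces; the example strings are mine, not taken from the gem's tests:

# Illustration only: what the String#underscore monkey patch returns.
# These inputs are examples; the patch itself ships in
# lib/worker_roulette/monkey_patches.rb.
"BatchSize".underscore                  # => "batch_size"
"WorkerRoulette::QueueDepth".underscore # => "worker_roulette/queue_depth"
"queue-latency".underscore              # => "queue_latency"
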
@@ -0,0 +1,15 @@
+ require_relative "queue_metric_tracker"
+
+ module WorkerRoulette
+   class QueueDepth
+     include ::QueueMetricTracker
+
+     def track(sender, work_orders, remaining)
+       return unless enabled?
+
+       if value = calculate_stats(:queue_depth, remaining)
+         tracker_send(message("queue_depth", channel(sender), value))
+       end
+     end
+   end
+ end
@@ -0,0 +1,32 @@
+ module WorkerRoulette
+   module QueueLatency
+     GRANULARITY = 1_000_000
+
+     class Foreman
+       def process(work_order, _channel)
+         work_order['headers'].merge!(
+           "queued_at" => (Time.now.to_f * GRANULARITY).to_i) if work_order.is_a?(Hash) && work_order["headers"]
+         work_order
+       end
+     end
+
+     class Tradesman
+       include QueueMetricTracker
+
+       def process(work_order, channel)
+         send_latency(work_order["headers"]["queued_at"], channel)
+         work_order
+       end
+
+       def send_latency(queued_at, channel)
+         return unless queued_at
+
+         latency_ns = (Time.now.to_f * GRANULARITY).to_i - queued_at
+
+         if value = calculate_stats(:queue_latency, latency_ns / 1000.0)
+           tracker_send(message("queue_latency(ms)", channel, value))
+         end
+       end
+     end
+   end
+ end
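
For orientation, a minimal sketch of how the two halves of this preprocessor pair interact: the Foreman stamps a microsecond `queued_at` header when a work order is enqueued, and the Tradesman later subtracts it from the dequeue time before reporting. Driving the Foreman by hand like this is illustrative only; in the gem these objects are registered as preprocessors.

# Illustration only: calling the Foreman half of QueueLatency by hand
# (assumes the gem is loaded; normally WorkerRoulette invokes #process).
foreman = WorkerRoulette::QueueLatency::Foreman.new
order   = { "headers" => { "sender" => "katie_80" }, "payload" => "hello" }

stamped = foreman.process(order, "a_channel")
stamped["headers"]["queued_at"]
# => e.g. 1438560000123456 -- Time.now in microseconds (GRANULARITY = 1_000_000);
#    the Tradesman subtracts this from its own timestamp to compute the latency.
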
@@ -0,0 +1,71 @@
+ module QueueMetricTracker
+   def tracker_send(msg)
+     UDPSocket.new.send(msg, 0, config[:metric_host][:host_ip], config[:metric_host][:host_port])
+   end
+
+   def granularity
+     config[:granularity] || 100
+   end
+
+   def calculate_stats(stat_name, value)
+     calculator(stat_name).add(value)
+   end
+
+   def calculator(stat_name)
+     QueueMetricTracker.calculators[stat_name] ||= QueueMetricTracker::StatCalculator.new(granularity)
+   end
+
+   def channel(sender)
+     (sender.split ":").first
+   end
+
+   def config
+     QueueMetricTracker.config
+   end
+
+   def message(label, channel, value)
+     "#{label},server_name=#{config[:server_name]},channel=#{channel} value=#{value} #{(Time.now.to_f * 1_000_000_000).to_i}"
+   end
+
+   def enabled?
+     return false unless config && config[:metrics]
+
+     klass = self.class.to_s.split("::").last.underscore.to_sym
+     config[:metrics][klass] rescue false
+   end
+
+   class << self
+     attr_reader :config, :calculators
+     def configure(options)
+       @calculators = {}
+       @config = {
+         server_name: options[:server_name],
+         granularity: options[:granularity],
+         metric_host: {
+           host_ip: ip_address(options[:metric_host]),
+           host_port: options[:metric_host_port]
+         }
+       }
+       @config.merge!({
+         metrics: options[:metrics]
+       }) if options[:metrics]
+     end
+
+     def included(tracker)
+       @trackers ||= []
+       @trackers << tracker
+     end
+
+     def track_all(options)
+       @trackers.each do |tracker_class|
+         tracker = tracker_class.new
+         tracker.track(*options) if tracker.respond_to?(:track)
+       end
+     end
+
+     def ip_address(server_name)
+       server_name == "localhost" ? "127.0.0.1" : Resolv.new.getaddress(server_name).to_s
+     end
+
+   end
+ end
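
To make the new tracker module concrete, here is a hedged sketch of configuring it directly and of the wire format `message` builds (an InfluxDB-line-protocol-style string, `label,server_name=...,channel=... value=... <nanosecond timestamp>`, sent over UDP). The host, port, and metric flags below are placeholder values, not defaults the gem prescribes.

# Sketch only: configuring QueueMetricTracker by hand with placeholder values.
QueueMetricTracker.configure(
  server_name:      `hostname`.chomp,
  granularity:      100,           # average every 100 samples (the module default)
  metric_host:      "localhost",   # resolved to "127.0.0.1" by ip_address
  metric_host_port: 8125,          # placeholder UDP port for a metrics listener
  metrics:          { batch_size: true, queue_depth: true }  # per-tracker switches
)

# Each enabled tracker then emits UDP datagrams built by #message, e.g.:
#   "batch_size,server_name=myhost,channel=katie_80 value=12 1438560000000000000"
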
@@ -0,0 +1,24 @@
+ module QueueMetricTracker
+   class StatCalculator
+     attr_accessor :count, :sum, :granularity
+
+     def initialize(granularity = 100)
+       @granularity = granularity
+       @sum = 0
+       @count = 0
+     end
+
+     def add(value)
+       @sum += value
+       @count += 1
+
+       if @count == granularity
+         value = @sum / granularity
+         @sum = @count = 0
+         return value
+       end
+
+       return nil
+     end
+   end
+ end
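
StatCalculator only yields a value once it has accumulated `granularity` samples; every call before that returns nil, and the counters reset after each report. A short usage sketch (the numbers mirror the spec added further down):

# Sketch: averaging every `granularity` samples, nil in between.
calc = QueueMetricTracker::StatCalculator.new(3)

calc.add(4)  # => nil  (1 of 3 samples buffered)
calc.add(3)  # => nil  (2 of 3)
calc.add(8)  # => 5    ((4 + 3 + 8) / 3, integer division; sum and count reset)
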
@@ -134,6 +134,8 @@ module WorkerRoulette
      work_orders = results[1]
      @remaining_jobs = results[2]
      @last_sender = sender_key.split(':').last
+
+     QueueMetricTracker.track_all(results) if work_orders.any?
      work = work_orders.map { |wo| preprocess(WorkerRoulette.load(wo), channel) }
      callback.call work if callback
      work
@@ -1,3 +1,3 @@
  module WorkerRoulette
-   VERSION = '0.2.2'
+   VERSION = '0.2.3'
  end
@@ -67,21 +67,22 @@ module WorkerRoulette

    @preprocessors = []

-   configure_latency_tracker(config.delete(:latency_tracker))
+   configure_queue_tracker(config.delete(:metric_tracker))
  end

- def configure_latency_tracker(config)
+ def configure_queue_tracker(config)
    return unless config

-   QueueLatencyTracker.configure(
+   QueueMetricTracker.configure(
      {
        server_name: `hostname`.chomp,
-       logstash_server_ip: ip_address(config[:logstash_server_name]),
-       logstash_port: config[:logstash_port]
+       metric_host: config[:metric_host],
+       metric_host_port: config[:metric_host_port],
+       metrics: config[:metrics]
      }
    )

-   preprocessors << QueueLatencyTracker
+   preprocessors << QueueLatency
  end

  def foreman(sender, namespace = nil)
@@ -112,10 +113,6 @@ module WorkerRoulette

    private

-   def ip_address(server_name)
-     server_name == "localhost" ? "127.0.0.1" : Resolv::DNS.new.getaddress(server_name).to_s
-   end
-
    def new_redis
      if @evented
        require 'eventmachine'
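
Taken together, callers now start WorkerRoulette with a `:metric_tracker` hash instead of the old `:latency_tracker`/logstash options. A hedged sketch of the new call, with placeholder host, port, and metric flags (the specs in this release use "localhost" and port 7777):

# Sketch only: starting WorkerRoulette with the renamed metric_tracker options.
# Host, port, and the metrics hash are placeholders, not required values.
require "worker_roulette"

worker_roulette = WorkerRoulette.start(
  evented: false,
  metric_tracker: {
    metric_host:      "localhost",
    metric_host_port: 7777,
    metrics: { batch_size: true, queue_depth: true }   # per-metric on/off flags
  }
)
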
@@ -12,13 +12,13 @@ module WorkerRoulette
    let(:foreman_work_order) { Hash["payload" => "foreman"] }
    let(:work_orders_with_headers) { default_headers.merge({ "payload" => work_orders }) }
    let(:jsonized_work_orders_with_headers) { [WorkerRoulette.dump(work_orders_with_headers)] }
-   let(:latency_tracker) {
+   let(:metric_tracker) {
      {
-       logstash_server_name: "localhost",
-       logstash_port: 7777
+       metric_host: "localhost",
+       metric_host_port: 7777
      }
    }
-   let(:worker_roulette) { WorkerRoulette.start(evented: true, latency_tracker: latency_tracker) }
+   let(:worker_roulette) { WorkerRoulette.start(evented: true, metric_tracker: metric_tracker) }
    let(:redis) { Redis.new(worker_roulette.redis_config) }

    before do
@@ -14,8 +14,8 @@ module WorkerRoulette
    let(:foreman_work_order) { Hash['payload' => "foreman"] }
    let(:work_orders_with_headers) { default_headers.merge({ 'payload' => work_orders }) }
    let(:jsonized_work_orders_with_headers) { [WorkerRoulette.dump(work_orders_with_headers)] }
-   let(:worker_roulette) { WorkerRoulette.start(evented: false, latency_tracker: latency_tracker) }
-   let(:latency_tracker) { nil }
+   let(:worker_roulette) { WorkerRoulette.start(evented: false, metric_tracker: metric_tracker) }
+   let(:metric_tracker) { nil }

    let(:redis) { Redis.new(worker_roulette.redis_config) }

@@ -153,10 +153,10 @@ module WorkerRoulette
    context "when latency tracker is enabled" do
      let(:default_headers) { Hash["headers" => { "sender" => sender, "queued_at" => (queued_at.to_f * 1_000_000).to_i }] }
      let(:queued_at) { 1234567 }
-     let(:latency_tracker) {
+     let(:metric_tracker) {
        {
-         logstash_server_name: "localhost",
-         logstash_port: 7777
+         metric_host: "localhost",
+         metric_host_port: 7777
        }
      }

@@ -1,25 +1,6 @@
  require "spec_helper"

- module QueueLatencyTracker
-   describe ".configure" do
-     let(:source_config) { { logstash_server_ip: ip, logstash_port: port, server_name: server_name } }
-     let(:ip) { "1.2.3.4" }
-     let(:port) { 123 }
-     let(:server_name) { "server.example" }
-
-
-     it "stores the configuration" do
-       QueueLatencyTracker.configure(source_config)
-
-       expect(QueueLatencyTracker.config).to eq({
-         logstash: {
-           server_ip: ip,
-           port: port },
-         server_name: server_name
-       })
-     end
-   end
-
+ module WorkerRoulette::QueueLatency
    describe Foreman do
      describe "#process" do
        let(:channel) { "a_channel" }
@@ -39,7 +20,7 @@ module QueueLatencyTracker
    describe Tradesman do
      describe "#process" do
        let(:queued_at) { 1234567 * GRANULARITY }
-       let(:expected_json) { %({"server_name":"#{server_name}","queue_latency (ms)":#{latency * 1000},"channel":"#{channel}"}) }
+       let(:host) { "a_metric_host" }
        let(:ip) { "1.2.3.4" }
        let(:port) { 123 }
        let(:latency) { 123.432 }
@@ -47,15 +28,18 @@ module QueueLatencyTracker
        let(:channel) { "a_channel" }
        let(:headers) { { "queued_at" => queued_at } }
        let(:raw_work_order) { { "headers" => headers, "payload" => "aPayload" } }
-       let(:logstash_config) { { server_ip: ip, port: port } }
-       let(:config) { { logstash: logstash_config, server_name: server_name } }
+       let(:metric_config) { { host_ip: ip, host_port: port } }
+       let(:config) { { metric_host: metric_config, server_name: server_name } }
+       let(:expected_msg) { "queue_latency(ms),server_name=server.example,channel=a_channel value=123.432 1234690432000000" }

-       before { allow(QueueLatencyTracker).to receive(:config).and_return(config) }
+       before { allow(QueueMetricTracker).to receive(:config).and_return(config) }
        before { allow(Time).to receive(:now).and_return(queued_at / GRANULARITY + latency) }
        before { allow_any_instance_of(UDPSocket).to receive(:send) }
+       before { allow_any_instance_of(QueueMetricTracker).to receive(:calculate_stats).and_return(latency) }
+       before { allow(QueueMetricTracker).to receive(:ipaddress).and_return(ip) }

        it "passes the right json to logstash_send" do
-         expect_any_instance_of(UDPSocket).to receive(:send).with(expected_json, 0, ip, port)
+         expect_any_instance_of(UDPSocket).to receive(:send).with(expected_msg, 0, ip, port)

          subject.process(raw_work_order, channel)
        end
@@ -0,0 +1,71 @@
+ require "spec_helper"
+
+ module QueueMetricTracker
+   describe QueueMetricTracker do
+     let(:host) { "localhost" }
+     let(:port) { 123 }
+     let(:granularity) { 3 }
+     let(:ip) { "1.2.3.4" }
+     let(:server_name) { "server.example" }
+     let(:source_config) { {
+       metric_host: host,
+       metric_host_port: port,
+       server_name: server_name,
+       granularity: granularity
+     }
+     }
+
+     describe ".configure" do
+       it "stores the configuration" do
+         allow(QueueMetricTracker).to receive(:ip_address).and_return(ip)
+         QueueMetricTracker.configure(source_config)
+
+         expect(QueueMetricTracker.config).to eq({
+           metric_host: {
+             host_ip: ip,
+             host_port: port },
+           server_name: server_name,
+           granularity: granularity
+         })
+       end
+     end
+
+     describe "#enabled?" do
+       let(:config) { {} }
+       subject(:metric) { WorkerRoulette::BatchSize }
+       subject(:metric_object) { metric.new }
+
+       before { QueueMetricTracker.configure(source_config.merge(config)) }
+
+       context "when the config is nil" do
+         it "returns false" do
+           expect(metric_object.enabled?).to be_falsey
+         end
+       end
+
+       context "when the config has no metrics defined" do
+         let(:config) { { metrics: {}} }
+
+         it "returns false" do
+           expect(metric_object.enabled?).to be_falsey
+         end
+       end
+
+       context "when the metric is false" do
+         let(:config) { { metrics: { :batch_size => false } } }
+
+         it "returns false" do
+           expect(metric_object.enabled?).to be_falsey
+         end
+       end
+
+       context "when the metric is true" do
+         let(:config) { { metrics: { :batch_size => true }} }
+
+         it "returns true" do
+           expect(metric_object.enabled?).to be_truthy
+         end
+       end
+     end
+   end
+ end
@@ -2,13 +2,13 @@ require "spec_helper"
2
2
 
3
3
  module WorkerRoulette
4
4
  describe "Read Lock" do
5
- let(:latency_tracker) {
5
+ let(:metric_tracker) {
6
6
  {
7
- logstash_server_name: "localhost",
8
- logstash_port: 7777
7
+ metric_host: "localhost",
8
+ metric_host_port: 7777
9
9
  }
10
10
  }
11
- let(:worker_roulette) { WorkerRoulette.start(evented: false, latency_tracker: latency_tracker) }
11
+ let(:worker_roulette) { WorkerRoulette.start(evented: false, metric_tracker: metric_tracker) }
12
12
  let(:redis) { Redis.new(worker_roulette.redis_config) }
13
13
  let(:sender) { "katie_80" }
14
14
  let(:work_orders) { "hello" }
@@ -0,0 +1,32 @@
+ require "spec_helper"
+
+ module QueueMetricTracker
+   describe StatCalculator do
+     let(:granularity) { 10 }
+     let(:default_granularity) { 100 }
+     subject(:calculator) { described_class }
+
+     it "responds to new with a granularity" do
+       expect(calculator.new(granularity).granularity).to eq(granularity)
+     end
+
+     it "granularity defaults to a value" do
+       expect(calculator.new().granularity).to eq(default_granularity)
+     end
+
+     describe "#add" do
+       let(:granularity) { 3 }
+       let(:value1) { 4 }
+       let(:value2) { 3 }
+       let(:value3) { 8 }
+       let(:average) { 5 }
+       subject(:calculator) { described_class.new(granularity) }
+
+       it "calculates average of N values" do
+         expect(subject.add(value1)).to be_nil
+         expect(subject.add(value2)).to be_nil
+         expect(subject.add(value3)).to eq(average)
+       end
+     end
+   end
+ end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: nexia_worker_roulette
  version: !ruby/object:Gem::Version
-   version: 0.2.2
+   version: 0.2.3
  platform: ruby
  authors:
  - Paul Saieg
@@ -10,7 +10,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2015-07-13 00:00:00.000000000 Z
+ date: 2015-08-03 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: oj
@@ -211,10 +211,15 @@ files:
  - README.md
  - Rakefile
  - lib/worker_roulette.rb
+ - lib/worker_roulette/batch_size.rb
  - lib/worker_roulette/foreman.rb
  - lib/worker_roulette/lua.rb
+ - lib/worker_roulette/monkey_patches.rb
  - lib/worker_roulette/preprocessor.rb
- - lib/worker_roulette/queue_latency_tracker.rb
+ - lib/worker_roulette/queue_depth.rb
+ - lib/worker_roulette/queue_latency.rb
+ - lib/worker_roulette/queue_metric_tracker.rb
+ - lib/worker_roulette/stat_calculator.rb
  - lib/worker_roulette/tradesman.rb
  - lib/worker_roulette/version.rb
  - spec/benchmark/irb_demo_runner.rb
@@ -226,8 +231,10 @@ files:
  - spec/unit/evented_readlock_spec.rb
  - spec/unit/lua_spec.rb
  - spec/unit/preprocessor_spec.rb
- - spec/unit/queue_latency_tracker_spec.rb
+ - spec/unit/queue_latency_spec.rb
+ - spec/unit/queue_metric_tracker_spec.rb
  - spec/unit/readlock_spec.rb
+ - spec/unit/stat_calculator_spec.rb
  - worker_roulette.gemspec
  homepage: https://github.com/nexiahome/worker_roulette
  licenses: []
@@ -248,7 +255,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
      version: '0'
  requirements: []
  rubyforge_project:
- rubygems_version: 2.4.2
+ rubygems_version: 2.4.5
  signing_key:
  specification_version: 4
  summary: Pub Sub Queue for Redis that ensures ordered processing
@@ -262,5 +269,7 @@ test_files:
  - spec/unit/evented_readlock_spec.rb
  - spec/unit/lua_spec.rb
  - spec/unit/preprocessor_spec.rb
- - spec/unit/queue_latency_tracker_spec.rb
+ - spec/unit/queue_latency_spec.rb
+ - spec/unit/queue_metric_tracker_spec.rb
  - spec/unit/readlock_spec.rb
+ - spec/unit/stat_calculator_spec.rb
@@ -1,48 +0,0 @@
- module QueueLatencyTracker
-   GRANULARITY = 1_000_000
-
-   class Foreman
-     def process(work_order, _channel)
-       work_order['headers'].merge!(
-         "queued_at" => (Time.now.to_f * GRANULARITY).to_i) if work_order.is_a?(Hash) && work_order["headers"]
-       work_order
-     end
-   end
-
-   class Tradesman
-     def process(work_order, channel)
-       send_latency(work_order["headers"]["queued_at"], channel)
-       work_order
-     end
-
-     def send_latency(queued_at, channel)
-       latency_ns = (Time.now.to_f * GRANULARITY).to_i - queued_at
-       logstash_send(latency_json(latency_ns / 1000.0, channel))
-     end
-
-     def logstash_send(json)
-       UDPSocket.new.send(json, 0, config[:logstash][:server_ip], config[:logstash][:port])
-     end
-
-     def latency_json(latency_ms, channel)
-       %({"server_name":"#{config[:server_name]}","queue_latency (ms)":#{latency_ms},"channel":"#{channel}"})
-     end
-
-     def config
-       QueueLatencyTracker.config
-     end
-   end
-
-   class << self
-     attr_reader :config
-     def configure(config)
-       @config = {
-         logstash: {
-           server_ip: config[:logstash_server_ip],
-           port: config[:logstash_port] },
-         server_name: config[:server_name]
-       }
-     end
-
-   end
- end