nexia_worker_roulette 0.1.12 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/worker_roulette/foreman.rb +17 -11
- data/lib/worker_roulette/preprocessor.rb +15 -0
- data/lib/worker_roulette/queue_latency_tracker.rb +47 -0
- data/lib/worker_roulette/tradesman.rb +7 -3
- data/lib/worker_roulette/version.rb +1 -1
- data/lib/worker_roulette.rb +34 -4
- data/spec/integration/evented_worker_roulette_spec.rb +98 -95
- data/spec/integration/worker_roulette_spec.rb +58 -46
- data/spec/unit/evented_readlock_spec.rb +33 -34
- data/spec/unit/preprocessor_spec.rb +51 -0
- data/spec/unit/queue_latency_tracker_spec.rb +68 -0
- data/spec/unit/readlock_spec.rb +42 -40
- metadata +9 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 22482d641cdf09efcb854a5a859f807399292ac5
+  data.tar.gz: 51850f1cd0f76e98a4a6c5bc8dc1c87c6f70eb98
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 64d5fd31dde77cc677d0e3d293df55c7fb5353594e4755dd501cf4bffcf475df0845a208fbf183bd78a43d37b40088e30a2aee1767f68cd9169f9f09c31003e2
+  data.tar.gz: 04f9dfcec66f9e09e57bde1004df9e8c44925bb52da04c31d629eb761252c7505d64c5a79af4c51f57ff0c6e036178b903a46f2482218624beffeb0c9e8a4053
data/lib/worker_roulette/foreman.rb
CHANGED
@@ -1,6 +1,10 @@
+require_relative "preprocessor"
+
 module WorkerRoulette
   class Foreman
-
+    include Preprocessor
+
+    attr_reader :sender, :namespace, :channel, :preprocessors

     LUA_ENQUEUE_WORK_ORDERS = <<-HERE
       local counter_key = KEYS[1]
@@ -32,22 +36,23 @@ module WorkerRoulette
       enqueue_work_orders(work_order, job_notification)
     HERE

-    def initialize(redis_pool, sender, namespace = nil)
-      @
-      @
-      @
-      @
-      @
+    def initialize(redis_pool, sender, namespace = nil, preprocessors = [])
+      @redis_pool = redis_pool
+      @sender = sender
+      @preprocessors = preprocessors
+      @namespace = namespace
+      @channel = namespace || WorkerRoulette::JOB_NOTIFICATIONS
+      @lua = Lua.new(@redis_pool)
     end

     def enqueue_work_order(work_order, headers = {}, &callback)
       work_order = {'headers' => default_headers.merge(headers), 'payload' => work_order}
-
+      enqueue(work_order, &callback)
     end

-    def
+    def enqueue(work_order, &callback)
       @lua.call(LUA_ENQUEUE_WORK_ORDERS, [counter_key, job_board_key, sender_key, @channel],
-                [WorkerRoulette.dump(work_order), WorkerRoulette::JOB_NOTIFICATIONS], &callback)
+                [WorkerRoulette.dump(preprocess(work_order, channel)), WorkerRoulette::JOB_NOTIFICATIONS], &callback)
     end

     def job_board_key
@@ -65,7 +70,8 @@ module WorkerRoulette
     private

     def default_headers
-
+      { "sender" => sender }
     end
+
   end
 end
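The enqueue path above now runs each registered preprocessor over the work order hash before WorkerRoulette.dump serializes it to Redis. A minimal sketch of an enqueue-side preprocessor; the module name AuditStamp and the constructor call in the trailing comment are illustrative assumptions, not part of the gem:

    # Hypothetical enqueue-side preprocessor. Because Foreman includes
    # WorkerRoulette::Preprocessor, each registered module must provide a Foreman
    # class whose #process(work_order, channel) receives the
    # {'headers' => ..., 'payload' => ...} hash and returns the (possibly modified) hash.
    module AuditStamp
      class Foreman
        def process(work_order, channel)
          work_order["headers"]["stamped_channel"] = channel
          work_order
        end
      end
    end

    # e.g. WorkerRoulette::Foreman.new(pool, "katie_80", nil, [AuditStamp]) - the new
    # fourth argument is the preprocessor list that #enqueue_work_order runs through.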
data/lib/worker_roulette/preprocessor.rb
ADDED
@@ -0,0 +1,15 @@
+module WorkerRoulette
+  module Preprocessor
+    def preprocess(work_order, channel)
+      return work_order unless preprocessors.any?
+
+      class_name = self.class.name.split(/::/).last
+
+      preprocessors.inject(work_order) do |job, processor_module|
+        processor_class = processor_module.const_get(class_name)
+        processor = processor_class.new
+        processor.process(job, channel)
+      end
+    end
+  end
+end
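preprocess resolves, inside each registered module, a class named after the including class's own short name (Foreman or Tradesman in the gem) and folds the work order through those classes in order. A self-contained sketch of that chaining; UpcasePayload, TagChannel, and Pipeline are made-up names used only for illustration:

    require "worker_roulette"   # assumes the gem (and its Preprocessor module) is loadable

    module UpcasePayload
      class Pipeline
        def process(job, _channel)
          job.merge("payload" => job["payload"].upcase)
        end
      end
    end

    module TagChannel
      class Pipeline
        def process(job, channel)
          job.merge("headers" => job.fetch("headers", {}).merge("via" => channel))
        end
      end
    end

    # The includer only has to expose #preprocessors; const_get then finds
    # UpcasePayload::Pipeline and TagChannel::Pipeline from the class name "Pipeline".
    class Pipeline
      include WorkerRoulette::Preprocessor
      attr_reader :preprocessors
      def initialize(preprocessors)
        @preprocessors = preprocessors
      end
    end

    order = { "headers" => {}, "payload" => "hello" }
    Pipeline.new([UpcasePayload, TagChannel]).preprocess(order, "jobs")
    # => { "headers" => { "via" => "jobs" }, "payload" => "HELLO" }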
data/lib/worker_roulette/queue_latency_tracker.rb
ADDED
@@ -0,0 +1,47 @@
+module QueueLatencyTracker
+  GRANULARITY = 1_000_000
+
+  class Foreman
+    def process(work_order, _channel)
+      work_order['headers'].merge!("queued_at" => (Time.now.to_f * GRANULARITY).to_i)
+      work_order
+    end
+  end
+
+  class Tradesman
+    def process(work_order, channel)
+      send_latency(work_order["headers"]["queued_at"], channel)
+      work_order
+    end
+
+    def send_latency(queued_at, channel)
+      latency_ns = (Time.now.to_f * GRANULARITY).to_i - queued_at
+      logstash_send(latency_json(latency_ns / 1000.0, channel))
+    end
+
+    def logstash_send(json)
+      UDPSocket.new.send(json, 0, config[:logstash][:server_ip], config[:logstash][:port])
+    end
+
+    def latency_json(latency_ms, channel)
+      %({"server_name":"#{config[:server_name]}","queue_latency (ms)":#{latency_ms},"channel":"#{channel}"})
+    end
+
+    def config
+      QueueLatencyTracker.config
+    end
+  end
+
+  class << self
+    attr_reader :config
+    def configure(config)
+      @config = {
+        logstash: {
+          server_ip: config[:logstash_server_ip],
+          port: config[:logstash_port] },
+        server_name: config[:server_name]
+      }
+    end
+
+  end
+end
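On the enqueue side the tracker stamps queued_at in microseconds (GRANULARITY = 1_000_000); on the dequeue side it subtracts that stamp from the current time, converts the difference to milliseconds, and ships a one-line JSON document to logstash over UDP. A sketch of the round trip with placeholder endpoint values:

    require "socket"
    require "worker_roulette/queue_latency_tracker"   # assumes the gem is on the load path

    # Placeholder logstash endpoint and host name.
    QueueLatencyTracker.configure(logstash_server_ip: "127.0.0.1",
                                  logstash_port:      5228,
                                  server_name:        "worker-01")

    order = { "headers" => {}, "payload" => "hello" }

    # Enqueue side: stamp the current time in microseconds.
    stamped = QueueLatencyTracker::Foreman.new.process(order, "jobs")

    # Dequeue side: compute the latency and send it over UDP, e.g.
    # {"server_name":"worker-01","queue_latency (ms)":0.37,"channel":"jobs"}
    QueueLatencyTracker::Tradesman.new.process(stamped, "jobs")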
data/lib/worker_roulette/tradesman.rb
CHANGED
@@ -1,6 +1,9 @@
+require_relative "preprocessor"
+
 module WorkerRoulette
   class Tradesman
-
+    include Preprocessor
+    attr_reader :last_sender, :remaining_jobs, :timer, :preprocessors, :channel

     LUA_DRAIN_WORK_ORDERS = <<-HERE
       local empty_string = ""
@@ -102,9 +105,10 @@ module WorkerRoulette
       return drain_work_orders_for_sender(job_board_key, sender_key)
     THERE

-    def initialize(redis_pool, evented, namespace = nil, polling_time = WorkerRoulette::DEFAULT_POLLING_TIME)
+    def initialize(redis_pool, evented, namespace = nil, polling_time = WorkerRoulette::DEFAULT_POLLING_TIME, preprocessors = [])
       @evented = evented
       @polling_time = polling_time
+      @preprocessors = preprocessors
       @redis_pool = redis_pool
       @namespace = namespace
       @channel = namespace || WorkerRoulette::JOB_NOTIFICATIONS
@@ -130,7 +134,7 @@ module WorkerRoulette
       work_orders = results[1]
       @remaining_jobs = results[2]
       @last_sender = sender_key.split(':').last
-      work
+      work = work_orders.map { |wo| preprocess(WorkerRoulette.load(wo), channel) }
       callback.call work if callback
       work
     end
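On the consuming side the Tradesman now takes the preprocessor list as a fifth constructor argument and maps every drained order through preprocess after WorkerRoulette.load. A minimal non-evented round trip through the public API, assuming a reachable Redis under the gem's default configuration:

    require "worker_roulette"

    worker_roulette = WorkerRoulette.start(evented: false)

    worker_roulette.foreman("katie_80").enqueue_work_order("hello")

    tradesman = worker_roulette.tradesman
    orders = tradesman.work_orders!          # drains katie_80's queue
    orders.first["payload"]                  # => "hello"
    orders.first["headers"]["queued_at"]     # set only when a latency tracker is configured
    tradesman.last_sender                    # => "katie_80"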
data/lib/worker_roulette.rb
CHANGED
@@ -13,6 +13,16 @@ module WorkerRoulette
   JOB_BOARD = "job_board"
   JOB_NOTIFICATIONS = "new_job_ready"
   DEFAULT_POLLING_TIME = 2
+  DEFAULT_REDIS_CONFIG = {
+    host: 'localhost',
+    port: 6379,
+    db: 14,
+    driver: :hiredis,
+    timeout: 5,
+    evented: false,
+    pool_size: 10,
+    polling_time: DEFAULT_POLLING_TIME
+  }

   def self.dump(obj)
     Oj.dump(obj)
@@ -44,25 +54,45 @@ module WorkerRoulette
   end

   private_class_method :new
+  attr_reader :preprocessors

   def initialize(config = {})
-    @redis_config =
-    @pool_config =
+    @redis_config = DEFAULT_REDIS_CONFIG.merge(config)
+    @pool_config = { size: @redis_config.delete(:pool_size), timeout: @redis_config.delete(:timeout) }
     @evented = @redis_config.delete(:evented)
     @polling_time = @redis_config.delete(:polling_time)

     @foreman_connection_pool = ConnectionPool.new(@pool_config) {new_redis}
     @tradesman_connection_pool = ConnectionPool.new(@pool_config) {new_redis}
+
+    @preprocessors = []
+
+    configure_latency_tracker(config.delete(:latency_tracker))
+  end
+
+  def configure_latency_tracker(config)
+    puts "WR:config: #{config}"
+    return unless config
+
+    QueueLatencyTracker.configure(
+      {
+        server_name: `hostname`.chomp,
+        logstash_server_ip: Resolv::DNS.new.getaddress(config[:logstash_server_name]).to_s,
+        logstash_port: config[:logstash_port]
+      }
+    )
+
+    preprocessors << QueueLatencyTracker
   end

   def foreman(sender, namespace = nil)
     raise "WorkerRoulette not Started" unless @foreman_connection_pool
-    Foreman.new(@foreman_connection_pool, sender, namespace)
+    Foreman.new(@foreman_connection_pool, sender, namespace, preprocessors)
   end

   def tradesman(namespace = nil, polling_time = DEFAULT_POLLING_TIME)
     raise "WorkerRoulette not Started" unless @tradesman_connection_pool
-    Tradesman.new(@tradesman_connection_pool, @evented, namespace, polling_time || @polling_time)
+    Tradesman.new(@tradesman_connection_pool, @evented, namespace, polling_time || @polling_time, preprocessors)
   end

   def tradesman_connection_pool
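WorkerRoulette.start now merges the caller's options over DEFAULT_REDIS_CONFIG, and a latency_tracker option makes it resolve the logstash host with Resolv::DNS, configure QueueLatencyTracker, and register it as a preprocessor for every Foreman and Tradesman it hands out. A sketch of that wiring; the host names and port below are placeholders:

    require "worker_roulette"

    worker_roulette = WorkerRoulette.start(
      host: "redis.internal",                        # placeholder; overrides DEFAULT_REDIS_CONFIG[:host]
      pool_size: 25,
      latency_tracker: {
        logstash_server_name: "logstash.internal",   # placeholder; resolved via Resolv::DNS at start-up
        logstash_port: 5228
      }
    )

    foreman   = worker_roulette.foreman("katie_80")  # receives [QueueLatencyTracker] as preprocessors
    tradesman = worker_roulette.tradesman            # same preprocessor list on the consuming side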
data/spec/integration/evented_worker_roulette_spec.rb
CHANGED
@@ -1,84 +1,90 @@
 require "spec_helper"
+
 module WorkerRoulette
   describe WorkerRoulette do
     include EventedSpec::EMSpec

-    let(:sender)
-    let(:work_orders)
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
+    let(:sender) { "katie_80" }
+    let(:work_orders) { ["hello", "foreman"] }
+    let(:queued_at) { 1234567 }
+    let(:default_headers) { Hash["headers" => { "sender" => sender, "queued_at" => (queued_at.to_f * 1_000_000).to_i }] }
+    let(:hello_work_order) { Hash["payload" => "hello"] }
+    let(:foreman_work_order) { Hash["payload" => "foreman"] }
+    let(:work_orders_with_headers) { default_headers.merge({ "payload" => work_orders }) }
+    let(:jsonized_work_orders_with_headers) { [WorkerRoulette.dump(work_orders_with_headers)] }
+    let(:worker_roulette) { WorkerRoulette.start(evented: true) }
+    let(:redis) { Redis.new(worker_roulette.redis_config) }
+
+    before do
+      allow(Time).to receive(:now).and_return(queued_at)
+    end

     context "Evented Foreman" do
-
+      subject(:foreman) {worker_roulette.foreman(sender)}

       it "enqueues work" do
         called = false
-        foreman = worker_roulette.foreman(
-        foreman.enqueue_work_order(
+        foreman = worker_roulette.foreman("foreman")
+        foreman.enqueue_work_order("some old fashion work") do |redis_response, stuff|
           called = true
         end
         done(0.1) { expect(called).to be_truthy }
       end

-      it "
-
-
-        expected = work_orders.map { |m| WorkerRoulette.dump(default_headers.merge({
+      it "enqueues two work_orders in the sender's slot in the job board" do
+        foreman.enqueue_work_order(work_orders.first) do
+          foreman.enqueue_work_order(work_orders.last) do
+            expected = work_orders.map { |m| WorkerRoulette.dump(default_headers.merge({"payload" => m})) }
             expect(redis.lrange(sender, 0, -1)).to eq(expected)
             done
           end
         end
       end

-      it "
-
+      it "enqueues an array of work_orders without headers in the sender's slot in the job board" do
+        foreman.enqueue(work_orders) do
          expect(redis.lrange(sender, 0, -1)).to eq([WorkerRoulette.dump(work_orders)])
          done
        end
      end

-      it "
-
+      it "enqueues an array of work_orders with default headers in the sender's slot in the job board" do
+        foreman.enqueue_work_order(work_orders) do
          expect(redis.lrange(sender, 0, -1)).to eq(jsonized_work_orders_with_headers)
          done
        end
      end

-      it "
-        extra_headers = {
-
-        work_orders_with_headers[
+      it "enqueues an array of work_orders with additional headers in the sender's slot in the job board" do
+        extra_headers = {"foo" => "bars"}
+        foreman.enqueue_work_order(work_orders, extra_headers) do
+          work_orders_with_headers["headers"].merge!(extra_headers)
          expect(redis.lrange(sender, 0, -1)).to eq([WorkerRoulette.dump(work_orders_with_headers)])
          done
        end
      end

-      it "
-        first_foreman = worker_roulette.foreman(
-        first_foreman.enqueue_work_order(
-
-
-            expect(redis.zrange(
+      it "posts the sender's id to the job board with an order number" do
+        first_foreman = worker_roulette.foreman("first_foreman")
+        first_foreman.enqueue_work_order("foo") do
+          foreman.enqueue_work_order(work_orders.first) do
+            foreman.enqueue_work_order(work_orders.last) do
+              expect(redis.zrange(foreman.job_board_key, 0, -1, with_scores: true)).to eq([["first_foreman", 1.0], ["katie_80", 2.0]])
              done
            end
          end
        end
      end

-      it "
-        first_foreman = worker_roulette.foreman(
-        expect(redis.get(
+      it "generates a monotically increasing score for senders not on the job board, but not for senders already there" do
+        first_foreman = worker_roulette.foreman("first_foreman")
+        expect(redis.get(foreman.counter_key)).to be_nil
        first_foreman.enqueue_work_order(work_orders.first) do
-          expect(redis.get(
+          expect(redis.get(foreman.counter_key)).to eq("1")
          first_foreman.enqueue_work_order(work_orders.last) do
-            expect(redis.get(
-
-              expect(redis.get(
+            expect(redis.get(foreman.counter_key)).to eq("1")
+            foreman.enqueue_work_order(work_orders.first) do
+              expect(redis.get(foreman.counter_key)).to eq("2")
              done
            end
          end
@@ -87,33 +93,33 @@ module WorkerRoulette
     end

     context "Evented Tradesman" do
-      let(:foreman)
-
+      let(:foreman) { worker_roulette.foreman(sender) }
+      subject(:tradesman) { worker_roulette.tradesman(nil, 0.01) }

-      it "
+      it "works on behalf of a sender" do
        foreman.enqueue_work_order(work_orders) do
-
-          expect(
+          tradesman.work_orders! do |r|
+            expect(tradesman.last_sender).to eq(sender)
            done
          end
        end
      end

-      it "
-        most_recent_sender =
+      it "removes the lock from the last_sender's queue" do
+        most_recent_sender = "most_recent_sender"
        most_recent_foreman = worker_roulette.foreman(most_recent_sender)
-        other_foreman = worker_roulette.foreman(
+        other_foreman = worker_roulette.foreman("katie_80")

        other_foreman.enqueue_work_order(work_orders) do
          most_recent_foreman.enqueue_work_order(work_orders) do
            expect(redis.keys("L*:*").length).to eq(0)
-
+            tradesman.work_orders! do
              expect(redis.get("L*:katie_80")).to eq("1")
              expect(redis.keys("L*:*").length).to eq(1)
-
+              tradesman.work_orders! do
                expect(redis.keys("L*:*").length).to eq(1)
                expect(redis.get("L*:most_recent_sender")).to eq("1")
-
+                tradesman.work_orders!
                done(0.2) do
                  expect(redis.keys("L*:*").length).to eq(0)
                end
@@ -123,69 +129,66 @@ module WorkerRoulette
        end
      end

-      it "
+      it "drains one set of work_orders from the sender's slot in the job board" do
        foreman.enqueue_work_order(work_orders) do
-
-          expect(
-
-
+          tradesman.work_orders! do |r0|
+            expect(r0).to eq([work_orders_with_headers])
+            tradesman.work_orders! do |r1| expect(r1).to be_empty
+              tradesman.work_orders! {|r2| expect(r2).to be_empty; done} #does not throw an error if queue is alreay empty
            end
          end
        end
      end

-      it "
+      it "takes the oldest sender off the job board (FIFO)" do
        foreman.enqueue_work_order(work_orders) do
          oldest_sender = sender.to_s
-          most_recent_sender =
+          most_recent_sender = "most_recent_sender"
          most_recent_foreman = worker_roulette.foreman(most_recent_sender)
          most_recent_foreman.enqueue_work_order(work_orders) do
-            expect(redis.zrange(
-
+            expect(redis.zrange(tradesman.job_board_key, 0, -1)).to eq([oldest_sender, most_recent_sender])
+            tradesman.work_orders! { expect(redis.zrange(tradesman.job_board_key, 0, -1)).to eq([most_recent_sender]); done }
          end
        end
      end

-      it "
-        #tradesman polls every so often, we care that it is called at least twice, but did not use
-        #the built in rspec syntax for that bc if the test ends while we
-        #throws an Error. This way we ensure we call work_orders! at least twice and just stub the second
-        #call so as not to hurt redis
+      it "gets the work_orders from the next queue when a new job is ready" do
+        # tradesman polls every so often, we care that it is called at least twice, but did not use
+        # the built in rspec syntax for that bc if the test ends while we"re talking to redis, redis
+        # throws an Error. This way we ensure we call work_orders! at least twice and just stub the second
+        # call so as not to hurt redis" feelings.

-        expect(
-        expect(
+        expect(tradesman).to receive(:work_orders!).and_call_original
+        expect(tradesman).to receive(:work_orders!)

        foreman.enqueue_work_order(work_orders) do
-
+          tradesman.wait_for_work_orders do |redis_work_orders|
            expect(redis_work_orders).to eq([work_orders_with_headers])
-            expect(
+            expect(tradesman.last_sender).to match(/katie_80/)
            done(0.1)
          end
        end
      end

-      it "
-
-
-
-        tradesman = worker_roulette.tradesman('good_channel', 0.001)
-        evil_tradesman = worker_roulette.tradesman('bad_channel', 0.001)
+      it "publishes and subscribes on custom channels" do
+        tradesman = worker_roulette.tradesman("good_channel", 0.001)
+        evil_tradesman = worker_roulette.tradesman("bad_channel", 0.001)

-        good_foreman = worker_roulette.foreman(
-        bad_foreman = worker_roulette.foreman(
+        good_foreman = worker_roulette.foreman("foreman", "good_channel")
+        bad_foreman = worker_roulette.foreman("foreman", "bad_channel")

-        #tradesman polls every so often, we care that it is called at least twice, but did not use
-        #the built in rspec syntax for that bc if the test ends while we
-        #throws an Error. This way we ensure we call work_orders! at least twice and just stub the second
-        #call so as not to hurt redis
+        # tradesman polls every so often, we care that it is called at least twice, but did not use
+        # the built in rspec syntax for that bc if the test ends while we"re talking to redis, redis
+        # throws an Error. This way we ensure we call work_orders! at least twice and just stub the second
+        # call so as not to hurt redis" feelings.
        expect(tradesman).to receive(:work_orders!).and_call_original
        expect(tradesman).to receive(:work_orders!)

        expect(evil_tradesman).to receive(:work_orders!).and_call_original
        expect(evil_tradesman).to receive(:work_orders!)

-        good_foreman.enqueue_work_order(
-        bad_foreman.enqueue_work_order(
+        good_foreman.enqueue_work_order("some old fashion work") do
+          bad_foreman.enqueue_work_order("evil biddings you should not carry out") do

            tradesman.wait_for_work_orders do |good_work|
              expect(good_work.to_s).to match("old fashion")
@@ -202,24 +205,24 @@ module WorkerRoulette
          end
        end

-      it "
-        tradesman = worker_roulette.tradesman(
+      it "extracts work orders for more than one sender" do
+        tradesman = worker_roulette.tradesman("good_channel")

-        good_foreman = worker_roulette.foreman(
-        lazy_foreman = worker_roulette.foreman(
+        good_foreman = worker_roulette.foreman("good_foreman", "good_channel")
+        lazy_foreman = worker_roulette.foreman("lazy_foreman", "good_channel")

        got_good = false
        got_lazy = false
-        good_foreman.enqueue_work_order(
+        good_foreman.enqueue_work_order("do good work") do
          tradesman.work_orders! do |r|
            got_good = true
-            expect(r.first[
+            expect(r.first["payload"]).to eq("do good work")
          end
        end
-        lazy_foreman.enqueue_work_order(
+        lazy_foreman.enqueue_work_order("just get it done") do
          tradesman.work_orders! do |r|
            got_lazy = true
-            expect(r.first[
+            expect(r.first["payload"]).to eq("just get it done")
          end
        end

@@ -227,20 +230,20 @@ module WorkerRoulette
      end
    end

-    pending "
+    pending "returns a hash with a string in the payload if OJ cannot parse the json"

    context "Failure" do
-      it "
+      it "does not put the sender_id and work_orders back if processing fails bc new work_orders may have been processed while that process failed" do; done; end
    end

    context "Concurrent Access" do
-      it "
+      it "does not leak connections"

-      it "
-        @
-        @
+      it "is fork() proof" do
+        @tradesman = worker_roulette.tradesman
+        @tradesman.work_orders! do
          fork do
-            @
+            @tradesman.work_orders!
          end
        end
        done(1)
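These specs exercise the EventMachine path: the pool is started with evented: true, enqueues yield to callbacks, and wait_for_work_orders delivers batches as they become ready. Outside the test harness that usage looks roughly like the following sketch, assuming EventMachine and the gem's evented Redis dependencies are available:

    require "eventmachine"
    require "worker_roulette"

    EM.run do
      worker_roulette = WorkerRoulette.start(evented: true)

      worker_roulette.foreman("katie_80").enqueue_work_order("hello") do
        worker_roulette.tradesman.wait_for_work_orders do |work_orders|
          puts work_orders.first["payload"]   # => "hello"
          EM.stop
        end
      end
    end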
data/spec/integration/worker_roulette_spec.rb
CHANGED
@@ -5,68 +5,73 @@ module WorkerRoulette
   end

   describe WorkerRoulette do
-    let(:sender)
-    let(:work_orders)
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
-
-
-
-
+    let(:sender) { 'katie_80' }
+    let(:work_orders) { ["hello", "foreman"] }
+    let(:queued_at) { 1234567 }
+    let(:default_headers) { Hash["headers" => { "sender" => sender, "queued_at" => (queued_at.to_f * 1_000_000).to_i }] }
+    let(:hello_work_order) { Hash['payload' => "hello"] }
+    let(:foreman_work_order) { Hash['payload' => "foreman"] }
+    let(:work_orders_with_headers) { default_headers.merge({ 'payload' => work_orders }) }
+    let(:jsonized_work_orders_with_headers) { [WorkerRoulette.dump(work_orders_with_headers)] }
+    let(:worker_roulette) { WorkerRoulette.start }
+
+    let(:redis) { Redis.new(worker_roulette.redis_config) }
+
+    before :each do
       redis.flushall
     end

-
+    before :each do
+      allow(Time).to receive(:now).and_return(queued_at)
+    end
+
+    it "exists" do
       expect(worker_roulette).to be_instance_of(WorkerRoulette)
     end

     context Foreman do
-
+      subject(:foreman) { worker_roulette.foreman(sender) }

-      it "
-        expect(
+      it "works on behalf of a sender" do
+        expect(foreman.sender).to eq(sender)
      end

-      it "
-
-
-        expect(redis.lrange(sender, 0, -1)).to eq(work_orders.map {|m| WorkerRoulette.dump(default_headers.merge({'payload' => m})) })
+      it "enqueues two work_orders in the sender's work queue" do
+        foreman.enqueue_work_order(work_orders.first) {}
+        foreman.enqueue_work_order(work_orders.last) {}
+        expect(redis.lrange(sender, 0, -1)).to eq(work_orders.map { |m| WorkerRoulette.dump(default_headers.merge({ 'payload' => m })) })
      end

-      it "
-
+      it "enqueues an array of work_orders without headers in the sender's work queue" do
+        foreman.enqueue(work_orders)
        expect(redis.lrange(sender, 0, -1)).to eq([WorkerRoulette.dump(work_orders)])
      end

-      it "
-
+      it "enqueues an array of work_orders with default headers in the sender's work queue" do
+        foreman.enqueue_work_order(work_orders)
        expect(redis.lrange(sender, 0, -1)).to eq(jsonized_work_orders_with_headers)
      end

-      it "
-        extra_headers = {'foo' => 'bars'}
-
+      it "enqueues an array of work_orders with additional headers in the sender's work queue" do
+        extra_headers = { 'foo' => 'bars' }
+        foreman.enqueue_work_order(work_orders, extra_headers)
        work_orders_with_headers['headers'].merge!(extra_headers)
        expect(redis.lrange(sender, 0, -1)).to eq([WorkerRoulette.dump(work_orders_with_headers)])
      end

-      it "
-
+      it "posts the sender's id to the job board with an order number" do
+        foreman.enqueue_work_order(work_orders.first)
        worker_roulette.foreman('other_forman').enqueue_work_order(work_orders.last)
-        expect(redis.zrange(
+        expect(redis.zrange(foreman.job_board_key, 0, -1, with_scores: true)).to eq([[sender, 1.0], ["other_forman", 2.0]])
      end

-      it "
+      it "generates a monotically increasing score for senders not on the job board, but not for senders already there" do
        other_forman = worker_roulette.foreman('other_forman')
-        expect(redis.get(
-
-        expect(redis.get(
-
-        expect(redis.get(
+        expect(redis.get(foreman.counter_key)).to be_nil
+        foreman.enqueue_work_order(work_orders.first)
+        expect(redis.get(foreman.counter_key)).to eq("1")
+        foreman.enqueue_work_order(work_orders.last)
+        expect(redis.get(foreman.counter_key)).to eq("1")
        other_forman.enqueue_work_order(work_orders.last)
        expect(redis.get(other_forman.counter_key)).to eq("2")
      end
@@ -97,31 +102,31 @@ module WorkerRoulette
      end
    end

-    it "
+    it "has a last sender if it found messages" do
      expect(tradesman.work_orders!.length).to eq(1)
      expect(tradesman.last_sender).to eq(sender)
    end

-    it "
+    it "does not have a last sender if it found no messages" do
      expect(tradesman.work_orders!.length).to eq(1)
      expect(tradesman.work_orders!.length).to eq(0)
      expect(tradesman.last_sender).to be_nil
    end

-    it "
+    it "drains one set of work_orders from the sender's work queue" do
      expect(tradesman.work_orders!).to eq([work_orders_with_headers])
      expect(tradesman.work_orders!).to be_empty
      expect(tradesman.work_orders!).to be_empty #does not throw an error if queue is already empty
    end

-    it "
+    it "drains all the work_orders from the sender's work queue" do
      foreman.enqueue_work_order(work_orders)
      expect(tradesman.work_orders!).to eq([work_orders_with_headers, work_orders_with_headers])
      expect(tradesman.work_orders!).to be_empty
      expect(tradesman.work_orders!).to be_empty #does not throw an error if queue is already empty
    end

-    it "
+    it "takes the oldest sender off the job board (FIFO)" do
      oldest_sender = sender.to_s
      most_recent_sender = 'most_recent_sender'
      most_recent_foreman = worker_roulette.foreman(most_recent_sender)
@@ -131,7 +136,7 @@ module WorkerRoulette
      expect(redis.zrange(tradesman.job_board_key, 0, -1)).to eq([most_recent_sender])
    end

-    it "
+    it "gets the work_orders from the next queue when a new job is ready, then poll for new work" do
      tradesman.wait_for_work_orders do |redis_work_orders|
        expect(redis_work_orders).to eq([work_orders_with_headers])
        expect(tradesman.last_sender).to eq('katie_80')
@@ -139,7 +144,14 @@ module WorkerRoulette
      end
    end

-    it "
+    it "sees queued_at in the header" do
+      tradesman.wait_for_work_orders do |redis_work_orders|
+        expect(redis_work_orders.first["headers"]["queued_at"]).to_not be_nil
+        allow(tradesman).to receive(:wait_for_work_orders)
+      end
+    end
+
+    it "publishes and subscribes on custom channels" do
      tradesman = worker_roulette.tradesman('good_channel')
      expect(tradesman).to receive(:work_orders!).and_call_original

@@ -178,13 +190,13 @@ module WorkerRoulette
    end

    context "Failure" do
-      it "
+      it "does not put the sender_id and work_orders back if processing fails bc new work_orders may have been processed while that process failed" do; end
    end

    context "Concurrent Access" do
-      it "
+      it "pools its connections" do
        Array.new(100) do
-          Thread.new {worker_roulette.tradesman_connection_pool.with {|pooled_redis| pooled_redis.get("foo")}}
+          Thread.new { worker_roulette.tradesman_connection_pool.with { |pooled_redis| pooled_redis.get("foo") } }
        end.each(&:join)
        worker_roulette.tradesman_connection_pool.with do |pooled_redis|
          expect(pooled_redis.info["connected_clients"].to_i).to be > (worker_roulette.pool_size)
data/spec/unit/evented_readlock_spec.rb
CHANGED
@@ -1,23 +1,25 @@
-require
+require "spec_helper"
+
 module WorkerRoulette
   describe "Evented Read Lock" do
     include EventedSpec::EMSpec

-    let(:redis)
-    let(:sender)
-    let(:work_orders)
-    let(:lock_key)
-    let(:
-    let(:
-    let(:
-    let(:worker_roulette)
-    let(:
-    let(:
-    let(:
-    let(:
-
+    let(:redis) { Redis.new(WorkerRoulette.start.redis_config) }
+    let(:sender) { "katie_80" }
+    let(:work_orders) { "hellot" }
+    let(:lock_key) { "L*:#{sender}" }
+    let(:queued_at) { 1234567 }
+    let(:default_headers) { Hash["headers" => { "sender" => sender, "queued_at" => (queued_at.to_f * 1_000_000).to_i }] }
+    let(:work_orders_with_headers) { default_headers.merge({ "payload" => work_orders }) }
+    let(:worker_roulette) { WorkerRoulette.start(evented: true) }
+    let(:foreman1) { worker_roulette.foreman(sender) }
+    let(:foreman2) { worker_roulette.foreman("foreman2") }
+    let(:tradesman2 ) { worker_roulette.tradesman }
+    let(:lua) { Lua.new(worker_roulette.tradesman_connection_pool) }
+
+    subject(:tradesman) {worker_roulette.tradesman}

     em_before do
+      allow(Time).to receive(:now).and_return(queued_at)
       lua.clear_cache!
       redis.script(:flush)
       redis.flushdb
@@ -39,17 +41,17 @@ module WorkerRoulette

     it "should not read a locked queue" do
       evented_readlock_preconditions do
-
-
+        foreman1.enqueue_work_order(work_orders) do #locked
+          tradesman2.work_orders! { |work| expect(work).to be_empty; done}
         end
       end
     end

     it "should read from the first available queue that is not locked" do
       evented_readlock_preconditions do
-
-
-
+        foreman1.enqueue_work_order(work_orders) do #locked
+          foreman2.enqueue_work_order(work_orders) do #unlocked
+            tradesman2.work_orders!{|work| expect(work.first["headers"]["sender"]).to eq("foreman2"); done}
          end
        end
      end
@@ -57,10 +59,10 @@ module WorkerRoulette

     it "should release its last lock when it asks for its next work order from another sender" do
       evented_readlock_preconditions do
-
-        expect(
-
-          expect(work.first[
+        foreman2.enqueue_work_order(work_orders) do #unlocked
+          expect(tradesman.last_sender).to eq(sender)
+          tradesman.work_orders! do |work|
+            expect(work.first["headers"]["sender"]).to eq("foreman2")
            expect(redis.get(lock_key)).to be_nil
            done
          end
@@ -70,10 +72,9 @@ module WorkerRoulette

     it "should not release its lock when it asks for its next work order from the same sender" do
       evented_readlock_preconditions do
-
-
-        expect(
-        expect(subject.last_sender).to eq(sender)
+        foreman1.enqueue_work_order(work_orders) do #locked
+          tradesman.work_orders! do |work|
+            expect(tradesman.last_sender).to eq(sender)
            expect(redis.get(lock_key)).not_to be_nil
            done
          end
@@ -83,10 +84,9 @@ module WorkerRoulette

     it "should not take out another lock if there is no work to do" do
       evented_readlock_preconditions do
-
-
-
-        subject.work_orders! do |work|
+        foreman1.enqueue_work_order(work_orders) do #locked
+          tradesman.work_orders! do |work_order|
+            tradesman.work_orders! do |work|
              expect(work).to be_empty
              expect(redis.get(lock_key)).to be_nil
              done
@@ -97,9 +97,8 @@ module WorkerRoulette
     end

     def evented_readlock_preconditions(&spec_block)
-
-
-        expect(work).to eq([work_orders_with_headers])
+      foreman1.enqueue_work_order(work_orders) do
+        tradesman.work_orders! do |work|
          spec_block.call
        end
      end
data/spec/unit/preprocessor_spec.rb
ADDED
@@ -0,0 +1,51 @@
+require "spec_helper"
+
+describe WorkerRoulette::Preprocessor do
+  before { allow(subject).to receive(:preprocessors).and_return(preprocessors) }
+
+  class TestClass
+    include WorkerRoulette::Preprocessor
+    def preprocessors
+    end
+  end
+
+  module TestPreprocessor
+    class TestClass
+      def process(job, channel)
+      end
+    end
+  end
+
+  describe "#preprocess" do
+    let(:wo) { double("work_order") }
+    let(:result) { double("resulting_wo") }
+    let(:channel) { "aChannel" }
+    subject { TestClass.new }
+
+    context "with one preprocessor" do
+      let(:preprocessors) { [TestPreprocessor] }
+
+      it "calls the correct preprocessor with the correct args" do
+        expect_any_instance_of(TestPreprocessor::TestClass).to receive(:process).with(wo, channel)
+        subject.preprocess(wo, channel)
+      end
+
+      it "returns the value of the preprocessor" do
+        allow_any_instance_of(TestPreprocessor::TestClass).to receive(:process).and_return(result)
+        expect(subject.preprocess(wo, channel)).to eq(result)
+      end
+    end
+
+    context "with two preprocessors" do
+      let(:preprocessors) { [TestPreprocessor, TestPreprocessor] }
+      let(:intermediate) { double("intermediate_result") }
+
+      it "chains the preprocessors and returns the correct result" do
+        allow_any_instance_of(TestPreprocessor::TestClass).to receive(:process).with(wo, channel).and_return(intermediate)
+        allow_any_instance_of(TestPreprocessor::TestClass).to receive(:process).with(intermediate, channel).and_return(result)
+
+        expect(subject.preprocess(wo, channel)).to eq(result)
+      end
+    end
+  end
+end
data/spec/unit/queue_latency_tracker_spec.rb
ADDED
@@ -0,0 +1,68 @@
+require "spec_helper"
+
+module QueueLatencyTracker
+  describe ".configure" do
+    let(:source_config) { { logstash_server_ip: ip, logstash_port: port, server_name: server_name } }
+    let(:ip) { "1.2.3.4" }
+    let(:port) { 123 }
+    let(:server_name) { "server.example" }
+
+
+    it "stores the configuration" do
+      QueueLatencyTracker.configure(source_config)
+
+      expect(QueueLatencyTracker.config).to eq({
+        logstash: {
+          server_ip: ip,
+          port: port },
+        server_name: server_name
+      })
+    end
+  end
+
+  describe Foreman do
+    describe "#process" do
+      let(:channel) { "a_channel" }
+      let(:queued_at) { 1234567 }
+      let(:raw_work_order) { { "headers" => {}, "payload" => "aPayload" } }
+      let(:work_order) { subject.process(raw_work_order, channel) }
+
+      before { allow(Time).to receive(:now).and_return(queued_at) }
+
+      it "sets queued_at to now using specified granularity" do
+        expect(work_order["headers"]["queued_at"]).to eq(queued_at * GRANULARITY)
+
+      end
+    end
+  end
+
+  describe Tradesman do
+    describe "#process" do
+      let(:queued_at) { 1234567 * GRANULARITY }
+      let(:expected_json) { %({"server_name":"#{server_name}","queue_latency (ms)":#{latency * 1000},"channel":"#{channel}"}) }
+      let(:ip) { "1.2.3.4" }
+      let(:port) { 123 }
+      let(:latency) { 123.432 }
+      let(:server_name) { "server.example" }
+      let(:channel) { "a_channel" }
+      let(:headers) { { "queued_at" => queued_at } }
+      let(:raw_work_order) { { "headers" => headers, "payload" => "aPayload" } }
+      let(:logstash_config) { { server_ip: ip, port: port } }
+      let(:config) { { logstash: logstash_config, server_name: server_name } }
+
+      before { allow(QueueLatencyTracker).to receive(:config).and_return(config) }
+      before { allow(Time).to receive(:now).and_return(queued_at / GRANULARITY + latency) }
+      before { allow_any_instance_of(UDPSocket).to receive(:send) }
+
+      it "passes the right json to logstash_send" do
+        expect_any_instance_of(UDPSocket).to receive(:send).with(expected_json, 0, ip, port)
+
+        subject.process(raw_work_order, channel)
+      end
+
+      it "returns the work order unchanged" do
+        expect(subject.process(raw_work_order, channel)).to eq(raw_work_order)
+      end
+    end
+  end
+end
data/spec/unit/readlock_spec.rb
CHANGED
@@ -1,70 +1,72 @@
-require
+require "spec_helper"
+
 module WorkerRoulette
   describe "Read Lock" do
-    let(:worker_roulette)
-    let(:redis)
-    let(:sender)
-    let(:work_orders)
-    let(:lock_key)
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
-    let(:
+    let(:worker_roulette) { WorkerRoulette.start(evented: false) }
+    let(:redis) { Redis.new(worker_roulette.redis_config) }
+    let(:sender) { "katie_80" }
+    let(:work_orders) { "hello" }
+    let(:lock_key) { "L*:#{sender}" }
+    let(:queued_at) { 1234567 }
+    let(:default_headers) { Hash["headers" => { "sender" => sender, "queued_at" => (queued_at.to_f * 1_000_000).to_i }] }
+    let(:work_orders_with_headers) { default_headers.merge({ "payload" => work_orders }) }
+    let(:foreman1) { worker_roulette.foreman(sender) }
+    let(:foreman2) { worker_roulette.foreman("foreman2") }
+    let(:lua) { Lua.new(worker_roulette.tradesman_connection_pool) }
+    let(:tradesman_1) { worker_roulette.tradesman }
+    let(:tradesman_2) { worker_roulette.tradesman }

     before do
       lua.clear_cache!
       redis.script(:flush)
       redis.flushdb
-
-
+      allow(Time).to receive(:now).and_return(queued_at)
+      foreman1.enqueue_work_order(work_orders)
+      expect(tradesman_1.work_orders!).to eq([work_orders_with_headers])
     end

-    it "
+    it "locks a queue when it reads from it" do
       expect(redis.get(lock_key)).not_to be_nil
     end

-    it "
+    it "sets the lock to expire in 3 seconds" do
       expect(redis.ttl(lock_key)).to eq(3)
     end

-    it "
-
-      expect(
+    it "does not read a locked queue" do
+      foreman1.enqueue_work_order(work_orders) #locked
+      expect(tradesman_2.work_orders!).to be_empty
     end

-    it "
-
-
-      expect(
+    it "reads from the first available queue that is not locked" do
+      foreman1.enqueue_work_order(work_orders) #locked
+      foreman2.enqueue_work_order(work_orders) #unlocked
+      expect(tradesman_2.work_orders!.first["headers"]["sender"]).to eq("foreman2")
     end

-    it "
-
-      expect(
-      expect(
+    it "releases its previous lock when it asks for work from another sender" do
+      foreman2.enqueue_work_order(work_orders) #unlocked
+      expect(tradesman_1.last_sender).to eq(sender)
+      expect(tradesman_1.work_orders!.first["headers"]["sender"]).to eq("foreman2")
       expect(redis.get(lock_key)).to be_nil
     end

-    it "
-
-      expect(
-      expect(
+    it "does not release its lock when it asks for work from the same sender" do
+      foreman1.enqueue_work_order(work_orders) #locked
+      expect(tradesman_1.work_orders!).to eq([work_orders_with_headers])
+      expect(tradesman_1.last_sender).to eq(sender)

-
-      expect(
-      expect(
+      foreman1.enqueue_work_order(work_orders) #locked
+      expect(tradesman_1.work_orders!).to eq([work_orders_with_headers])
+      expect(tradesman_1.last_sender).to eq(sender)

       expect(redis.get(lock_key)).not_to be_nil
     end

-    it "
-
-      expect(
-      expect(
+    it "releases its previous lock if there is no work to do from the same sender" do
+      foreman1.enqueue_work_order(work_orders) #locked
+      expect(tradesman_1.work_orders!).to eq([work_orders_with_headers])
+      expect(tradesman_1.work_orders!).to be_empty
       expect(redis.get(lock_key)).to be_nil
     end
   end
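These specs pin down the read-lock contract: draining a sender's queue takes a lock under L*:<sender> with a three-second TTL, other tradesmen skip locked queues, and the lock is released once the tradesman moves on or finds nothing. Observed directly from Redis, that behaviour looks like the sketch below, assuming a reachable Redis:

    require "worker_roulette"
    require "redis"

    worker_roulette = WorkerRoulette.start(evented: false)
    redis = Redis.new(worker_roulette.redis_config)

    worker_roulette.foreman("katie_80").enqueue_work_order("hello")
    worker_roulette.tradesman.work_orders!   # drains katie_80's queue and takes the read lock

    redis.get("L*:katie_80")                 # => "1"  (lock held)
    redis.ttl("L*:katie_80")                 # => 3    (seconds until it expires)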
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: nexia_worker_roulette
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.2.0
 platform: ruby
 authors:
 - Paul Saieg
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2015-07-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: oj
@@ -211,6 +211,8 @@ files:
 - lib/worker_roulette.rb
 - lib/worker_roulette/foreman.rb
 - lib/worker_roulette/lua.rb
+- lib/worker_roulette/preprocessor.rb
+- lib/worker_roulette/queue_latency_tracker.rb
 - lib/worker_roulette/tradesman.rb
 - lib/worker_roulette/version.rb
 - spec/benchmark/irb_demo_runner.rb
@@ -221,6 +223,8 @@ files:
 - spec/spec_helper.rb
 - spec/unit/evented_readlock_spec.rb
 - spec/unit/lua_spec.rb
+- spec/unit/preprocessor_spec.rb
+- spec/unit/queue_latency_tracker_spec.rb
 - spec/unit/readlock_spec.rb
 - worker_roulette.gemspec
 homepage: https://github.com/nexiahome/worker_roulette
@@ -242,7 +246,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.4.5
 signing_key:
 specification_version: 4
 summary: Pub Sub Queue for Redis that ensures ordered processing
@@ -255,4 +259,6 @@ test_files:
 - spec/spec_helper.rb
 - spec/unit/evented_readlock_spec.rb
 - spec/unit/lua_spec.rb
+- spec/unit/preprocessor_spec.rb
+- spec/unit/queue_latency_tracker_spec.rb
 - spec/unit/readlock_spec.rb