chore-core 1.8.2 → 4.0.0

Files changed (62)
  1. checksums.yaml +4 -4
  2. data/LICENSE.txt +1 -1
  3. data/README.md +173 -150
  4. data/chore-core.gemspec +3 -3
  5. data/lib/chore.rb +31 -5
  6. data/lib/chore/cli.rb +22 -4
  7. data/lib/chore/configuration.rb +1 -1
  8. data/lib/chore/consumer.rb +54 -12
  9. data/lib/chore/fetcher.rb +12 -7
  10. data/lib/chore/hooks.rb +2 -1
  11. data/lib/chore/job.rb +19 -0
  12. data/lib/chore/manager.rb +18 -2
  13. data/lib/chore/publisher.rb +18 -2
  14. data/lib/chore/queues/filesystem/consumer.rb +126 -64
  15. data/lib/chore/queues/filesystem/filesystem_queue.rb +19 -0
  16. data/lib/chore/queues/filesystem/publisher.rb +13 -19
  17. data/lib/chore/queues/sqs.rb +22 -13
  18. data/lib/chore/queues/sqs/consumer.rb +64 -51
  19. data/lib/chore/queues/sqs/publisher.rb +26 -17
  20. data/lib/chore/strategies/consumer/batcher.rb +14 -15
  21. data/lib/chore/strategies/consumer/single_consumer_strategy.rb +5 -5
  22. data/lib/chore/strategies/consumer/threaded_consumer_strategy.rb +9 -7
  23. data/lib/chore/strategies/consumer/throttled_consumer_strategy.rb +120 -0
  24. data/lib/chore/strategies/worker/forked_worker_strategy.rb +5 -6
  25. data/lib/chore/strategies/worker/helpers/ipc.rb +87 -0
  26. data/lib/chore/strategies/worker/helpers/preforked_worker.rb +163 -0
  27. data/lib/chore/strategies/worker/helpers/work_distributor.rb +65 -0
  28. data/lib/chore/strategies/worker/helpers/worker_info.rb +13 -0
  29. data/lib/chore/strategies/worker/helpers/worker_killer.rb +40 -0
  30. data/lib/chore/strategies/worker/helpers/worker_manager.rb +183 -0
  31. data/lib/chore/strategies/worker/preforked_worker_strategy.rb +150 -0
  32. data/lib/chore/strategies/worker/single_worker_strategy.rb +35 -13
  33. data/lib/chore/unit_of_work.rb +10 -1
  34. data/lib/chore/util.rb +5 -1
  35. data/lib/chore/version.rb +3 -3
  36. data/lib/chore/worker.rb +32 -3
  37. data/spec/chore/cli_spec.rb +2 -2
  38. data/spec/chore/consumer_spec.rb +1 -5
  39. data/spec/chore/duplicate_detector_spec.rb +17 -5
  40. data/spec/chore/fetcher_spec.rb +0 -11
  41. data/spec/chore/manager_spec.rb +7 -0
  42. data/spec/chore/queues/filesystem/filesystem_consumer_spec.rb +74 -16
  43. data/spec/chore/queues/sqs/consumer_spec.rb +117 -78
  44. data/spec/chore/queues/sqs/publisher_spec.rb +49 -60
  45. data/spec/chore/queues/sqs_spec.rb +32 -41
  46. data/spec/chore/strategies/consumer/batcher_spec.rb +50 -0
  47. data/spec/chore/strategies/consumer/single_consumer_strategy_spec.rb +3 -3
  48. data/spec/chore/strategies/consumer/threaded_consumer_strategy_spec.rb +7 -6
  49. data/spec/chore/strategies/consumer/throttled_consumer_strategy_spec.rb +165 -0
  50. data/spec/chore/strategies/worker/forked_worker_strategy_spec.rb +17 -2
  51. data/spec/chore/strategies/worker/helpers/ipc_spec.rb +127 -0
  52. data/spec/chore/strategies/worker/helpers/preforked_worker_spec.rb +236 -0
  53. data/spec/chore/strategies/worker/helpers/work_distributor_spec.rb +131 -0
  54. data/spec/chore/strategies/worker/helpers/worker_info_spec.rb +14 -0
  55. data/spec/chore/strategies/worker/helpers/worker_killer_spec.rb +97 -0
  56. data/spec/chore/strategies/worker/helpers/worker_manager_spec.rb +304 -0
  57. data/spec/chore/strategies/worker/preforked_worker_strategy_spec.rb +183 -0
  58. data/spec/chore/strategies/worker/single_worker_strategy_spec.rb +25 -0
  59. data/spec/chore/worker_spec.rb +82 -14
  60. data/spec/spec_helper.rb +1 -1
  61. data/spec/support/queues/sqs/fake_objects.rb +18 -0
  62. metadata +39 -15
data/spec/chore/manager_spec.rb
@@ -35,6 +35,13 @@ describe Chore::Manager do
  manager.assign(work)
  end
  end
+
+ describe 'returning work' do
+ it 'should return work to the fetcher' do
+ expect(fetcher).to receive(:return_work).with([work])
+ manager.return_work([work])
+ end
+ end
  end

  end
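
The new spec exercises a return_work method on Chore::Manager that hands units of work back to the fetcher. A minimal sketch of that delegation, with hypothetical names and structure (this is not chore's actual source, just the shape the spec implies):

    # Hypothetical sketch of the delegation exercised by the spec above.
    class ExampleManager
      def initialize(fetcher)
        @fetcher = fetcher
      end

      # Hand unworked UnitOfWork objects back to the fetcher for re-delivery.
      def return_work(units_of_work)
        @fetcher.return_work(units_of_work)
      end
    end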
data/spec/chore/queues/filesystem/filesystem_consumer_spec.rb
@@ -8,10 +8,16 @@ describe Chore::Queues::Filesystem::Consumer do
  let(:publisher) { Chore::Queues::Filesystem::Publisher.new }
  let(:test_queues_dir) { "test-queues" }
  let(:test_queue) { "test-queue" }
+ let(:default_timeout) { 60 }
+ let(:timeout) { nil }

  before do
  Chore.config.fs_queue_root = test_queues_dir
- expect(Chore.config).to receive(:default_queue_timeout).and_return(60)
+ if timeout
+ File.open("#{config_dir}/timeout", "w") {|f| f << timeout.to_s}
+ else
+ expect(Chore.config).to receive(:default_queue_timeout).and_return(default_timeout)
+ end
  allow(consumer).to receive(:sleep)
  end

@@ -22,35 +28,79 @@ describe Chore::Queues::Filesystem::Consumer do
  let(:test_job_hash) {{:class => "TestClass", :args => "test-args"}}
  let(:new_dir) { described_class.new_dir(test_queue) }
  let(:in_progress_dir) { described_class.in_progress_dir(test_queue) }
+ let(:config_dir) { described_class.config_dir(test_queue) }

  describe ".cleanup" do
- it "should move in_progress jobs to new dir" do
+ it "should move expired in_progress jobs to new dir" do
+ timestamp = Time.now.to_i - 1
+
+ FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
+ described_class.cleanup(Time.now.to_i, new_dir, in_progress_dir)
+ expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
+ end
+
+ it "should move non-timestamped jobs from in_progress_dir to new dir" do
  FileUtils.touch("#{in_progress_dir}/foo.1.job")
- described_class.cleanup(test_queue)
+ described_class.cleanup(Time.now.to_i, new_dir, in_progress_dir)
  expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
  end
+
+ it "should not affect non-expired jobs" do
+ timestamp = Time.now.to_i - 1
+
+ FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
+ described_class.cleanup(Time.now.to_i - 2, new_dir, in_progress_dir)
+ expect(File.exist?("#{new_dir}/foo.2.job")).to eq(false)
+ end
  end

  describe ".make_in_progress" do
- it "should move job to in_progress dir" do
+ it "should move non-empty job to in_progress dir" do
+ now = Time.now
+
+ Timecop.freeze(now) do
+ File.open("#{new_dir}/foo.1.job", "w") {|f| f << "{}"}
+ described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
+ expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(true)
+ end
+ end
+
+ it "should not move empty jobs to in_progress dir" do
+ now = Time.now
+
+ Timecop.freeze(now) do
+ FileUtils.touch("#{new_dir}/foo.1.job")
+ described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
+ expect(File.exist?("#{new_dir}/foo.1.job")).to eq(true)
+ expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(false)
+ end
+ end
+
+ it "should delete expired empty jobs" do
  FileUtils.touch("#{new_dir}/foo.1.job")
- described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir)
- expect(File.exist?("#{in_progress_dir}/foo.1.job")).to eq(true)
+
+ now = Time.now + default_timeout
+ Timecop.freeze(now) do
+ described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir, default_timeout)
+ expect(File.exist?("#{new_dir}/foo.1.job")).to eq(false)
+ expect(File.exist?("#{in_progress_dir}/foo.1.#{now.to_i}.job")).to eq(false)
+ end
  end
  end

  describe ".make_new_again" do
  it "should move job to new dir" do
- FileUtils.touch("#{in_progress_dir}/foo.1.job")
- described_class.make_new_again("foo.1.job", new_dir, in_progress_dir)
+ timestamp = Time.now.to_i
+ FileUtils.touch("#{in_progress_dir}/foo.1.#{timestamp}.job")
+ described_class.make_new_again("foo.1.#{timestamp}.job", new_dir, in_progress_dir)
  expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
  end
  end

- describe ".job_files" do
+ describe ".each_file" do
  it "should list jobs in dir" do
  FileUtils.touch("#{new_dir}/foo.1.job")
- expect(described_class.job_files(new_dir)).to eq(["foo.1.job"])
+ expect {|b| described_class.each_file(new_dir, &b) }.to yield_with_args("foo.1.job")
  end
  end

@@ -71,7 +121,7 @@ describe Chore::Queues::Filesystem::Consumer do
  end

  it "should consume a published job and yield the job to the handler block" do
- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 0)
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 60, test_job_hash.to_json, 0)
  end

  context "rejecting a job" do
@@ -85,7 +135,9 @@ describe Chore::Queues::Filesystem::Consumer do
  end
  expect(rejected).to be true

- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 1)
+ Timecop.freeze(Time.now + 61) do
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 60, test_job_hash.to_json, 1)
+ end
  end
  end

@@ -93,16 +145,23 @@ describe Chore::Queues::Filesystem::Consumer do
  let!(:consumer_run_for_two_messages) { allow(consumer).to receive(:running?).and_return(true, false,true,false) }

  it "should remove job on completion" do
- completed = false
+
  consumer.consume do |job_id, queue_name, job_hash|
+ expect(File).to receive(:delete).with(kind_of(String))
  consumer.complete(job_id)
- completed = true
  end
- expect(completed).to be true

  expect { |b| consumer.consume(&b) }.to_not yield_control
  end
  end
+
+ context "with queue-specific timeout config" do
+ let(:timeout) { 30 }
+
+ it "should consume a published job and yield the job to the handler block" do
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, anything, 'test-queue', 30, test_job_hash.to_json, 0)
+ end
+ end
  end

  context "not finding a published job" do
@@ -112,4 +171,3 @@ describe Chore::Queues::Filesystem::Consumer do
  end
  end
  end
-
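
These specs describe the filesystem queue's new on-disk protocol: in-progress files carry a start timestamp ("name.attempt.started_at.job"), a per-queue timeout can be set via a timeout file in the queue's config dir, and cleanup requeues expired work by bumping the attempt counter. A rough sketch of the filename and expiry bookkeeping the tests imply; the helper names and call shapes below are assumptions for illustration, not chore's actual internals:

    require 'fileutils'

    # "foo.1.job" -> "foo.1.<started_at>.job"
    def in_progress_name(file_name, started_at)
      base, attempt = file_name.split('.').first(2)
      "#{base}.#{attempt}.#{started_at}.job"
    end

    # Non-timestamped names yield 0 here, so they always count as expired.
    def expired?(file_name, expiration_time)
      started_at = file_name.split('.')[2].to_i
      started_at <= expiration_time
    end

    # Requeue by bumping the attempt count: foo.1.<ts>.job -> foo.2.job
    def requeue(in_progress_dir, new_dir, file_name)
      base, attempt = file_name.split('.').first(2)
      FileUtils.mv(File.join(in_progress_dir, file_name),
                   File.join(new_dir, "#{base}.#{attempt.to_i + 1}.job"))
    end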
data/spec/chore/queues/sqs/consumer_spec.rb
@@ -1,133 +1,170 @@
  require 'spec_helper'

  describe Chore::Queues::SQS::Consumer do
- let(:queue_name) { "test" }
- let(:queue_url) { "test_url" }
- let(:queues) { double("queues") }
- let(:queue) { double("test_queue", :visibility_timeout=>10, :url=>"test_queue", :name=>"test_queue") }
+ include_context 'fake objects'
+
  let(:options) { {} }
  let(:consumer) { Chore::Queues::SQS::Consumer.new(queue_name) }
- let(:message) { TestMessage.new("handle",queue, "message body", 1) }
- let(:message_data) {{:id=>message.id, :queue=>message.queue.url, :visibility_timeout=>message.queue.visibility_timeout}}
- let(:pool) { double("pool") }
- let(:sqs) { double('AWS::SQS') }
- let(:backoff_func) { nil }
+ let(:job) { {'class' => 'TestJob', 'args'=>[1,2,'3']} }
+ let(:backoff_func) { Proc.new { 2 + 2 } }
+
+ let(:receive_message_result) { Aws::SQS::Message::Collection.new([message], size: 1) }
+
+ let(:message) do
+ Aws::SQS::Message.new(
+ message_id: 'message id',
+ receipt_handle: "receipt_handle",
+ body: job.to_json,
+ data: job,
+ queue: queue,
+ queue_url: queue_url,
+ )
+ end

- before do
- allow(AWS::SQS).to receive(:new).and_return(sqs)
- allow(sqs).to receive(:queues) { queues }
+ # Since a message handler is required (but not validated), this convenience method lets us
+ # effectively stub the block.
+ def consume(&block)
+ block = Proc.new{} unless block_given?
+ consumer.consume(&block)
+ end

- allow(queues).to receive(:url_for) { queue_url }
- allow(queues).to receive(:[]) { queue }
- allow(queue).to receive(:receive_message) { message }
- allow(pool).to receive(:empty!) { nil }
+ before do
+ allow(Aws::SQS::Client).to receive(:new).and_return(sqs)
+ allow(Aws::SQS::Queue).to receive(:new).and_return(queue)
+ allow(queue).to receive(:receive_messages).and_return(receive_message_result)
+ allow(message).to receive(:attributes).and_return({ 'ApproximateReceiveCount' => rand(10) })
  end

  describe "consuming messages" do
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, false) }
- let!(:messages_be_unique) { allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).and_return(false) }
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message) }
-
- it 'should configure sqs' do
- allow(Chore.config).to receive(:aws_access_key).and_return('key')
- allow(Chore.config).to receive(:aws_secret_key).and_return('secret')
-
- expect(AWS::SQS).to receive(:new).with(
- :access_key_id => 'key',
- :secret_access_key => 'secret',
- :logger => Chore.logger,
- :log_level => :debug
- ).and_return(sqs)
- consumer.consume
+ before do
+ allow(consumer).to receive(:running?).and_return(true, false)
  end

- it 'should not configure sqs multiple times' do
- allow(consumer).to receive(:running?).and_return(true, true, false)
+ context "should create objects for interacting with the SQS API" do
+ it 'should create an sqs client' do
+ expect(queue).to receive(:receive_messages)
+ consume
+ end

- expect(AWS::SQS).to receive(:new).once.and_return(sqs)
- consumer.consume
- end
+ it "should only create an sqs client when one doesn't exist" do
+ allow(consumer).to receive(:running?).and_return(true, true, true, true, false, true, true)
+ expect(Aws::SQS::Client).to receive(:new).exactly(:once)
+ consume
+ end

- it 'should look up the queue url based on the queue name' do
- expect(queues).to receive(:url_for).with('test').and_return(queue_url)
- consumer.consume
- end
+ it 'should look up the queue url based on the queue name' do
+ expect(sqs).to receive(:get_queue_url).with(queue_name: queue_name)
+ consume
+ end

- it 'should look up the queue based on the queue url' do
- expect(queues).to receive(:[]).with(queue_url).and_return(queue)
- consumer.consume
+ it 'should create a queue object' do
+ expect(consumer.send(:queue)).to_not be_nil
+ consume
+ end
  end

  context "should receive a message from the queue" do
-
  it 'should use the default size of 10 when no queue_polling_size is specified' do
- expect(queue).to receive(:receive_messages).with(:limit => 10, :attributes => [:receive_count])
- consumer.consume
+ expect(queue).to receive(:receive_messages).with(
+ :max_number_of_messages => 10,
+ :attribute_names => ['ApproximateReceiveCount']
+ ).and_return(message)
+ consume
  end

  it 'should respect the queue_polling_size when specified' do
  allow(Chore.config).to receive(:queue_polling_size).and_return(5)
- expect(queue).to receive(:receive_messages).with(:limit => 5, :attributes => [:receive_count])
- consumer.consume
+ expect(queue).to receive(:receive_messages).with(
+ :max_number_of_messages => 5,
+ :attribute_names => ['ApproximateReceiveCount']
+ )
+ consume
  end
  end

- it "should check the uniqueness of the message" do
- allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).with(message_data).and_return(false)
- consumer.consume
- end
-
- it "should yield the message to the handler block" do
- expect { |b| consumer.consume(&b) }.to yield_with_args('handle', queue_name, 10, 'message body', 0)
- end
-
- it 'should not yield for a dupe message' do
- allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).with(message_data).and_return(true)
- expect {|b| consumer.consume(&b) }.not_to yield_control
- end
-
  context 'with no messages' do
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, true, false) }
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message, nil) }
+ before do
+ allow(consumer).to receive(:handle_messages).and_return([])
+ end

  it 'should sleep' do
  expect(consumer).to receive(:sleep).with(1)
- consumer.consume
+ consume
  end
  end

  context 'with messages' do
- let!(:consumer_run_for_one_message) { allow(consumer).to receive(:running?).and_return(true, true, false) }
- let!(:queue_contain_messages) { allow(queue).to receive(:receive_messages).and_return(message, message) }
+ before do
+ allow(consumer).to receive(:duplicate_message?).and_return(false)
+ allow(queue).to receive(:receive_messages).and_return(message)
+ end
+
+ it "should check the uniqueness of the message" do
+ expect(consumer).to receive(:duplicate_message?)
+ consume
+ end
+
+ it "should yield the message to the handler block" do
+ expect { |b| consume(&b) }
+ .to yield_with_args(
+ message.message_id,
+ message.receipt_handle,
+ queue_name,
+ queue.attributes['VisibilityTimeout'].to_i,
+ message.body,
+ message.attributes['ApproximateReceiveCount'].to_i - 1
+ )
+ end

  it 'should not sleep' do
  expect(consumer).to_not receive(:sleep)
- consumer.consume
+ consume
  end
+
+ context 'with duplicates' do
+ before do
+ allow(consumer).to receive(:duplicate_message?).and_return(true)
+ end
+
+ it 'should not yield for a dupe message' do
+ expect {|b| consume(&b) }.not_to yield_control
+ end
+ end
+ end
+ end
+
+ describe "completing work" do
+ it 'deletes the message from the queue' do
+ expect(queue).to receive(:delete_messages).with(entries: [{id: message.message_id, receipt_handle: message.receipt_handle}])
+ consumer.complete(message.message_id, message.receipt_handle)
  end
  end

  describe '#delay' do
- let(:item) { Chore::UnitOfWork.new(message.id, message.queue, 60, message.body, 0, consumer) }
- let(:backoff_func) { lambda { |item| 2 } }
+ let(:item) { Chore::UnitOfWork.new(message.message_id, message.receipt_handle, message.queue, 60, message.body, 0, consumer) }
+ let(:entries) do
+ [
+ { id: item.id, receipt_handle: item.receipt_handle, visibility_timeout: backoff_func.call(item) },
+ ]
+ end

  it 'changes the visiblity of the message' do
- expect(queue).to receive(:batch_change_visibility).with(2, [item.id])
+ expect(queue).to receive(:change_message_visibility_batch).with(entries: entries)
  consumer.delay(item, backoff_func)
  end
  end

  describe '#reset_connection!' do
  it 'should reset the connection after a call to reset_connection!' do
- expect(AWS::Core::Http::ConnectionPool).to receive(:pools).and_return([pool])
- expect(pool).to receive(:empty!)
+ expect(Aws).to receive(:empty_connection_pools!)
  Chore::Queues::SQS::Consumer.reset_connection!
  consumer.send(:queue)
  end

  it 'should not reset the connection between calls' do
- sqs = consumer.send(:queue)
- expect(sqs).to be consumer.send(:queue)
+ expect(Aws).to receive(:empty_connection_pools!).once
+ q = consumer.send(:queue)
+ expect(consumer.send(:queue)).to be(q)
  end

  it 'should reconfigure sqs' do
@@ -135,13 +172,15 @@ describe Chore::Queues::SQS::Consumer do
  allow_any_instance_of(Chore::DuplicateDetector).to receive(:found_duplicate?).and_return(false)

  allow(queue).to receive(:receive_messages).and_return(message)
- consumer.consume
+ allow(sqs).to receive(:receive_message).with({:attribute_names=>["ApproximateReceiveCount"], :max_number_of_messages=>10, :queue_url=>queue_url})
+
+ consume

  Chore::Queues::SQS::Consumer.reset_connection!
- allow(AWS::SQS).to receive(:new).and_return(sqs)
+ allow(Aws::SQS::Client).to receive(:new).and_return(sqs)

  expect(consumer).to receive(:running?).and_return(true, false)
- consume
  end
  end
  end
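
The consumer specs are rewritten against the aws-sdk v3 SQS API (Aws::SQS::Client / Aws::SQS::Queue) instead of the old AWS::SQS v1 interface: messages are fetched with receive_messages, acknowledged with delete_messages, delayed with change_message_visibility_batch, and connections reset via Aws.empty_connection_pools!. A bare-bones polling pass in that style, for illustration only (the queue name 'test' is a placeholder, and Chore's real consumer layers duplicate detection, backoff, and a handler block on top):

    require 'aws-sdk-sqs'

    client    = Aws::SQS::Client.new
    queue_url = client.get_queue_url(queue_name: 'test').queue_url
    queue     = Aws::SQS::Queue.new(url: queue_url, client: client)

    queue.receive_messages(
      max_number_of_messages: 10,
      attribute_names: ['ApproximateReceiveCount']
    ).each do |msg|
      previous_attempts = msg.attributes['ApproximateReceiveCount'].to_i - 1
      # Hand msg.body to the application's handler here.
      puts "job #{msg.message_id}: #{msg.body} (#{previous_attempts} previous attempts)"
      queue.delete_messages(entries: [{ id: msg.message_id, receipt_handle: msg.receipt_handle }])
    end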
data/spec/chore/queues/sqs/publisher_spec.rb
@@ -1,74 +1,63 @@
  require 'spec_helper'

- module Chore
- describe Queues::SQS::Publisher do
- let(:job) { {'class' => 'TestJob', 'args'=>[1,2,'3']}}
- let(:queue_name) { 'test_queue' }
- let(:queue_url) {"http://www.queue_url.com/test_queue"}
- let(:queue) { double('queue', :send_message => nil) }
- let(:sqs) do
- double('sqs', :queues => double('queues', :named => queue, :url_for => queue_url, :[] => queue))
- end
- let(:publisher) { Queues::SQS::Publisher.new }
- let(:pool) { double("pool") }
+ describe Chore::Queues::SQS::Publisher do
+ include_context 'fake objects'

- before(:each) do
- AWS::SQS.stub(:new).and_return(sqs)
- end
+ let(:publisher) { Chore::Queues::SQS::Publisher.new }
+ let(:job) { {'class' => 'TestJob', 'args'=>[1,2,'3']}}
+ let(:send_message_result) { double(Aws::SQS::Types::SendMessageResult, :data => job) }

- it 'should configure sqs' do
- Chore.config.stub(:aws_access_key).and_return('key')
- Chore.config.stub(:aws_secret_key).and_return('secret')
+ before(:each) do
+ allow(Aws::SQS::Client).to receive(:new).and_return(sqs)
+ allow(sqs).to receive(:send_message).and_return(send_message_result)
+ end

- AWS::SQS.should_receive(:new).with(
- :access_key_id => 'key',
- :secret_access_key => 'secret',
- :logger => Chore.logger,
- :log_level => :debug
- )
- publisher.publish(queue_name,job)
- end
+ it 'should configure sqs' do
+ expect(Aws::SQS::Client).to receive(:new)
+ publisher.publish(queue_name,job)
+ end

- it 'should create send an encoded message to the specified queue' do
- queue.should_receive(:send_message).with(job.to_json)
- publisher.publish(queue_name,job)
- end
+ it 'should not create a new SQS client before every publish' do
+ expect(Aws::SQS::Client).to receive(:new).once
+ 2.times { publisher.send(:queue, queue_name) }
+ end

- it 'should lookup the queue when publishing' do
- sqs.queues.should_receive(:url_for).with('test_queue')
- publisher.publish('test_queue', job)
- end
+ it 'should lookup the queue when publishing' do
+ expect(sqs).to receive(:get_queue_url).with(queue_name: queue_name)
+ publisher.publish(queue_name, job)
+ end

- it 'should lookup multiple queues if specified' do
- sqs.queues.should_receive(:url_for).with('test_queue')
- sqs.queues.should_receive(:url_for).with('test_queue2')
- publisher.publish('test_queue', job)
- publisher.publish('test_queue2', job)
- end
+ it 'should create send an encoded message to the specified queue' do
+ expect(sqs).to receive(:send_message).with(queue_url: queue_url, message_body: job.to_json)
+ publisher.publish(queue_name,job)
+ end
+
+ it 'should lookup multiple queues if specified' do
+ second_queue_name = queue_name + '2'
+ expect(sqs).to receive(:get_queue_url).with(queue_name: queue_name)
+ expect(sqs).to receive(:get_queue_url).with(queue_name: second_queue_name)
+
+ publisher.publish(queue_name, job)
+ publisher.publish(second_queue_name, job)
+ end
+
+ it 'should only lookup a named queue once' do
+ expect(sqs).to receive(:get_queue_url).with(queue_name: queue_name).once
+ 4.times { publisher.publish(queue_name, job) }
+ end

- it 'should only lookup a named queue once' do
- sqs.queues.should_receive(:url_for).with('test_queue').once
- 2.times { publisher.publish('test_queue', job) }
+ describe '#reset_connection!' do
+ it 'should empty API client connection pool after a call to reset_connection!' do
+ expect(Aws).to receive(:empty_connection_pools!)
+ Chore::Queues::SQS::Publisher.reset_connection!
+ publisher.send(:queue, queue_name)
  end

- describe '#reset_connection!' do
- it 'should reset the connection after a call to reset_connection!' do
- AWS::Core::Http::ConnectionPool.stub(:pools).and_return([pool])
- pool.should_receive(:empty!)
- Chore::Queues::SQS::Publisher.reset_connection!
- publisher.queue(queue_name)
- end
-
- it 'should not reset the connection between calls' do
- sqs = publisher.queue(queue_name)
- sqs.should be publisher.queue(queue_name)
- end
-
- it 'should reconfigure sqs' do
- Chore::Queues::SQS::Publisher.reset_connection!
- AWS::SQS.should_receive(:new)
- publisher.queue(queue_name)
- end
+ # TODO: this test seems like basic identity (i.e. not even a real test)
+ it 'should not reset the connection between calls' do
+ expect(Aws).to receive(:empty_connection_pools!).once
+ Chore::Queues::SQS::Publisher.reset_connection!
+ 4.times { publisher.send(:queue, queue_name ) }
  end
  end
  end
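
The publisher spec follows the same migration: one shared Aws::SQS::Client, queue URLs resolved once per queue name via get_queue_url, and jobs published as JSON bodies with send_message. Roughly, as an illustrative sketch of that pattern rather than the publisher's actual code:

    require 'aws-sdk-sqs'
    require 'json'

    client = Aws::SQS::Client.new

    # Cache queue URLs so each queue name is looked up only once.
    queue_urls = Hash.new do |cache, name|
      cache[name] = client.get_queue_url(queue_name: name).queue_url
    end

    job = { 'class' => 'TestJob', 'args' => [1, 2, '3'] }
    client.send_message(queue_url: queue_urls['test_queue'], message_body: job.to_json)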