chore-core 1.7.2 → 1.8.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 74242dc2e704cdf2cf5baf9dd9b25822bbe20066
4
- data.tar.gz: 0082dc5ab748faff7ba73a8bb6f01773aaf6b35c
3
+ metadata.gz: fd6b4ede9ed3d138dde2225105cb0bb1d28e5411
4
+ data.tar.gz: 1aee13a468b20323eefa1b854469c867ce676895
5
5
  SHA512:
6
- metadata.gz: de6ce3260f50d5a324843f6bbc9511974da8d69731e893aad11a5404ee4107f167a4cb02b9579e372f822f414551ce4ee754139a316845282a2465170f5c03a1
7
- data.tar.gz: fdd9bb38cb35ff082e055bf76a897571bdde176cb1fa20cd9880ed19f57c55d908f0f3c6c830fceae62cec88f761c0228dcb1bcdb051f1e561a48ea5d3f2bc49
6
+ metadata.gz: c76ac25d8026e1a61fb3169bbefaf4151be6a586c60161624031b48761c83f4690a2e0e6d60ec6a8541d008323e7a64f64aa902114e899430682e5521c802a11
7
+ data.tar.gz: fa19122b3e452507848b6670fbe98b008e52439cabc427a57f96a850c30c45843eddee4bf230b5cd85875896f7ac89d6591fb2cd4bab1976e99091362720f677
data/bin/chore CHANGED
@@ -22,9 +22,6 @@ Chore::Signal.trap "USR1" do
22
22
  end
23
23
 
24
24
  begin
25
- # Pre-load any Bundler dependencies now, so that the CLI parser has them loaded
26
- # prior to interpreting the command line args for things like consumers/producers
27
- Bundler.require if defined?(Bundler)
28
25
  cli = Chore::CLI.instance
29
26
  cli.run!(ARGV)
30
27
  rescue => e
data/lib/chore/cli.rb CHANGED
@@ -63,10 +63,10 @@ module Chore #:nodoc:
63
63
  end
64
64
  end
65
65
 
66
- def parse_config_file(file) #:nodoc:
66
+ def parse_config_file(file, ignore_errors = false) #:nodoc:
67
67
  data = File.read(file)
68
68
  data = ERB.new(data).result
69
- parse_opts(data.split(/\s/).map!(&:chomp).map!(&:strip))
69
+ parse_opts(data.split(/\s/).map!(&:chomp).map!(&:strip), ignore_errors)
70
70
  end
71
71
 
72
72
  def parse(args=ARGV) #:nodoc:
@@ -74,13 +74,16 @@ module Chore #:nodoc:
74
74
  setup_options
75
75
 
76
76
  # parse once to load the config file & require options
77
- parse_opts(args)
78
- parse_config_file(@options[:config_file]) if @options[:config_file]
77
+ # any invalid options are ignored the first time around since booting the
78
+ # system may register additional options from 3rd-party libs
79
+ parse_opts(args, true)
80
+ parse_config_file(@options[:config_file], true) if @options[:config_file]
79
81
 
80
82
  validate!
81
83
  boot_system
82
84
 
83
85
  # parse again to pick up options required by loaded classes
86
+ # any invalid options will raise an exception this time
84
87
  parse_opts(args)
85
88
  parse_config_file(@options[:config_file]) if @options[:config_file]
86
89
  detect_queues
@@ -143,7 +146,7 @@ module Chore #:nodoc:
143
146
 
144
147
  end
145
148
 
146
- def parse_opts(argv) #:nodoc:
149
+ def parse_opts(argv, ignore_errors = false) #:nodoc:
147
150
  @options ||= {}
148
151
  @parser = OptionParser.new do |o|
149
152
  registered_opts.each do |key,opt|
@@ -164,7 +167,22 @@ module Chore #:nodoc:
164
167
  exit 1
165
168
  end
166
169
 
167
- @parser.parse!(argv)
170
+ # This will parse arguments in order, continuing even if invalid options
171
+ # are encountered
172
+ argv = argv.dup
173
+ begin
174
+ @parser.parse(argv)
175
+ rescue OptionParser::InvalidOption => ex
176
+ if ignore_errors
177
+ # Drop everything up to (and including) the invalid argument
178
+ # and start parsing again
179
+ invalid_arg = ex.args[0]
180
+ argv = argv.drop(argv.index(invalid_arg) + 1)
181
+ retry
182
+ else
183
+ raise
184
+ end
185
+ end
168
186
 
169
187
  @options
170
188
  end
@@ -185,6 +203,9 @@ module Chore #:nodoc:
185
203
  require File.expand_path("#{options[:require]}/config/environment.rb")
186
204
  ::Rails.application.eager_load!
187
205
  else
206
+ # Pre-load any Bundler dependencies now, so that the CLI parser has them loaded
207
+ # prior to interpreting the command line args for things like consumers/producers
208
+ Bundler.require if defined?(Bundler)
188
209
  require File.expand_path(options[:require])
189
210
  end
190
211
  end
@@ -21,6 +21,11 @@ module Chore
21
21
  def self.reset_connection!
22
22
  end
23
23
 
24
+ # Cleans up any resources that were left behind from prior instances of the
25
+ # chore process. By default, this is a no-op.
26
+ def self.cleanup(queue)
27
+ end
28
+
24
29
  # Consume takes a block with an arity of two. The two params are
25
30
  # |message_id,message_body| where message_id is any object that the
26
31
  # consumer will need to be able to act on a message later (reject, complete, etc)
data/lib/chore/fetcher.rb CHANGED
@@ -11,6 +11,12 @@ module Chore
11
11
  # Starts the fetcher with the configured Consumer Strategy. This will begin consuming messages from your queue
12
12
  def start
13
13
  Chore.logger.info "Fetcher starting up"
14
+
15
+ # Clean up configured queues in case there are any resources left behind
16
+ Chore.config.queues.each do |queue|
17
+ Chore.config.consumer.cleanup(queue)
18
+ end
19
+
14
20
  @strategy.fetch
15
21
  end
16
22
 
@@ -17,12 +17,59 @@ module Chore
17
17
  # desired behavior long term and we may want to add configuration to this class to allow more
18
18
  # creative failure handling and retrying.
19
19
  class Consumer < Chore::Consumer
20
- include FilesystemQueue
20
+ extend FilesystemQueue
21
21
 
22
22
  Chore::CLI.register_option 'fs_queue_root', '--fs-queue-root DIRECTORY', 'Root directory for fs based queue'
23
23
 
24
24
  FILE_QUEUE_MUTEXES = {}
25
-
25
+
26
+ class << self
27
+ # Cleans up the in-progress files by making them new again. This should only
28
+ # happen once per process.
29
+ def cleanup(queue)
30
+ new_dir = self.new_dir(queue)
31
+ in_progress_dir = self.in_progress_dir(queue)
32
+
33
+ job_files(in_progress_dir).each do |file|
34
+ make_new_again(file, new_dir, in_progress_dir)
35
+ end
36
+ end
37
+
38
+ def make_in_progress(job, new_dir, in_progress_dir)
39
+ move_job(File.join(new_dir, job), File.join(in_progress_dir, job))
40
+ end
41
+
42
+ def make_new_again(job, new_dir, in_progress_dir)
43
+ basename, previous_attempts = file_info(job)
44
+ move_job(File.join(in_progress_dir, job), File.join(new_dir, "#{basename}.#{previous_attempts + 1}.job"))
45
+ end
46
+
47
+ # Moves job file to inprogress directory and returns the full path
48
+ def move_job(from, to)
49
+ f = File.open(from, "r")
50
+ # wait on the lock a publisher in another process might have.
51
+ # Once we get the lock the file is ours to move to mark it in progress
52
+ f.flock(File::LOCK_EX)
53
+ begin
54
+ FileUtils.mv(f.path, to)
55
+ ensure
56
+ f.flock(File::LOCK_UN) # yes we can unlock it after its been moved, I checked
57
+ end
58
+ to
59
+ end
60
+
61
+ def job_files(dir)
62
+ Dir.entries(dir).select{|e| ! e.start_with?(".")}
63
+ end
64
+
65
+ # Grabs the unique identifier for the job filename and the number of times
66
+ # it's been attempted (also based on the filename)
67
+ def file_info(job_file)
68
+ id, previous_attempts = File.basename(job_file, '.job').split('.')
69
+ [id, previous_attempts.to_i]
70
+ end
71
+ end
72
+
26
73
  # The amount of time units of work can run before the queue considers
27
74
  # them timed out. For filesystem queues, this is the global default.
28
75
  attr_reader :queue_timeout
@@ -35,13 +82,13 @@ module Chore
35
82
  # as they are pulled from the queue and synchronized for file operations below
36
83
  FILE_QUEUE_MUTEXES[@queue_name] ||= Mutex.new
37
84
 
38
- @in_progress_dir = in_progress_dir(queue_name)
39
- @new_dir = new_dir(queue_name)
85
+ @in_progress_dir = self.class.in_progress_dir(queue_name)
86
+ @new_dir = self.class.new_dir(queue_name)
40
87
  @queue_timeout = Chore.config.default_queue_timeout
41
88
  end
42
89
 
43
90
  def consume(&handler)
44
- Chore.logger.info "Starting consuming file system queue #{@queue_name} in #{queue_dir(queue_name)}"
91
+ Chore.logger.info "Starting consuming file system queue #{@queue_name} in #{self.class.queue_dir(queue_name)}"
45
92
  while running?
46
93
  begin
47
94
  #TODO move expired job files to new directory?
@@ -75,11 +122,11 @@ module Chore
75
122
  # ThreadedConsumerStrategy with multiple threads on a queue safely although you
76
123
  # probably wouldn't want to do that.
77
124
  FILE_QUEUE_MUTEXES[@queue_name].synchronize do
78
- job_files.each do |job_file|
125
+ self.class.job_files(@new_dir).each do |job_file|
79
126
  Chore.logger.debug "Found a new job #{job_file}"
80
127
 
81
128
  job_json = File.read(make_in_progress(job_file))
82
- basename, previous_attempts = file_info(job_file)
129
+ basename, previous_attempts = self.class.file_info(job_file)
83
130
 
84
131
  # job_file is just the name which is the job id
85
132
  block.call(job_file, queue_name, queue_timeout, job_json, previous_attempts)
@@ -89,37 +136,11 @@ module Chore
89
136
  end
90
137
 
91
138
  def make_in_progress(job)
92
- move_job(File.join(@new_dir, job), File.join(@in_progress_dir, job))
139
+ self.class.make_in_progress(job, @new_dir, @in_progress_dir)
93
140
  end
94
141
 
95
142
  def make_new_again(job)
96
- basename, previous_attempts = file_info(job)
97
- move_job(File.join(@in_progress_dir, job), File.join(@new_dir, "#{basename}.#{previous_attempts + 1}.job"))
98
- end
99
-
100
- # Moves job file to inprogress directory and returns the full path
101
- def move_job(from, to)
102
- f = File.open(from, "r")
103
- # wait on the lock a publisher in another process might have.
104
- # Once we get the lock the file is ours to move to mark it in progress
105
- f.flock(File::LOCK_EX)
106
- begin
107
- FileUtils.mv(f.path, to)
108
- ensure
109
- f.flock(File::LOCK_UN) # yes we can unlock it after its been moved, I checked
110
- end
111
- to
112
- end
113
-
114
- def job_files
115
- Dir.entries(@new_dir).select{|e| ! e.start_with?(".")}
116
- end
117
-
118
- # Grabs the unique identifier for the job filename and the number of times
119
- # it's been attempted (also based on the filename)
120
- def file_info(job_file)
121
- id, previous_attempts = File.basename(job_file, '.job').split('.')
122
- [id, previous_attempts.to_i]
143
+ self.class.make_new_again(job, @new_dir, @in_progress_dir)
123
144
  end
124
145
  end
125
146
  end
data/lib/chore/version.rb CHANGED
@@ -1,8 +1,8 @@
1
1
  module Chore
2
2
  module Version #:nodoc:
3
3
  MAJOR = 1
4
- MINOR = 7
5
- PATCH = 2
4
+ MINOR = 8
5
+ PATCH = 0
6
6
 
7
7
  STRING = [ MAJOR, MINOR, PATCH ].join('.')
8
8
  end
@@ -22,6 +22,10 @@ describe Chore::Consumer do
22
22
  Chore::Consumer.should respond_to :reset_connection!
23
23
  end
24
24
 
25
+ it 'should have a class level cleanup method' do
26
+ Chore::Consumer.should respond_to :cleanup
27
+ end
28
+
25
29
  it 'should not have an implemented consume method' do
26
30
  expect { consumer.consume }.to raise_error(NotImplementedError)
27
31
  end
@@ -35,4 +35,15 @@ describe Chore::Fetcher do
35
35
  fetcher.start
36
36
  end
37
37
  end
38
+
39
+ describe "cleaning up" do
40
+ before(:each) do
41
+ manager.stub(:assign)
42
+ end
43
+
44
+ it "should run cleanup on each queue" do
45
+ consumer.should_receive(:cleanup).with('test')
46
+ fetcher.start
47
+ end
48
+ end
38
49
  end
@@ -19,52 +19,96 @@ describe Chore::Queues::Filesystem::Consumer do
19
19
  FileUtils.rm_rf(test_queues_dir)
20
20
  end
21
21
 
22
- let!(:consumer_run_for_one_message) { expect(consumer).to receive(:running?).and_return(true, false) }
23
22
  let(:test_job_hash) {{:class => "TestClass", :args => "test-args"}}
23
+ let(:new_dir) { described_class.new_dir(test_queue) }
24
+ let(:in_progress_dir) { described_class.in_progress_dir(test_queue) }
24
25
 
25
- context "founding a published job" do
26
- before do
27
- publisher.publish(test_queue, test_job_hash)
26
+ describe ".cleanup" do
27
+ it "should move in_progress jobs to new dir" do
28
+ FileUtils.touch("#{in_progress_dir}/foo.1.job")
29
+ described_class.cleanup(test_queue)
30
+ expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
28
31
  end
32
+ end
29
33
 
30
- it "should consume a published job and yield the job to the handler block" do
31
- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 0)
34
+ describe ".make_in_progress" do
35
+ it "should move job to in_progress dir" do
36
+ FileUtils.touch("#{new_dir}/foo.1.job")
37
+ described_class.make_in_progress("foo.1.job", new_dir, in_progress_dir)
38
+ expect(File.exist?("#{in_progress_dir}/foo.1.job")).to eq(true)
32
39
  end
40
+ end
33
41
 
34
- context "rejecting a job" do
35
- let!(:consumer_run_for_two_messages) { allow(consumer).to receive(:running?).and_return(true, false,true,false) }
42
+ describe ".make_new_again" do
43
+ it "should move job to new dir" do
44
+ FileUtils.touch("#{in_progress_dir}/foo.1.job")
45
+ described_class.make_new_again("foo.1.job", new_dir, in_progress_dir)
46
+ expect(File.exist?("#{new_dir}/foo.2.job")).to eq(true)
47
+ end
48
+ end
36
49
 
37
- it "should requeue a job that gets rejected" do
38
- rejected = false
39
- consumer.consume do |job_id, queue_name, job_hash|
40
- consumer.reject(job_id)
41
- rejected = true
42
- end
43
- expect(rejected).to be true
50
+ describe ".job_files" do
51
+ it "should list jobs in dir" do
52
+ FileUtils.touch("#{new_dir}/foo.1.job")
53
+ expect(described_class.job_files(new_dir)).to eq(["foo.1.job"])
54
+ end
55
+ end
44
56
 
45
- expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 1)
46
- end
57
+ describe ".file_info" do
58
+ it "should split name and attempt number" do
59
+ name, attempt = described_class.file_info("foo.1.job")
60
+ expect(name).to eq("foo")
61
+ expect(attempt).to eq(1)
47
62
  end
63
+ end
48
64
 
49
- context "completing a job" do
50
- let!(:consumer_run_for_two_messages) { allow(consumer).to receive(:running?).and_return(true, false,true,false) }
65
+ describe 'consumption' do
66
+ let!(:consumer_run_for_one_message) { expect(consumer).to receive(:running?).and_return(true, false) }
51
67
 
52
- it "should remove job on completion" do
53
- completed = false
54
- consumer.consume do |job_id, queue_name, job_hash|
55
- consumer.complete(job_id)
56
- completed = true
68
+ context "founding a published job" do
69
+ before do
70
+ publisher.publish(test_queue, test_job_hash)
71
+ end
72
+
73
+ it "should consume a published job and yield the job to the handler block" do
74
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 0)
75
+ end
76
+
77
+ context "rejecting a job" do
78
+ let!(:consumer_run_for_two_messages) { allow(consumer).to receive(:running?).and_return(true, false,true,false) }
79
+
80
+ it "should requeue a job that gets rejected" do
81
+ rejected = false
82
+ consumer.consume do |job_id, queue_name, job_hash|
83
+ consumer.reject(job_id)
84
+ rejected = true
85
+ end
86
+ expect(rejected).to be true
87
+
88
+ expect { |b| consumer.consume(&b) }.to yield_with_args(anything, 'test-queue', 60, test_job_hash.to_json, 1)
57
89
  end
58
- expect(completed).to be true
90
+ end
59
91
 
60
- expect { |b| consumer.consume(&b) }.to_not yield_control
92
+ context "completing a job" do
93
+ let!(:consumer_run_for_two_messages) { allow(consumer).to receive(:running?).and_return(true, false,true,false) }
94
+
95
+ it "should remove job on completion" do
96
+ completed = false
97
+ consumer.consume do |job_id, queue_name, job_hash|
98
+ consumer.complete(job_id)
99
+ completed = true
100
+ end
101
+ expect(completed).to be true
102
+
103
+ expect { |b| consumer.consume(&b) }.to_not yield_control
104
+ end
61
105
  end
62
106
  end
63
- end
64
107
 
65
- context "not finding a published job" do
66
- it "should consume a published job and yield the job to the handler block" do
67
- expect { |b| consumer.consume(&b) }.to_not yield_control
108
+ context "not finding a published job" do
109
+ it "should consume a published job and yield the job to the handler block" do
110
+ expect { |b| consumer.consume(&b) }.to_not yield_control
111
+ end
68
112
  end
69
113
  end
70
114
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: chore-core
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.7.2
4
+ version: 1.8.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Tapjoy
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2015-12-08 00:00:00.000000000 Z
11
+ date: 2015-12-15 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: json