rocketjob 5.3.3 → 6.0.0.rc1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (55) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +19 -5
  3. data/bin/rocketjob_batch_perf +1 -1
  4. data/bin/rocketjob_perf +1 -1
  5. data/lib/rocket_job/batch.rb +3 -0
  6. data/lib/rocket_job/batch/categories.rb +338 -0
  7. data/lib/rocket_job/batch/io.rb +132 -69
  8. data/lib/rocket_job/batch/model.rb +20 -68
  9. data/lib/rocket_job/batch/performance.rb +20 -8
  10. data/lib/rocket_job/batch/statistics.rb +35 -13
  11. data/lib/rocket_job/batch/tabular.rb +2 -0
  12. data/lib/rocket_job/batch/tabular/input.rb +8 -6
  13. data/lib/rocket_job/batch/tabular/output.rb +4 -2
  14. data/lib/rocket_job/batch/throttle_running_workers.rb +8 -17
  15. data/lib/rocket_job/batch/worker.rb +27 -24
  16. data/lib/rocket_job/category/base.rb +78 -0
  17. data/lib/rocket_job/category/input.rb +110 -0
  18. data/lib/rocket_job/category/output.rb +25 -0
  19. data/lib/rocket_job/cli.rb +24 -16
  20. data/lib/rocket_job/dirmon_entry.rb +22 -12
  21. data/lib/rocket_job/event.rb +1 -1
  22. data/lib/rocket_job/extensions/iostreams/path.rb +32 -0
  23. data/lib/rocket_job/extensions/mongoid/factory.rb +4 -12
  24. data/lib/rocket_job/extensions/mongoid/stringified_symbol.rb +50 -0
  25. data/lib/rocket_job/extensions/psych/yaml_tree.rb +8 -0
  26. data/lib/rocket_job/jobs/dirmon_job.rb +1 -1
  27. data/lib/rocket_job/jobs/housekeeping_job.rb +7 -7
  28. data/lib/rocket_job/jobs/on_demand_batch_job.rb +15 -6
  29. data/lib/rocket_job/jobs/on_demand_job.rb +1 -2
  30. data/lib/rocket_job/jobs/performance_job.rb +3 -1
  31. data/lib/rocket_job/jobs/re_encrypt/relational_job.rb +5 -4
  32. data/lib/rocket_job/jobs/upload_file_job.rb +47 -10
  33. data/lib/rocket_job/lookup_collection.rb +68 -0
  34. data/lib/rocket_job/plugins/job/model.rb +25 -50
  35. data/lib/rocket_job/plugins/job/throttle_running_jobs.rb +12 -4
  36. data/lib/rocket_job/plugins/job/worker.rb +2 -7
  37. data/lib/rocket_job/plugins/restart.rb +12 -5
  38. data/lib/rocket_job/plugins/state_machine.rb +2 -1
  39. data/lib/rocket_job/ractor_worker.rb +42 -0
  40. data/lib/rocket_job/server/model.rb +1 -1
  41. data/lib/rocket_job/sliced.rb +36 -0
  42. data/lib/rocket_job/sliced/bzip2_output_slice.rb +43 -0
  43. data/lib/rocket_job/sliced/input.rb +4 -4
  44. data/lib/rocket_job/sliced/slice.rb +11 -13
  45. data/lib/rocket_job/sliced/slices.rb +20 -2
  46. data/lib/rocket_job/sliced/writer/output.rb +33 -44
  47. data/lib/rocket_job/subscribers/server.rb +1 -1
  48. data/lib/rocket_job/thread_worker.rb +46 -0
  49. data/lib/rocket_job/version.rb +1 -1
  50. data/lib/rocket_job/worker.rb +21 -55
  51. data/lib/rocket_job/worker_pool.rb +5 -7
  52. data/lib/rocketjob.rb +52 -59
  53. metadata +43 -33
  54. data/lib/rocket_job/extensions/mongoid/remove_warnings.rb +0 -12
  55. data/lib/rocket_job/jobs/on_demand_batch_tabular_job.rb +0 -28
@@ -11,46 +11,6 @@ module RocketJob
11
11
  #
12
12
  # The following attributes are set when the job is created
13
13
 
14
- # Number of records to include in each slice that is processed
15
- # Note:
16
- # slice_size is only used by SlicedJob#upload & Sliced::Input#upload
17
- # When slices are supplied directly, their size is not modified to match this number
18
- field :slice_size, type: Integer, default: 100, class_attribute: true, user_editable: true, copy_on_restart: true
19
-
20
- # Whether to retain nil results.
21
- #
22
- # Only applicable if `collect_output` is `true`
23
- # Set to `false` to prevent collecting output from the perform
24
- # method when it returns `nil`.
25
- field :collect_nil_output, type: Boolean, default: true, class_attribute: true
26
-
27
- # Optional Array<Symbol> list of categories that this job can output to
28
- #
29
- # By using categories the output from #perform can be placed in different
30
- # output collections, and therefore different output files
31
- #
32
- # Categories must be declared in advance to avoid a #perform method
33
- # accidentally writing its results to an unknown category
34
- field :output_categories, type: Array, default: [:main], class_attribute: true
35
-
36
- # Optional Array<Symbol> list of categories that this job can load input data into
37
- field :input_categories, type: Array, default: [:main], class_attribute: true
38
-
39
- # The file name of the uploaded file, if any.
40
- # Set by #upload if a file name was supplied, but can also be set explicitly.
41
- # May or may not include the fully qualified path name.
42
- field :upload_file_name, type: String
43
-
44
- # Compress uploaded records.
45
- # The fields are not affected in any way, only the data stored in the
46
- # records and results collections will compressed
47
- field :compress, type: Boolean, default: false, class_attribute: true
48
-
49
- # Encrypt uploaded records.
50
- # The fields are not affected in any way, only the data stored in the
51
- # records and results collections will be encrypted
52
- field :encrypt, type: Boolean, default: false, class_attribute: true
53
-
54
14
  #
55
15
  # Values that jobs can also update during processing
56
16
  #
@@ -69,30 +29,7 @@ module RocketJob
69
29
 
70
30
  # Breaks the :running state up into multiple sub-states:
71
31
  # :running -> :before -> :processing -> :after -> :complete
72
- field :sub_state, type: Symbol
73
-
74
- validates_presence_of :slice_size
75
-
76
- validates_each :output_categories, :input_categories do |record, attr, value|
77
- # Under some circumstances ActiveModel is passing in a nil value even though the
78
- # attributes have default values
79
- Array(value).each do |category|
80
- record.errors.add(attr, "must only contain Symbol values") unless category.is_a?(Symbol)
81
- unless category.to_s =~ /\A[a-z_0-9]+\Z/
82
- record.errors.add(attr, "must only consist of lowercase characters, digits, and _")
83
- end
84
- end
85
- end
86
- end
87
-
88
- # Returns [true|false] whether the slices for this job are encrypted
89
- def encrypted?
90
- encrypt == true
91
- end
92
-
93
- # Returns [true|false] whether the slices for this job are compressed
94
- def compressed?
95
- compress == true
32
+ field :sub_state, type: Mongoid::StringifiedSymbol
96
33
  end
97
34
 
98
35
  # Returns [Integer] percent of records completed so far
@@ -102,10 +39,10 @@ module RocketJob
102
39
  return 0 unless record_count.to_i.positive?
103
40
 
104
41
  # Approximate number of input records
105
- input_records = input.count.to_f * slice_size
42
+ input_records = input.count.to_f * input_category.slice_size
106
43
  if input_records > record_count
107
44
  # Sanity check in case slice_size is not being adhered to
108
- 99
45
+ 0
109
46
  else
110
47
  ((1.0 - (input_records.to_f / record_count)) * 100).to_i
111
48
  end
@@ -120,6 +57,10 @@ module RocketJob
120
57
  h["active_slices"] = worker_count
121
58
  h["failed_slices"] = input.failed.count
122
59
  h["queued_slices"] = input.queued.count
60
+ output_categories.each do |category|
61
+ name_str = category.name == :main ? "" : "_#{category.name}"
62
+ h["output_slices#{name_str}"] = output(category).count
63
+ end
123
64
  # Very high level estimated time left
124
65
  if record_count && running? && record_count.positive?
125
66
  percent = percent_complete
@@ -129,10 +70,9 @@ module RocketJob
129
70
  end
130
71
  end
131
72
  elsif completed?
132
- secs = seconds.to_f
73
+ secs = seconds.to_f
133
74
  h["records_per_hour"] = ((record_count.to_f / secs) * 60 * 60).round if record_count&.positive? && (secs > 0.0)
134
75
  end
135
- h["output_slices"] = output.count if collect_output? && !completed?
136
76
  h.merge!(super(time_zone))
137
77
  h.delete("result")
138
78
  # Worker name should be retrieved from the slices when processing
@@ -172,6 +112,18 @@ module RocketJob
172
112
  @worker_count_last = Time.now.to_i
173
113
  @worker_count
174
114
  end
115
+
116
+ # @deprecated
117
+ # For backward compatibility
118
+ def upload_file_name
119
+ input_category.file_name
120
+ end
121
+
122
+ # @deprecated
123
+ # For backward compatibility
124
+ def upload_file_name=(upload_file_name)
125
+ input_category.file_name = upload_file_name
126
+ end
175
127
  end
176
128
  end
177
129
  end
@@ -22,12 +22,15 @@ module RocketJob
22
22
  count_running_workers
23
23
 
24
24
  puts "Loading job with #{count} records/lines"
25
- args = {log_level: :warn, slice_size: slice_size}
26
- if defined?(::RocketJob)
27
- args[:compress] = compress
28
- args[:encrypt] = encrypt
25
+ job = RocketJob::Jobs::PerformanceJob.new(log_level: :warn)
26
+ job.input_category.slice_size = slice_size
27
+ if encrypt
28
+ job.input_category.serializer = :encrypt
29
+ job.output_category.serializer = :encrypt
30
+ elsif !compress
31
+ job.input_category.serializer = :none
32
+ job.output_category.serializer = :none
29
33
  end
30
- job = RocketJob::Jobs::PerformanceJob.new(args)
31
34
  job.upload do |writer|
32
35
  count.times { |i| writer << i }
33
36
  end
@@ -37,7 +40,15 @@ module RocketJob
37
40
  sleep 3 until job.reload.completed?
38
41
 
39
42
  duration = job.completed_at - job.started_at
40
- {count: count, duration: duration, records_per_second: (count.to_f / duration).round(3), workers: workers, servers: servers, compress: compress, encrypt: encrypt}
43
+ {
44
+ count: count,
45
+ duration: duration,
46
+ records_per_second: (count.to_f / duration).round(3),
47
+ workers: workers,
48
+ servers: servers,
49
+ compress: compress,
50
+ encrypt: encrypt
51
+ }
41
52
  end
42
53
 
43
54
  # Export the Results hash to a CSV file
@@ -53,14 +64,15 @@ module RocketJob
53
64
 
54
65
  # Parse command line options
55
66
  def parse(argv)
56
- parser = OptionParser.new do |o|
67
+ parser = OptionParser.new do |o|
57
68
  o.on("-c", "--count COUNT", "Count of records to enqueue") do |arg|
58
69
  self.count = arg.to_i
59
70
  end
60
71
  o.on("-m", "--mongo MONGO_CONFIG_FILE_NAME", "Location of mongoid.yml config file") do |arg|
61
72
  self.mongo_config = arg
62
73
  end
63
- o.on("-e", "--environment ENVIRONMENT", "The environment to run the app on (Default: RAILS_ENV || RACK_ENV || development)") do |arg|
74
+ o.on("-e", "--environment ENVIRONMENT",
75
+ "The environment to run the app on (Default: RAILS_ENV || RACK_ENV || development)") do |arg|
64
76
  self.environment = arg
65
77
  end
66
78
  o.on("-z", "--compress", "Turn on compression") do
@@ -2,7 +2,11 @@ require "active_support/concern"
2
2
 
3
3
  module RocketJob
4
4
  module Batch
5
- # Allow statistics to be gathered while a batch job is running
5
+ # Allow statistics to be gathered while a batch job is running.
6
+ #
7
+ # Notes:
8
+ # - Statistics for successfully processed records within a slice are saved.
9
+ # - Statistics gathered during a perform that then results in an exception are discarded.
6
10
  module Statistics
7
11
  extend ActiveSupport::Concern
8
12
 
@@ -45,39 +49,57 @@ module RocketJob
45
49
  last = paths.pop
46
50
  return unless last
47
51
 
48
- target = paths.inject(in_memory) { |target, key| target.key?(key) ? target[key] : target[key] = Hash.new(0) }
49
- target[last] += increment
52
+ last_target = paths.inject(in_memory) do |target, sub_key|
53
+ target.key?(sub_key) ? target[sub_key] : target[sub_key] = Hash.new(0)
54
+ end
55
+ last_target[last] += increment
50
56
  end
51
57
  end
52
58
 
53
59
  included do
54
60
  field :statistics, type: Hash, default: -> { Hash.new(0) }
55
61
 
56
- around_slice :statistics_capture
62
+ around_slice :rocket_job_statistics_capture
63
+ after_perform :rocket_job_statistics_commit
57
64
  end
58
65
 
59
66
  # Increment a statistic
60
67
  def statistics_inc(key, increment = 1)
61
68
  return if key.nil? || key == ""
62
69
 
63
- # Being called within tests outside of a perform
64
- @slice_statistics ||= Stats.new(new_record? ? statistics : nil)
65
- key.is_a?(Hash) ? @slice_statistics.inc(key) : @slice_statistics.inc_key(key, increment)
70
+ (@rocket_job_perform_statistics ||= []) << (key.is_a?(Hash) ? key : [key, increment])
66
71
  end
67
72
 
68
73
  private
69
74
 
70
- # Capture the number of successful and failed tradelines
71
- # as well as those with notices and alerts.
72
- def statistics_capture
73
- @slice_statistics = Stats.new(new_record? ? statistics : nil)
75
+ def rocket_job_statistics_capture
76
+ @rocket_job_perform_statistics = nil
77
+ @rocket_job_slice_statistics = nil
74
78
  yield
75
- collection.update_one({_id: id}, {"$inc" => @slice_statistics.stats}) unless @slice_statistics.empty?
79
+ ensure
80
+ if @rocket_job_slice_statistics && !@rocket_job_slice_statistics.empty?
81
+ collection.update_one({_id: id}, {"$inc" => @rocket_job_slice_statistics.stats})
82
+ end
83
+ end
84
+
85
+ def rocket_job_slice_statistics
86
+ @rocket_job_slice_statistics ||= Stats.new(new_record? ? statistics : nil)
87
+ end
88
+
89
+ # Apply stats gathered during the perform to the slice level stats
90
+ def rocket_job_statistics_commit
91
+ return unless @rocket_job_perform_statistics
92
+
93
+ @rocket_job_perform_statistics.each do |key|
94
+ key.is_a?(Hash) ? rocket_job_slice_statistics.inc(key) : rocket_job_slice_statistics.inc_key(*key)
95
+ end
96
+
97
+ @rocket_job_perform_statistics = nil
76
98
  end
77
99
 
78
100
  # Overrides RocketJob::Batch::Logger#rocket_job_batch_log_payload
79
101
  def rocket_job_batch_log_payload
80
- h = {
102
+ h = {
81
103
  from: aasm.from_state,
82
104
  to: aasm.to_state,
83
105
  event: aasm.current_event
@@ -12,6 +12,8 @@ module RocketJob
12
12
  # )
13
13
  #
14
14
  # tabular.render(row)
15
+ #
16
+ # @deprecated
15
17
  class Tabular
16
18
  autoload :Input, "rocket_job/batch/tabular/input"
17
19
  autoload :Output, "rocket_job/batch/tabular/output"
@@ -3,15 +3,15 @@ require "active_support/concern"
3
3
  module RocketJob
4
4
  module Batch
5
5
  class Tabular
6
- # For the simple case where all `input_categories` have the same format,
7
- # If multiple input categories are used with different formats, then use IOStreams::Tabular directly
8
- # instead of this plugin.
6
+ # @deprecated
9
7
  module Input
10
8
  extend ActiveSupport::Concern
11
9
 
12
10
  included do
11
+ warn "#{name} is using RocketJob::Batch::Tabular::Input which is deprecated"
12
+
13
13
  field :tabular_input_header, type: Array, class_attribute: true, user_editable: true
14
- field :tabular_input_format, type: Symbol, default: :csv, class_attribute: true, user_editable: true
14
+ field :tabular_input_format, type: Mongoid::StringifiedSymbol, default: :csv, class_attribute: true, user_editable: true
15
15
  field :tabular_input_options, type: Hash, class_attribute: true
16
16
 
17
17
  # tabular_input_mode: [:line | :array | :hash]
@@ -22,7 +22,7 @@ module RocketJob
22
22
  # :hash
23
23
  # Parses each line from the file into a Hash and uploads each hash for processing by workers.
24
24
  # See IOStreams#each.
25
- field :tabular_input_mode, type: Symbol, default: :line, class_attribute: true, user_editable: true, copy_on_restart: true
25
+ field :tabular_input_mode, type: Mongoid::StringifiedSymbol, default: :line, class_attribute: true, user_editable: true, copy_on_restart: true
26
26
 
27
27
  validates_inclusion_of :tabular_input_format, in: IOStreams::Tabular.registered_formats
28
28
  validates_inclusion_of :tabular_input_mode, in: %i[line array hash row record]
@@ -119,7 +119,9 @@ module RocketJob
119
119
  end
120
120
 
121
121
  def tabular_input_header_present
122
- if tabular_input_header.present? || !tabular_input.header? || (tabular_input_mode == :hash || tabular_input_mode == :record)
122
+ if tabular_input_header.present? ||
123
+ !tabular_input.header? ||
124
+ (tabular_input_mode == :hash || tabular_input_mode == :record)
123
125
  return
124
126
  end
125
127
 
@@ -10,8 +10,10 @@ module RocketJob
10
10
  extend ActiveSupport::Concern
11
11
 
12
12
  included do
13
+ warn "#{name} is using RocketJob::Batch::Tabular::Output which is deprecated"
14
+
13
15
  field :tabular_output_header, type: Array, class_attribute: true, user_editable: true, copy_on_restart: true
14
- field :tabular_output_format, type: Symbol, default: :csv, class_attribute: true, user_editable: true, copy_on_restart: true
16
+ field :tabular_output_format, type: Mongoid::StringifiedSymbol, default: :csv, class_attribute: true, user_editable: true, copy_on_restart: true
15
17
  field :tabular_output_options, type: Hash, class_attribute: true
16
18
 
17
19
  validates_inclusion_of :tabular_output_format, in: IOStreams::Tabular.registered_formats
@@ -55,7 +57,7 @@ module RocketJob
55
57
 
56
58
  # Render the output from the perform.
57
59
  def tabular_output_render
58
- return unless collect_output?
60
+ return unless output_categories.present?
59
61
 
60
62
  @rocket_job_output = tabular_output.render(@rocket_job_output)
61
63
  end
@@ -37,34 +37,25 @@ module RocketJob
37
37
  validates :throttle_running_workers, numericality: {greater_than_or_equal_to: 0}, allow_nil: true
38
38
 
39
39
  define_batch_throttle :throttle_running_workers_exceeded?, filter: :throttle_filter_id
40
-
41
- # Deprecated. For backward compatibility.
42
- alias_method :throttle_running_slices, :throttle_running_workers
43
- alias_method :throttle_running_slices=, :throttle_running_workers=
44
40
  end
45
41
 
46
42
  private
47
43
 
48
- # Returns [Boolean] whether the throttle for this job has been exceeded
44
+ # Returns [true|false] whether the throttle for this job has been exceeded
49
45
  def throttle_running_workers_exceeded?(slice)
50
- return unless throttle_running_workers&.positive?
46
+ return false unless throttle_running_workers&.positive?
51
47
 
52
48
  input.running.with(read: {mode: :primary}) do |conn|
53
49
  conn.where(:id.ne => slice.id).count >= throttle_running_workers
54
50
  end
55
51
  end
56
52
 
57
- # Returns [Boolean] whether the throttle for this job has been exceeded
58
- #
59
- # With a Batch job, allow a higher priority queued job to replace a running one with
60
- # a lower priority.
61
- def throttle_running_jobs_exceeded?
62
- return unless throttle_running_jobs&.positive?
63
-
64
- # Cannot use this class since it will include instances of parent job classes.
65
- RocketJob::Job.with(read: {mode: :primary}) do |conn|
66
- conn.running.where("_type" => self.class.name, :id.ne => id, :priority.lte => priority).count >= throttle_running_jobs
67
- end
53
+ # Allows another job with a higher priority to start even though this one is running already
54
+ # @overrides RocketJob::Plugins::Job::ThrottleRunningJobs#throttle_running_jobs_base_query
55
+ def throttle_running_jobs_base_query
56
+ query = super
57
+ query[:priority.lte] = priority if throttle_running_workers&.positive?
58
+ query
68
59
  end
69
60
  end
70
61
  end
@@ -23,9 +23,6 @@ module RocketJob
23
23
  #
24
24
  # Slices are destroyed after their records are successfully processed
25
25
  #
26
- # Results are stored in the output collection if `collect_output?`
27
- # `nil` results from workers are kept if `collect_nil_output`
28
- #
29
26
  # If an exception was thrown the entire slice of records is marked as failed.
30
27
  #
31
28
  # Thread-safe, can be called by multiple threads at the same time
@@ -40,7 +37,8 @@ module RocketJob
40
37
 
41
38
  SemanticLogger.named_tagged(job: id.to_s) do
42
39
  until worker.shutdown?
43
- if slice = input.next_slice(worker.name)
40
+ slice = input.next_slice(worker.name)
41
+ if slice
44
42
  # Grab a slice before checking the throttle to reduce concurrency race condition.
45
43
  return true if slice.fail_on_exception!(re_raise_exceptions) { rocket_job_batch_throttled?(slice, worker) }
46
44
  next if slice.failed?
@@ -97,7 +95,7 @@ module RocketJob
97
95
  servers = []
98
96
  case sub_state
99
97
  when :before, :after
100
- unless server_name && !worker_on_server?(server_name)
98
+ if running? && (server_name.nil? || worker_on_server?(server_name))
101
99
  servers << ActiveWorker.new(worker_name, started_at, self) if running?
102
100
  end
103
101
  when :processing
@@ -143,19 +141,23 @@ module RocketJob
143
141
 
144
142
  # Perform individual slice without callbacks
145
143
  def rocket_job_perform_slice(slice, &block)
146
- count = 0
147
- RocketJob::Sliced::Writer::Output.collect(self, slice) do |writer|
148
- records = slice.records
149
-
150
- # Skip records already processed, if any.
151
- # slice.processing_record_number ||= 0
152
- # TODO: Must append to existing output slices before this can be enabled.
153
- # if !collect_output && (slice.processing_record_number > 1)
154
- # records = records[slice.processing_record_number - 1..-1]
155
- # end
156
- # Until the changes above have been implemented, reprocess all records in the slice.
144
+ slice.processing_record_number ||= 0
145
+ records = []
146
+ append = false
147
+
148
+ # Skip processed records in this slice if it has no output categories.
149
+ if slice.processing_record_number > 1
150
+ records = slice.records[slice.processing_record_number - 1..-1]
151
+ append = true
152
+ logger.info("Resuming previously incomplete slice from record number #{slice.processing_record_number}")
153
+ else
154
+ # Reprocess all records in this slice.
157
155
  slice.processing_record_number = 0
156
+ records = slice.records
157
+ end
158
158
 
159
+ count = 0
160
+ RocketJob::Sliced::Writer::Output.collect(self, input_slice: slice, append: append) do |writer|
159
161
  records.each do |record|
160
162
  slice.processing_record_number += 1
161
163
  SemanticLogger.named_tagged(record: slice.current_record_number) do
@@ -174,8 +176,8 @@ module RocketJob
174
176
  return block_given? ? yield(record) : perform(record) if _perform_callbacks.empty?
175
177
 
176
178
  # @rocket_job_input and @rocket_job_output can be modified by before/around callbacks
177
- @rocket_job_input = record
178
- @rocket_job_output = nil
179
+ @rocket_job_input = record
180
+ @rocket_job_output = nil
179
181
 
180
182
  run_callbacks(:perform) do
181
183
  @rocket_job_output =
@@ -186,9 +188,9 @@ module RocketJob
186
188
  end
187
189
  end
188
190
 
189
- @rocket_job_input = nil
190
- result = @rocket_job_output
191
- @rocket_job_output = nil
191
+ @rocket_job_input = nil
192
+ result = @rocket_job_output
193
+ @rocket_job_output = nil
192
194
  result
193
195
  end
194
196
 
@@ -244,7 +246,7 @@ module RocketJob
244
246
  unless new_record?
245
247
  # Fail job iff no other worker has already finished it
246
248
  # Must set write concern to at least 1 since we need the nModified back
247
- result = self.class.with(write: {w: 1}) do |query|
249
+ result = self.class.with(write: {w: 1}) do |query|
248
250
  query.
249
251
  where(id: id, state: :running, sub_state: :processing).
250
252
  update({"$set" => {state: :failed, worker_name: worker_name}})
@@ -305,11 +307,12 @@ module RocketJob
305
307
  # Run Batch before and after callbacks
306
308
  def rocket_job_batch_callbacks(worker)
307
309
  # If this is the first worker to pickup this job
308
- if sub_state == :before
310
+ case sub_state
311
+ when :before
309
312
  rocket_job_batch_run_before_callbacks
310
313
  # Check for 0 record jobs
311
314
  rocket_job_batch_complete?(worker.name) if running?
312
- elsif sub_state == :after
315
+ when :after
313
316
  rocket_job_batch_run_after_callbacks
314
317
  end
315
318
  end