rocketjob 5.1.1 → 5.2.0.beta1
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/bin/rocketjob +2 -2
- data/bin/rocketjob_batch_perf +1 -1
- data/bin/rocketjob_perf +1 -1
- data/lib/rocket_job/active_worker.rb +1 -0
- data/lib/rocket_job/batch.rb +16 -17
- data/lib/rocket_job/batch/callbacks.rb +1 -2
- data/lib/rocket_job/batch/io.rb +10 -6
- data/lib/rocket_job/batch/logger.rb +2 -2
- data/lib/rocket_job/batch/lower_priority.rb +2 -2
- data/lib/rocket_job/batch/model.rb +23 -23
- data/lib/rocket_job/batch/performance.rb +19 -21
- data/lib/rocket_job/batch/result.rb +1 -1
- data/lib/rocket_job/batch/results.rb +1 -1
- data/lib/rocket_job/batch/state_machine.rb +5 -6
- data/lib/rocket_job/batch/statistics.rb +10 -8
- data/lib/rocket_job/batch/tabular.rb +2 -2
- data/lib/rocket_job/batch/tabular/input.rb +11 -7
- data/lib/rocket_job/batch/tabular/output.rb +1 -1
- data/lib/rocket_job/batch/throttle.rb +11 -30
- data/lib/rocket_job/batch/{throttle_running_slices.rb → throttle_running_workers.rb} +13 -10
- data/lib/rocket_job/batch/worker.rb +102 -85
- data/lib/rocket_job/cli.rb +57 -54
- data/lib/rocket_job/config.rb +8 -10
- data/lib/rocket_job/dirmon_entry.rb +13 -10
- data/lib/rocket_job/event.rb +16 -16
- data/lib/rocket_job/extensions/mongo/logging.rb +2 -2
- data/lib/rocket_job/extensions/mongoid/clients/options.rb +2 -2
- data/lib/rocket_job/extensions/mongoid/contextual/mongo.rb +4 -2
- data/lib/rocket_job/extensions/mongoid/factory.rb +13 -5
- data/lib/rocket_job/extensions/rocket_job_adapter.rb +2 -1
- data/lib/rocket_job/job_exception.rb +0 -3
- data/lib/rocket_job/jobs/dirmon_job.rb +4 -4
- data/lib/rocket_job/jobs/housekeeping_job.rb +7 -7
- data/lib/rocket_job/jobs/on_demand_batch_job.rb +14 -4
- data/lib/rocket_job/jobs/on_demand_job.rb +3 -3
- data/lib/rocket_job/jobs/performance_job.rb +1 -1
- data/lib/rocket_job/jobs/re_encrypt/relational_job.rb +11 -10
- data/lib/rocket_job/jobs/upload_file_job.rb +9 -5
- data/lib/rocket_job/performance.rb +24 -22
- data/lib/rocket_job/plugins/cron.rb +7 -3
- data/lib/rocket_job/plugins/document.rb +7 -5
- data/lib/rocket_job/plugins/job/callbacks.rb +1 -1
- data/lib/rocket_job/plugins/job/logger.rb +3 -3
- data/lib/rocket_job/plugins/job/model.rb +34 -27
- data/lib/rocket_job/plugins/job/persistence.rb +7 -34
- data/lib/rocket_job/plugins/job/state_machine.rb +5 -4
- data/lib/rocket_job/plugins/job/throttle.rb +12 -28
- data/lib/rocket_job/plugins/job/throttle_running_jobs.rb +2 -2
- data/lib/rocket_job/plugins/job/worker.rb +22 -70
- data/lib/rocket_job/plugins/processing_window.rb +5 -4
- data/lib/rocket_job/plugins/restart.rb +3 -3
- data/lib/rocket_job/plugins/retry.rb +2 -2
- data/lib/rocket_job/plugins/singleton.rb +1 -2
- data/lib/rocket_job/plugins/state_machine.rb +4 -4
- data/lib/rocket_job/plugins/transaction.rb +1 -1
- data/lib/rocket_job/rocket_job.rb +5 -4
- data/lib/rocket_job/server.rb +2 -2
- data/lib/rocket_job/server/model.rb +14 -13
- data/lib/rocket_job/server/state_machine.rb +1 -2
- data/lib/rocket_job/sliced/compressed_slice.rb +4 -4
- data/lib/rocket_job/sliced/encrypted_slice.rb +4 -4
- data/lib/rocket_job/sliced/input.rb +16 -16
- data/lib/rocket_job/sliced/output.rb +2 -2
- data/lib/rocket_job/sliced/slice.rb +43 -20
- data/lib/rocket_job/sliced/slices.rb +14 -11
- data/lib/rocket_job/subscriber.rb +6 -6
- data/lib/rocket_job/subscribers/logger.rb +3 -3
- data/lib/rocket_job/supervisor.rb +12 -12
- data/lib/rocket_job/supervisor/shutdown.rb +7 -7
- data/lib/rocket_job/throttle_definition.rb +37 -0
- data/lib/rocket_job/throttle_definitions.rb +39 -0
- data/lib/rocket_job/version.rb +1 -1
- data/lib/rocket_job/worker.rb +116 -34
- data/lib/rocket_job/worker_pool.rb +6 -6
- data/lib/rocketjob.rb +72 -76
- metadata +16 -18
- data/lib/rocket_job/extensions/mongoid_5/clients/options.rb +0 -38
- data/lib/rocket_job/extensions/mongoid_5/contextual/mongo.rb +0 -64
- data/lib/rocket_job/extensions/mongoid_5/factory.rb +0 -13
data/lib/rocket_job/plugins/processing_window.rb
CHANGED
@@ -1,4 +1,4 @@
-require 'active_support/concern'
+require "active_support/concern"
 
 module RocketJob
   module Plugins
@@ -49,8 +49,8 @@ module RocketJob
         validates_each :processing_schedule do |record, attr, value|
           begin
             RocketJob::Plugins::Rufus::CronLine.new(value)
-          rescue ArgumentError => exc
-            record.errors.add(attr, exc.message)
+          rescue ArgumentError => e
+            record.errors.add(attr, e.message)
           end
         end
       end
@@ -68,8 +68,9 @@ module RocketJob
       # Only process this job if it is still in its processing window
       def rocket_job_processing_window_check
         return if rocket_job_processing_window_active?
+
         logger.warn("Processing window closed before job was processed. Job is re-scheduled to run at: #{rocket_job_processing_schedule.next_time}")
-        self.worker_name ||= 'inline'
+        self.worker_name ||= "inline"
         requeue!(worker_name)
       end
 
data/lib/rocket_job/plugins/restart.rb
CHANGED
@@ -1,4 +1,4 @@
-require 'active_support/concern'
+require "active_support/concern"
 
 module RocketJob
   module Plugins
@@ -88,7 +88,7 @@ module RocketJob
       # Run again in the future, even if this run fails with an exception
       def rocket_job_restart_new_instance
         if expired?
-          logger.info('Job has expired. Not creating a new instance.')
+          logger.info("Job has expired. Not creating a new instance.")
           return
         end
         attributes = rocket_job_restart_attributes.each_with_object({}) { |attr, attrs| attrs[attr] = send(attr) }
@@ -109,7 +109,7 @@ module RocketJob
           logger.info("Created a new job instance: #{job.id}")
           return true
         else
-          logger.info('Job already active, retrying after a short sleep')
+          logger.info("Job already active, retrying after a short sleep")
           sleep(sleep_interval)
         end
         count += 1
data/lib/rocket_job/plugins/retry.rb
CHANGED
@@ -1,4 +1,4 @@
-require 'active_support/concern'
+require "active_support/concern"
 
 module RocketJob
   module Plugins
@@ -76,7 +76,7 @@ module RocketJob
       # Returns [Time] when to retry this job at
      # Same basic formula as Delayed Job
       def rocket_job_retry_seconds_to_delay
-        (rocket_job_failure_count ** 4) + 15 + (rand(30) * (rocket_job_failure_count + 1))
+        (rocket_job_failure_count**4) + 15 + (rand(30) * (rocket_job_failure_count + 1))
       end
     end
   end
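The retry delay above is the Delayed Job style polynomial backoff mentioned in the comment. A quick illustration of how it grows with the failure count (a standalone sketch, not code from the gem):

    # Same arithmetic as rocket_job_retry_seconds_to_delay, extracted for illustration.
    def retry_delay_seconds(failure_count)
      (failure_count**4) + 15 + (rand(30) * (failure_count + 1))
    end

    retry_delay_seconds(1) # => between 16 and 74 seconds
    retry_delay_seconds(3) # => between 96 and 212 seconds
    retry_delay_seconds(5) # => between 640 and 814 seconds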
data/lib/rocket_job/plugins/singleton.rb
CHANGED
@@ -1,4 +1,4 @@
-require 'active_support/concern'
+require "active_support/concern"
 
 module RocketJob
   module Plugins
@@ -25,7 +25,6 @@ module RocketJob
 
         errors.add(:state, "Another instance of #{self.class.name} is already running, queued, or paused")
       end
-
     end
   end
 end
data/lib/rocket_job/plugins/state_machine.rb
CHANGED
@@ -1,5 +1,5 @@
-require 'active_support/concern'
-require 'aasm'
+require "active_support/concern"
+require "aasm"
 
 module RocketJob
   module Plugins
@@ -33,8 +33,8 @@ module RocketJob
       # Adds a :before or :after callback to an event
      # state_machine_add_event_callback(:start, :before, :my_method)
       def self.state_machine_add_event_callback(event_name, action, *methods, &block)
-        raise(ArgumentError, 'Cannot supply both a method name and a block') if methods.size.positive? && block
-        raise(ArgumentError, 'Must supply either a method name or a block') unless methods.size.positive? || block
+        raise(ArgumentError, "Cannot supply both a method name and a block") if methods.size.positive? && block
+        raise(ArgumentError, "Must supply either a method name or a block") unless methods.size.positive? || block
 
       # TODO: Somehow get AASM to support options such as :if and :unless to be consistent with other callbacks
       # For example:
data/lib/rocket_job/rocket_job.rb
CHANGED
@@ -26,20 +26,21 @@ module RocketJob
   # Returns a human readable duration from the supplied [Float] number of seconds
   def self.seconds_as_duration(seconds)
     return nil unless seconds
+
     if seconds >= 86_400.0 # 1 day
       "#{(seconds / 86_400).to_i}d #{Time.at(seconds).strftime('%-Hh %-Mm')}"
     elsif seconds >= 3600.0 # 1 hour
-      Time.at(seconds).strftime('%-Hh %-Mm')
+      Time.at(seconds).strftime("%-Hh %-Mm")
     elsif seconds >= 60.0 # 1 minute
-      Time.at(seconds).strftime('%-Mm %-Ss')
+      Time.at(seconds).strftime("%-Mm %-Ss")
     elsif seconds >= 1.0 # 1 second
-      format('%.3fs', seconds)
+      format("%.3fs", seconds)
     else
       duration = seconds * 1000
       if defined? JRuby
         "#{duration.to_i}ms"
       else
-        duration < 10.0 ? format('%.3fms', duration) : format('%.1fms', duration)
+        duration < 10.0 ? format("%.3fms", duration) : format("%.1fms", duration)
       end
     end
   end
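RocketJob.seconds_as_duration above picks a unit from the magnitude of its argument. A few illustrative calls, assuming the behaviour shown in the diff (the minute and hour branches go through Time.at, so their exact output depends on the local UTC offset):

    RocketJob.seconds_as_duration(0.5)   # => "500.0ms" on MRI, "500ms" on JRuby
    RocketJob.seconds_as_duration(5.25)  # => "5.250s"
    RocketJob.seconds_as_duration(90)    # => "1m 30s" in a whole-hour UTC offset
    RocketJob.seconds_as_duration(nil)   # => nil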
data/lib/rocket_job/server/model.rb
CHANGED
@@ -1,5 +1,5 @@
-require 'yaml'
-require 'active_support/concern'
+require "yaml"
+require "active_support/concern"
 
 module RocketJob
   class Server
@@ -8,7 +8,7 @@ module RocketJob
       extend ActiveSupport::Concern
 
       included do
-        store_in collection: 'rocket_job.servers'
+        store_in collection: "rocket_job.servers"
 
         # Unique Name of this server instance
         # Default: `host name:PID`
@@ -24,13 +24,13 @@ module RocketJob
         field :started_at, type: Time
 
         # The heartbeat information for this server
-        embeds_one :heartbeat, class_name: 'RocketJob::Heartbeat'
+        embeds_one :heartbeat, class_name: "RocketJob::Heartbeat"
 
         # Current state
         # Internal use only. Do not set this field directly
         field :state, type: Symbol, default: :starting
 
-        index({name: 1}, background: true, unique: true
+        index({name: 1}, background: true, unique: true)
 
         validates_presence_of :state, :name, :max_workers
 
@@ -58,8 +58,8 @@ module RocketJob
      # # => {}
      def self.counts_by_state
        counts = {}
-        collection.aggregate([{'$group' => {_id: '$state', count: {'$sum' => 1}}}]).each do |result|
-          counts[result['_id'].to_sym] = result['count']
+        collection.aggregate([{"$group" => {_id: "$state", count: {"$sum" => 1}}}]).each do |result|
+          counts[result["_id"].to_sym] = result["count"]
        end
        counts
      end
@@ -70,6 +70,7 @@ module RocketJob
        count = 0
        each do |server|
          next unless server.zombie?
+
          logger.warn "Destroying zombie server #{server.name}, and requeueing its jobs"
          server.destroy
          count += 1
@@ -83,9 +84,9 @@ module RocketJob
        last_heartbeat_time = Time.now - dead_seconds
        where(
          :state.in => %i[stopping running paused],
-          '$or' => [
-            {'heartbeat.updated_at' => {'$exists' => false}},
-            {'heartbeat.updated_at' => {'$lte' => last_heartbeat_time}}
+          "$or" => [
+            {"heartbeat.updated_at" => {"$exists" => false}},
+            {"heartbeat.updated_at" => {"$lte" => last_heartbeat_time}}
          ]
        )
      end
@@ -100,6 +101,7 @@ module RocketJob
      def zombie?(missed = 4)
        return false unless running? || stopping? || paused?
        return true if heartbeat.nil? || heartbeat.updated_at.nil?
+
        dead_seconds = Config.heartbeat_seconds * missed
        (Time.now - heartbeat.updated_at) >= dead_seconds
      end
@@ -108,8 +110,8 @@ module RocketJob
      def refresh(worker_count)
        SemanticLogger.silence(:info) do
          find_and_update(
-            'heartbeat.updated_at' => Time.now,
-            'heartbeat.workers' => worker_count
+            "heartbeat.updated_at" => Time.now,
+            "heartbeat.workers" => worker_count
          )
        end
      end
@@ -120,7 +122,6 @@ module RocketJob
      def requeue_jobs
        RocketJob::Job.requeue_dead_server(name)
      end
-
    end
  end
 end
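The zombie handling above is plain heartbeat arithmetic: a server in a live state whose last heartbeat is older than Config.heartbeat_seconds * missed (4 by default) is treated as dead, destroyed by destroy_zombies, and its jobs are requeued. A minimal sketch of the same threshold check (the standalone method and the 15-second heartbeat are assumptions for illustration, not the gem's API):

    # Mirrors the comparison in Server::Model#zombie? above.
    def zombie_heartbeat?(last_heartbeat_at, heartbeat_seconds: 15.0, missed: 4)
      return true if last_heartbeat_at.nil?

      (Time.now - last_heartbeat_at) >= (heartbeat_seconds * missed)
    end

    zombie_heartbeat?(Time.now - 30) # => false, only two beats missed
    zombie_heartbeat?(Time.now - 61) # => true, more than four beats (60s) of silence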
data/lib/rocket_job/sliced/compressed_slice.rb
CHANGED
@@ -1,4 +1,4 @@
-require 'zlib'
+require "zlib"
 module RocketJob
   module Sliced
     # Compress the records within a slice
@@ -6,20 +6,20 @@ module RocketJob
       private
 
       def parse_records
-        records = attributes.delete('records')
+        records = attributes.delete("records")
 
         # Convert BSON::Binary to a string
         binary_str = records.data
 
         str = Zlib::Inflate.inflate(binary_str)
-        @records = Hash.from_bson(BSON::ByteBuffer.new(str))['r']
+        @records = Hash.from_bson(BSON::ByteBuffer.new(str))["r"]
       end
 
       def serialize_records
         return [] if @records.nil? || @records.empty?
 
         # Convert slice of records into a single string
-        str = {'r' => records.to_a}.to_bson.to_s
+        str = {"r" => records.to_a}.to_bson.to_s
 
         data = Zlib::Deflate.deflate(str)
         BSON::Binary.new(data)
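parse_records and serialize_records above store a slice's records as one deflated BSON document under the key "r". A small stand-alone round trip of the same idea (assumes the bson gem and Ruby's zlib; an illustration, not the gem's public API):

    require "zlib"
    require "bson"

    records = [["line 1"], ["line 2"]]

    # Serialize: wrap the records under "r", BSON-encode, then deflate.
    blob = Zlib::Deflate.deflate({"r" => records}.to_bson.to_s)

    # Parse: inflate, then read the records back out of the BSON document.
    restored = Hash.from_bson(BSON::ByteBuffer.new(Zlib::Inflate.inflate(blob)))["r"]
    restored == records # => true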
data/lib/rocket_job/sliced/encrypted_slice.rb
CHANGED
@@ -1,4 +1,4 @@
-require 'symmetric-encryption'
+require "symmetric-encryption"
 module RocketJob
   module Sliced
     # Compress the records within a slice
@@ -6,7 +6,7 @@ module RocketJob
       private
 
       def parse_records
-        records = attributes.delete('records')
+        records = attributes.delete("records")
 
         # Convert BSON::Binary to a string
         binary_str = records.data
@@ -16,14 +16,14 @@ module RocketJob
         # Use the header that is present to decrypt the data, since its version could be different
         str = header.cipher.binary_decrypt(binary_str, header: header)
 
-        @records = Hash.from_bson(BSON::ByteBuffer.new(str))['r']
+        @records = Hash.from_bson(BSON::ByteBuffer.new(str))["r"]
       end
 
       def serialize_records
         return [] if @records.nil? || @records.empty?
 
         # Convert slice of records into a single string
-        str = {'r' => to_a}.to_bson.to_s
+        str = {"r" => to_a}.to_bson.to_s
 
         # Encrypt to binary without applying an encoding such as Base64
         # Use a random_iv with each encryption for better security
data/lib/rocket_job/sliced/input.rb
CHANGED
@@ -5,9 +5,9 @@ module RocketJob
         # Create indexes before uploading
         create_indexes
         Writer::Input.collect(self, on_first: on_first, &block)
-      rescue StandardError => exc
+      rescue StandardError => e
         drop
-        raise(exc)
+        raise(e)
       end
 
       def upload_mongo_query(criteria, *column_names, &block)
@@ -19,7 +19,7 @@ module RocketJob
           options[:projection] = options.delete(:fields) if options.key?(:fields)
         else
           column_names = column_names.collect(&:to_s)
-          column_names << '_id' if column_names.size.zero?
+          column_names << "_id" if column_names.size.zero?
 
           fields = options.delete(:fields) || {}
           column_names.each { |col| fields[col] = 1 }
@@ -73,9 +73,9 @@ module RocketJob
           count += 1
         end
         count
-      rescue StandardError => exc
+      rescue StandardError => e
         drop
-        raise(exc)
+        raise(e)
       end
 
       def upload_integer_range_in_reverse_order(start_id, last_id)
@@ -91,9 +91,9 @@ module RocketJob
           count += 1
         end
         count
-      rescue StandardError => exc
+      rescue StandardError => e
         drop
-        raise(exc)
+        raise(e)
       end
 
       # Iterate over each failed record, if any
@@ -116,16 +116,16 @@ module RocketJob
       # Requeue all failed slices
       def requeue_failed
         failed.update_all(
-          '$unset' => {worker_name: nil, started_at: nil},
-          '$set' => {state: :queued}
+          "$unset" => {worker_name: nil, started_at: nil},
+          "$set" => {state: :queued}
         )
       end
 
       # Requeue all running slices for a server or worker that is no longer available
       def requeue_running(worker_name)
         running.where(worker_name: /\A#{worker_name}/).update_all(
-          '$unset' => {worker_name: nil, started_at: nil},
-          '$set' => {state: :queued}
+          "$unset" => {worker_name: nil, started_at: nil},
+          "$set" => {state: :queued}
         )
       end
 
@@ -137,11 +137,11 @@ module RocketJob
         # TODO: Will it perform faster without the id sort?
         # I.e. Just process on a FIFO basis?
         document = all.queued.
-          sort('_id' => 1).
-          find_one_and_update(
-            {'$set' => {worker_name: worker_name, state: :running, started_at: Time.now}},
-            return_document: :after
-          )
+          sort("_id" => 1).
+          find_one_and_update(
+            {"$set" => {worker_name: worker_name, state: :running, started_at: Time.now}},
+            return_document: :after
+          )
         document.collection_name = collection_name if document
         document
       end
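next_slice above claims work atomically: a single find_one_and_update selects the oldest queued slice and flips it to :running with the worker's name in one server-side operation, so two workers can never pick up the same slice; requeue_failed and requeue_running reverse that with a $unset/$set update. The same claim pattern at the raw driver level (a sketch assuming a local MongoDB and an example collection, not the gem's code):

    require "mongo"

    client = Mongo::Client.new(["localhost:27017"], database: "rocketjob_example")
    slices = client[:example_slices]

    # Atomically claim the oldest queued document for this worker.
    claimed = slices.find(state: "queued").
                     sort(_id: 1).
                     find_one_and_update(
                       {"$set" => {state: "running", worker_name: "worker.1", started_at: Time.now}},
                       return_document: :after
                     )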
data/lib/rocket_job/sliced/output.rb
CHANGED
@@ -1,10 +1,10 @@
-require 'tempfile'
+require "tempfile"
 
 module RocketJob
   module Sliced
     class Output < Slices
       def download(header_line: nil)
-        raise(ArgumentError, 'Block is mandatory') unless block_given?
+        raise(ArgumentError, "Block is mandatory") unless block_given?
 
         # Write the header line
         yield(header_line) if header_line
data/lib/rocket_job/sliced/slice.rb
CHANGED
@@ -1,4 +1,4 @@
-require 'forwardable'
+require "forwardable"
 module RocketJob
   module Sliced
     # A slice is an Array of Records, along with meta-data that is used
@@ -21,12 +21,11 @@ module RocketJob
       include RocketJob::Plugins::StateMachine
       extend Forwardable
 
-      store_in client: 'rocketjob_slices'
+      store_in client: "rocketjob_slices"
 
       # The record number of the first record in this slice.
-      #
-      #
-      # is being processed.
+      # Useful in knowing the line number of each record in this slice
+      # relative to the original file that was uploaded.
       field :first_record_number, type: Integer
 
       #
@@ -42,11 +41,14 @@ module RocketJob
       # Number of times that this job has failed to process
       field :failure_count, type: Integer
 
+      # Number of the record within this slice (not the entire file/job) currently being processed. (One based index)
+      field :processing_record_number, type: Integer
+
       # This name of the worker that this job is being processed by, or was processed by
       field :worker_name, type: String
 
       # The last exception for this slice if any
-      embeds_one :exception, class_name: 'RocketJob::JobException'
+      embeds_one :exception, class_name: "RocketJob::JobException"
 
       after_find :parse_records
 
@@ -108,12 +110,16 @@ module RocketJob
       def_instance_delegators :records, :each, :<<, :size, :concat, :at
       def_instance_delegators :records, *(Enumerable.instance_methods - Module.methods)
 
-      #
-      def
+      # Returns [Integer] the record number of the record currently being processed relative to the entire file.
+      def current_record_number
+        first_record_number.to_i + processing_record_number.to_i
+      end
+
+      # Before Fail save the exception to this slice.
+      def set_exception(exc = nil)
         if exc
-          self.exception          = JobException.from_exception(exc)
-          exception.worker_name   = worker_name
-          exception.record_number = record_number
+          self.exception = JobException.from_exception(exc)
+          exception.worker_name = worker_name
         end
         self.failure_count = failure_count.to_i + 1
         self.worker_name = nil
@@ -122,8 +128,8 @@ module RocketJob
       # Returns the failed record.
       # Returns [nil] if there is no failed record
       def failed_record
-        if exception &&
-          at(
+        if exception && processing_record_number
+          at(processing_record_number - 1)
         end
       end
 
@@ -132,13 +138,13 @@ module RocketJob
       if ::Mongoid::VERSION.to_i >= 6
         def as_attributes
           attrs = super
-          attrs['records'] = serialize_records if @records
+          attrs["records"] = serialize_records if @records
           attrs
         end
       else
         def as_document
           attrs = super
-          attrs['records'] = serialize_records if @records
+          attrs["records"] = serialize_records if @records
           attrs
         end
       end
@@ -147,25 +153,42 @@ module RocketJob
         "#{super[0...-1]}, records: #{@records.inspect}, collection_name: #{collection_name.inspect}>"
       end
 
+      # Fail this slice if an exception occurs during processing.
+      def fail_on_exception!(re_raise_exceptions = false, &block)
+        SemanticLogger.named_tagged(slice: id.to_s, &block)
+      rescue Exception => e
+        SemanticLogger.named_tagged(slice: id.to_s) do
+          if failed? || !may_fail?
+            exception = JobException.from_exception(e)
+            exception.worker_name = worker_name
+            save! unless new_record? || destroyed?
+          elsif new_record? || destroyed?
+            fail(e)
+          else
+            fail!(e)
+          end
+          raise e if re_raise_exceptions
+        end
+      end
+
       private
 
       # Always add records to any updates.
       def atomic_updates(*args)
-        r
-        if @records
-          (r['$set'] ||= {})['records'] = serialize_records
-        end
+        r = super(*args)
+        (r["$set"] ||= {})["records"] = serialize_records if @records
         r
       end
 
       def parse_records
-        @records = attributes.delete('records')
+        @records = attributes.delete("records")
       end
 
       def serialize_records
         records.mongoize
       end
 
+      # Before Start
       def set_started_at
         self.started_at = Time.now
       end
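The new processing_record_number field is a one-based position within the slice; current_record_number adds it to first_record_number to report a position in the original uploaded file, and fail_on_exception! wraps slice processing so any raised exception is captured onto the slice and the slice is failed. The record-number arithmetic with made-up values:

    # Hypothetical values, illustrating Slice#current_record_number above.
    first_record_number      = 1_000 # record number of this slice's first record in the file
    processing_record_number = 18    # one-based position currently being processed within the slice
    first_record_number + processing_record_number # => 1018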