rocketjob 6.0.0.rc1 → 6.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +164 -8
- data/lib/rocket_job/batch/categories.rb +25 -18
- data/lib/rocket_job/batch/io.rb +130 -130
- data/lib/rocket_job/batch/performance.rb +2 -2
- data/lib/rocket_job/batch/statistics.rb +2 -2
- data/lib/rocket_job/batch/throttle_running_workers.rb +1 -1
- data/lib/rocket_job/batch/worker.rb +14 -12
- data/lib/rocket_job/batch.rb +0 -1
- data/lib/rocket_job/category/base.rb +10 -7
- data/lib/rocket_job/category/input.rb +61 -1
- data/lib/rocket_job/category/output.rb +9 -0
- data/lib/rocket_job/cli.rb +1 -1
- data/lib/rocket_job/dirmon_entry.rb +1 -1
- data/lib/rocket_job/extensions/mongoid/contextual/mongo.rb +2 -2
- data/lib/rocket_job/extensions/rocket_job_adapter.rb +2 -2
- data/lib/rocket_job/job_exception.rb +1 -1
- data/lib/rocket_job/jobs/conversion_job.rb +43 -0
- data/lib/rocket_job/jobs/dirmon_job.rb +24 -35
- data/lib/rocket_job/jobs/housekeeping_job.rb +4 -5
- data/lib/rocket_job/jobs/on_demand_batch_job.rb +15 -11
- data/lib/rocket_job/jobs/on_demand_job.rb +2 -2
- data/lib/rocket_job/jobs/re_encrypt/relational_job.rb +103 -97
- data/lib/rocket_job/jobs/upload_file_job.rb +6 -3
- data/lib/rocket_job/lookup_collection.rb +4 -3
- data/lib/rocket_job/plugins/cron.rb +60 -20
- data/lib/rocket_job/plugins/job/persistence.rb +36 -0
- data/lib/rocket_job/plugins/job/throttle.rb +2 -2
- data/lib/rocket_job/plugins/restart.rb +3 -110
- data/lib/rocket_job/plugins/state_machine.rb +2 -2
- data/lib/rocket_job/plugins/throttle_dependent_jobs.rb +43 -0
- data/lib/rocket_job/sliced/bzip2_output_slice.rb +18 -19
- data/lib/rocket_job/sliced/compressed_slice.rb +3 -6
- data/lib/rocket_job/sliced/encrypted_bzip2_output_slice.rb +49 -0
- data/lib/rocket_job/sliced/encrypted_slice.rb +4 -6
- data/lib/rocket_job/sliced/input.rb +42 -54
- data/lib/rocket_job/sliced/slice.rb +7 -3
- data/lib/rocket_job/sliced/slices.rb +12 -9
- data/lib/rocket_job/sliced/writer/input.rb +46 -18
- data/lib/rocket_job/sliced/writer/output.rb +0 -1
- data/lib/rocket_job/sliced.rb +1 -19
- data/lib/rocket_job/throttle_definitions.rb +7 -1
- data/lib/rocket_job/version.rb +1 -1
- data/lib/rocketjob.rb +4 -5
- metadata +12 -12
- data/lib/rocket_job/batch/tabular/input.rb +0 -133
- data/lib/rocket_job/batch/tabular/output.rb +0 -67
- data/lib/rocket_job/batch/tabular.rb +0 -58

data/lib/rocket_job/sliced/bzip2_output_slice.rb CHANGED
@@ -7,36 +7,35 @@ module RocketJob
     # * The `bzip2` linux command line utility supports multiple embedded BZip2 stream,
     #   but some other custom implementations may not. They may only read the first slice and stop.
     # * It is only designed for use on output collections.
-    #
-    # To download the output when using this slice:
-    #
-    #   # Download the binary BZip2 streams into a single file
-    #   IOStreams.path(output_file_name).stream(:none).writer do |io|
-    #     job.download { |slice| io << slice[:binary] }
-    #   end
     class BZip2OutputSlice < ::RocketJob::Sliced::Slice
-      # This is a specialized binary slice for creating binary data from each slice
+      # This is a specialized binary slice for creating BZip2 binary data from each slice
       # that must be downloaded as-is into output files.
-      def self.binary?
-        true
+      def self.binary_format
+        :bz2
+      end
+
+      # Compress the supplied records with BZip2
+      def self.to_binary(records, record_delimiter = "\n")
+        return [] if records.blank?
+
+        lines = Array(records).join(record_delimiter) + record_delimiter
+        s = StringIO.new
+        IOStreams::Bzip2::Writer.stream(s) { |io| io.write(lines) }
+        s.string
      end

       private

+      # Returns [Hash] the BZip2 compressed binary data in binary form when reading back from Mongo.
       def parse_records
-        records = attributes.delete("records")
-
         # Convert BSON::Binary to a string
-        @records = [records.data]
+        @records = [attributes.delete("records").data]
       end

+      # Returns [BSON::Binary] the records compressed using BZip2 into a string.
       def serialize_records
-        return [] if @records.nil? || @records.empty?
-
-        lines = records.to_a.join("\n") + "\n"
-        s = StringIO.new
-        IOStreams::Bzip2::Writer.stream(s) { |io| io.write(lines) }
-        BSON::Binary.new(s.string)
+        # TODO: Make the line terminator configurable
+        BSON::Binary.new(self.class.to_binary(@records))
       end
     end
   end
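
Each call to the new class-level to_binary renders one complete, self-contained BZip2 stream, so concatenating slices yields a file that the `bzip2` CLI can decompress in full. A minimal sketch (file name and records illustrative):

    require "rocketjob"

    slice1 = RocketJob::Sliced::BZip2OutputSlice.to_binary(%w[line1 line2])
    slice2 = RocketJob::Sliced::BZip2OutputSlice.to_binary(%w[line3 line4])

    # Two embedded BZip2 streams, back to back, in a single file.
    File.open("output.csv.bz2", "wb") do |file|
      file.write(slice1)
      file.write(slice2)
    end
    # $ bzip2 -dc output.csv.bz2   # prints line1..line4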

data/lib/rocket_job/sliced/compressed_slice.rb CHANGED
@@ -6,13 +6,10 @@ module RocketJob
       private

       def parse_records
-        records = attributes.delete("records")
-
         # Convert BSON::Binary to a string
-        str = Zlib::Inflate.inflate(records.data)
-
-        @records = Hash.from_bson(BSON::ByteBuffer.new(str))["r"]
+        compressed_str   = attributes.delete("records").data
+        decompressed_str = Zlib::Inflate.inflate(compressed_str)
+        @records = Hash.from_bson(BSON::ByteBuffer.new(decompressed_str))["r"]
       end

       def serialize_records
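
The deflate side is not shown in this hunk, but the shape of the round trip that parse_records reverses is straightforward. A sketch (the {"r" => ...} wrapper is taken from the code above; the deflate step is assumed to mirror it):

    require "zlib"
    require "bson"

    records    = [%w[a b c], %w[d e f]]
    compressed = Zlib::Deflate.deflate({"r" => records}.to_bson.to_s)

    # What parse_records does when reading the slice back from Mongo:
    restored = Hash.from_bson(BSON::ByteBuffer.new(Zlib::Inflate.inflate(compressed)))["r"]
    restored == records # => true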

data/lib/rocket_job/sliced/encrypted_bzip2_output_slice.rb ADDED
@@ -0,0 +1,49 @@
+module RocketJob
+  module Sliced
+    # This is a specialized output serializer that renders each output slice as a single BZip2 compressed stream.
+    # BZip2 allows multiple output streams to be written into a single BZip2 file.
+    #
+    # Notes:
+    # * The `bzip2` linux command line utility supports multiple embedded BZip2 stream,
+    #   but some other custom implementations may not. They may only read the first slice and stop.
+    # * It is only designed for use on output collections.
+    class EncryptedBZip2OutputSlice < ::RocketJob::Sliced::Slice
+      # This is a specialized binary slice for creating BZip2 binary data from each slice
+      # that must be downloaded as-is into output files.
+      def self.binary_format
+        :bz2
+      end
+
+      private
+
+      # Returns [Hash] the BZip2 compressed binary data in binary form when reading back from Mongo.
+      def parse_records
+        # Convert BSON::Binary to a string
+        encrypted_str = attributes.delete("records").data
+
+        # Decrypt string
+        header = SymmetricEncryption::Header.new
+        header.parse(encrypted_str)
+        # Use the header that is present to decrypt the data, since its version could be different
+        decrypted_str = header.cipher.binary_decrypt(encrypted_str, header: header)
+
+        @records = [decrypted_str]
+      end
+
+      # Returns [BSON::Binary] the records compressed using BZip2 into a string.
+      def serialize_records
+        return [] if @records.nil? || @records.empty?
+
+        # TODO: Make the line terminator configurable
+        lines = records.to_a.join("\n") + "\n"
+        s = StringIO.new
+        IOStreams::Bzip2::Writer.stream(s) { |io| io.write(lines) }
+
+        # Encrypt to binary without applying an encoding such as Base64
+        # Use a random_iv with each encryption for better security
+        data = SymmetricEncryption.cipher.binary_encrypt(s.string, random_iv: true, compress: false)
+        BSON::Binary.new(data)
+      end
+    end
+  end
+end
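
The pipeline order here matters: records are BZip2-compressed first and only then encrypted, with compress: false on the cipher, because well-encrypted bytes no longer compress. A condensed sketch of that ordering (assumes a configured SymmetricEncryption cipher):

    require "stringio"
    require "iostreams"
    require "symmetric-encryption"

    lines = %w[alpha beta gamma].join("\n") + "\n"

    s = StringIO.new
    IOStreams::Bzip2::Writer.stream(s) { |io| io.write(lines) }

    # Compression is already done, so skip the cipher's own compression.
    SymmetricEncryption.cipher.binary_encrypt(s.string, random_iv: true, compress: false)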

data/lib/rocket_job/sliced/encrypted_slice.rb CHANGED
@@ -6,17 +6,15 @@ module RocketJob
       private

       def parse_records
-        records = attributes.delete("records")
-
         # Convert BSON::Binary to a string
-        encrypted_str = records.data
+        encrypted_str = attributes.delete("records").data

         header = SymmetricEncryption::Header.new
-        header.parse(…)
+        header.parse(encrypted_str)
         # Use the header that is present to decrypt the data, since its version could be different
-        …
+        decrypted_str = header.cipher.binary_decrypt(encrypted_str, header: header)

-        @records = Hash.from_bson(BSON::ByteBuffer.new(…))["r"]
+        @records = Hash.from_bson(BSON::ByteBuffer.new(decrypted_str))["r"]
       end

       def serialize_records
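
Both encrypted slice types lean on the SymmetricEncryption header to stay readable across key rotations: the header written during encryption tells the reader which cipher version and IV to use. A round-trip sketch using only the calls that appear above:

    require "symmetric-encryption"

    ciphertext = SymmetricEncryption.cipher.binary_encrypt("hello", random_iv: true, compress: false)

    header = SymmetricEncryption::Header.new
    header.parse(ciphertext)
    # The embedded header selects the matching cipher version for decryption.
    header.cipher.binary_decrypt(ciphertext, header: header) # => "hello"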

data/lib/rocket_job/sliced/input.rb CHANGED
@@ -1,16 +1,16 @@
 module RocketJob
   module Sliced
     class Input < Slices
-      def upload(…, &block)
+      def upload(**args, &block)
         # Create indexes before uploading
         create_indexes
-        Writer::Input.collect(self, …, &block)
+        Writer::Input.collect(self, **args, &block)
       rescue Exception => e
         drop
         raise(e)
       end

-      def upload_mongo_query(criteria, *column_names, &block)
+      def upload_mongo_query(criteria, columns: [], slice_batch_size: nil, &block)
         options = criteria.options

         # Without a block extract the fields from the supplied criteria
@@ -18,23 +18,21 @@ module RocketJob
           # Criteria is returning old school :fields instead of :projections
           options[:projection] = options.delete(:fields) if options.key?(:fields)
         else
-          …
-          …
-          …
-          fields = options.delete(:fields) || {}
-          column_names.each { |col| fields[col] = 1 }
+          columns = columns.blank? ? ["_id"] : columns.collect(&:to_s)
+          fields  = options.delete(:fields) || {}
+          columns.each { |col| fields[col] = 1 }
           options[:projection] = fields

           block =
-            if column_names.size == 1
-              column = column_names.first
+            if columns.size == 1
+              column = columns.first
               ->(document) { document[column] }
             else
-              ->(document) { … }
+              ->(document) { columns.collect { |c| document[c] } }
             end
         end

-        upload do |records|
+        upload(slice_batch_size: slice_batch_size) do |records|
           # Drop down to the mongo driver level to avoid constructing a Model for each document returned
           criteria.klass.collection.find(criteria.selector, options).each do |document|
             records << block.call(document)
@@ -42,58 +40,48 @@ module RocketJob
         end
       end

-      def upload_arel(arel, *column_names, &block)
+      def upload_arel(arel, columns: nil, slice_batch_size: nil, &block)
         unless block
-          …
+          columns = columns.blank? ? [:id] : columns.collect(&:to_sym)

           block =
-            if column_names.size == 1
-              column = column_names.first
-              ->(model) { model.… }
+            if columns.size == 1
+              column = columns.first
+              ->(model) { model.public_send(column) }
             else
-              ->(model) { … }
+              ->(model) { columns.collect { |c| model.public_send(c) } }
             end
           # find_each requires the :id column in the query
-          selection = …
+          selection = columns.include?(:id) ? columns : columns + [:id]
           arel = arel.select(selection)
         end

-        upload { |records| arel.find_each { |model| records << block.call(model) } }
+        upload(slice_batch_size: slice_batch_size) { |records| arel.find_each { |model| records << block.call(model) } }
       end

-      def upload_integer_range(start_id, last_id)
-        # …
-        …
-        …
-        …
-        …
-        …
-        …
-          count += 1
+      def upload_integer_range(start_id, last_id, slice_batch_size: 1_000)
+        # Each "record" is actually a range of Integers which makes up each slice
+        upload(slice_size: 1, slice_batch_size: slice_batch_size) do |records|
+          while start_id <= last_id
+            end_id = start_id + slice_size - 1
+            end_id = last_id if end_id > last_id
+            records << [start_id, end_id]
+            start_id += slice_size
+          end
         end
-        count
-      rescue Exception => e
-        drop
-        raise(e)
       end

-      def upload_integer_range_in_reverse_order(start_id, last_id)
-        # …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-          count += 1
+      def upload_integer_range_in_reverse_order(start_id, last_id, slice_batch_size: 1_000)
+        # Each "record" is actually a range of Integers which makes up each slice
+        upload(slice_size: 1, slice_batch_size: slice_batch_size) do |records|
+          end_id = last_id
+          while end_id >= start_id
+            first_id = end_id - slice_size + 1
+            first_id = start_id if first_id.negative? || (first_id < start_id)
+            records << [first_id, end_id]
+            end_id -= slice_size
+          end
         end
-        count
-      rescue Exception => e
-        drop
-        raise(e)
       end

       # Iterate over each failed record, if any
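
The two range uploads now build their slices inside a single batched upload instead of issuing one insert per range. The carving logic itself is plain arithmetic; for the reverse-order variant it walks down from last_id (values illustrative, assuming a slice_size of 100):

    slice_size = 100
    start_id, end_id = 1, 250

    slices = []
    while end_id >= start_id
      first_id = end_id - slice_size + 1
      first_id = start_id if first_id < start_id
      slices << [first_id, end_id]
      end_id -= slice_size
    end
    slices # => [[151, 250], [51, 150], [1, 50]]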
@@ -137,11 +125,11 @@ module RocketJob
       # TODO: Will it perform faster without the id sort?
       # I.e. Just process on a FIFO basis?
       document = all.queued.
-        …
-        …
-        …
-        …
-        …
+                 sort("_id" => 1).
+                 find_one_and_update(
+                   {"$set" => {worker_name: worker_name, state: "running", started_at: Time.now}},
+                   return_document: :after
+                 )
       document.collection_name = collection_name if document
       document
     end
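
The rewritten claim is a single findAndModify round trip: selecting the oldest queued slice and flagging it running happen atomically, so two workers can never receive the same slice. The same pattern at the raw driver level (collection is a Mongo::Collection; names and fields illustrative):

    collection.find(state: "queued").
      sort(_id: 1).
      find_one_and_update(
        {"$set" => {state: "running", worker_name: "worker-1", started_at: Time.now}},
        return_document: :after
      )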

data/lib/rocket_job/sliced/slice.rb CHANGED
@@ -95,9 +95,13 @@ module RocketJob
       end

       # Returns whether this is a specialized binary slice for creating binary data from each slice
-      # that is then just downloaded as-is into output files.
-      def self.binary?
-        false
+      # that is downloaded without conversion into output files.
+      def self.binary_format
+      end
+
+      # For binary formats only, format the supplied records into the binary format for this slice
+      def self.to_binary(_records)
+        raise NotImplementedError
       end

       # `records` array has special handling so that it can be modified in place instead of having
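
Custom binary slices appear to follow a two-method contract: binary_format identifies the format (:bz2 above, nil for plain slices) and to_binary renders a batch of records into one self-contained stream. A hypothetical Gzip variant as a sketch; GzipOutputSlice is not part of the gem:

    require "stringio"
    require "zlib"

    class GzipOutputSlice < RocketJob::Sliced::Slice
      def self.binary_format
        :gz
      end

      def self.to_binary(records, record_delimiter = "\n")
        return [] if records.blank?

        lines = Array(records).join(record_delimiter) + record_delimiter
        s = StringIO.new
        Zlib::GzipWriter.wrap(s) { |gz| gz.write(lines) }
        s.string
      end
    end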

data/lib/rocket_job/sliced/slices.rb CHANGED
@@ -42,12 +42,6 @@ module RocketJob
         slice
       end

-      # Returns whether this collection contains specialized binary slices for creating binary data from each slice
-      # that is then just downloaded as-is into output files.
-      def binary?
-        slice_class.binary?
-      end
-
       # Returns output slices in the order of their id
       # which is usually the order in which they were written.
       def each(&block)
@@ -96,6 +90,11 @@ module RocketJob
         slice
       end

+      def insert_many(slices)
+        documents = slices.collect(&:as_document)
+        all.collection.insert_many(documents)
+      end
+
       # Append to an existing slice if already present
       def append(slice, input_slice)
         existing_slice = all.where(id: input_slice.id).first
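
insert_many is what makes the new batched uploads pay off: twenty slices land in one network round trip instead of twenty. The equivalent at the plain Ruby Mongo driver level (client, documents, and collection name illustrative):

    require "mongo"

    client = Mongo::Client.new(["127.0.0.1:27017"], database: "rocketjob_test")
    docs   = 20.times.map { |i| {"records" => [["row #{i}"]], "state" => "queued"} }
    client[:"rocket_job.inputs.demo"].insert_many(docs) # one round trip for 20 slices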
@@ -111,9 +110,13 @@ module RocketJob

       # Index for find_and_modify only if it is not already present
       def create_indexes
-        …
-        …
-        …
+        missing =
+          begin
+            all.collection.indexes.none? { |i| i["name"] == "state_1__id_1" }
+          rescue Mongo::Error::OperationFailure
+            true
+          end
+        all.collection.indexes.create_one({state: 1, _id: 1}, unique: true) if missing
       end

       # Forward additional methods.

data/lib/rocket_job/sliced/writer/input.rb CHANGED
@@ -12,43 +12,71 @@ module RocketJob
       # Block to call on the first line only, instead of storing in the slice.
       # Useful for extracting the header row
       # Default: nil
-      def self.collect(…)
-        writer = new(…)
+      #
+      # slice_size: [Integer]
+      #   Override the slice size when uploading for example ranges, where slice is the size
+      #   of the range itself.
+      #
+      # slice_batch_size: [Integer]
+      #   The number of slices to batch up and to bulk load.
+      #   For smaller slices this significantly improves upload performance.
+      #   Note: If `slice_batch_size` is too high, it can exceed the maximum BSON block size.
+      def self.collect(data_store, **args)
+        writer = new(data_store, **args)
         yield(writer)
         writer.record_count
       ensure
-        writer&.…
+        writer&.flush
       end

-      def initialize(…)
-        @on_first = …
-        @…
-        @…
-        @…
-        @…
-        @…
+      def initialize(data_store, on_first: nil, slice_size: nil, slice_batch_size: nil)
+        @on_first         = on_first
+        @record_count     = 0
+        @data_store       = data_store
+        @slice_size       = slice_size || @data_store.slice_size
+        @slice_batch_size = slice_batch_size || 20
+        @batch            = []
+        @batch_count      = 0
         new_slice
       end

       def <<(line)
-        @record_number += 1
         if @on_first
           @on_first.call(line)
           @on_first = nil
           return self
         end
         @slice << line
-        @batch_count += 1
         @record_count += 1
-        if @…
-          …
-          @slice = @input.new(first_record_number: @record_number)
+        if @slice.size >= @slice_size
+          save_slice
+          new_slice
         end
         self
       end

-      def …
-        …
+      def flush
+        if @slice_batch_size
+          @batch << @slice if @slice.size.positive?
+          @data_store.insert_many(@batch)
+          @batch       = []
+          @batch_count = 0
+        elsif @slice.size.positive?
+          @data_store.insert(@slice)
+        end
+      end
+
+      def new_slice
+        @slice = @data_store.new(first_record_number: @record_count + 1)
+      end
+
+      def save_slice
+        return flush unless @slice_batch_size
+
+        @batch_count += 1
+        return flush if @batch_count >= @slice_batch_size
+
+        @batch << @slice
       end
     end
   end
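
Putting the writer together: an upload that buffers slices and bulk-inserts them looks roughly like this. A hedged sketch; `input` stands for a RocketJob::Sliced::Input data store and the file name is illustrative:

    record_count =
      RocketJob::Sliced::Writer::Input.collect(input, slice_size: 500, slice_batch_size: 20) do |writer|
        File.foreach("data.csv") { |line| writer << line.chomp }
      end
    # Slices were flushed to Mongo in groups of up to 20 via insert_many.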

data/lib/rocket_job/sliced.rb CHANGED
@@ -2,6 +2,7 @@ module RocketJob
   module Sliced
     autoload :BZip2OutputSlice,          "rocket_job/sliced/bzip2_output_slice"
     autoload :CompressedSlice,           "rocket_job/sliced/compressed_slice"
+    autoload :EncryptedBZip2OutputSlice, "rocket_job/sliced/encrypted_bzip2_output_slice"
     autoload :EncryptedSlice,            "rocket_job/sliced/encrypted_slice"
     autoload :Input,                     "rocket_job/sliced/input"
     autoload :Output,                    "rocket_job/sliced/output"
@@ -13,24 +14,5 @@ module RocketJob
       autoload :Input,  "rocket_job/sliced/writer/input"
       autoload :Output, "rocket_job/sliced/writer/output"
     end
-
-    # Returns [RocketJob::Sliced::Slices] for the relevant direction and category.
-    def self.factory(direction, category, job)
-      collection_name = "rocket_job.#{direction}s.#{job.id}"
-      collection_name << ".#{category.name}" unless category.name == :main
-
-      case direction
-      when :input
-        RocketJob::Sliced::Input.new(
-          collection_name: collection_name,
-          slice_class:     category.serializer_class,
-          slice_size:      category.slice_size
-        )
-      when :output
-        RocketJob::Sliced::Output.new(collection_name: collection_name, slice_class: category.serializer_class)
-      else
-        raise(ArgumentError, "Unknown direction: #{direction.inspect}")
-      end
-    end
   end
 end

data/lib/rocket_job/throttle_definitions.rb CHANGED
@@ -1,6 +1,6 @@
 module RocketJob
   class ThrottleDefinitions
-    attr_reader :throttles
+    attr_accessor :throttles

     def initialize
       @throttles = []
@@ -35,5 +35,11 @@ module RocketJob
       end
       nil
     end
+
+    def deep_dup
+      new_defination = dup
+      new_defination.throttles = throttles.map(&:dup)
+      new_defination
+    end
   end
 end
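
The new deep_dup exists because a shallow dup would share the same throttles Array between the copy and the original, so registering a throttle on one would silently appear on the other. A sketch (the appended symbol stands in for a real throttle definition):

    original = RocketJob::ThrottleDefinitions.new
    copy     = original.deep_dup

    copy.throttles << :placeholder_throttle
    original.throttles.size # => 0, the original is unaffected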

data/lib/rocket_job/version.rb CHANGED

data/lib/rocketjob.rb CHANGED
@@ -63,28 +63,27 @@ module RocketJob
     autoload :Cron,             "rocket_job/plugins/cron"
     autoload :Document,         "rocket_job/plugins/document"
     autoload :ProcessingWindow, "rocket_job/plugins/processing_window"
-    autoload :Restart,          "rocket_job/plugins/restart"
     autoload :Retry,            "rocket_job/plugins/retry"
     autoload :Singleton,        "rocket_job/plugins/singleton"
     autoload :StateMachine,     "rocket_job/plugins/state_machine"
     autoload :Transaction,      "rocket_job/plugins/transaction"
+    autoload :ThrottleDependentJobs, "rocket_job/plugins/throttle_dependent_jobs"
   end

   module Jobs
     autoload :ActiveJob,        "rocket_job/jobs/active_job"
+    autoload :ConversionJob,    "rocket_job/jobs/conversion_job"
     autoload :CopyFileJob,      "rocket_job/jobs/copy_file_job"
     autoload :DirmonJob,        "rocket_job/jobs/dirmon_job"
+    autoload :HousekeepingJob,  "rocket_job/jobs/housekeeping_job"
     autoload :OnDemandBatchJob, "rocket_job/jobs/on_demand_batch_job"
     autoload :OnDemandJob,      "rocket_job/jobs/on_demand_job"
-    autoload :HousekeepingJob,  "rocket_job/jobs/housekeeping_job"
     autoload :PerformanceJob,   "rocket_job/jobs/performance_job"
     autoload :SimpleJob,        "rocket_job/jobs/simple_job"
     autoload :UploadFileJob,    "rocket_job/jobs/upload_file_job"

     module ReEncrypt
-      …
-      autoload :RelationalJob, "rocket_job/jobs/re_encrypt/relational_job"
-      end
+      autoload :RelationalJob, "rocket_job/jobs/re_encrypt/relational_job"
     end
   end

metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: rocketjob
 version: !ruby/object:Gem::Version
-  version: 6.0.0.rc1
+  version: 6.0.1
 platform: ruby
 authors:
 - Reid Morrison
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-…
+date: 2021-09-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aasm
@@ -58,14 +58,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '1.…'
+        version: '1.9'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '1.…'
+        version: '1.9'
 - !ruby/object:Gem::Dependency
   name: mongoid
   requirement: !ruby/object:Gem::Requirement
@@ -84,14 +84,14 @@ dependencies:
   name: semantic_logger
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - ">="
       - !ruby/object:Gem::Version
         version: '4.7'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - ">="
       - !ruby/object:Gem::Version
         version: '4.7'
 - !ruby/object:Gem::Dependency
@@ -134,9 +134,6 @@ files:
 - lib/rocket_job/batch/results.rb
 - lib/rocket_job/batch/state_machine.rb
 - lib/rocket_job/batch/statistics.rb
-- lib/rocket_job/batch/tabular.rb
-- lib/rocket_job/batch/tabular/input.rb
-- lib/rocket_job/batch/tabular/output.rb
 - lib/rocket_job/batch/throttle.rb
 - lib/rocket_job/batch/throttle_running_workers.rb
 - lib/rocket_job/batch/throttle_windows.rb
@@ -160,6 +157,7 @@ files:
 - lib/rocket_job/job.rb
 - lib/rocket_job/job_exception.rb
 - lib/rocket_job/jobs/active_job.rb
+- lib/rocket_job/jobs/conversion_job.rb
 - lib/rocket_job/jobs/copy_file_job.rb
 - lib/rocket_job/jobs/dirmon_job.rb
 - lib/rocket_job/jobs/housekeeping_job.rb
@@ -186,6 +184,7 @@ files:
 - lib/rocket_job/plugins/retry.rb
 - lib/rocket_job/plugins/singleton.rb
 - lib/rocket_job/plugins/state_machine.rb
+- lib/rocket_job/plugins/throttle_dependent_jobs.rb
 - lib/rocket_job/plugins/transaction.rb
 - lib/rocket_job/ractor_worker.rb
 - lib/rocket_job/railtie.rb
@@ -196,6 +195,7 @@ files:
 - lib/rocket_job/sliced.rb
 - lib/rocket_job/sliced/bzip2_output_slice.rb
 - lib/rocket_job/sliced/compressed_slice.rb
+- lib/rocket_job/sliced/encrypted_bzip2_output_slice.rb
 - lib/rocket_job/sliced/encrypted_slice.rb
 - lib/rocket_job/sliced/input.rb
 - lib/rocket_job/sliced/output.rb
@@ -231,11 +231,11 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '2.5'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ">"
+  - - ">="
     - !ruby/object:Gem::Version
-      version: 1.3.1
+      version: '0'
 requirements: []
-rubygems_version: 3.2.…
+rubygems_version: 3.2.22
 signing_key:
 specification_version: 4
 summary: Ruby's missing batch processing system.