rocketjob 6.0.0.rc1 → 6.0.0.rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 2794f5dc5e0ada3ffdc3da9a13fd0cb6c5713f89254d93b69d60677283bc2d64
- data.tar.gz: 3a208b181aca760b07432348bc2e51443a9da03cc6a143be81765ca2b3c0e37a
+ metadata.gz: 6568c1307b7d42a0968df0335e3177967a8223838eceecbe0b3c0cab72c398af
+ data.tar.gz: 1109611ffce2fe4aed881f5bd9d017681546f06d62330ffc21873adbef12c179
  SHA512:
- metadata.gz: 44816973f2f63dc300fe41e168ae485cd8b7def5e3bbab173501f6ef2935d3f65707ec4c2f9eb7cb6b43bab2d464b163f3e10216be50babf2dab5e82a7998439
- data.tar.gz: c0b2d210a3bb3faa49f30eeaf687052ed79e9da802635c6434e374c4f1ccc3538a71a4db9391f41e18a6265106aa7fedeaf920edf4e9d11acf81c9bc632534bd
+ metadata.gz: 073adf2196d6d0cfd5c06ad8776374a1a14618b5fbd49a550e9d8df587b0a17fd95b4dad34a60d8812a2c5f4046974e22bcf616512d77442ea28145d9bd374d2
+ data.tar.gz: ee9b6d35149f7d7799071485f2e4032d47303d3452a4b7c70c5422019a66615938dba90e1e27090a4f33b635df6f565871efd3152f5a2f2a5cb03a61168b3755
@@ -82,7 +82,8 @@ module RocketJob
  category = Category::Input.new
  self.input_categories = [category]
  else
- raise(ArgumentError, "Unknown Input Category: #{category_name.inspect}. Registered categories: #{input_categories.collect(&:name).join(',')}")
+ raise(ArgumentError,
+ "Unknown Input Category: #{category_name.inspect}. Registered categories: #{input_categories.collect(&:name).join(',')}")
  end
  end
  category
@@ -94,8 +95,10 @@ module RocketJob
  # .find does not work against this association
  output_categories.each { |catg| category = catg if catg.name == category_name }
  unless category
- raise(ArgumentError, "Unknown Output Category: #{category_name.inspect}. Registered categories: #{output_categories.collect(&:name).join(',')}")
+ raise(ArgumentError,
+ "Unknown Output Category: #{category_name.inspect}. Registered categories: #{output_categories.collect(&:name).join(',')}")
  end
+
  category
  end
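For orientation, a hedged sketch of how these lookups behave from the caller's side; the job class and category names are illustrative and not part of this diff:

    job = MyBatchJob.new
    job.input_category(:main)    # => the registered RocketJob::Category::Input
    job.output_category(:main)   # => the registered RocketJob::Category::Output
    job.output_category(:typo)   # => ArgumentError: Unknown Output Category: :typo. Registered categories: main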
 
@@ -158,13 +158,13 @@ module RocketJob
  raise(ArgumentError, "Either stream, or a block must be supplied") unless stream || block

  category = input_category(category) unless category.is_a?(Category::Input)
- stream ||= category.file_name
+ stream ||= category.file_name
  path = nil

  if stream
- path = IOStreams.new(stream)
- path.file_name = file_name if file_name
- category.file_name = path.file_name
+ path = IOStreams.new(stream)
+ path.file_name = file_name if file_name
+ category.file_name = path.file_name

  # Auto detect the format based on the upload file name if present.
  if category.format == :auto
@@ -421,8 +421,8 @@ module RocketJob
  def download(stream = nil, category: :main, header_line: nil, **args, &block)
  raise "Cannot download incomplete job: #{id}. Currently in state: #{state}-#{sub_state}" if rocket_job_processing?

- category = output_category(category) unless category.is_a?(Category::Output)
- output_collection = output(category)
+ category = output_category(category) unless category.is_a?(Category::Output)
+ output_collection = output(category)

  # Store the output file name in the category
  category.file_name = stream if !block && (stream.is_a?(String) || stream.is_a?(IOStreams::Path))
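A hedged usage sketch of the upload/download pair touched above; the file names are hypothetical, and only the `download` signature appears in this diff:

    job = MyBatchJob.new
    # When no stream is given, upload falls back to the input category's file_name.
    job.upload("import/customers.csv")
    job.save!

    # After the job completes (download raises while it is still processing):
    job.download("export/report.csv")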
@@ -22,7 +22,7 @@ module RocketJob
  count_running_workers

  puts "Loading job with #{count} records/lines"
- job = RocketJob::Jobs::PerformanceJob.new(log_level: :warn)
+ job = RocketJob::Jobs::PerformanceJob.new(log_level: :warn)
  job.input_category.slice_size = slice_size
  if encrypt
  job.input_category.serializer = :encrypt
@@ -64,7 +64,7 @@ module RocketJob

  # Parse command line options
  def parse(argv)
- parser = OptionParser.new do |o|
+ parser = OptionParser.new do |o|
  o.on("-c", "--count COUNT", "Count of records to enqueue") do |arg|
  self.count = arg.to_i
  end
@@ -49,7 +49,7 @@ module RocketJob
  last = paths.pop
  return unless last

- last_target = paths.inject(in_memory) do |target, sub_key|
+ last_target = paths.inject(in_memory) do |target, sub_key|
  target.key?(sub_key) ? target[sub_key] : target[sub_key] = Hash.new(0)
  end
  last_target[last] += increment
@@ -99,7 +99,7 @@ module RocketJob

  # Overrides RocketJob::Batch::Logger#rocket_job_batch_log_payload
  def rocket_job_batch_log_payload
- h = {
+ h = {
  from: aasm.from_state,
  to: aasm.to_state,
  event: aasm.current_event
@@ -53,7 +53,7 @@ module RocketJob
  # Allows another job with a higher priority to start even though this one is running already
  # @overrides RocketJob::Plugins::Job::ThrottleRunningJobs#throttle_running_jobs_base_query
  def throttle_running_jobs_base_query
- query = super
+ query = super
  query[:priority.lte] = priority if throttle_running_workers&.positive?
  query
  end
@@ -96,7 +96,7 @@ module RocketJob
  case sub_state
  when :before, :after
  if running? && (server_name.nil? || worker_on_server?(server_name))
- servers << ActiveWorker.new(worker_name, started_at, self) if running?
+ servers << ActiveWorker.new(worker_name, started_at, self)
  end
  when :processing
  query = input.running
@@ -246,7 +246,7 @@ module RocketJob
  unless new_record?
  # Fail job iff no other worker has already finished it
  # Must set write concern to at least 1 since we need the nModified back
- result = self.class.with(write: {w: 1}) do |query|
+ result = self.class.with(write: {w: 1}) do |query|
  query.
  where(id: id, state: :running, sub_state: :processing).
  update({"$set" => {state: :failed, worker_name: worker_name}})
@@ -11,7 +11,7 @@ module RocketJob

  # Whether to compress, encrypt, or use the bzip2 serialization for data in this category.
  field :serializer, type: ::Mongoid::StringifiedSymbol, default: :compress
- validates_inclusion_of :serializer, in: [:none, :compress, :encrypt, :bzip2]
+ validates_inclusion_of :serializer, in: %i[none compress encrypt bzip2]

  # The header columns when the file does not include a header row.
  # Note:
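The serializer is set per input category, as the performance-job hunk earlier in this diff shows; a minimal hedged sketch:

    job = RocketJob::Jobs::PerformanceJob.new(log_level: :warn)
    job.input_category.slice_size = 1_000
    # Must be one of the values validated above: :none, :compress, :encrypt, :bzip2
    job.input_category.serializer = :encrypt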
@@ -233,7 +233,7 @@ module RocketJob

  # Parse command line options placing results in the corresponding instance variables
  def parse(argv)
- parser = OptionParser.new do |o|
+ parser = OptionParser.new do |o|
  o.on("-n", "--name NAME", "Unique Name of this server (Default: host_name:PID)") do |arg|
  Config.name = arg
  end
@@ -4,8 +4,8 @@ module Mongoid
  class Mongo
  def initialize(criteria)
  @criteria = criteria
- @klass = criteria.klass
- @cache = criteria.options[:cache]
+ @klass = criteria.klass
+ @cache = criteria.options[:cache]
  # Only line changed is here, get collection name from criteria, not @klass
  # @collection = @klass.collection
  @collection = criteria.collection
@@ -55,13 +55,13 @@ module ActiveJob
  # - Completed jobs will not appear in completed since the Active Job adapter
  # uses the default Rocket Job `destroy_on_completion` of `false`.
  class RocketJobAdapter
- def self.enqueue(active_job) #:nodoc:
+ def self.enqueue(active_job)
  job = RocketJob::Jobs::ActiveJob.create!(active_job_params(active_job))
  active_job.provider_job_id = job.id.to_s if active_job.respond_to?(:provider_job_id=)
  job
  end

- def self.enqueue_at(active_job, timestamp) #:nodoc:
+ def self.enqueue_at(active_job, timestamp)
  params = active_job_params(active_job)
  params[:run_at] = Time.at(timestamp).utc
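For reference, a hedged sketch of using this adapter from Rails; it assumes Active Job resolves the adapter under the conventional `:rocket_job` name, and the mailer job class is illustrative:

    # config/application.rb
    config.active_job.queue_adapter = :rocket_job

    # A plain Active Job class is then enqueued through RocketJob::Jobs::ActiveJob:
    WelcomeMailerJob.perform_later(42)                   # -> RocketJobAdapter.enqueue
    WelcomeMailerJob.set(wait: 1.hour).perform_later(42) # -> RocketJobAdapter.enqueue_at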
 
@@ -82,7 +82,7 @@ module RocketJob
  key = iopath.to_s.tr(".", "_")
  previous_size = previous_file_names[key]
  # Check every few minutes for a file size change before trying to process the file.
- size = check_file(entry, iopath, previous_size)
+ size = check_file(entry, iopath, previous_size)
  new_file_names[key] = size if size
  end
  end
@@ -1,6 +1,3 @@
- require "active_record"
- require "sync_attr"
-
  # Batch Worker to Re-encrypt all encrypted fields in MySQL that start with `encrypted_`.
  #
  # Run in Rails console:
@@ -11,117 +8,126 @@ require "sync_attr"
  # * This job will find any column in the database that starts with `encrypted_`.
  # * This means that temporary or other tables not part of the application tables will also be processed.
  # * Since it automatically finds and re-encrypts any column, new columns are handled without any manual intervention.
- module RocketJob
- module Jobs
- module ReEncrypt
- class RelationalJob < RocketJob::Job
- include RocketJob::Batch
-
- self.priority = 30
- self.destroy_on_complete = false
- self.throttle_running_jobs = 1
- self.throttle_running_workers = 10
-
- input_category slice_size: 1_000
-
- # Name of the table being re-encrypted
- field :table_name, type: String
-
- # Limit the number of records to re-encrypt in test environments
- field :limit, type: Integer
-
- validates_presence_of :table_name
- before_batch :upload_records
-
- # Returns [Hash] of table names with each entry being an array
- # of columns that start with encrypted_
- sync_cattr_reader :encrypted_columns do
- h = {}
- connection.tables.each do |table|
- columns = connection.columns(table)
- columns.each do |column|
- if column.name.start_with?("encrypted_")
- add_column = column.name
- (h[table] ||= []) << add_column if add_column
+ if defined?(ActiveRecord) && defined?(SyncAttr)
+ require "active_record"
+ require "sync_attr"
+
+ module RocketJob
+ module Jobs
+ module ReEncrypt
+ class RelationalJob < RocketJob::Job
+ include RocketJob::Batch
+
+ self.priority = 30
+ self.destroy_on_complete = false
+ self.throttle_running_jobs = 1
+ self.throttle_running_workers = 10
+
+ input_category slice_size: 1_000
+
+ # Name of the table being re-encrypted
+ field :table_name, type: String
+
+ # Limit the number of records to re-encrypt in test environments
+ field :limit, type: Integer
+
+ validates_presence_of :table_name
+ before_batch :upload_records
+
+ # Returns [Hash] of table names with each entry being an array
+ # of columns that start with encrypted_
+ sync_cattr_reader :encrypted_columns do
+ h = {}
+ connection.tables.each do |table|
+ columns = connection.columns(table)
+ columns.each do |column|
+ if column.name.start_with?("encrypted_")
+ add_column = column.name
+ (h[table] ||= []) << add_column if add_column
+ end
  end
  end
+ h
  end
- h
- end

- # Re-encrypt all `encrypted_` columns in the relational database.
- # Queues a Job for each table that needs re-encryption.
- def self.start(**args)
- encrypted_columns.keys.collect do |table|
- create!(table_name: table, description: table, **args)
+ # Re-encrypt all `encrypted_` columns in the relational database.
+ # Queues a Job for each table that needs re-encryption.
+ def self.start(**args)
+ encrypted_columns.keys.collect do |table|
+ create!(table_name: table, description: table, **args)
+ end
  end
- end

- # Re-encrypt all encrypted columns for the named table.
- # Does not use AR models since we do not have models for all tables.
- def perform(range)
- start_id, end_id = range
+ # Re-encrypt all encrypted columns for the named table.
+ # Does not use AR models since we do not have models for all tables.
+ def perform(range)
+ start_id, end_id = range

- columns = self.class.encrypted_columns[table_name]
- unless columns&.size&.positive?
- logger.error "No columns for table: #{table_name} from #{start_id} to #{end_id}"
- return
- end
+ columns = self.class.encrypted_columns[table_name]
+ unless columns&.size&.positive?
+ logger.error "No columns for table: #{table_name} from #{start_id} to #{end_id}"
+ return
+ end

- logger.info "Processing: #{table_name} from #{start_id} to #{end_id}"
- sql = "select id, #{columns.join(',')} from #{quoted_table_name} where id >= #{start_id} and id <= #{end_id}"
-
- # Use AR to fetch all the records
- self.class.connection.select_rows(sql).each do |row|
- row.unshift(nil)
- index = 1
- sql = "update #{quoted_table_name} set "
- updates = []
- columns.collect do |column|
- index += 1
- value = row[index]
- # Prevent re-encryption
- unless value.blank?
- new_value = re_encrypt(value)
- updates << "#{column} = \"#{new_value}\"" if new_value != value
+ logger.info "Processing: #{table_name} from #{start_id} to #{end_id}"
+ sql = "select id, #{columns.join(',')} from #{quoted_table_name} where id >= #{start_id} and id <= #{end_id}"
+
+ # Use AR to fetch all the records
+ self.class.connection.select_rows(sql).each do |row|
+ row.unshift(nil)
+ index = 1
+ sql = "update #{quoted_table_name} set "
+ updates = []
+ columns.collect do |column|
+ index += 1
+ value = row[index]
+ # Prevent re-encryption
+ unless value.blank?
+ new_value = re_encrypt(value)
+ updates << "#{column} = \"#{new_value}\"" if new_value != value
+ end
+ end
+ if updates.size.positive?
+ sql << updates.join(", ")
+ sql << " where id=#{row[1]}"
+ logger.trace sql
+ self.class.connection.execute sql
+ else
+ logger.trace { "Skipping empty values #{table_name}:#{row[1]}" }
  end
- end
- if updates.size.positive?
- sql << updates.join(", ")
- sql << " where id=#{row[1]}"
- logger.trace sql
- self.class.connection.execute sql
- else
- logger.trace { "Skipping empty values #{table_name}:#{row[1]}" }
  end
  end
- end

- # Returns a database connection.
- #
- # Override this method to support other ways of obtaining a thread specific database connection.
- def self.connection
- ActiveRecord::Base.connection
- end
+ # Returns a database connection.
+ #
+ # Override this method to support other ways of obtaining a thread specific database connection.
+ def self.connection
+ ActiveRecord::Base.connection
+ end

- private
+ private

- def quoted_table_name
- @quoted_table_name ||= self.class.connection.quote_table_name(table_name)
- end
+ def quoted_table_name
+ @quoted_table_name ||= self.class.connection.quote_table_name(table_name)
+ end

- def re_encrypt(encrypted_value)
- return encrypted_value if (encrypted_value == "") || encrypted_value.nil?
+ def re_encrypt(encrypted_value)
+ return encrypted_value if (encrypted_value == "") || encrypted_value.nil?

- SymmetricEncryption.encrypt(SymmetricEncryption.decrypt(encrypted_value))
- end
+ SymmetricEncryption.encrypt(SymmetricEncryption.decrypt(encrypted_value))
+ end

- # Upload range to re-encrypt all rows in the specified table.
- def upload_records
- start_id = self.class.connection.select_value("select min(id) from #{quoted_table_name}").to_i
- last_id = self.class.connection.select_value("select max(id) from #{quoted_table_name}").to_i
- self.record_count =
- last_id.positive? ? (input.upload_integer_range_in_reverse_order(start_id, last_id) * input_category.slice_size) : 0
+ # Upload range to re-encrypt all rows in the specified table.
+ def upload_records
+ start_id = self.class.connection.select_value("select min(id) from #{quoted_table_name}").to_i
+ last_id = self.class.connection.select_value("select max(id) from #{quoted_table_name}").to_i
+ self.record_count =
+ if last_id.positive?
+ input.upload_integer_range_in_reverse_order(start_id, last_id) * input_category.slice_size
+ else
+ 0
+ end
+ end
  end
  end
  end
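As the header comment above indicates, the job is kicked off from a Rails console; a hedged sketch that assumes ActiveRecord, SyncAttr and SymmetricEncryption are loaded so the class is defined:

    # Queue one RelationalJob per table containing encrypted_* columns:
    RocketJob::Jobs::ReEncrypt::RelationalJob.start

    # Or re-encrypt a single table:
    RocketJob::Jobs::ReEncrypt::RelationalJob.create!(table_name: "users", description: "users")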
@@ -96,11 +96,10 @@ module RocketJob
  def file_exists
  # Only check for file existence when it is a local file
  return unless upload_file_name.is_a?(IOStreams::Paths::File)
- if upload_file_name.to_s == ""
- return errors.add(:upload_file_name, "Upload file name can't be blank.")
- end
+ return errors.add(:upload_file_name, "Upload file name can't be blank.") if upload_file_name.to_s == ""

  return if upload_file_name.exist?
+
  errors.add(:upload_file_name, "Upload file: #{upload_file_name} does not exist.")
  rescue NotImplementedError
  nil
@@ -22,8 +22,6 @@ module RocketJob
  find(id: id).first
  end

- private
-
  # Internal class for uploading records in batches
  class BatchUploader
  attr_reader :record_count
@@ -46,7 +44,10 @@ module RocketJob

  def <<(record)
  raise(ArgumentError, "Record must be a Hash") unless record.is_a?(Hash)
- raise(ArgumentError, "Record must include an :id key") unless record.key?(:id) || record.key?("id") || record.key?("_id")
+
+ unless record.key?(:id) || record.key?("id") || record.key?("_id")
+ raise(ArgumentError, "Record must include an :id key")
+ end

  @documents << record
  @record_count += 1
@@ -48,7 +48,7 @@ module RocketJob
  # Note: Throttles are executed in the order they are defined.
  def define_throttle(method_name, filter: :throttle_filter_class)
  # Duplicate to prevent modifying parent class throttles
- definitions = rocket_job_throttles ? rocket_job_throttles.dup : ThrottleDefinitions.new
+ definitions = rocket_job_throttles ? rocket_job_throttles.deep_dup : ThrottleDefinitions.new
  definitions.add(method_name, filter)
  self.rocket_job_throttles = definitions
  end
@@ -57,7 +57,7 @@ module RocketJob
  def undefine_throttle(method_name)
  return unless rocket_job_throttles

- definitions = rocket_job_throttles.dup
+ definitions = rocket_job_throttles.deep_dup
  definitions.remove(method_name)
  self.rocket_job_throttles = definitions
  end
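A hedged sketch of what the switch to `deep_dup` guards against: with a shallow `dup` the parent and subclass shared the same underlying throttles array, so defining a throttle on a subclass could also mutate the parent's definitions. The class and method names below are illustrative:

    class BaseJob < RocketJob::Job
      define_throttle :base_throttle_exceeded?
    end

    class ReportJob < BaseJob
      # Extends only ReportJob's copy of the definitions;
      # BaseJob.rocket_job_throttles is left untouched.
      define_throttle :report_throttle_exceeded?
    end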
@@ -91,9 +91,9 @@ module RocketJob
  logger.info("Job has expired. Not creating a new instance.")
  return
  end
- job_attrs =
+ job_attrs =
  rocket_job_restart_attributes.each_with_object({}) { |attr, attrs| attrs[attr] = send(attr) }
- job = self.class.new(job_attrs)
+ job = self.class.new(job_attrs)

  # Copy across input and output categories to new scheduled job so that all of the
  # settings are remembered between instances. Example: slice_size
@@ -0,0 +1,38 @@
+ require "active_support/concern"
+ module RocketJob
+ module Plugins
+ # Prevent this job from starting, or a batch slice from starting, if any of the dependent jobs are running.
+ #
+ # Features:
+ # - Ensures dependent jobs won't run
+ # When the throttle has been exceeded, all jobs of this class will be ignored until the
+ # next refresh, per `RocketJob::Config::re_check_seconds`, which defaults to 60 seconds.
+ module ThrottleDependentJobs
+ extend ActiveSupport::Concern
+
+ included do
+ class_attribute :dependent_jobs
+ self.dependent_jobs = nil
+
+ define_throttle :dependent_job_exists?
+ define_batch_throttle :dependent_job_exists? if respond_to?(:define_batch_throttle)
+ end
+
+ private
+
+ # Checks whether any dependent jobs are running
+ def dependent_job_exists?
+ return false if dependent_jobs.blank?
+
+ jobs_count = RocketJob::Job.running.where(:_type.in => dependent_jobs).count
+ return false if jobs_count.zero?
+
+ logger.info(
+ message: "#{jobs_count} Dependent Jobs are running from #{dependent_jobs.join(', ')}",
+ metric: "#{self.class.name}/dependent_jobs_throttle"
+ )
+ true
+ end
+ end
+ end
+ end
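A hedged usage sketch of the new plugin above; the job class names are illustrative, and `dependent_jobs` holds the class names of the jobs to wait for:

    class NightlyReportJob < RocketJob::Job
      include RocketJob::Plugins::ThrottleDependentJobs

      # Hold this job (and, for batch jobs, its slices) while either import is running.
      self.dependent_jobs = ["ImportCustomersJob", "ImportOrdersJob"]

      def perform
        # ...
      end
    end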
@@ -34,7 +34,7 @@ module RocketJob
  return [] if @records.nil? || @records.empty?

  lines = records.to_a.join("\n") + "\n"
- s = StringIO.new
+ s = StringIO.new
  IOStreams::Bzip2::Writer.stream(s) { |io| io.write(lines) }
  BSON::Binary.new(s.string)
  end
@@ -65,7 +65,6 @@ module RocketJob
  (categorized_records[named_category] ||= []) << value unless value.nil? && !job.output_category(named_category).nils
  end
  end
-
  end
  end
  end
@@ -1,6 +1,6 @@
  module RocketJob
  class ThrottleDefinitions
- attr_reader :throttles
+ attr_accessor :throttles

  def initialize
  @throttles = []
@@ -35,5 +35,11 @@ module RocketJob
  end
  nil
  end
+
+ def deep_dup
+ new_defination = dup
+ new_defination.throttles = throttles.map(&:dup)
+ new_defination
+ end
  end
  end
@@ -1,3 +1,3 @@
  module RocketJob
- VERSION = "6.0.0.rc1".freeze
+ VERSION = "6.0.0.rc2".freeze
  end
data/lib/rocketjob.rb CHANGED
@@ -68,6 +68,7 @@ module RocketJob
  autoload :Singleton, "rocket_job/plugins/singleton"
  autoload :StateMachine, "rocket_job/plugins/state_machine"
  autoload :Transaction, "rocket_job/plugins/transaction"
+ autoload :ThrottleDependentJobs, "rocket_job/plugins/throttle_dependent_jobs"
  end

  module Jobs
@@ -82,9 +83,7 @@ module RocketJob
  autoload :UploadFileJob, "rocket_job/jobs/upload_file_job"

  module ReEncrypt
- if defined?(ActiveRecord) && defined?(SyncAttr)
- autoload :RelationalJob, "rocket_job/jobs/re_encrypt/relational_job"
- end
+ autoload :RelationalJob, "rocket_job/jobs/re_encrypt/relational_job"
  end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: rocketjob
  version: !ruby/object:Gem::Version
- version: 6.0.0.rc1
+ version: 6.0.0.rc2
  platform: ruby
  authors:
  - Reid Morrison
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-06-03 00:00:00.000000000 Z
+ date: 2021-06-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aasm
@@ -186,6 +186,7 @@ files:
  - lib/rocket_job/plugins/retry.rb
  - lib/rocket_job/plugins/singleton.rb
  - lib/rocket_job/plugins/state_machine.rb
+ - lib/rocket_job/plugins/throttle_dependent_jobs.rb
  - lib/rocket_job/plugins/transaction.rb
  - lib/rocket_job/ractor_worker.rb
  - lib/rocket_job/railtie.rb