rocketjob 5.4.0.beta2 → 6.0.0.rc3

Files changed (61)
  1. checksums.yaml +4 -4
  2. data/README.md +149 -5
  3. data/bin/rocketjob_batch_perf +1 -1
  4. data/bin/rocketjob_perf +1 -1
  5. data/lib/rocket_job/batch.rb +3 -1
  6. data/lib/rocket_job/batch/categories.rb +341 -0
  7. data/lib/rocket_job/batch/io.rb +128 -60
  8. data/lib/rocket_job/batch/model.rb +20 -68
  9. data/lib/rocket_job/batch/performance.rb +19 -7
  10. data/lib/rocket_job/batch/statistics.rb +34 -12
  11. data/lib/rocket_job/batch/tabular.rb +2 -0
  12. data/lib/rocket_job/batch/tabular/input.rb +8 -6
  13. data/lib/rocket_job/batch/tabular/output.rb +4 -2
  14. data/lib/rocket_job/batch/throttle_running_workers.rb +8 -17
  15. data/lib/rocket_job/batch/worker.rb +27 -24
  16. data/lib/rocket_job/category/base.rb +78 -0
  17. data/lib/rocket_job/category/input.rb +110 -0
  18. data/lib/rocket_job/category/output.rb +25 -0
  19. data/lib/rocket_job/cli.rb +25 -17
  20. data/lib/rocket_job/dirmon_entry.rb +22 -12
  21. data/lib/rocket_job/event.rb +1 -1
  22. data/lib/rocket_job/extensions/iostreams/path.rb +32 -0
  23. data/lib/rocket_job/extensions/mongoid/contextual/mongo.rb +2 -2
  24. data/lib/rocket_job/extensions/mongoid/factory.rb +4 -12
  25. data/lib/rocket_job/extensions/mongoid/stringified_symbol.rb +50 -0
  26. data/lib/rocket_job/extensions/psych/yaml_tree.rb +8 -0
  27. data/lib/rocket_job/extensions/rocket_job_adapter.rb +2 -2
  28. data/lib/rocket_job/jobs/conversion_job.rb +39 -0
  29. data/lib/rocket_job/jobs/dirmon_job.rb +2 -2
  30. data/lib/rocket_job/jobs/housekeeping_job.rb +7 -7
  31. data/lib/rocket_job/jobs/on_demand_batch_job.rb +17 -6
  32. data/lib/rocket_job/jobs/on_demand_job.rb +1 -2
  33. data/lib/rocket_job/jobs/performance_job.rb +3 -1
  34. data/lib/rocket_job/jobs/re_encrypt/relational_job.rb +103 -96
  35. data/lib/rocket_job/jobs/upload_file_job.rb +44 -8
  36. data/lib/rocket_job/lookup_collection.rb +69 -0
  37. data/lib/rocket_job/plugins/job/model.rb +25 -50
  38. data/lib/rocket_job/plugins/job/throttle.rb +2 -2
  39. data/lib/rocket_job/plugins/job/throttle_running_jobs.rb +12 -4
  40. data/lib/rocket_job/plugins/job/worker.rb +2 -7
  41. data/lib/rocket_job/plugins/restart.rb +12 -5
  42. data/lib/rocket_job/plugins/state_machine.rb +2 -1
  43. data/lib/rocket_job/plugins/throttle_dependent_jobs.rb +38 -0
  44. data/lib/rocket_job/ractor_worker.rb +42 -0
  45. data/lib/rocket_job/server/model.rb +1 -1
  46. data/lib/rocket_job/sliced.rb +15 -70
  47. data/lib/rocket_job/sliced/bzip2_output_slice.rb +1 -1
  48. data/lib/rocket_job/sliced/input.rb +1 -1
  49. data/lib/rocket_job/sliced/slice.rb +5 -13
  50. data/lib/rocket_job/sliced/slices.rb +14 -2
  51. data/lib/rocket_job/sliced/writer/output.rb +33 -45
  52. data/lib/rocket_job/subscribers/server.rb +1 -1
  53. data/lib/rocket_job/thread_worker.rb +46 -0
  54. data/lib/rocket_job/throttle_definitions.rb +7 -1
  55. data/lib/rocket_job/version.rb +1 -1
  56. data/lib/rocket_job/worker.rb +21 -55
  57. data/lib/rocket_job/worker_pool.rb +5 -7
  58. data/lib/rocketjob.rb +53 -43
  59. metadata +36 -26
  60. data/lib/rocket_job/extensions/mongoid/remove_warnings.rb +0 -12
  61. data/lib/rocket_job/jobs/on_demand_batch_tabular_job.rb +0 -28
@@ -38,12 +38,11 @@
  #
  # Example: Retain output:
  #   code = <<~CODE
-  #     {'value' => data['a'] * data['b']}
+  #     data['result'] = data['a'] * data['b']
  #   CODE
  #
  #   RocketJob::Jobs::OnDemandJob.create!(
  #     code: code,
-  #     collect_output: true,
  #     data: {'a' => 10, 'b' => 2}
  #   )
  #
@@ -6,9 +6,11 @@ module RocketJob
      # Define the job's default attributes
      self.description = "Performance Test"
      self.priority = 5
-     self.slice_size = 100
      self.destroy_on_complete = false

+     input_category slice_size: 100
+     output_category
+
      # No operation, just return the supplied line (record)
      def perform(line)
        line
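The hunk above illustrates the broader v6 change: job-level settings such as `self.slice_size` and `self.collect_output` move onto input/output categories. A minimal sketch of a batch job declared in the v6 style; the job class, field value, and perform body are illustrative, only the `input_category` / `output_category` calls come from the diffs in this gem:

  # Hypothetical batch job showing the v6 category DSL used above.
  class ExampleBatchJob < RocketJob::Job
    include RocketJob::Batch

    # v5: self.slice_size = 100 — in v6 the slice size belongs to the input category.
    input_category slice_size: 100

    # v5: self.collect_output = true — in v6 declaring an output category collects output.
    output_category

    def perform(record)
      record.to_s.upcase # illustrative transformation only
    end
  end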
@@ -1,6 +1,3 @@
- require "active_record"
- require "sync_attr"
-
  # Batch Worker to Re-encrypt all encrypted fields in MySQL that start with `encrytped_`.
  #
  # Run in Rails console:
@@ -11,116 +8,126 @@ require "sync_attr"
  # * This job will find any column in the database that starts with`encrypted_`.
  # * This means that temporary or other tables not part of the application tables will also be processed.
  # * Since it automatically finds and re-encrypts any column, new columns are handled without any manual intervention.
- module RocketJob
-   module Jobs
-     module ReEncrypt
-       class RelationalJob < RocketJob::Job
-         include RocketJob::Batch
-
-         self.slice_size = 1000
-         self.priority = 30
-         self.destroy_on_complete = false
-         self.compress = true
-         self.throttle_running_jobs = 1
-         self.throttle_running_workers = 10
-
-         # Name of the table being re-encrypted
-         field :table_name, type: String
-
-         # Limit the number of records to re-encrypt in test environments
-         field :limit, type: Integer
-
-         validates_presence_of :table_name
-         before_batch :upload_records
-
-         # Returns [Hash] of table names with each entry being an array
-         # of columns that start with encrypted_
-         sync_cattr_reader :encrypted_columns do
-           h = {}
-           connection.tables.each do |table|
-             columns = connection.columns(table)
-             columns.each do |column|
-               if column.name.start_with?("encrypted_")
-                 add_column = column.name
-                 (h[table] ||= []) << add_column if add_column
+ if defined?(ActiveRecord) && defined?(SyncAttr)
+   require "active_record"
+   require "sync_attr"
+
+   module RocketJob
+     module Jobs
+       module ReEncrypt
+         class RelationalJob < RocketJob::Job
+           include RocketJob::Batch
+
+           self.priority = 30
+           self.destroy_on_complete = false
+           self.throttle_running_jobs = 1
+           self.throttle_running_workers = 10
+
+           input_category slice_size: 1_000
+
+           # Name of the table being re-encrypted
+           field :table_name, type: String
+
+           # Limit the number of records to re-encrypt in test environments
+           field :limit, type: Integer
+
+           validates_presence_of :table_name
+           before_batch :upload_records
+
+           # Returns [Hash] of table names with each entry being an array
+           # of columns that start with encrypted_
+           sync_cattr_reader :encrypted_columns do
+             h = {}
+             connection.tables.each do |table|
+               columns = connection.columns(table)
+               columns.each do |column|
+                 if column.name.start_with?("encrypted_")
+                   add_column = column.name
+                   (h[table] ||= []) << add_column if add_column
+                 end
                end
             end
+             h
           end
-           h
-         end

-         # Re-encrypt all `encrypted_` columns in the relational database.
-         # Queues a Job for each table that needs re-encryption.
-         def self.start(**args)
-           encrypted_columns.keys.collect do |table|
-             create!(table_name: table, description: table, **args)
+           # Re-encrypt all `encrypted_` columns in the relational database.
+           # Queues a Job for each table that needs re-encryption.
+           def self.start(**args)
+             encrypted_columns.keys.collect do |table|
+               create!(table_name: table, description: table, **args)
+             end
           end
-         end

-         # Re-encrypt all encrypted columns for the named table.
-         # Does not use AR models since we do not have models for all tables.
-         def perform(range)
-           start_id, end_id = range
+           # Re-encrypt all encrypted columns for the named table.
+           # Does not use AR models since we do not have models for all tables.
+           def perform(range)
+             start_id, end_id = range

-           columns = self.class.encrypted_columns[table_name]
-           unless columns&.size&.positive?
-             logger.error "No columns for table: #{table_name} from #{start_id} to #{end_id}"
-             return
-           end
+             columns = self.class.encrypted_columns[table_name]
+             unless columns&.size&.positive?
+               logger.error "No columns for table: #{table_name} from #{start_id} to #{end_id}"
+               return
+             end

-           logger.info "Processing: #{table_name} from #{start_id} to #{end_id}"
-           sql = "select id, #{columns.join(',')} from #{quoted_table_name} where id >= #{start_id} and id <= #{end_id}"
-
-           # Use AR to fetch all the records
-           self.class.connection.select_rows(sql).each do |row|
-             row = row.unshift(nil)
-             index = 1
-             sql = "update #{quoted_table_name} set "
-             updates = []
-             columns.collect do |column|
-               index += 1
-               value = row[index]
-               # Prevent re-encryption
-               unless value.blank?
-                 new_value = re_encrypt(value)
-                 updates << "#{column} = \"#{new_value}\"" if new_value != value
+             logger.info "Processing: #{table_name} from #{start_id} to #{end_id}"
+             sql = "select id, #{columns.join(',')} from #{quoted_table_name} where id >= #{start_id} and id <= #{end_id}"
+
+             # Use AR to fetch all the records
+             self.class.connection.select_rows(sql).each do |row|
+               row.unshift(nil)
+               index = 1
+               sql = "update #{quoted_table_name} set "
+               updates = []
+               columns.collect do |column|
+                 index += 1
+                 value = row[index]
+                 # Prevent re-encryption
+                 unless value.blank?
+                   new_value = re_encrypt(value)
+                   updates << "#{column} = \"#{new_value}\"" if new_value != value
+                 end
+               end
+               if updates.size.positive?
+                 sql << updates.join(", ")
+                 sql << " where id=#{row[1]}"
+                 logger.trace sql
+                 self.class.connection.execute sql
+               else
+                 logger.trace { "Skipping empty values #{table_name}:#{row[1]}" }
                end
-             end
-             if updates.size.positive?
-               sql << updates.join(", ")
-               sql << " where id=#{row[1]}"
-               logger.trace sql
-               self.class.connection.execute sql
-             else
-               logger.trace { "Skipping empty values #{table_name}:#{row[1]}" }
             end
           end
-         end

-         # Returns a database connection.
-         #
-         # Override this method to support other ways of obtaining a thread specific database connection.
-         def self.connection
-           ActiveRecord::Base.connection
-         end
+           # Returns a database connection.
+           #
+           # Override this method to support other ways of obtaining a thread specific database connection.
+           def self.connection
+             ActiveRecord::Base.connection
+           end

-         private
+           private

-         def quoted_table_name
-           @quoted_table_name ||= self.class.connection.quote_table_name(table_name)
-         end
+           def quoted_table_name
+             @quoted_table_name ||= self.class.connection.quote_table_name(table_name)
+           end

-         def re_encrypt(encrypted_value)
-           return encrypted_value if (encrypted_value == "") || encrypted_value.nil?
+           def re_encrypt(encrypted_value)
+             return encrypted_value if (encrypted_value == "") || encrypted_value.nil?

-           SymmetricEncryption.encrypt(SymmetricEncryption.decrypt(encrypted_value))
-         end
+             SymmetricEncryption.encrypt(SymmetricEncryption.decrypt(encrypted_value))
+           end

-         # Upload range to re-encrypt all rows in the specified table.
-         def upload_records
-           start_id = self.class.connection.select_value("select min(id) from #{quoted_table_name}").to_i
-           last_id = self.class.connection.select_value("select max(id) from #{quoted_table_name}").to_i
-           self.record_count = last_id.positive? ? (input.upload_integer_range_in_reverse_order(start_id, last_id) * slice_size) : 0
+           # Upload range to re-encrypt all rows in the specified table.
+           def upload_records
+             start_id = self.class.connection.select_value("select min(id) from #{quoted_table_name}").to_i
+             last_id = self.class.connection.select_value("select max(id) from #{quoted_table_name}").to_i
+             self.record_count =
+               if last_id.positive?
+                 input.upload_integer_range_in_reverse_order(start_id, last_id) * input_category.slice_size
+               else
+                 0
+               end
+           end
         end
       end
     end
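As the class comment notes, this job is intended to be started from a Rails console. A hedged usage sketch based on the `self.start` method shown above; the `limit` value is illustrative:

  # Queues one RelationalJob per table that has encrypted_ columns.
  RocketJob::Jobs::ReEncrypt::RelationalJob.start

  # Keyword arguments are passed through to create!, e.g. the test-environment limit field:
  RocketJob::Jobs::ReEncrypt::RelationalJob.start(limit: 1_000)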
@@ -19,7 +19,7 @@ module RocketJob
      field :properties, type: Hash, default: {}, user_editable: true

      # File to upload
-     field :upload_file_name, type: String, user_editable: true
+     field :upload_file_name, type: IOStreams::Path, user_editable: true

      # The original Input file name.
      # Used by #upload to extract the IOStreams when present.
@@ -33,10 +33,11 @@ module RocketJob
      validate :job_is_a_rocket_job
      validate :job_implements_upload
      validate :file_exists
+     validate :job_has_properties

      # Create the job and upload the file into it.
      def perform
-       job = job_class.new(properties)
+       job = job_class.from_properties(properties)
        job.id = job_id if job_id
        upload_file(job)
        job.save!
@@ -66,7 +67,10 @@ module RocketJob
        elsif job.respond_to?(:full_file_name=)
          job.full_file_name = upload_file_name
        else
-         raise(ArgumentError, "Model #{job_class_name} must implement '#upload', or have attribute 'upload_file_name' or 'full_file_name'")
+         raise(
+           ArgumentError,
+           "Model #{job_class_name} must implement '#upload', or have attribute 'upload_file_name' or 'full_file_name'"
+         )
        end
      end

@@ -85,17 +89,49 @@ module RocketJob
        klass = job_class
        return if klass.nil? || klass.instance_methods.any? { |m| VALID_INSTANCE_METHODS.include?(m) }

-       errors.add(:job_class_name, "#{job_class} must implement any one of: :#{VALID_INSTANCE_METHODS.join(' :')} instance methods")
+       errors.add(:job_class_name,
+                  "#{job_class} must implement any one of: :#{VALID_INSTANCE_METHODS.join(' :')} instance methods")
      end

      def file_exists
-       return if upload_file_name.nil?
+       # Only check for file existence when it is a local file
+       return unless upload_file_name.is_a?(IOStreams::Paths::File)
+       return errors.add(:upload_file_name, "Upload file name can't be blank.") if upload_file_name.to_s == ""

-       uri = URI.parse(upload_file_name)
-       return unless uri.scheme.nil? || uri.scheme == "file"
-       return if File.exist?(upload_file_name)
+       return if upload_file_name.exist?

        errors.add(:upload_file_name, "Upload file: #{upload_file_name} does not exist.")
+     rescue NotImplementedError
+       nil
+     end
+
+     def job_has_properties
+       klass = job_class
+       return unless klass
+
+       properties.each_pair do |k, _v|
+         next if klass.public_method_defined?("#{k}=".to_sym)
+
+         if %i[output_categories input_categories].include?(k)
+           category_class = k == :input_categories ? RocketJob::Category::Input : RocketJob::Category::Output
+           properties[k].each do |category|
+             category.each_pair do |key, _value|
+               next if category_class.public_method_defined?("#{key}=".to_sym)
+
+               errors.add(
+                 :properties,
+                 "Unknown Property in #{k}: Attempted to set a value for #{key}.#{k} which is not allowed on the job #{job_class_name}"
+               )
+             end
+           end
+           next
+         end
+
+         errors.add(
+           :properties,
+           "Unknown Property: Attempted to set a value for #{k.inspect} which is not allowed on the job #{job_class_name}"
+         )
+       end
      end
    end
  end
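The new `job_has_properties` validation rejects any property the target job class does not define. A hedged sketch of the behaviour; the job class name and property keys are illustrative, only the `UploadFileJob` fields come from the hunks above:

  job = RocketJob::Jobs::UploadFileJob.new(
    job_class_name:   "MyImportJob",          # hypothetical RocketJob::Job subclass
    upload_file_name: "data/import.csv",      # hypothetical path
    properties:       {description: "Monthly import", no_such_setting: true}
  )
  # Assuming MyImportJob exists and defines no no_such_setting= writer,
  # validation now fails and errors[:properties] reports the unknown key.
  job.valid? # => false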
@@ -0,0 +1,69 @@
+ module RocketJob
+   class LookupCollection < Mongo::Collection
+     # Rapidly upload individual records in batches.
+     #
+     # Operates directly on a Mongo Collection to avoid the overhead of creating Mongoid objects
+     # for each and every row.
+     #
+     # Example:
+     #   lookup_collection(:my_lookup).upload do |io|
+     #     io << {id: 123, data: "first record"}
+     #     io << {id: 124, data: "second record"}
+     #   end
+     #
+     #   input_category(:my_lookup).find(id: 123).first
+     def upload(batch_size: 10_000, &block)
+       BatchUploader.upload(batch_size: batch_size, &block)
+     end
+
+     # Looks up the value at the specified id.
+     # Returns [nil] if no record was found with the supplied id.
+     def lookup(id)
+       find(id: id).first
+     end
+
+     # Internal class for uploading records in batches
+     class BatchUploader
+       attr_reader :record_count
+
+       def self.upload(collection, **args)
+         writer = new(collection, **args)
+         yield(writer)
+         writer.record_count
+       ensure
+         writer&.close
+       end
+
+       def initialize(collection, batch_size:)
+         @batch_size = batch_size
+         @record_count = 0
+         @batch_count = 0
+         @documents = []
+         @collection = collection
+       end
+
+       def <<(record)
+         raise(ArgumentError, "Record must be a Hash") unless record.is_a?(Hash)
+
+         unless record.key?(:id) || record.key?("id") || record.key?("_id")
+           raise(ArgumentError, "Record must include an :id key")
+         end
+
+         @documents << record
+         @record_count += 1
+         @batch_count += 1
+         if @batch_count >= @batch_size
+           @collection.insert_many(@documents)
+           @documents.clear
+           @batch_count = 0
+         end
+
+         self
+       end
+
+       def close
+         @collection.insert_many(@documents) unless @documents.empty?
+       end
+     end
+   end
+ end
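The class comment above already shows the intended usage; restated as a short, hedged sketch assuming a batch-job context where `lookup_collection` is available (ids and data values are illustrative):

  # Upload lookup records in bulk, then fetch one by id.
  collection = lookup_collection(:my_lookup)   # as in the class comment above

  collection.upload do |io|
    io << {id: 123, data: "first record"}
    io << {id: 124, data: "second record"}
  end

  collection.lookup(123) # => the stored document, or nil if not found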
@@ -37,12 +37,10 @@ module RocketJob
      # arrives, then the current job will complete the current slices and process
      # the new higher priority job
      field :priority, type: Integer, default: 50, class_attribute: true, user_editable: true, copy_on_restart: true
+     validates_inclusion_of :priority, in: 1..100

      # When the job completes destroy it from both the database and the UI
-     field :destroy_on_complete, type: Boolean, default: true, class_attribute: true, copy_on_restart: true
-
-     # Whether to store the results from this job
-     field :collect_output, type: Boolean, default: false, class_attribute: true
+     field :destroy_on_complete, type: Mongoid::Boolean, default: true, class_attribute: true, copy_on_restart: true

      # Run this job no earlier than this time
      field :run_at, type: Time, user_editable: true
@@ -54,14 +52,15 @@ module RocketJob
      # Can be used to reduce log noise, especially during high volume calls
      # For debugging a single job can be logged at a low level such as :trace
      # Levels supported: :trace, :debug, :info, :warn, :error, :fatal
-     field :log_level, type: Symbol, class_attribute: true, user_editable: true, copy_on_restart: true
+     field :log_level, type: Mongoid::StringifiedSymbol, class_attribute: true, user_editable: true, copy_on_restart: true
+     validates_inclusion_of :log_level, in: SemanticLogger::LEVELS + [nil]

      #
      # Read-only attributes
      #

      # Current state, as set by the state machine. Do not modify this value directly.
-     field :state, type: Symbol, default: :queued
+     field :state, type: Mongoid::StringifiedSymbol, default: :queued

      # When the job was created
      field :created_at, type: Time, default: -> { Time.now }
@@ -89,17 +88,12 @@
      # Store the last exception for this job
      embeds_one :exception, class_name: "RocketJob::JobException"

-     # Store the Hash result from this job if collect_output is true,
-     # and the job returned actually returned a Hash, otherwise nil
-     # Not applicable to SlicedJob jobs, since its output is stored in a
-     # separate collection
-     field :result, type: Hash
-
+     # Used when workers fetch jobs to work on.
      index({state: 1, priority: 1, _id: 1}, background: true)
+     # Used by Mission Control to display completed jobs sorted by completion.
+     index({completed_at: 1}, background: true)

      validates_presence_of :state, :failure_count, :created_at
-     validates :priority, inclusion: 1..100
-     validates :log_level, inclusion: SemanticLogger::LEVELS + [nil]
    end

    module ClassMethods
@@ -155,14 +149,8 @@

      # Scope for queued jobs that can run now
      # I.e. Queued jobs excluding scheduled jobs
-     if Mongoid::VERSION.to_f >= 7.1
-       def queued_now
-         queued.and(RocketJob::Job.where(run_at: nil).or(:run_at.lte => Time.now))
-       end
-     else
-       def queued_now
-         queued.or({run_at: nil}, :run_at.lte => Time.now)
-       end
+     def queued_now
+       queued.and(RocketJob::Job.where(run_at: nil).or(:run_at.lte => Time.now))
      end

      # Defines all the fields that are accessible on the Document
@@ -183,43 +171,30 @@
      #
      # @return [ Field ] The generated field
      def field(name, options)
-       if options.delete(:user_editable) == true
-         self.user_editable_fields += [name.to_sym] unless user_editable_fields.include?(name.to_sym)
+       if (options.delete(:user_editable) == true) && !user_editable_fields.include?(name.to_sym)
+         self.user_editable_fields += [name.to_sym]
        end
+
        if options.delete(:class_attribute) == true
          class_attribute(name, instance_accessor: false)
          public_send("#{name}=", options[:default]) if options.key?(:default)
          options[:default] = -> { self.class.public_send(name) }
        end
-       if options.delete(:copy_on_restart) == true
-         self.rocket_job_restart_attributes += [name.to_sym] unless rocket_job_restart_attributes.include?(name.to_sym)
+
+       if (options.delete(:copy_on_restart) == true) && !rocket_job_restart_attributes.include?(name.to_sym)
+         self.rocket_job_restart_attributes += [name.to_sym]
        end
-       super(name, options)
-     end

-     # DEPRECATED
-     def rocket_job
-       warn "Replace calls to .rocket_job with calls to set class instance variables. For example: self.priority = 50"
-       yield(self)
+       super(name, options)
      end

-     # DEPRECATED
-     def public_rocket_job_properties(*args)
-       warn "Replace calls to .public_rocket_job_properties by adding `user_editable: true` option to the field declaration in #{name} for: #{args.inspect}"
-       self.user_editable_fields += args.collect(&:to_sym)
+     # Builds this job instance from the supplied properties hash.
+     # Overridden by batch to support child objects.
+     def from_properties(properties)
+       new(properties)
      end
    end

-   # Returns [true|false] whether to collect nil results from running this batch
-   def collect_nil_output?
-     collect_output? ? (collect_nil_output == true) : false
-   end
-
-   # Returns [true|false] whether to collect the results from running this batch
-   def collect_output?
-     collect_output == true
-   end
-
    # Returns [Float] the number of seconds the job has taken
    # - Elapsed seconds to process the job from when a worker first started working on it
    # until now if still running, or until it was completed
@@ -282,7 +257,6 @@
    # Returns [Hash] status of this job
    def as_json
      attrs = serializable_hash(methods: %i[seconds duration])
-     attrs.delete("result") unless collect_output?
      attrs.delete("failure_count") unless failure_count.positive?
      if queued?
        attrs.delete("started_at")
@@ -319,16 +293,17 @@
      h = as_json
      h.delete("seconds")
      h.dup.each_pair do |k, v|
-       if v.is_a?(Time)
+       case v
+       when Time
          h[k] = v.in_time_zone(time_zone).to_s
-       elsif v.is_a?(BSON::ObjectId)
+       when BSON::ObjectId
          h[k] = v.to_s
        end
      end
      h
    end

-   # Returns [Boolean] whether the worker runs on a particular server.
+   # Returns [true|false] whether the worker runs on a particular server.
    def worker_on_server?(server_name)
      return false unless worker_name.present? && server_name.present?