rocketjob 5.4.1 → 6.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/README.md +175 -5
  3. data/bin/rocketjob_batch_perf +1 -1
  4. data/bin/rocketjob_perf +1 -1
  5. data/lib/rocket_job/batch/categories.rb +345 -0
  6. data/lib/rocket_job/batch/io.rb +174 -106
  7. data/lib/rocket_job/batch/model.rb +20 -68
  8. data/lib/rocket_job/batch/performance.rb +19 -7
  9. data/lib/rocket_job/batch/statistics.rb +34 -12
  10. data/lib/rocket_job/batch/throttle_running_workers.rb +2 -6
  11. data/lib/rocket_job/batch/worker.rb +31 -26
  12. data/lib/rocket_job/batch.rb +3 -1
  13. data/lib/rocket_job/category/base.rb +81 -0
  14. data/lib/rocket_job/category/input.rb +170 -0
  15. data/lib/rocket_job/category/output.rb +34 -0
  16. data/lib/rocket_job/cli.rb +25 -17
  17. data/lib/rocket_job/dirmon_entry.rb +23 -13
  18. data/lib/rocket_job/event.rb +1 -1
  19. data/lib/rocket_job/extensions/iostreams/path.rb +32 -0
  20. data/lib/rocket_job/extensions/mongoid/contextual/mongo.rb +2 -2
  21. data/lib/rocket_job/extensions/mongoid/factory.rb +4 -12
  22. data/lib/rocket_job/extensions/mongoid/stringified_symbol.rb +50 -0
  23. data/lib/rocket_job/extensions/psych/yaml_tree.rb +8 -0
  24. data/lib/rocket_job/extensions/rocket_job_adapter.rb +2 -2
  25. data/lib/rocket_job/jobs/conversion_job.rb +43 -0
  26. data/lib/rocket_job/jobs/dirmon_job.rb +25 -36
  27. data/lib/rocket_job/jobs/housekeeping_job.rb +11 -12
  28. data/lib/rocket_job/jobs/on_demand_batch_job.rb +24 -11
  29. data/lib/rocket_job/jobs/on_demand_job.rb +3 -4
  30. data/lib/rocket_job/jobs/performance_job.rb +3 -1
  31. data/lib/rocket_job/jobs/re_encrypt/relational_job.rb +103 -96
  32. data/lib/rocket_job/jobs/upload_file_job.rb +48 -8
  33. data/lib/rocket_job/lookup_collection.rb +69 -0
  34. data/lib/rocket_job/plugins/cron.rb +60 -20
  35. data/lib/rocket_job/plugins/job/model.rb +25 -50
  36. data/lib/rocket_job/plugins/job/persistence.rb +36 -0
  37. data/lib/rocket_job/plugins/job/throttle.rb +2 -2
  38. data/lib/rocket_job/plugins/job/throttle_running_jobs.rb +1 -1
  39. data/lib/rocket_job/plugins/job/worker.rb +2 -7
  40. data/lib/rocket_job/plugins/restart.rb +3 -103
  41. data/lib/rocket_job/plugins/state_machine.rb +4 -3
  42. data/lib/rocket_job/plugins/throttle_dependent_jobs.rb +37 -0
  43. data/lib/rocket_job/ractor_worker.rb +42 -0
  44. data/lib/rocket_job/server/model.rb +1 -1
  45. data/lib/rocket_job/sliced/bzip2_output_slice.rb +18 -19
  46. data/lib/rocket_job/sliced/compressed_slice.rb +3 -6
  47. data/lib/rocket_job/sliced/encrypted_bzip2_output_slice.rb +49 -0
  48. data/lib/rocket_job/sliced/encrypted_slice.rb +4 -6
  49. data/lib/rocket_job/sliced/input.rb +42 -54
  50. data/lib/rocket_job/sliced/slice.rb +12 -16
  51. data/lib/rocket_job/sliced/slices.rb +26 -11
  52. data/lib/rocket_job/sliced/writer/input.rb +46 -18
  53. data/lib/rocket_job/sliced/writer/output.rb +33 -45
  54. data/lib/rocket_job/sliced.rb +1 -74
  55. data/lib/rocket_job/subscribers/server.rb +1 -1
  56. data/lib/rocket_job/thread_worker.rb +46 -0
  57. data/lib/rocket_job/throttle_definitions.rb +7 -1
  58. data/lib/rocket_job/version.rb +1 -1
  59. data/lib/rocket_job/worker.rb +21 -55
  60. data/lib/rocket_job/worker_pool.rb +5 -7
  61. data/lib/rocketjob.rb +53 -43
  62. metadata +36 -28
  63. data/lib/rocket_job/batch/tabular/input.rb +0 -131
  64. data/lib/rocket_job/batch/tabular/output.rb +0 -65
  65. data/lib/rocket_job/batch/tabular.rb +0 -56
  66. data/lib/rocket_job/extensions/mongoid/remove_warnings.rb +0 -12
  67. data/lib/rocket_job/jobs/on_demand_batch_tabular_job.rb +0 -28

data/lib/rocket_job/jobs/housekeeping_job.rb

@@ -27,15 +27,14 @@ module RocketJob
     # )
     class HousekeepingJob < RocketJob::Job
       include RocketJob::Plugins::Cron
-      include RocketJob::Plugins::Singleton
 
-      self.priority = 25
-      self.description = "Cleans out historical jobs, and zombie servers."
-      # Runs every 15 minutes
-      self.cron_schedule = "*/15 * * * * UTC"
+      # Runs every 15 minutes on the 15 minute period
+      self.cron_schedule = "0,15,30,45 * * * * UTC"
+      self.description = "Cleans out historical jobs, and zombie servers."
+      self.priority = 25
 
       # Whether to destroy zombie servers automatically
-      field :destroy_zombies, type: Boolean, default: true, user_editable: true, copy_on_restart: true
+      field :destroy_zombies, type: Mongoid::Boolean, default: true, user_editable: true, copy_on_restart: true
 
       # Retention intervals in seconds.
       # Set to nil to retain everything.
@@ -54,12 +53,12 @@ module RocketJob
         RocketJob::Job.paused.where(completed_at: {"$lte" => paused_retention.seconds.ago}).destroy_all if paused_retention
         RocketJob::Job.queued.where(created_at: {"$lte" => queued_retention.seconds.ago}).destroy_all if queued_retention
 
-        if destroy_zombies
-          # Cleanup zombie servers
-          RocketJob::Server.destroy_zombies
-          # Requeue jobs where the worker is in the zombie state and its server has gone away
-          RocketJob::ActiveWorker.requeue_zombies
-        end
+        return unless destroy_zombies
+
+        # Cleanup zombie servers
+        RocketJob::Server.destroy_zombies
+        # Requeue jobs where the worker is in the zombie state and its server has gone away
+        RocketJob::ActiveWorker.requeue_zombies
       end
     end
   end
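
Since RocketJob::Plugins::Singleton is no longer mixed in, the housekeeping job is now scheduled like any other cron job. A minimal console sketch, assuming the field names shown in the hunks above (the retention values are illustrative; retention fields hold a number of seconds):

    RocketJob::Jobs::HousekeepingJob.create!(
      destroy_zombies:  true,          # Mongoid::Boolean field from the hunk above
      queued_retention: 14.days.to_i,  # illustrative: destroy queued jobs older than 14 days
      paused_retention: 90.days.to_i   # illustrative: destroy paused jobs older than 90 days
    )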

data/lib/rocket_job/jobs/on_demand_batch_job.rb

@@ -31,16 +31,17 @@
 #   job.perform_now
 #   job.cleanup!
 #
-# By default output is not collected, add the option `collect_output: true` to collect output.
+# By default output is not collected, call the method `#collect_output` to collect output.
 #
 # Example:
 #   job = RocketJob::Jobs::OnDemandBatchJob(
 #     description: 'Fix data',
 #     code: code,
 #     throttle_running_workers: 5,
-#     priority: 30,
-#     collect_output: true
+#     priority: 30
 #   )
+#   job.collect_output
+#   job.save!
 #
 # Example: Move the upload operation into a before_batch.
 #   upload_code = <<-CODE
@@ -64,27 +65,29 @@ module RocketJob
   module Jobs
     class OnDemandBatchJob < RocketJob::Job
       include RocketJob::Plugins::Cron
+      include RocketJob::Plugins::Retry
      include RocketJob::Batch
       include RocketJob::Batch::Statistics
 
       self.priority = 90
-      self.description = "Batch Job"
+      self.description = "On Demand Batch Job"
       self.destroy_on_complete = false
+      self.retry_limit = 0
 
       # Code that is performed against every row / record.
-      field :code, type: String
+      field :code, type: String, user_editable: true, copy_on_restart: true
 
       # Optional code to execute before the batch is run.
       # Usually to upload data into the job.
-      field :before_code, type: String
+      field :before_code, type: String, user_editable: true, copy_on_restart: true
 
       # Optional code to execute after the batch is run.
       # Usually to upload data into the job.
-      field :after_code, type: String
+      field :after_code, type: String, user_editable: true, copy_on_restart: true
 
       # Data that is made available to the job during the perform.
       # Be sure to store key names only as Strings, not Symbols.
-      field :data, type: Hash, default: {}
+      field :data, type: Hash, default: {}, user_editable: true, copy_on_restart: true
 
       validates :code, presence: true
       validate :validate_code
@@ -95,10 +98,20 @@ module RocketJob
       before_batch :run_before_code
       after_batch :run_after_code
 
+      # Shortcut for setting the slice_size
+      def slice_size=(slice_size)
+        input_category.slice_size = slice_size
+      end
+
+      # Add a new output category and collect output for it.
+      def add_output_category(**args)
+        self.output_categories << RocketJob::Category::Output.new(**args)
+      end
+
       private
 
       def load_perform_code
-        instance_eval("def perform(row)\n#{code}\nend")
+        instance_eval("def perform(row)\n#{code}\nend", __FILE__, __LINE__)
       end
 
       def run_before_code
@@ -118,13 +131,13 @@ module RocketJob
       def validate_before_code
         return if before_code.nil?
 
-        validate_field(:before_code) { instance_eval("def __before_code\n#{before_code}\nend") }
+        validate_field(:before_code) { instance_eval("def __before_code\n#{before_code}\nend", __FILE__, __LINE__) }
       end
 
       def validate_after_code
         return if after_code.nil?
 
-        validate_field(:after_code) { instance_eval("def __after_code\n#{after_code}\nend") }
+        validate_field(:after_code) { instance_eval("def __after_code\n#{after_code}\nend", __FILE__, __LINE__) }
       end
 
       def validate_field(field)
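
Putting the documentation change and the new helpers together, a 6.0-style invocation might look like the sketch below. The collect_output call, the slice_size= shortcut, and save! come straight from the hunks above; the code string and slice size are illustrative:

    job = RocketJob::Jobs::OnDemandBatchJob.new(
      description: "Fix data",
      code:        "row.to_s.upcase"   # illustrative code string run against every row
    )
    job.slice_size = 1_000             # new shortcut that delegates to input_category
    job.collect_output                 # replaces the removed collect_output: true option
    job.save!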

data/lib/rocket_job/jobs/on_demand_job.rb

@@ -38,12 +38,11 @@
 #
 # Example: Retain output:
 #   code = <<~CODE
-#     {'value' => data['a'] * data['b']}
+#     data['result'] = data['a'] * data['b']
 #   CODE
 #
 #   RocketJob::Jobs::OnDemandJob.create!(
 #     code: code,
-#     collect_output: true,
 #     data: {'a' => 10, 'b' => 2}
 #   )
 #
@@ -79,8 +78,8 @@ module RocketJob
       self.retry_limit = 0
 
       # Be sure to store key names only as Strings, not Symbols
-      field :data, type: Hash, default: {}, copy_on_restart: true
-      field :code, type: String, copy_on_restart: true
+      field :data, type: Hash, default: {}, user_editable: true, copy_on_restart: true
+      field :code, type: String, user_editable: true, copy_on_restart: true
 
       validates :code, presence: true
       validate :validate_code
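
With the collect_output option gone, results are retained by writing into the job's data hash, as the updated example shows. A minimal sketch of reading the result back after the job runs (values taken from the example above):

    code = <<~CODE
      data['result'] = data['a'] * data['b']
    CODE

    job = RocketJob::Jobs::OnDemandJob.create!(code: code, data: {'a' => 10, 'b' => 2})
    job.perform_now              # or let a Rocket Job server pick it up
    job.data['result']           # => 20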

data/lib/rocket_job/jobs/performance_job.rb

@@ -6,9 +6,11 @@ module RocketJob
       # Define the job's default attributes
       self.description = "Performance Test"
       self.priority = 5
-      self.slice_size = 100
       self.destroy_on_complete = false
 
+      input_category slice_size: 100
+      output_category
+
       # No operation, just return the supplied line (record)
       def perform(line)
         line
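
The removed self.slice_size setting maps onto the new input_category declaration, and output collection now appears to be declared with output_category rather than the old collect_output flag. A hedged sketch of the same mapping in an application job (MyBatchJob is hypothetical):

    class MyBatchJob < RocketJob::Job
      include RocketJob::Batch

      # v5.x: self.slice_size = 100
      input_category slice_size: 100
      # v5.x: self.collect_output = true (output collection now appears to follow
      # from declaring an output category)
      output_category

      def perform(record)
        record
      end
    end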

data/lib/rocket_job/jobs/re_encrypt/relational_job.rb

@@ -1,6 +1,3 @@
-require "active_record"
-require "sync_attr"
-
 # Batch Worker to Re-encrypt all encrypted fields in MySQL that start with `encrytped_`.
 #
 # Run in Rails console:
@@ -11,116 +8,126 @@ require "sync_attr"
 # * This job will find any column in the database that starts with`encrypted_`.
 # * This means that temporary or other tables not part of the application tables will also be processed.
 # * Since it automatically finds and re-encrypts any column, new columns are handled without any manual intervention.
-module RocketJob
-  module Jobs
-    module ReEncrypt
-      class RelationalJob < RocketJob::Job
-        include RocketJob::Batch
-
-        self.slice_size = 1000
-        self.priority = 30
-        self.destroy_on_complete = false
-        self.compress = true
-        self.throttle_running_jobs = 1
-        self.throttle_running_workers = 10
-
-        # Name of the table being re-encrypted
-        field :table_name, type: String
-
-        # Limit the number of records to re-encrypt in test environments
-        field :limit, type: Integer
-
-        validates_presence_of :table_name
-        before_batch :upload_records
-
-        # Returns [Hash] of table names with each entry being an array
-        # of columns that start with encrypted_
-        sync_cattr_reader :encrypted_columns do
-          h = {}
-          connection.tables.each do |table|
-            columns = connection.columns(table)
-            columns.each do |column|
-              if column.name.start_with?("encrypted_")
-                add_column = column.name
-                (h[table] ||= []) << add_column if add_column
+if defined?(ActiveRecord) && defined?(SyncAttr)
+  require "active_record"
+  require "sync_attr"
+
+  module RocketJob
+    module Jobs
+      module ReEncrypt
+        class RelationalJob < RocketJob::Job
+          include RocketJob::Batch
+
+          self.priority = 30
+          self.destroy_on_complete = false
+          self.throttle_running_jobs = 1
+          self.throttle_running_workers = 10
+
+          input_category slice_size: 1_000
+
+          # Name of the table being re-encrypted
+          field :table_name, type: String
+
+          # Limit the number of records to re-encrypt in test environments
+          field :limit, type: Integer
+
+          validates_presence_of :table_name
+          before_batch :upload_records
+
+          # Returns [Hash] of table names with each entry being an array
+          # of columns that start with encrypted_
+          sync_cattr_reader :encrypted_columns do
+            h = {}
+            connection.tables.each do |table|
+              columns = connection.columns(table)
+              columns.each do |column|
+                if column.name.start_with?("encrypted_")
+                  add_column = column.name
+                  (h[table] ||= []) << add_column if add_column
+                end
               end
             end
+            h
           end
-          h
-        end
 
-        # Re-encrypt all `encrypted_` columns in the relational database.
-        # Queues a Job for each table that needs re-encryption.
-        def self.start(**args)
-          encrypted_columns.keys.collect do |table|
-            create!(table_name: table, description: table, **args)
+          # Re-encrypt all `encrypted_` columns in the relational database.
+          # Queues a Job for each table that needs re-encryption.
+          def self.start(**args)
+            encrypted_columns.keys.collect do |table|
+              create!(table_name: table, description: table, **args)
+            end
           end
-        end
 
-        # Re-encrypt all encrypted columns for the named table.
-        # Does not use AR models since we do not have models for all tables.
-        def perform(range)
-          start_id, end_id = range
+          # Re-encrypt all encrypted columns for the named table.
+          # Does not use AR models since we do not have models for all tables.
+          def perform(range)
+            start_id, end_id = range
 
-          columns = self.class.encrypted_columns[table_name]
-          unless columns&.size&.positive?
-            logger.error "No columns for table: #{table_name} from #{start_id} to #{end_id}"
-            return
-          end
+            columns = self.class.encrypted_columns[table_name]
+            unless columns&.size&.positive?
+              logger.error "No columns for table: #{table_name} from #{start_id} to #{end_id}"
+              return
+            end
 
-          logger.info "Processing: #{table_name} from #{start_id} to #{end_id}"
-          sql = "select id, #{columns.join(',')} from #{quoted_table_name} where id >= #{start_id} and id <= #{end_id}"
-
-          # Use AR to fetch all the records
-          self.class.connection.select_rows(sql).each do |row|
-            row = row.unshift(nil)
-            index = 1
-            sql = "update #{quoted_table_name} set "
-            updates = []
-            columns.collect do |column|
-              index += 1
-              value = row[index]
-              # Prevent re-encryption
-              unless value.blank?
-                new_value = re_encrypt(value)
-                updates << "#{column} = \"#{new_value}\"" if new_value != value
+            logger.info "Processing: #{table_name} from #{start_id} to #{end_id}"
+            sql = "select id, #{columns.join(',')} from #{quoted_table_name} where id >= #{start_id} and id <= #{end_id}"
+
+            # Use AR to fetch all the records
+            self.class.connection.select_rows(sql).each do |row|
+              row.unshift(nil)
+              index = 1
+              sql = "update #{quoted_table_name} set "
+              updates = []
+              columns.collect do |column|
+                index += 1
+                value = row[index]
+                # Prevent re-encryption
+                unless value.blank?
+                  new_value = re_encrypt(value)
+                  updates << "#{column} = \"#{new_value}\"" if new_value != value
+                end
+              end
+              if updates.size.positive?
+                sql << updates.join(", ")
+                sql << " where id=#{row[1]}"
+                logger.trace sql
+                self.class.connection.execute sql
+              else
+                logger.trace { "Skipping empty values #{table_name}:#{row[1]}" }
               end
             end
-            if updates.size.positive?
-              sql << updates.join(", ")
-              sql << " where id=#{row[1]}"
-              logger.trace sql
-              self.class.connection.execute sql
-            else
-              logger.trace { "Skipping empty values #{table_name}:#{row[1]}" }
-            end
           end
-        end
 
-        # Returns a database connection.
-        #
-        # Override this method to support other ways of obtaining a thread specific database connection.
-        def self.connection
-          ActiveRecord::Base.connection
-        end
+          # Returns a database connection.
+          #
+          # Override this method to support other ways of obtaining a thread specific database connection.
+          def self.connection
+            ActiveRecord::Base.connection
+          end
 
-        private
+          private
 
-        def quoted_table_name
-          @quoted_table_name ||= self.class.connection.quote_table_name(table_name)
-        end
+          def quoted_table_name
+            @quoted_table_name ||= self.class.connection.quote_table_name(table_name)
+          end
 
-        def re_encrypt(encrypted_value)
-          return encrypted_value if (encrypted_value == "") || encrypted_value.nil?
+          def re_encrypt(encrypted_value)
+            return encrypted_value if (encrypted_value == "") || encrypted_value.nil?
 
-          SymmetricEncryption.encrypt(SymmetricEncryption.decrypt(encrypted_value))
-        end
+            SymmetricEncryption.encrypt(SymmetricEncryption.decrypt(encrypted_value))
+          end
 
-        # Upload range to re-encrypt all rows in the specified table.
-        def upload_records
-          start_id = self.class.connection.select_value("select min(id) from #{quoted_table_name}").to_i
-          last_id = self.class.connection.select_value("select max(id) from #{quoted_table_name}").to_i
-          self.record_count = last_id.positive? ? (input.upload_integer_range_in_reverse_order(start_id, last_id) * slice_size) : 0
+          # Upload range to re-encrypt all rows in the specified table.
+          def upload_records
+            start_id = self.class.connection.select_value("select min(id) from #{quoted_table_name}").to_i
+            last_id = self.class.connection.select_value("select max(id) from #{quoted_table_name}").to_i
+            self.record_count =
+              if last_id.positive?
+                input.upload_integer_range_in_reverse_order(start_id, last_id) * input_category.slice_size
+              else
+                0
+              end
+          end
         end
       end
     end
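
Apart from the input_category declaration and the defined? guard, the class-level API is unchanged: self.start still queues one job per table that has encrypted_ columns, passing any extra arguments through to create!. A minimal console sketch:

    # Queues one RelationalJob per table containing columns that start with "encrypted_".
    # Keyword arguments are forwarded to create! (priority shown as an illustrative override).
    RocketJob::Jobs::ReEncrypt::RelationalJob.start(priority: 5)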

data/lib/rocket_job/jobs/upload_file_job.rb

@@ -19,7 +19,7 @@ module RocketJob
       field :properties, type: Hash, default: {}, user_editable: true
 
       # File to upload
-      field :upload_file_name, type: String, user_editable: true
+      field :upload_file_name, type: IOStreams::Path, user_editable: true
 
       # The original Input file name.
       # Used by #upload to extract the IOStreams when present.
@@ -33,10 +33,11 @@ module RocketJob
       validate :job_is_a_rocket_job
       validate :job_implements_upload
       validate :file_exists
+      validate :job_has_properties
 
       # Create the job and upload the file into it.
       def perform
-        job = job_class.new(properties)
+        job = job_class.from_properties(properties)
         job.id = job_id if job_id
         upload_file(job)
         job.save!
@@ -56,6 +57,10 @@ module RocketJob
 
       def upload_file(job)
         if job.respond_to?(:upload)
+          # Return the database connection for this thread back to the connection pool
+          # in case the upload takes a long time and the database connection expires.
+          ActiveRecord::Base.clear_active_connections! if defined?(ActiveRecord::Base)
+
           if original_file_name
             job.upload(upload_file_name, file_name: original_file_name)
           else
@@ -66,7 +71,10 @@ module RocketJob
         elsif job.respond_to?(:full_file_name=)
           job.full_file_name = upload_file_name
         else
-          raise(ArgumentError, "Model #{job_class_name} must implement '#upload', or have attribute 'upload_file_name' or 'full_file_name'")
+          raise(
+            ArgumentError,
+            "Model #{job_class_name} must implement '#upload', or have attribute 'upload_file_name' or 'full_file_name'"
+          )
         end
       end
 
@@ -85,17 +93,49 @@ module RocketJob
         klass = job_class
         return if klass.nil? || klass.instance_methods.any? { |m| VALID_INSTANCE_METHODS.include?(m) }
 
-        errors.add(:job_class_name, "#{job_class} must implement any one of: :#{VALID_INSTANCE_METHODS.join(' :')} instance methods")
+        errors.add(:job_class_name,
+                   "#{job_class} must implement any one of: :#{VALID_INSTANCE_METHODS.join(' :')} instance methods")
       end
 
       def file_exists
-        return if upload_file_name.nil?
+        # Only check for file existence when it is a local file
+        return unless upload_file_name.is_a?(IOStreams::Paths::File)
+        return errors.add(:upload_file_name, "Upload file name can't be blank.") if upload_file_name.to_s == ""
 
-        uri = URI.parse(upload_file_name)
-        return unless uri.scheme.nil? || uri.scheme == "file"
-        return if File.exist?(upload_file_name)
+        return if upload_file_name.exist?
 
         errors.add(:upload_file_name, "Upload file: #{upload_file_name} does not exist.")
+      rescue NotImplementedError
+        nil
+      end
+
+      def job_has_properties
+        klass = job_class
+        return unless klass
+
+        properties.each_pair do |k, _v|
+          next if klass.public_method_defined?("#{k}=".to_sym)
+
+          if %i[output_categories input_categories].include?(k)
+            category_class = k == :input_categories ? RocketJob::Category::Input : RocketJob::Category::Output
+            properties[k].each do |category|
+              category.each_pair do |key, _value|
+                next if category_class.public_method_defined?("#{key}=".to_sym)
+
+                errors.add(
+                  :properties,
+                  "Unknown Property in #{k}: Attempted to set a value for #{key}.#{k} which is not allowed on the job #{job_class_name}"
+                )
+              end
+            end
+            next
+          end
+
+          errors.add(
+            :properties,
+            "Unknown Property: Attempted to set a value for #{k.inspect} which is not allowed on the job #{job_class_name}"
+          )
+        end
       end
     end
   end
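
The new job_has_properties validation requires every key in properties to have a writer on the target job class, and, for input_categories / output_categories, every key inside each category hash to have a writer on RocketJob::Category::Input or RocketJob::Category::Output. A hedged sketch of a valid invocation (MyApp::ImportJob is hypothetical; the field names come from the hunks above):

    RocketJob::Jobs::UploadFileJob.create!(
      job_class_name:   "MyApp::ImportJob",         # hypothetical batch job class
      upload_file_name: "/exports/customers.csv",   # stored as an IOStreams::Path in 6.0; must exist when local
      properties:       {
        description:      "Customer import",
        input_categories: [{slice_size: 1_000}]     # keys validated against RocketJob::Category::Input writers
      }
    )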