rocketjob 6.0.0.rc3 → 6.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. checksums.yaml +4 -4
  2. data/README.md +26 -0
  3. data/lib/rocket_job/batch/categories.rb +24 -20
  4. data/lib/rocket_job/batch/io.rb +128 -128
  5. data/lib/rocket_job/batch/worker.rb +14 -12
  6. data/lib/rocket_job/category/base.rb +10 -7
  7. data/lib/rocket_job/category/input.rb +61 -1
  8. data/lib/rocket_job/category/output.rb +9 -0
  9. data/lib/rocket_job/dirmon_entry.rb +1 -1
  10. data/lib/rocket_job/jobs/conversion_job.rb +21 -17
  11. data/lib/rocket_job/jobs/dirmon_job.rb +24 -35
  12. data/lib/rocket_job/jobs/housekeeping_job.rb +4 -5
  13. data/lib/rocket_job/jobs/on_demand_batch_job.rb +7 -5
  14. data/lib/rocket_job/jobs/on_demand_job.rb +2 -2
  15. data/lib/rocket_job/jobs/upload_file_job.rb +4 -0
  16. data/lib/rocket_job/plugins/cron.rb +60 -20
  17. data/lib/rocket_job/plugins/job/persistence.rb +36 -0
  18. data/lib/rocket_job/plugins/restart.rb +3 -110
  19. data/lib/rocket_job/plugins/state_machine.rb +2 -2
  20. data/lib/rocket_job/plugins/throttle_dependent_jobs.rb +1 -2
  21. data/lib/rocket_job/sliced/bzip2_output_slice.rb +18 -19
  22. data/lib/rocket_job/sliced/compressed_slice.rb +3 -6
  23. data/lib/rocket_job/sliced/encrypted_bzip2_output_slice.rb +49 -0
  24. data/lib/rocket_job/sliced/encrypted_slice.rb +4 -6
  25. data/lib/rocket_job/sliced/input.rb +42 -54
  26. data/lib/rocket_job/sliced/slice.rb +7 -3
  27. data/lib/rocket_job/sliced/slices.rb +12 -9
  28. data/lib/rocket_job/sliced/writer/input.rb +46 -18
  29. data/lib/rocket_job/sliced.rb +1 -19
  30. data/lib/rocket_job/version.rb +1 -1
  31. data/lib/rocketjob.rb +2 -2
  32. metadata +8 -10
  33. data/lib/rocket_job/batch/tabular/input.rb +0 -133
  34. data/lib/rocket_job/batch/tabular/output.rb +0 -67
  35. data/lib/rocket_job/batch/tabular.rb +0 -58
data/lib/rocket_job/batch/worker.rb
@@ -67,6 +67,8 @@ module RocketJob
   # Returns [Integer] the number of records processed in the slice
   #
   # Note: The slice will be removed from processing when this method completes
+  #
+  # @deprecated Please open a ticket if you need this behavior.
   def work_first_slice(&block)
     raise "#work_first_slice can only be called from within before_batch callbacks" unless sub_state == :before

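The deprecated `work_first_slice` can only be invoked from a `before_batch` callback, per the guard above. A minimal sketch of the call site being deprecated, using a hypothetical job class:

    class FirstSliceJob < RocketJob::Job
      include RocketJob::Batch

      before_batch :check_first_slice

      def perform(record)
        record
      end

      private

      # Runs inside before_batch, so sub_state == :before and the guard passes.
      def check_first_slice
        work_first_slice { |record| record }
      end
    end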
@@ -142,19 +144,19 @@ module RocketJob
   # Perform individual slice without callbacks
   def rocket_job_perform_slice(slice, &block)
     slice.processing_record_number ||= 0
-    records = []
     append = false

-    # Skip processed records in this slice if it has no output categpries.
-    if slice.processing_record_number > 1
-      records = slice.records[slice.processing_record_number - 1..-1]
-      append = true
-      logger.info("Resuming previously incomplete slice from record number #{slice.processing_record_number}")
-    else
-      # Reprocess all records in this slice.
-      slice.processing_record_number = 0
-      records = slice.records
-    end
+    # Skip processed records in this slice if it has no output categories.
+    records =
+      if slice.processing_record_number.to_i > 1
+        append = true
+        logger.info("Resuming previously incomplete slice from record number #{slice.processing_record_number}")
+        slice.records[slice.processing_record_number - 1..-1]
+      else
+        # Reprocess all records in this slice.
+        slice.processing_record_number = 0
+        slice.records
+      end

     count = 0
     RocketJob::Sliced::Writer::Output.collect(self, input_slice: slice, append: append) do |writer|
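The rewritten resume logic treats `processing_record_number` as a 1-based position and now coerces `nil` via `to_i`. A standalone illustration of the range arithmetic, with made-up values:

    records = %w[r1 r2 r3 r4 r5]
    processing_record_number = 3   # r1 and r2 completed; r3 was in flight

    # Same expression as the diff: re-process from the in-flight record onward.
    records[processing_record_number - 1..-1]   # => ["r3", "r4", "r5"]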
@@ -246,7 +248,7 @@ module RocketJob
   unless new_record?
     # Fail job iff no other worker has already finished it
     # Must set write concern to at least 1 since we need the nModified back
-    result = self.class.with(write: {w: 1}) do |query|
+    result = self.class.with(write: {w: 1}) do |query|
       query.
         where(id: id, state: :running, sub_state: :processing).
         update({"$set" => {state: :failed, worker_name: worker_name}})
data/lib/rocket_job/category/base.rb
@@ -11,7 +11,6 @@ module RocketJob

   # Whether to compress, encrypt, or use the bzip2 serialization for data in this category.
   field :serializer, type: ::Mongoid::StringifiedSymbol, default: :compress
-  validates_inclusion_of :serializer, in: %i[none compress encrypt bzip2]

   # The header columns when the file does not include a header row.
   # Note:
@@ -49,10 +48,12 @@ module RocketJob
       Sliced::CompressedSlice
     when :encrypt
       Sliced::EncryptedSlice
-    when :bzip2
+    when :bzip2, :bz2
       Sliced::BZip2OutputSlice
+    when :encrypted_bz2
+      Sliced::EncryptedBZip2OutputSlice
     else
-      raise(ArgumentError, "serialize: #{serializer.inspect} must be :none, :compress, :encrypt, or :bzip2")
+      raise(ArgumentError, "serialize: #{serializer.inspect} must be :none, :compress, :encrypt, :bz2, or :encrypted_bz2")
     end
   end

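The new `:encrypted_bz2` serializer can be requested per category. A minimal sketch, assuming a batch job (the `ExportJob` name is illustrative):

    class ExportJob < RocketJob::Job
      include RocketJob::Batch

      # Resolves to Sliced::EncryptedBZip2OutputSlice via the case statement above.
      output_category serializer: :encrypted_bz2

      def perform(record)
        record
      end
    end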
@@ -65,14 +66,16 @@ module RocketJob
     )
   end

-  def reset_tabular
-    @tabular = nil
-  end
-
   # Returns [true|false] whether this category has the attributes defined for tabular to work.
   def tabular?
     format.present?
   end
+
+  def build_collection_name(direction, job)
+    collection_name = "rocket_job.#{direction}s.#{job.id}"
+    collection_name << ".#{name}" unless name == :main
+    collection_name
+  end
 end
 end
 end
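`build_collection_name` centralizes the naming of the per-job input and output collections. A standalone re-implementation of the rule, for illustration only (the ids are made up):

    # Mirrors the method above: direction, job id, then any non-default category name.
    def build_collection_name(direction, job_id, category_name)
      name = "rocket_job.#{direction}s.#{job_id}"
      name << ".#{category_name}" unless category_name == :main
      name
    end

    build_collection_name(:input, "6140cf", :main)    # => "rocket_job.inputs.6140cf"
    build_collection_name(:output, "6140cf", :errors) # => "rocket_job.outputs.6140cf.errors"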
data/lib/rocket_job/category/input.rb
@@ -10,6 +10,7 @@ module RocketJob

   # Slice size for this input collection
   field :slice_size, type: Integer, default: 100
+  validates_presence_of :slice_size

   #
   # The fields below only apply if the field `format` has been set:
@@ -82,7 +83,7 @@ module RocketJob
   field :header_cleanser, type: ::Mongoid::StringifiedSymbol, default: :default
   validates :header_cleanser, inclusion: %i[default none]

-  validates_presence_of :slice_size
+  validates_inclusion_of :serializer, in: %i[none compress encrypt]

   # Cleanses the header column names when `cleanse_header` is true
   def cleanse_header!
@@ -105,6 +106,65 @@ module RocketJob
       skip_unknown: skip_unknown
     )
   end
+
+  def data_store(job)
+    RocketJob::Sliced::Input.new(
+      collection_name: build_collection_name(:input, job),
+      slice_class: serializer_class,
+      slice_size: slice_size
+    )
+  end
+
+  # Returns [IOStreams::Path] of file to upload.
+  # Auto-detects file format from file name when format is :auto.
+  def upload_path(stream = nil, original_file_name: nil)
+    unless stream || file_name
+      raise(ArgumentError, "Either supply a file name to upload, or set input_collection.file_name first")
+    end
+
+    path = IOStreams.new(stream || file_name)
+    path.file_name = original_file_name if original_file_name
+    self.file_name = path.file_name
+
+    # Auto detect the format based on the upload file name if present.
+    if format == :auto
+      self.format = path.format || :csv
+      # Rebuild tabular with new values.
+      @tabular = nil
+    end
+
+    # Remove non-printable characters from tabular input formats.
+    if tabular?
+      # Cannot change the length of fixed width lines.
+      replace = format == :fixed ? " " : ""
+      path.option_or_stream(:encode, encoding: "UTF-8", cleaner: :printable, replace: replace)
+    end
+    path
+  end
+
+  # Return a lambda to extract the header row from the uploaded file.
+  def extract_header_callback(on_first)
+    return on_first unless tabular? && tabular.header?
+
+    case mode
+    when :line
+      lambda do |line|
+        tabular.parse_header(line)
+        cleanse_header!
+        self.columns = tabular.header.columns
+        # Call chained on_first if present
+        on_first&.call(line)
+      end
+    when :array
+      lambda do |row|
+        tabular.header.columns = row
+        cleanse_header!
+        self.columns = category.tabular.header.columns
+        # Call chained on_first if present
+        on_first&.call(line)
+      end
+    end
+  end
 end
 end
 end
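A sketch of `upload_path` with format auto-detection, going through a job whose input category declares `format: :auto` (see the ConversionJob diff further below; the file name is illustrative):

    job  = RocketJob::Jobs::ConversionJob.new
    path = job.input_category.upload_path("customers.csv.gz")

    job.input_category.format    # => :csv, detected from the file name
    job.input_category.file_name # => "customers.csv.gz"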
data/lib/rocket_job/category/output.rb
@@ -13,6 +13,8 @@ module RocketJob
   # false: do not save nil values to the output categories.
   field :nils, type: ::Mongoid::Boolean, default: false

+  validates_inclusion_of :serializer, in: %i[none compress encrypt bz2 encrypted_bz2 bzip2]
+
   # Renders [String] the header line.
   # Returns [nil] if no header is needed.
   def render_header
@@ -20,6 +22,13 @@ module RocketJob

     tabular.render_header
   end
+
+  def data_store(job)
+    RocketJob::Sliced::Output.new(
+      collection_name: build_collection_name(:output, job),
+      slice_class: serializer_class
+    )
+  end
 end
 end
 end
data/lib/rocket_job/dirmon_entry.rb
@@ -173,7 +173,7 @@ module RocketJob
     counts
   end

-  # Passes each filename [Pathname] found that matches the pattern into the supplied block
+  # Yields [IOStreams::Path] for each file found that matches the current pattern.
   def each
     SemanticLogger.named_tagged(dirmon_entry: id.to_s) do
       # Case insensitive filename matching
data/lib/rocket_job/jobs/conversion_job.rb
@@ -1,39 +1,43 @@
 # Convert to and from CSV, JSON, xlsx, and PSV files.
 #
 # Example, Convert CSV file to JSON.
-#   job = RocketJob::ConversionJob.new
-#   job.upload("data.csv")
+#   job = RocketJob::Jobs::ConversionJob.new
+#   job.input_category.file_name = "data.csv"
 #   job.output_category.file_name = "data.json"
 #   job.save!
 #
 # Example, Convert JSON file to PSV and compress it with GZip.
-#   job = RocketJob::ConversionJob.new
-#   job.upload("data.json")
+#   job = RocketJob::Jobs::ConversionJob.new
+#   job.input_category.file_name = "data.json"
 #   job.output_category.file_name = "data.psv.gz"
 #   job.save!
 #
 # Example, Read a CSV file that has been zipped from a remote website and the convert it to a GZipped json file.
-#   job = RocketJob::ConversionJob.new
-#   job.upload("https://example.org/file.zip")
+#   job = RocketJob::Jobs::ConversionJob.new
+#   job.input_category.file_name = "https://example.org/file.zip"
 #   job.output_category.file_name = "data.json.gz"
 #   job.save!
 #
 module RocketJob
-  class ConversionJob < RocketJob::Job
-    include RocketJob::Batch
+  module Jobs
+    class ConversionJob < RocketJob::Job
+      include RocketJob::Batch

-    self.destroy_on_complete = false
+      self.destroy_on_complete = false

-    # Detects file extension for its type
-    input_category format: :auto
-    output_category format: :auto
+      # Detects file extension for its type
+      input_category format: :auto
+      output_category format: :auto

-    # When the job completes it will write the result to the output_category.file_name
-    after_batch :download
+      # Upload the file specified in `input_category.file_name` unless already uploaded.
+      before_batch :upload, unless: :record_count

-    def perform(hash)
-      # For this job return the input hash record as-is. Could be transformed here as needed.
-      hash
+      # When the job completes it will write the result to `output_category.file_name`.
+      after_batch :cleanup!, :download
+
+      def perform(hash)
+        hash
+      end
     end
   end
 end
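With the new `before_batch :upload` callback, setting `input_category.file_name` is sufficient; the file is uploaded when the job first runs rather than at creation time. For ad-hoc use, a job can also be run inline, assuming `perform_now`, which Rocket Job provides for running jobs without a server:

    job = RocketJob::Jobs::ConversionJob.new
    job.input_category.file_name  = "data.csv"
    job.output_category.file_name = "data.json"
    # Runs the batch inline, including the before_batch upload and
    # the after_batch download.
    job.perform_now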
data/lib/rocket_job/jobs/dirmon_job.rb
@@ -30,59 +30,48 @@ module RocketJob
 #
 # If another DirmonJob instance is already queued or running, then the create
 # above will fail with:
-#   MongoMapper::DocumentNotValid: Validation failed: State Another instance of this job is already queued or running
+#   Validation failed: State Another instance of this job is already queued or running
 #
 # Or to start DirmonJob and ignore errors if already running
 #   RocketJob::Jobs::DirmonJob.create
 class DirmonJob < RocketJob::Job
-  # Only allow one DirmonJob instance to be running at a time
-  include RocketJob::Plugins::Singleton
-  # Start a new job when this one completes, fails, or aborts
-  include RocketJob::Plugins::Restart
+  include RocketJob::Plugins::Cron

-  self.priority = 30
-
-  # Number of seconds between directory scans. Default 5 mins
-  field :check_seconds, type: Float, default: 300.0, copy_on_restart: true
+  # Runs every 5 minutes by default
+  self.cron_schedule = "*/5 * * * * UTC"
+  self.description = "Directory Monitor"
+  self.priority = 30

   # Hash[file_name, size]
   field :previous_file_names, type: Hash, default: {}, copy_on_restart: true

-  before_create :set_run_at
-
-  # Iterate over each Dirmon entry looking for new files
-  # If a new file is found, it is not processed immediately, instead
-  # it is passed to the next run of this job along with the file size.
-  # If the file size has not changed, the Job is kicked off.
+  # Checks the directories for new files, starting jobs if files have not changed since the last run.
   def perform
     check_directories
   end

   private

-  # Set a run_at when a new instance of this job is created
-  def set_run_at
-    self.run_at = Time.now + check_seconds
-  end
-
-  # Checks the directories for new files, starting jobs if files have not changed
-  # since the last run
+  # Iterate over each Dirmon Entry looking for new files
+  # If a new file is found, it is not processed immediately, instead
+  # it is passed to the next run of this job along with the file size.
+  # If the file size has not changed, the Job is kicked off.
   def check_directories
     new_file_names = {}
-    DirmonEntry.enabled.each do |entry|
-      entry.each do |iopath|
-        # S3 files are only visible once completely uploaded.
-        unless iopath.partial_files_visible?
-          logger.info("File: #{iopath}. Starting: #{entry.job_class_name}")
-          entry.later(iopath)
+    DirmonEntry.enabled.each do |dirmon_entry|
+      dirmon_entry.each do |path|
+        # Skip file size checking since S3 files are only visible once completely uploaded.
+        unless path.partial_files_visible?
+          logger.info("File: #{path}. Starting: #{dirmon_entry.job_class_name}")
+          dirmon_entry.later(path)
           next
         end

         # BSON Keys cannot contain periods
-        key = iopath.to_s.tr(".", "_")
+        key = path.to_s.tr(".", "_")
         previous_size = previous_file_names[key]
         # Check every few minutes for a file size change before trying to process the file.
-        size = check_file(entry, iopath, previous_size)
+        size = check_file(dirmon_entry, path, previous_size)
         new_file_names[key] = size if size
       end
     end
@@ -91,14 +80,14 @@ module RocketJob

   # Checks if a file should result in starting a job
   # Returns [Integer] file size, or nil if the file started a job
-  def check_file(entry, iopath, previous_size)
-    size = iopath.size
+  def check_file(dirmon_entry, path, previous_size)
+    size = path.size
     if previous_size && (previous_size == size)
-      logger.info("File stabilized: #{iopath}. Starting: #{entry.job_class_name}")
-      entry.later(iopath)
+      logger.info("File stabilized: #{path}. Starting: #{dirmon_entry.job_class_name}")
+      dirmon_entry.later(path)
       nil
     else
-      logger.info("Found file: #{iopath}. File size: #{size}")
+      logger.info("Found file: #{path}. File size: #{size}")
       # Keep for the next run
       size
     end
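DirmonJob is now scheduled by the Cron plugin rather than Singleton plus Restart, but its entry point is unchanged. A typical setup, with an illustrative pattern and job class name:

    # Register a directory to watch (pattern and job class are illustrative).
    entry = RocketJob::DirmonEntry.create!(
      name:           "Incoming orders",
      pattern:        "/data/incoming/orders*.csv",
      job_class_name: "OrdersImportJob"
    )
    entry.enable!

    # Start the monitor; validation fails if one is already queued or running.
    RocketJob::Jobs::DirmonJob.create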
data/lib/rocket_job/jobs/housekeeping_job.rb
@@ -27,12 +27,11 @@ module RocketJob
 #   )
 class HousekeepingJob < RocketJob::Job
   include RocketJob::Plugins::Cron
-  include RocketJob::Plugins::Singleton

-  self.priority = 25
-  self.description = "Cleans out historical jobs, and zombie servers."
-  # Runs every 15 minutes
-  self.cron_schedule = "*/15 * * * * UTC"
+  # Runs every 15 minutes on the 15 minute period
+  self.cron_schedule = "0,15,30,45 * * * * UTC"
+  self.description = "Cleans out historical jobs, and zombie servers."
+  self.priority = 25

   # Whether to destroy zombie servers automatically
   field :destroy_zombies, type: Mongoid::Boolean, default: true, user_editable: true, copy_on_restart: true
data/lib/rocket_job/jobs/on_demand_batch_job.rb
@@ -65,27 +65,29 @@ module RocketJob
 module Jobs
   class OnDemandBatchJob < RocketJob::Job
     include RocketJob::Plugins::Cron
+    include RocketJob::Plugins::Retry
     include RocketJob::Batch
     include RocketJob::Batch::Statistics

     self.priority = 90
-    self.description = "Batch Job"
+    self.description = "On Demand Batch Job"
     self.destroy_on_complete = false
+    self.retry_limit = 0

     # Code that is performed against every row / record.
-    field :code, type: String
+    field :code, type: String, user_editable: true, copy_on_restart: true

     # Optional code to execute before the batch is run.
     # Usually to upload data into the job.
-    field :before_code, type: String
+    field :before_code, type: String, user_editable: true, copy_on_restart: true

     # Optional code to execute after the batch is run.
     # Usually to upload data into the job.
-    field :after_code, type: String
+    field :after_code, type: String, user_editable: true, copy_on_restart: true

     # Data that is made available to the job during the perform.
     # Be sure to store key names only as Strings, not Symbols.
-    field :data, type: Hash, default: {}
+    field :data, type: Hash, default: {}, user_editable: true, copy_on_restart: true

     validates :code, presence: true
     validate :validate_code
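Because `code`, `before_code`, `after_code`, and `data` are now `user_editable` and `copy_on_restart`, edits made through the web interface carry over to the next scheduled instance. A sketch (the code strings are illustrative; `statistics_inc` comes from the included Batch::Statistics plugin):

    job = RocketJob::Jobs::OnDemandBatchJob.create!(
      description: "Count incoming rows",
      # Evaluated before the batch runs, typically to upload data (illustrative):
      before_code: "upload { |stream| stream << 'alpha' << 'beta' }",
      # Evaluated against every record in the batch (illustrative):
      code:        "statistics_inc('rows.seen')",
      data:        {"tag" => "nightly"}   # String keys, per the comment above
    )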
data/lib/rocket_job/jobs/on_demand_job.rb
@@ -78,8 +78,8 @@ module RocketJob
   self.retry_limit = 0

   # Be sure to store key names only as Strings, not Symbols
-  field :data, type: Hash, default: {}, copy_on_restart: true
-  field :code, type: String, copy_on_restart: true
+  field :data, type: Hash, default: {}, user_editable: true, copy_on_restart: true
+  field :code, type: String, user_editable: true, copy_on_restart: true

   validates :code, presence: true
   validate :validate_code
data/lib/rocket_job/jobs/upload_file_job.rb
@@ -57,6 +57,10 @@ module RocketJob

   def upload_file(job)
     if job.respond_to?(:upload)
+      # Return the database connection for this thread back to the connection pool
+      # in case the upload takes a long time and the database connection expires.
+      ActiveRecord::Base.clear_active_connections! if defined?(ActiveRecord::Base)
+
       if original_file_name
         job.upload(upload_file_name, file_name: original_file_name)
       else
data/lib/rocket_job/plugins/cron.rb
@@ -14,41 +14,81 @@ module RocketJob
   extend ActiveSupport::Concern

   included do
-    include Restart
-
     field :cron_schedule, type: String, class_attribute: true, user_editable: true, copy_on_restart: true

+    # Whether to prevent another instance of this job from running with the exact _same_ cron schedule.
+    # Another job instance with a different `cron_schedule` string is permitted.
+    field :cron_singleton, type: Mongoid::Boolean, default: true, class_attribute: true, user_editable: true, copy_on_restart: true
+
+    # Whether to re-schedule the next job occurrence when this job starts, or when it is complete.
+    #
+    # `true`: Create a new scheduled instance of this job after it has started. (Default)
+    #   - Ensures that the next scheduled instance is not missed because the current instance is still running.
+    #   - Any changes to fields marked with `copy_on_restart` of `true` will be saved to the new scheduled instance
+    #     _only_ if they were changed during an `after_start` callback.
+    #     Changes to these during other callbacks or during the `perform` will not be saved to the new scheduled
+    #     instance.
+    #   - To prevent this job creating any new duplicate instances during subsequent processing,
+    #     its `cron_schedule` is set to `nil`.
+    #
+    # `false`: Create a new scheduled instance of this job on `fail`, or `abort`.
+    #   - Prevents the next scheduled instance from running or being scheduled while the current instance is
+    #     still running.
+    #   - Any changes to fields marked with `copy_on_restart` of `true` will be saved to the new scheduled instance
+    #     at any time until after the job has failed, or is aborted.
+    #   - To prevent this job creating any new duplicate instances during subsequent processing,
+    #     its `cron_schedule` is set to `nil` after it fails or is aborted.
+    field :cron_after_start, type: Mongoid::Boolean, default: true, class_attribute: true, user_editable: true, copy_on_restart: true
+
     validates_each :cron_schedule do |record, attr, value|
       record.errors.add(attr, "Invalid cron_schedule: #{value.inspect}") if value && !Fugit::Cron.new(value)
     end
+    validate :rocket_job_cron_singleton_check
+
     before_save :rocket_job_cron_set_run_at

-    private
+    after_start :rocket_job_cron_on_start
+    after_abort :rocket_job_cron_end_state
+    after_complete :rocket_job_cron_end_state
+    after_fail :rocket_job_cron_end_state
+  end

-  # Prevent auto restart if this job does not have a cron schedule.
-  # Overrides: RocketJob::Plugins::Restart#rocket_job_restart_new_instance
-  def rocket_job_restart_new_instance
-    return unless cron_schedule
+  def rocket_job_cron_set_run_at
+    return if cron_schedule.nil? || !(cron_schedule_changed? && !run_at_changed?)

-    super
-  end
+    self.run_at = Fugit::Cron.new(cron_schedule).next_time.to_utc_time
+  end

-  # On failure:
-  # - create a new instance scheduled to run in the future.
-  # - clear out the `cron_schedule` so this instance will not schedule another instance to run on completion.
-  # Overrides: RocketJob::Plugins::Restart#rocket_job_restart_abort
-  def rocket_job_restart_abort
-    return unless cron_schedule
+  private

-    rocket_job_restart_new_instance
-    update_attribute(:cron_schedule, nil)
+  def rocket_job_cron_on_start
+    return unless cron_schedule && cron_after_start
+
+    current_cron_schedule = cron_schedule
+    update_attribute(:cron_schedule, nil)
+    create_restart!(cron_schedule: current_cron_schedule)
+  end
+
+  def rocket_job_cron_end_state
+    return unless cron_schedule && !cron_after_start
+
+    current_cron_schedule = cron_schedule
+    update_attribute(:cron_schedule, nil)
+    create_restart!(cron_schedule: current_cron_schedule)
+  end
+
+  # Returns [true|false] whether another instance of this job with the same cron schedule is already active
+  def rocket_job_cron_duplicate?
+    self.class.with(read: {mode: :primary}) do |conn|
+      conn.where(:state.in => %i[queued running failed paused], :id.ne => id, cron_schedule: cron_schedule).exists?
     end
   end

-  def rocket_job_cron_set_run_at
-    return if cron_schedule.nil? || !(cron_schedule_changed? && !run_at_changed?)
+  # Prevent creation of a new job when another is running with the same cron schedule.
+  def rocket_job_cron_singleton_check
+    return if cron_schedule.nil? || completed? || aborted? || !rocket_job_cron_duplicate?

-    self.run_at = Fugit::Cron.new(cron_schedule).next_time.to_utc_time
+    errors.add(:state, "Another instance of #{self.class.name} is already queued, running, failed, or paused with the same cron schedule: #{cron_schedule}")
   end
 end
 end
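A sketch of the two scheduling modes introduced above, assuming a simple cron job class (the class name and schedule are illustrative):

    class NightlyReportJob < RocketJob::Job
      include RocketJob::Plugins::Cron

      self.cron_schedule = "0 2 * * * UTC"  # 02:00 UTC daily, Fugit syntax

      # true (default): the next occurrence is scheduled as soon as this run
      # starts, so a long run never delays the schedule.
      # false: the next occurrence is only created when this run ends, so
      # occurrences never overlap.
      self.cron_after_start = false

      # true (default): refuse to queue another instance with the exact same
      # cron_schedule while one is queued, running, failed, or paused.
      self.cron_singleton = true

      def perform
        # ... produce the report ...
      end
    end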