rocketjob 5.4.1 → 6.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/README.md +175 -5
  3. data/bin/rocketjob_batch_perf +1 -1
  4. data/bin/rocketjob_perf +1 -1
  5. data/lib/rocket_job/batch/categories.rb +345 -0
  6. data/lib/rocket_job/batch/io.rb +174 -106
  7. data/lib/rocket_job/batch/model.rb +20 -68
  8. data/lib/rocket_job/batch/performance.rb +19 -7
  9. data/lib/rocket_job/batch/statistics.rb +34 -12
  10. data/lib/rocket_job/batch/throttle_running_workers.rb +2 -6
  11. data/lib/rocket_job/batch/worker.rb +31 -26
  12. data/lib/rocket_job/batch.rb +3 -1
  13. data/lib/rocket_job/category/base.rb +81 -0
  14. data/lib/rocket_job/category/input.rb +170 -0
  15. data/lib/rocket_job/category/output.rb +34 -0
  16. data/lib/rocket_job/cli.rb +25 -17
  17. data/lib/rocket_job/dirmon_entry.rb +23 -13
  18. data/lib/rocket_job/event.rb +1 -1
  19. data/lib/rocket_job/extensions/iostreams/path.rb +32 -0
  20. data/lib/rocket_job/extensions/mongoid/contextual/mongo.rb +2 -2
  21. data/lib/rocket_job/extensions/mongoid/factory.rb +4 -12
  22. data/lib/rocket_job/extensions/mongoid/stringified_symbol.rb +50 -0
  23. data/lib/rocket_job/extensions/psych/yaml_tree.rb +8 -0
  24. data/lib/rocket_job/extensions/rocket_job_adapter.rb +2 -2
  25. data/lib/rocket_job/jobs/conversion_job.rb +43 -0
  26. data/lib/rocket_job/jobs/dirmon_job.rb +25 -36
  27. data/lib/rocket_job/jobs/housekeeping_job.rb +11 -12
  28. data/lib/rocket_job/jobs/on_demand_batch_job.rb +24 -11
  29. data/lib/rocket_job/jobs/on_demand_job.rb +3 -4
  30. data/lib/rocket_job/jobs/performance_job.rb +3 -1
  31. data/lib/rocket_job/jobs/re_encrypt/relational_job.rb +103 -96
  32. data/lib/rocket_job/jobs/upload_file_job.rb +48 -8
  33. data/lib/rocket_job/lookup_collection.rb +69 -0
  34. data/lib/rocket_job/plugins/cron.rb +60 -20
  35. data/lib/rocket_job/plugins/job/model.rb +25 -50
  36. data/lib/rocket_job/plugins/job/persistence.rb +36 -0
  37. data/lib/rocket_job/plugins/job/throttle.rb +2 -2
  38. data/lib/rocket_job/plugins/job/throttle_running_jobs.rb +1 -1
  39. data/lib/rocket_job/plugins/job/worker.rb +2 -7
  40. data/lib/rocket_job/plugins/restart.rb +3 -103
  41. data/lib/rocket_job/plugins/state_machine.rb +4 -3
  42. data/lib/rocket_job/plugins/throttle_dependent_jobs.rb +37 -0
  43. data/lib/rocket_job/ractor_worker.rb +42 -0
  44. data/lib/rocket_job/server/model.rb +1 -1
  45. data/lib/rocket_job/sliced/bzip2_output_slice.rb +18 -19
  46. data/lib/rocket_job/sliced/compressed_slice.rb +3 -6
  47. data/lib/rocket_job/sliced/encrypted_bzip2_output_slice.rb +49 -0
  48. data/lib/rocket_job/sliced/encrypted_slice.rb +4 -6
  49. data/lib/rocket_job/sliced/input.rb +42 -54
  50. data/lib/rocket_job/sliced/slice.rb +12 -16
  51. data/lib/rocket_job/sliced/slices.rb +26 -11
  52. data/lib/rocket_job/sliced/writer/input.rb +46 -18
  53. data/lib/rocket_job/sliced/writer/output.rb +33 -45
  54. data/lib/rocket_job/sliced.rb +1 -74
  55. data/lib/rocket_job/subscribers/server.rb +1 -1
  56. data/lib/rocket_job/thread_worker.rb +46 -0
  57. data/lib/rocket_job/throttle_definitions.rb +7 -1
  58. data/lib/rocket_job/version.rb +1 -1
  59. data/lib/rocket_job/worker.rb +21 -55
  60. data/lib/rocket_job/worker_pool.rb +5 -7
  61. data/lib/rocketjob.rb +53 -43
  62. metadata +36 -28
  63. data/lib/rocket_job/batch/tabular/input.rb +0 -131
  64. data/lib/rocket_job/batch/tabular/output.rb +0 -65
  65. data/lib/rocket_job/batch/tabular.rb +0 -56
  66. data/lib/rocket_job/extensions/mongoid/remove_warnings.rb +0 -12
  67. data/lib/rocket_job/jobs/on_demand_batch_tabular_job.rb +0 -28
@@ -42,16 +42,10 @@ module RocketJob
42
42
  slice
43
43
  end
44
44
 
45
- # Returns whether this collection contains specialized binary slices for creating binary data from each slice
46
- # that is then just downloaded as-is into output files.
47
- def binary?
48
- slice_class.binary?
49
- end
50
-
51
45
  # Returns output slices in the order of their id
52
46
  # which is usually the order in which they were written.
53
- def each
54
- all.sort(id: 1).each { |document| yield(document) }
47
+ def each(&block)
48
+ all.sort(id: 1).each(&block)
55
49
  end
56
50
 
57
51
  # Insert a new slice into the collection
@@ -96,13 +90,33 @@ module RocketJob
96
90
  slice
97
91
  end
98
92
 
93
+ def insert_many(slices)
94
+ documents = slices.collect(&:as_document)
95
+ all.collection.insert_many(documents)
96
+ end
97
+
98
+ # Append to an existing slice if already present
99
+ def append(slice, input_slice)
100
+ existing_slice = all.where(id: input_slice.id).first
101
+ return insert(slice, input_slice) unless existing_slice
102
+
103
+ extra_records = slice.is_a?(Slice) ? slice.records : slice
104
+ existing_slice.records = existing_slice.records + extra_records
105
+ existing_slice.save!
106
+ existing_slice
107
+ end
108
+
99
109
  alias << insert
100
110
 
101
111
  # Index for find_and_modify only if it is not already present
102
112
  def create_indexes
103
- all.collection.indexes.create_one(state: 1, _id: 1) if all.collection.indexes.none? { |i| i["name"] == "state_1__id_1" }
104
- rescue Mongo::Error::OperationFailure
105
- all.collection.indexes.create_one(state: 1, _id: 1)
113
+ missing =
114
+ begin
115
+ all.collection.indexes.none? { |i| i["name"] == "state_1__id_1" }
116
+ rescue Mongo::Error::OperationFailure
117
+ true
118
+ end
119
+ all.collection.indexes.create_one({state: 1, _id: 1}, unique: true) if missing
106
120
  end
107
121
 
108
122
  # Forward additional methods.
@@ -139,6 +153,7 @@ module RocketJob
139
153
  def last
140
154
  all.sort("_id" => -1).first
141
155
  end
156
+
142
157
  # rubocop:enable Style/RedundantSort
143
158
 
144
159
  # Returns [Array<Struct>] grouped exceptions by class name,
@@ -12,43 +12,71 @@ module RocketJob
12
12
  # Block to call on the first line only, instead of storing in the slice.
13
13
  # Useful for extracting the header row
14
14
  # Default: nil
15
- def self.collect(input, **args)
16
- writer = new(input, **args)
15
+ #
16
+ # slice_size: [Integer]
17
+ # Override the slice size when uploading for example ranges, where slice is the size
18
+ # of the range itself.
19
+ #
20
+ # slice_batch_size: [Integer]
21
+ # The number of slices to batch up and to bulk load.
22
+ # For smaller slices this significantly improves upload performance.
23
+ # Note: If `slice_batch_size` is too high, it can exceed the maximum BSON block size.
24
+ def self.collect(data_store, **args)
25
+ writer = new(data_store, **args)
17
26
  yield(writer)
18
27
  writer.record_count
19
28
  ensure
20
- writer&.close
29
+ writer&.flush
21
30
  end
22
31
 
23
- def initialize(input, on_first: nil)
24
- @on_first = on_first
25
- @batch_count = 0
26
- @record_count = 0
27
- @input = input
28
- @record_number = 1
29
- @slice = @input.new(first_record_number: @record_number)
32
+ def initialize(data_store, on_first: nil, slice_size: nil, slice_batch_size: nil)
33
+ @on_first = on_first
34
+ @record_count = 0
35
+ @data_store = data_store
36
+ @slice_size = slice_size || @data_store.slice_size
37
+ @slice_batch_size = slice_batch_size || 20
38
+ @batch = []
39
+ @batch_count = 0
40
+ new_slice
30
41
  end
31
42
 
32
43
  def <<(line)
33
- @record_number += 1
34
44
  if @on_first
35
45
  @on_first.call(line)
36
46
  @on_first = nil
37
47
  return self
38
48
  end
39
49
  @slice << line
40
- @batch_count += 1
41
50
  @record_count += 1
42
- if @batch_count >= @input.slice_size
43
- @input.insert(@slice)
44
- @batch_count = 0
45
- @slice = @input.new(first_record_number: @record_number)
51
+ if @slice.size >= @slice_size
52
+ save_slice
53
+ new_slice
46
54
  end
47
55
  self
48
56
  end
49
57
 
50
- def close
51
- @input.insert(@slice) if @slice.size.positive?
58
+ def flush
59
+ if @slice_batch_size
60
+ @batch << @slice if @slice.size.positive?
61
+ @data_store.insert_many(@batch)
62
+ @batch = []
63
+ @batch_count = 0
64
+ elsif @slice.size.positive?
65
+ @data_store.insert(@slice)
66
+ end
67
+ end
68
+
69
+ def new_slice
70
+ @slice = @data_store.new(first_record_number: @record_count + 1)
71
+ end
72
+
73
+ def save_slice
74
+ return flush unless @slice_batch_size
75
+
76
+ @batch_count += 1
77
+ return flush if @batch_count >= @slice_batch_size
78
+
79
+ @batch << @slice
52
80
  end
53
81
  end
54
82
  end
@@ -1,30 +1,37 @@
1
1
  module RocketJob
2
2
  module Sliced
3
3
  module Writer
4
- # Internal class for writing categorized results into output slices
5
- class Output
4
+ class Null
6
5
  attr_reader :job, :categorized_records
7
- attr_accessor :input_slice
8
-
9
- # Collect output results and write to output collections
10
- # iff job is collecting output
11
- # Notes:
12
- # Nothing is saved if an exception is raised inside the block
13
- def self.collect(job, input_slice = nil)
14
- if job.collect_output?
15
- writer = new(job, input_slice)
16
- yield(writer)
17
- writer.close
18
- else
19
- writer = NullWriter.new(job, input_slice)
20
- yield(writer)
21
- end
22
- end
6
+ attr_accessor :input_slice, :append
23
7
 
24
- def initialize(job, input_slice = nil)
8
+ def initialize(job, input_slice: nil, append: false)
25
9
  @job = job
26
10
  @input_slice = input_slice
27
11
  @categorized_records = {}
12
+ @append = append
13
+ end
14
+
15
+ def <<(_)
16
+ # noop
17
+ end
18
+
19
+ def close
20
+ # noop
21
+ end
22
+ end
23
+
24
+ # Internal class for writing categorized results into output slices
25
+ class Output < Null
26
+ # Collect output results and write to output collections
27
+ # iff job is collecting output
28
+ # Notes:
29
+ # Partial slices are saved when an exception is raised inside the block
30
+ def self.collect(job, **args)
31
+ writer = job.output_categories.present? ? new(job, **args) : Null.new(job, **args)
32
+ yield(writer)
33
+ ensure
34
+ writer&.close
28
35
  end
29
36
 
30
37
  # Writes the supplied result, RocketJob::Batch::Result or RocketJob::Batch::Results
@@ -40,7 +47,8 @@ module RocketJob
40
47
  # Write categorized results to their relevant collections
41
48
  def close
42
49
  categorized_records.each_pair do |category, results|
43
- job.output(category).insert(results, input_slice)
50
+ collection = job.output(category)
51
+ append ? collection.append(results, input_slice) : collection.insert(results, input_slice)
44
52
  end
45
53
  end
46
54
 
@@ -48,33 +56,13 @@ module RocketJob
48
56
 
49
57
  # Stores the categorized result from one result
50
58
  def extract_categorized_result(result)
51
- category = :main
52
- value = result
59
+ named_category = :main
60
+ value = result
53
61
  if result.is_a?(RocketJob::Batch::Result)
54
- category = result.category
55
- value = result.value
56
- raise(ArgumentError, "Invalid RocketJob Output Category: #{category}") if job.output_categories.exclude?(category)
62
+ named_category = result.category
63
+ value = result.value
57
64
  end
58
- (categorized_records[category] ||= []) << value unless value.nil? && !job.collect_nil_output?
59
- end
60
- end
61
-
62
- class NullWriter
63
- attr_reader :job, :categorized_records
64
- attr_accessor :input_slice
65
-
66
- def initialize(job, input_slice = nil)
67
- @job = job
68
- @input_slice = input_slice
69
- @categorized_records = {}
70
- end
71
-
72
- def <<(_)
73
- # noop
74
- end
75
-
76
- def close
77
- # noop
65
+ (categorized_records[named_category] ||= []) << value unless value.nil? && !job.output_category(named_category).nils
78
66
  end
79
67
  end
80
68
  end
@@ -2,6 +2,7 @@ module RocketJob
2
2
  module Sliced
3
3
  autoload :BZip2OutputSlice, "rocket_job/sliced/bzip2_output_slice"
4
4
  autoload :CompressedSlice, "rocket_job/sliced/compressed_slice"
5
+ autoload :EncryptedBZip2OutputSlice, "rocket_job/sliced/encrypted_bzip2_output_slice"
5
6
  autoload :EncryptedSlice, "rocket_job/sliced/encrypted_slice"
6
7
  autoload :Input, "rocket_job/sliced/input"
7
8
  autoload :Output, "rocket_job/sliced/output"
@@ -13,79 +14,5 @@ module RocketJob
13
14
  autoload :Input, "rocket_job/sliced/writer/input"
14
15
  autoload :Output, "rocket_job/sliced/writer/output"
15
16
  end
16
-
17
- # Returns [RocketJob::Sliced::Slices] for the relevant type and category.
18
- #
19
- # Supports compress and encrypt with [true|false|Hash] values.
20
- # When [Hash] they must specify whether the apply to the input or output collection types.
21
- #
22
- # Example, compress both input and output collections:
23
- # class MyJob < RocketJob::Job
24
- # include RocketJob::Batch
25
- # self.compress = true
26
- # end
27
- #
28
- # Example, compress just the output collections:
29
- # class MyJob < RocketJob::Job
30
- # include RocketJob::Batch
31
- # self.compress = {output: true}
32
- # end
33
- #
34
- # To use the specialized BZip output compressor, and the regular compressor for the input collections:
35
- # class MyJob < RocketJob::Job
36
- # include RocketJob::Batch
37
- # self.compress = {output: :bzip2, input: true}
38
- # end
39
- def self.factory(type, category, job)
40
- raise(ArgumentError, "Unknown type: #{type.inspect}") unless %i[input output].include?(type)
41
-
42
- collection_name = "rocket_job.#{type}s.#{job.id}"
43
- collection_name << ".#{category}" unless category == :main
44
-
45
- args = {collection_name: collection_name, slice_size: job.slice_size}
46
- klass = slice_class(type, job)
47
- args[:slice_class] = klass if klass
48
-
49
- if type == :input
50
- RocketJob::Sliced::Input.new(args)
51
- else
52
- RocketJob::Sliced::Output.new(args)
53
- end
54
- end
55
-
56
- private
57
-
58
- # Parses the encrypt and compress options to determine which slice serializer to use.
59
- # `encrypt` takes priority over any `compress` option.
60
- def self.slice_class(type, job)
61
- encrypt = extract_value(type, job.encrypt)
62
- compress = extract_value(type, job.compress)
63
-
64
- if encrypt
65
- case encrypt
66
- when true
67
- EncryptedSlice
68
- else
69
- raise(ArgumentError, "Unknown job `encrypt` value: #{compress}") unless compress.is_a?(Slices)
70
- # Returns the supplied class to use for encryption.
71
- encrypt
72
- end
73
- elsif compress
74
- case compress
75
- when true
76
- CompressedSlice
77
- when :bzip2
78
- BZip2OutputSlice
79
- else
80
- raise(ArgumentError, "Unknown job `compress` value: #{compress}") unless compress.is_a?(Slices)
81
- # Returns the supplied class to use for compression.
82
- compress
83
- end
84
- end
85
- end
86
-
87
- def self.extract_value(type, value)
88
- value.is_a?(Hash) ? value[type] : value
89
- end
90
17
  end
91
18
  end
@@ -17,7 +17,7 @@ module RocketJob
17
17
 
18
18
  supervisor.logger.info("Stopping Pool")
19
19
  supervisor.worker_pool.stop
20
- unless supervisor.worker_pool.living_count == 0
20
+ unless supervisor.worker_pool.living_count.zero?
21
21
  supervisor.logger.info("Giving pool #{wait_timeout} seconds to terminate")
22
22
  sleep(wait_timeout)
23
23
  end
@@ -0,0 +1,46 @@
1
+ require "concurrent"
2
+ module RocketJob
3
+ # ThreadWorker
4
+ #
5
+ # A worker runs on a single operating system thread.
6
+ # Is usually started under a Rocket Job server process.
7
+ class ThreadWorker < Worker
8
+ attr_reader :thread
9
+
10
+ def initialize(id:, server_name:)
11
+ super(id: id, server_name: server_name)
12
+ @shutdown = Concurrent::Event.new
13
+ @thread = Thread.new { run }
14
+ end
15
+
16
+ def alive?
17
+ @thread.alive?
18
+ end
19
+
20
+ def backtrace
21
+ @thread.backtrace
22
+ end
23
+
24
+ def join(*args)
25
+ @thread.join(*args)
26
+ end
27
+
28
+ # Send each active worker the RocketJob::ShutdownException so that it stops processing immediately.
29
+ def kill
30
+ @thread.raise(Shutdown, "Shutdown due to kill request for worker: #{name}") if @thread.alive?
31
+ end
32
+
33
+ def shutdown?
34
+ @shutdown.set?
35
+ end
36
+
37
+ def shutdown!
38
+ @shutdown.set
39
+ end
40
+
41
+ # Returns [true|false] whether the shutdown indicator was set
42
+ def wait_for_shutdown?(timeout = nil)
43
+ @shutdown.wait(timeout)
44
+ end
45
+ end
46
+ end
@@ -1,6 +1,6 @@
1
1
  module RocketJob
2
2
  class ThrottleDefinitions
3
- attr_reader :throttles
3
+ attr_accessor :throttles
4
4
 
5
5
  def initialize
6
6
  @throttles = []
@@ -35,5 +35,11 @@ module RocketJob
35
35
  end
36
36
  nil
37
37
  end
38
+
39
+ def deep_dup
40
+ new_defination = dup
41
+ new_defination.throttles = throttles.map(&:dup)
42
+ new_defination
43
+ end
38
44
  end
39
45
  end
@@ -1,3 +1,3 @@
1
1
  module RocketJob
2
- VERSION = "5.4.1".freeze
2
+ VERSION = "6.0.0".freeze
3
3
  end
@@ -1,5 +1,3 @@
1
- require "concurrent"
2
- require "forwardable"
3
1
  module RocketJob
4
2
  # Worker
5
3
  #
@@ -7,12 +5,9 @@ module RocketJob
7
5
  # Is usually started under a Rocket Job server process.
8
6
  class Worker
9
7
  include SemanticLogger::Loggable
10
- include ActiveSupport::Callbacks
11
-
12
- define_callbacks :running
13
8
 
14
9
  attr_accessor :id, :current_filter
15
- attr_reader :thread, :name, :inline, :server_name
10
+ attr_reader :name, :server_name
16
11
 
17
12
  # Raised when a worker is killed so that it shutdown immediately, yet cleanly.
18
13
  #
@@ -21,59 +16,41 @@ module RocketJob
21
16
  class Shutdown < RuntimeError
22
17
  end
23
18
 
24
- def self.before_running(*filters, &blk)
25
- set_callback(:running, :before, *filters, &blk)
26
- end
27
-
28
- def self.after_running(*filters, &blk)
29
- set_callback(:running, :after, *filters, &blk)
30
- end
31
-
32
- def self.around_running(*filters, &blk)
33
- set_callback(:running, :around, *filters, &blk)
34
- end
35
-
36
- def initialize(id: 0, server_name: "inline:0", inline: false)
19
+ def initialize(id: 0, server_name: "inline:0")
37
20
  @id = id
38
21
  @server_name = server_name
39
- @shutdown = Concurrent::Event.new
40
22
  @name = "#{server_name}:#{id}"
41
23
  @re_check_start = Time.now
42
24
  @current_filter = Config.filter || {}
43
- @thread = Thread.new { run } unless inline
44
- @inline = inline
45
25
  end
46
26
 
47
27
  def alive?
48
- inline ? true : @thread.alive?
28
+ true
49
29
  end
50
30
 
51
31
  def backtrace
52
- inline ? Thread.current.backtrace : @thread.backtrace
32
+ Thread.current.backtrace
53
33
  end
54
34
 
55
- def join(*args)
56
- @thread.join(*args) unless inline
35
+ def join(*_args)
36
+ true
57
37
  end
58
38
 
59
- # Send each active worker the RocketJob::ShutdownException so that stops processing immediately.
60
39
  def kill
61
- return true if inline
62
-
63
- @thread.raise(Shutdown, "Shutdown due to kill request for worker: #{name}") if @thread.alive?
40
+ true
64
41
  end
65
42
 
66
43
  def shutdown?
67
- @shutdown.set?
44
+ false
68
45
  end
69
46
 
70
47
  def shutdown!
71
- @shutdown.set
48
+ true
72
49
  end
73
50
 
74
51
  # Returns [true|false] whether the shutdown indicator was set
75
- def wait_for_shutdown?(timeout = nil)
76
- @shutdown.wait(timeout)
52
+ def wait_for_shutdown?(_timeout = nil)
53
+ false
77
54
  end
78
55
 
79
56
  # Process jobs until it shuts down
@@ -146,6 +123,8 @@ module RocketJob
146
123
 
147
124
  # Should this job be throttled?
148
125
  next if job.fail_on_exception! { throttled_job?(job) }
126
+ # Job failed during throttle execution?
127
+ next if job.failed?
149
128
 
150
129
  # Start this job!
151
130
  job.fail_on_exception! { job.start!(name) }
@@ -171,27 +150,14 @@ module RocketJob
171
150
  # Applies the current filter to exclude filtered jobs.
172
151
  #
173
152
  # Returns nil if no jobs are available for processing.
174
- if Mongoid::VERSION.to_f >= 7.1
175
- def find_and_assign_job
176
- SemanticLogger.silence(:info) do
177
- scheduled = RocketJob::Job.where(run_at: nil).or(:run_at.lte => Time.now)
178
- working = RocketJob::Job.queued.or(state: :running, sub_state: :processing)
179
- query = RocketJob::Job.and(working, scheduled)
180
- query = query.and(current_filter) unless current_filter.blank?
181
- update = {"$set" => {"worker_name" => name, "state" => "running"}}
182
- query.sort(priority: 1, _id: 1).find_one_and_update(update, bypass_document_validation: true)
183
- end
184
- end
185
- else
186
- def find_and_assign_job
187
- SemanticLogger.silence(:info) do
188
- scheduled = {"$or" => [{run_at: nil}, {:run_at.lte => Time.now}]}
189
- working = {"$or" => [{state: :queued}, {state: :running, sub_state: :processing}]}
190
- query = RocketJob::Job.and(working, scheduled)
191
- query = query.where(current_filter) unless current_filter.blank?
192
- update = {"$set" => {"worker_name" => name, "state" => "running"}}
193
- query.sort(priority: 1, _id: 1).find_one_and_update(update, bypass_document_validation: true)
194
- end
153
+ def find_and_assign_job
154
+ SemanticLogger.silence(:info) do
155
+ scheduled = RocketJob::Job.where(run_at: nil).or(:run_at.lte => Time.now)
156
+ working = RocketJob::Job.queued.or(state: "running", sub_state: "processing")
157
+ query = RocketJob::Job.and(working, scheduled)
158
+ query = query.and(current_filter) unless current_filter.blank?
159
+ update = {"$set" => {"worker_name" => name, "state" => "running"}}
160
+ query.sort(priority: 1, _id: 1).find_one_and_update(update, bypass_document_validation: true)
195
161
  end
196
162
  end
197
163