que 0.11.3 → 2.2.0

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
Files changed (114)
  1. checksums.yaml +5 -5
  2. data/.github/workflows/tests.yml +51 -0
  3. data/.gitignore +2 -0
  4. data/.ruby-version +1 -0
  5. data/CHANGELOG.md +502 -97
  6. data/Dockerfile +20 -0
  7. data/LICENSE.txt +1 -1
  8. data/README.md +205 -59
  9. data/auto/dev +21 -0
  10. data/auto/pre-push-hook +30 -0
  11. data/auto/psql +9 -0
  12. data/auto/test +5 -0
  13. data/auto/test-postgres-14 +17 -0
  14. data/bin/que +8 -81
  15. data/docker-compose.yml +47 -0
  16. data/docs/README.md +881 -0
  17. data/lib/que/active_job/extensions.rb +114 -0
  18. data/lib/que/active_record/connection.rb +51 -0
  19. data/lib/que/active_record/model.rb +48 -0
  20. data/lib/que/command_line_interface.rb +259 -0
  21. data/lib/que/connection.rb +198 -0
  22. data/lib/que/connection_pool.rb +78 -0
  23. data/lib/que/job.rb +210 -103
  24. data/lib/que/job_buffer.rb +255 -0
  25. data/lib/que/job_methods.rb +176 -0
  26. data/lib/que/listener.rb +176 -0
  27. data/lib/que/locker.rb +507 -0
  28. data/lib/que/metajob.rb +47 -0
  29. data/lib/que/migrations/4/down.sql +48 -0
  30. data/lib/que/migrations/4/up.sql +267 -0
  31. data/lib/que/migrations/5/down.sql +73 -0
  32. data/lib/que/migrations/5/up.sql +76 -0
  33. data/lib/que/migrations/6/down.sql +8 -0
  34. data/lib/que/migrations/6/up.sql +8 -0
  35. data/lib/que/migrations/7/down.sql +5 -0
  36. data/lib/que/migrations/7/up.sql +13 -0
  37. data/lib/que/migrations.rb +37 -18
  38. data/lib/que/poller.rb +274 -0
  39. data/lib/que/rails/railtie.rb +12 -0
  40. data/lib/que/result_queue.rb +35 -0
  41. data/lib/que/sequel/model.rb +52 -0
  42. data/lib/que/utils/assertions.rb +62 -0
  43. data/lib/que/utils/constantization.rb +19 -0
  44. data/lib/que/utils/error_notification.rb +68 -0
  45. data/lib/que/utils/freeze.rb +20 -0
  46. data/lib/que/utils/introspection.rb +50 -0
  47. data/lib/que/utils/json_serialization.rb +21 -0
  48. data/lib/que/utils/logging.rb +79 -0
  49. data/lib/que/utils/middleware.rb +46 -0
  50. data/lib/que/utils/queue_management.rb +18 -0
  51. data/lib/que/utils/ruby2_keywords.rb +19 -0
  52. data/lib/que/utils/transactions.rb +34 -0
  53. data/lib/que/version.rb +5 -1
  54. data/lib/que/worker.rb +145 -149
  55. data/lib/que.rb +103 -159
  56. data/que.gemspec +17 -4
  57. data/scripts/docker-entrypoint +14 -0
  58. data/scripts/test +6 -0
  59. metadata +59 -95
  60. data/.rspec +0 -2
  61. data/.travis.yml +0 -17
  62. data/Gemfile +0 -24
  63. data/docs/advanced_setup.md +0 -106
  64. data/docs/customizing_que.md +0 -200
  65. data/docs/error_handling.md +0 -47
  66. data/docs/inspecting_the_queue.md +0 -114
  67. data/docs/logging.md +0 -50
  68. data/docs/managing_workers.md +0 -80
  69. data/docs/migrating.md +0 -30
  70. data/docs/multiple_queues.md +0 -27
  71. data/docs/shutting_down_safely.md +0 -7
  72. data/docs/using_plain_connections.md +0 -41
  73. data/docs/using_sequel.md +0 -31
  74. data/docs/writing_reliable_jobs.md +0 -117
  75. data/lib/generators/que/install_generator.rb +0 -24
  76. data/lib/generators/que/templates/add_que.rb +0 -13
  77. data/lib/que/adapters/active_record.rb +0 -54
  78. data/lib/que/adapters/base.rb +0 -127
  79. data/lib/que/adapters/connection_pool.rb +0 -16
  80. data/lib/que/adapters/pg.rb +0 -21
  81. data/lib/que/adapters/pond.rb +0 -16
  82. data/lib/que/adapters/sequel.rb +0 -20
  83. data/lib/que/railtie.rb +0 -16
  84. data/lib/que/rake_tasks.rb +0 -59
  85. data/lib/que/sql.rb +0 -152
  86. data/spec/adapters/active_record_spec.rb +0 -152
  87. data/spec/adapters/connection_pool_spec.rb +0 -22
  88. data/spec/adapters/pg_spec.rb +0 -41
  89. data/spec/adapters/pond_spec.rb +0 -22
  90. data/spec/adapters/sequel_spec.rb +0 -57
  91. data/spec/gemfiles/Gemfile1 +0 -18
  92. data/spec/gemfiles/Gemfile2 +0 -18
  93. data/spec/spec_helper.rb +0 -118
  94. data/spec/support/helpers.rb +0 -19
  95. data/spec/support/jobs.rb +0 -35
  96. data/spec/support/shared_examples/adapter.rb +0 -37
  97. data/spec/support/shared_examples/multi_threaded_adapter.rb +0 -46
  98. data/spec/travis.rb +0 -23
  99. data/spec/unit/connection_spec.rb +0 -14
  100. data/spec/unit/customization_spec.rb +0 -251
  101. data/spec/unit/enqueue_spec.rb +0 -245
  102. data/spec/unit/helper_spec.rb +0 -12
  103. data/spec/unit/logging_spec.rb +0 -101
  104. data/spec/unit/migrations_spec.rb +0 -84
  105. data/spec/unit/pool_spec.rb +0 -365
  106. data/spec/unit/run_spec.rb +0 -14
  107. data/spec/unit/states_spec.rb +0 -50
  108. data/spec/unit/stats_spec.rb +0 -46
  109. data/spec/unit/transaction_spec.rb +0 -36
  110. data/spec/unit/work_spec.rb +0 -407
  111. data/spec/unit/worker_spec.rb +0 -167
  112. data/tasks/benchmark.rb +0 -3
  113. data/tasks/rspec.rb +0 -14
  114. data/tasks/safe_shutdown.rb +0 -67
data/lib/que/connection_pool.rb ADDED
@@ -0,0 +1,78 @@
+ # frozen_string_literal: true
+
+ # A wrapper around whatever connection pool we're using. Mainly just asserts
+ # that the source connection pool is reentrant and thread-safe.
+
+ module Que
+   class ConnectionPool
+     def initialize(&block)
+       @connection_proc = block
+       @checked_out = Set.new
+       @mutex = Mutex.new
+       @thread_key = "que_connection_pool_#{object_id}".to_sym
+     end
+
+     def checkout
+       # Do some asserting to ensure that the connection pool we're using is
+       # behaving properly.
+       @connection_proc.call do |conn|
+         # Did this pool already have a connection for this thread?
+         preexisting = wrapped = current_connection
+
+         begin
+           if preexisting
+             # If so, check that the connection we just got is the one we expect.
+             if preexisting.wrapped_connection.backend_pid != conn.backend_pid
+               raise Error, "Connection pool is not reentrant! previous: #{preexisting.wrapped_connection.inspect} now: #{conn.inspect}"
+             end
+           else
+             # If not, make sure that it wasn't promised to any other threads.
+             sync do
+               Que.assert(@checked_out.add?(conn.backend_pid)) do
+                 "Connection pool didn't synchronize access properly! (entrance: #{conn.backend_pid})"
+               end
+             end
+
+             self.current_connection = wrapped = Connection.wrap(conn)
+           end
+
+           yield(wrapped)
+         ensure
+           if preexisting.nil?
+             # We're at the top level (about to return this connection to the
+             # pool we got it from), so mark it as no longer ours.
+             self.current_connection = nil
+
+             sync do
+               Que.assert(@checked_out.delete?(conn.backend_pid)) do
+                 "Connection pool didn't synchronize access properly! (exit: #{conn.backend_pid})"
+               end
+             end
+           end
+         end
+       end
+     end
+
+     def execute(*args)
+       checkout { |conn| conn.execute(*args) }
+     end
+
+     def in_transaction?
+       checkout { |conn| conn.in_transaction? }
+     end
+
+     private
+
+     def sync(&block)
+       @mutex.synchronize(&block)
+     end
+
+     def current_connection
+       Thread.current[@thread_key]
+     end
+
+     def current_connection=(c)
+       Thread.current[@thread_key] = c
+     end
+   end
+ end
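For orientation (this example is not part of the diff): the new Que::ConnectionPool doesn't pool connections itself. It wraps whatever checkout block you hand it and asserts that the underlying pool is reentrant and thread-safe. A minimal sketch of that contract, assuming a reachable Postgres database named my_app:

require "pg"
require "que"

pg = PG::Connection.open(dbname: "my_app") # hypothetical local database

# The wrapped proc is called with a block and must yield a raw PG connection
# to it. A single shared connection is trivially reentrant for one thread.
pool = Que::ConnectionPool.new { |&block| block.call(pg) }

pool.checkout do |conn|     # conn is a Que::Connection wrapper, not the raw PG object
  pool.checkout do |nested| # nested checkout sees the same backend_pid, so the reentrancy check passes
    nested.execute("SELECT 1")
  end
end

# Checking the same raw connection out to two threads at once would instead
# trip the "didn't synchronize access properly!" assertion.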
data/lib/que/job.rb CHANGED
@@ -1,11 +1,58 @@
  # frozen_string_literal: true

+ # The class that jobs should generally inherit from.
+
  module Que
    class Job
-     attr_reader :attrs
+     include JobMethods
+
+     MAXIMUM_TAGS_COUNT = 5
+     MAXIMUM_TAG_LENGTH = 100
+
+     SQL[:insert_job] =
+       %{
+         INSERT INTO public.que_jobs
+         (queue, priority, run_at, job_class, args, kwargs, data, job_schema_version)
+         VALUES
+         (
+           coalesce($1, 'default')::text,
+           coalesce($2, 100)::smallint,
+           coalesce($3, now())::timestamptz,
+           $4::text,
+           coalesce($5, '[]')::jsonb,
+           coalesce($6, '{}')::jsonb,
+           coalesce($7, '{}')::jsonb,
+           #{Que.job_schema_version}
+         )
+         RETURNING *
+       }
+
+     SQL[:bulk_insert_jobs] =
+       %{
+         WITH args_and_kwargs as (
+           SELECT * from json_to_recordset(coalesce($5, '[{args:{},kwargs:{}}]')::json) as x(args jsonb, kwargs jsonb)
+         )
+         INSERT INTO public.que_jobs
+         (queue, priority, run_at, job_class, args, kwargs, data, job_schema_version)
+         SELECT
+           coalesce($1, 'default')::text,
+           coalesce($2, 100)::smallint,
+           coalesce($3, now())::timestamptz,
+           $4::text,
+           args_and_kwargs.args,
+           args_and_kwargs.kwargs,
+           coalesce($6, '{}')::jsonb,
+           #{Que.job_schema_version}
+         FROM args_and_kwargs
+         RETURNING *
+       }
+
+     attr_reader :que_attrs
+     attr_accessor :que_error, :que_resolved

      def initialize(attrs)
-       @attrs = attrs
+       @que_attrs = attrs
+       Que.internal_log(:job_instantiate, self) { attrs }
      end

      # Subclasses should define their own run methods, but keep an empty one
@@ -13,139 +60,199 @@ module Que
      def run(*args)
      end

-     def _run
-       run(*attrs[:args])
-       destroy unless @destroyed
-     end
-
      private

-     def destroy
-       Que.execute :destroy_job, attrs.values_at(:queue, :priority, :run_at, :job_id)
-       @destroyed = true
+     # Have the job helper methods act on this object.
+     def que_target
+       self
      end

-     @retry_interval = proc { |count| count ** 4 + 3 }
-
      class << self
-       attr_reader :retry_interval
+       # Job class configuration options.
+       attr_accessor \
+         :run_synchronously,
+         :retry_interval,
+         :maximum_retry_count,
+         :queue,
+         :priority,
+         :run_at

        def enqueue(*args)
-         if args.last.is_a?(Hash)
-           options = args.pop
-           queue = options.delete(:queue) || '' if options.key?(:queue)
-           job_class = options.delete(:job_class)
-           run_at = options.delete(:run_at)
-           priority = options.delete(:priority)
-           args << options if options.any?
-         end
+         args, kwargs = Que.split_out_ruby2_keywords(args)

-         attrs = {:job_class => job_class || to_s, :args => args}
+         job_options = kwargs.delete(:job_options) || {}

-         warn "@default_run_at in #{to_s} has been deprecated and will be removed in Que version 1.0.0. Please use @run_at instead." if @default_run_at
+         if job_options[:tags]
+           if job_options[:tags].length > MAXIMUM_TAGS_COUNT
+             raise Que::Error, "Can't enqueue a job with more than #{MAXIMUM_TAGS_COUNT} tags! (passed #{job_options[:tags].length})"
+           end

-         if t = run_at || @run_at && @run_at.call || @default_run_at && @default_run_at.call
-           attrs[:run_at] = t
+           job_options[:tags].each do |tag|
+             if tag.length > MAXIMUM_TAG_LENGTH
+               raise Que::Error, "Can't enqueue a job with a tag longer than 100 characters! (\"#{tag}\")"
+             end
+           end
          end

-         warn "@default_priority in #{to_s} has been deprecated and will be removed in Que version 1.0.0. Please use @priority instead." if @default_priority
+         attrs = {
+           queue: job_options[:queue] || resolve_que_setting(:queue) || Que.default_queue,
+           priority: job_options[:priority] || resolve_que_setting(:priority),
+           run_at: job_options[:run_at] || resolve_que_setting(:run_at),
+           args: args,
+           kwargs: kwargs,
+           data: job_options[:tags] ? { tags: job_options[:tags] } : {},
+           job_class: \
+             job_options[:job_class] || name ||
+             raise(Error, "Can't enqueue an anonymous subclass of Que::Job"),
+         }

-         if p = priority || @priority || @default_priority
-           attrs[:priority] = p
-         end
+         if Thread.current[:que_jobs_to_bulk_insert]
+           if self.name == 'ActiveJob::QueueAdapters::QueAdapter::JobWrapper'
+             raise Que::Error, "Que.bulk_enqueue does not support ActiveJob."
+           end

-         if q = queue || @queue
-           attrs[:queue] = q
-         end
+           raise Que::Error, "When using .bulk_enqueue, job_options must be passed to that method rather than .enqueue" unless job_options == {}

-         if Que.mode == :sync && !t
-           run(*attrs[:args])
+           Thread.current[:que_jobs_to_bulk_insert][:jobs_attrs] << attrs
+           new({})
+         elsif attrs[:run_at].nil? && resolve_que_setting(:run_synchronously)
+           attrs.merge!(
+             args: Que.deserialize_json(Que.serialize_json(attrs[:args])),
+             kwargs: Que.deserialize_json(Que.serialize_json(attrs[:kwargs])),
+             data: Que.deserialize_json(Que.serialize_json(attrs[:data])),
+           )
+           _run_attrs(attrs)
          else
-           values = Que.execute(:insert_job, attrs.values_at(:queue, :priority, :run_at, :job_class, :args)).first
-           Que.adapter.wake_worker_after_commit unless t
+           attrs.merge!(
+             args: Que.serialize_json(attrs[:args]),
+             kwargs: Que.serialize_json(attrs[:kwargs]),
+             data: Que.serialize_json(attrs[:data]),
+           )
+           values = Que.execute(
+             :insert_job,
+             attrs.values_at(:queue, :priority, :run_at, :job_class, :args, :kwargs, :data),
+           ).first
            new(values)
          end
        end
+       ruby2_keywords(:enqueue) if respond_to?(:ruby2_keywords, true)

-       def queue(*args)
-         warn "#{to_s}.queue(*args) is deprecated and will be removed in Que version 1.0.0. Please use #{to_s}.enqueue(*args) instead."
-         enqueue(*args)
+       def bulk_enqueue(job_options: {}, notify: false)
+         raise Que::Error, "Can't nest .bulk_enqueue" unless Thread.current[:que_jobs_to_bulk_insert].nil?
+         Thread.current[:que_jobs_to_bulk_insert] = { jobs_attrs: [], job_options: job_options }
+         yield
+         jobs_attrs = Thread.current[:que_jobs_to_bulk_insert][:jobs_attrs]
+         job_options = Thread.current[:que_jobs_to_bulk_insert][:job_options]
+         return [] if jobs_attrs.empty?
+         raise Que::Error, "When using .bulk_enqueue, all jobs enqueued must be of the same job class" unless jobs_attrs.map { |attrs| attrs[:job_class] }.uniq.one?
+         args_and_kwargs_array = jobs_attrs.map { |attrs| attrs.slice(:args, :kwargs) }
+         klass = job_options[:job_class] ? Que::Job : Que.constantize(jobs_attrs.first[:job_class])
+         klass._bulk_enqueue_insert(args_and_kwargs_array, job_options: job_options, notify: notify)
+       ensure
+         Thread.current[:que_jobs_to_bulk_insert] = nil
        end

-       def run(*args)
-         # Should not fail if there's no DB connection.
-         new(:args => args).tap { |job| job.run(*args) }
-       end
+       def _bulk_enqueue_insert(args_and_kwargs_array, job_options: {}, notify:)
+         raise 'Unexpected bulk args format' if !args_and_kwargs_array.is_a?(Array) || !args_and_kwargs_array.all? { |a| a.is_a?(Hash) }

-       def work(queue = '')
-         # Since we're taking session-level advisory locks, we have to hold the
-         # same connection throughout the process of getting a job, working it,
-         # deleting it, and removing the lock.
-         return_value =
-           Que.adapter.checkout do
-             begin
-               if job = Que.execute(:lock_job, [queue]).first
-                 # Edge case: It's possible for the lock_job query to have
-                 # grabbed a job that's already been worked, if it took its MVCC
-                 # snapshot while the job was processing, but didn't attempt the
-                 # advisory lock until it was finished. Since we have the lock, a
-                 # previous worker would have deleted it by now, so we just
-                 # double check that it still exists before working it.
-
-                 # Note that there is currently no spec for this behavior, since
-                 # I'm not sure how to reliably commit a transaction that deletes
-                 # the job in a separate thread between lock_job and check_job.
-                 if Que.execute(:check_job, job.values_at(:queue, :priority, :run_at, :job_id)).none?
-                   {:event => :job_race_condition}
-                 else
-                   klass = class_for(job[:job_class])
-                   klass.new(job)._run
-                   {:event => :job_worked, :job => job}
-                 end
-               else
-                 {:event => :job_unavailable}
-               end
-             rescue => error
-               begin
-                 if job
-                   count = job[:error_count].to_i + 1
-                   interval = klass && klass.respond_to?(:retry_interval) && klass.retry_interval || retry_interval
-                   delay = interval.respond_to?(:call) ? interval.call(count) : interval
-                   message = "#{error.message}\n#{error.backtrace.join("\n")}"
-                   Que.execute :set_error, [count, delay, message] + job.values_at(:queue, :priority, :run_at, :job_id)
-                 end
-               rescue
-                 # If we can't reach the database for some reason, too bad, but
-                 # don't let it crash the work loop.
-               end
-
-               if Que.error_handler
-                 # Similarly, protect the work loop from a failure of the error handler.
-                 Que.error_handler.call(error, job) rescue nil
-               end
-
-               return {:event => :job_errored, :error => error, :job => job}
-             ensure
-               # Clear the advisory lock we took when locking the job. Important
-               # to do this so that they don't pile up in the database. Again, if
-               # we can't reach the database, don't crash the work loop.
-               begin
-                 Que.execute "SELECT pg_advisory_unlock($1)", [job[:job_id]] if job
-               rescue
-               end
+         if job_options[:tags]
+           if job_options[:tags].length > MAXIMUM_TAGS_COUNT
+             raise Que::Error, "Can't enqueue a job with more than #{MAXIMUM_TAGS_COUNT} tags! (passed #{job_options[:tags].length})"
+           end
+
+           job_options[:tags].each do |tag|
+             if tag.length > MAXIMUM_TAG_LENGTH
+               raise Que::Error, "Can't enqueue a job with a tag longer than 100 characters! (\"#{tag}\")"
              end
            end
+         end
+
+         args_and_kwargs_array = args_and_kwargs_array.map do |args_and_kwargs|
+           args_and_kwargs.merge(
+             args: args_and_kwargs.fetch(:args, []),
+             kwargs: args_and_kwargs.fetch(:kwargs, {}),
+           )
+         end
+
+         attrs = {
+           queue: job_options[:queue] || resolve_que_setting(:queue) || Que.default_queue,
+           priority: job_options[:priority] || resolve_que_setting(:priority),
+           run_at: job_options[:run_at] || resolve_que_setting(:run_at),
+           args_and_kwargs_array: args_and_kwargs_array,
+           data: job_options[:tags] ? { tags: job_options[:tags] } : {},
+           job_class: \
+             job_options[:job_class] || name ||
+             raise(Error, "Can't enqueue an anonymous subclass of Que::Job"),
+         }
+
+         if attrs[:run_at].nil? && resolve_que_setting(:run_synchronously)
+           args_and_kwargs_array = Que.deserialize_json(Que.serialize_json(attrs.delete(:args_and_kwargs_array)))
+           args_and_kwargs_array.map do |args_and_kwargs|
+             _run_attrs(
+               attrs.merge(
+                 args: args_and_kwargs.fetch(:args),
+                 kwargs: args_and_kwargs.fetch(:kwargs),
+               ),
+             )
+           end
+         else
+           attrs.merge!(
+             args_and_kwargs_array: Que.serialize_json(attrs[:args_and_kwargs_array]),
+             data: Que.serialize_json(attrs[:data]),
+           )
+           values_array =
+             Que.transaction do
+               Que.execute('SET LOCAL que.skip_notify TO true') unless notify
+               Que.execute(
+                 :bulk_insert_jobs,
+                 attrs.values_at(:queue, :priority, :run_at, :job_class, :args_and_kwargs_array, :data),
+               )
+             end
+           values_array.map(&method(:new))
+         end
+       end

-         Que.adapter.cleanup!
+       def run(*args)
+         # Make sure things behave the same as they would have with a round-trip
+         # to the DB.
+         args, kwargs = Que.split_out_ruby2_keywords(args)
+         args = Que.deserialize_json(Que.serialize_json(args))
+         kwargs = Que.deserialize_json(Que.serialize_json(kwargs))

-         return_value
+         # Should not fail if there's no DB connection.
+         _run_attrs(args: args, kwargs: kwargs)
+       end
+       ruby2_keywords(:run) if respond_to?(:ruby2_keywords, true)
+
+       def resolve_que_setting(setting, *args)
+         value = send(setting) if respond_to?(setting)
+
+         if !value.nil?
+           value.respond_to?(:call) ? value.call(*args) : value
+         else
+           c = superclass
+           if c.respond_to?(:resolve_que_setting)
+             c.resolve_que_setting(setting, *args)
+           end
+         end
        end

        private

-       def class_for(string)
-         Que.constantize(string)
+       def _run_attrs(attrs)
+         attrs[:error_count] = 0
+         Que.recursively_freeze(attrs)
+
+         new(attrs).tap do |job|
+           Que.run_job_middleware(job) do
+             job._run(reraise_errors: true)
+           end
+         end
        end
      end
+
+     # Set up some defaults.
+     self.retry_interval = proc { |count| count ** 4 + 3 }
+     self.maximum_retry_count = 15
    end
  end
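To make the new enqueue surface concrete, a hedged usage sketch (not from the diff itself): per-class settings are plain class attributes resolved through resolve_que_setting, scheduling options move into the job_options hash, and bulk_enqueue batches many inserts through the bulk_insert_jobs statement above. ChargeCreditCard and user_ids are hypothetical, and a configured database connection is assumed:

class ChargeCreditCard < Que::Job
  # Class-level configuration, picked up via resolve_que_setting and
  # inherited by subclasses through the superclass fallback.
  self.queue    = "billing"
  self.priority = 10

  def run(user_id, amount:)
    # ... charge the card ...
  end
end

# Queue/priority/run_at/tags now travel in job_options, kept separate from the
# job's own args and kwargs (at most 5 tags, each up to 100 characters):
ChargeCreditCard.enqueue(
  42,
  amount: 100,
  job_options: { run_at: Time.now + 60, tags: ["invoice-123"] },
)

# One INSERT for many jobs of the same class; mixing job classes raises:
Que.bulk_enqueue do
  user_ids.each { |id| ChargeCreditCard.enqueue(id, amount: 100) }
end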
data/lib/que/job_buffer.rb ADDED
@@ -0,0 +1,255 @@
+ # frozen_string_literal: true
+
+ # A sized thread-safe queue that holds ordered job sort_keys. Supports blocking
+ # while waiting for a job to become available, only returning jobs over a
+ # minimum priority, and stopping gracefully.
+
+ module Que
+   class JobBuffer
+     attr_reader :maximum_size, :priority_queues
+
+     # Since we use a mutex, which is not reentrant, we have to be a little
+     # careful to not call a method that locks the mutex when we've already
+     # locked it. So, as a general rule, public methods handle locking the mutex
+     # when necessary, while private methods handle the actual underlying data
+     # changes. This lets us reuse those private methods without running into
+     # locking issues.
+
+     def initialize(
+       maximum_size:,
+       priorities:
+     )
+       @maximum_size = Que.assert(Integer, maximum_size)
+       Que.assert(maximum_size >= 0) { "maximum_size for a JobBuffer must be at least zero!" }
+
+       @stop = false
+       @array = []
+       @mutex = Mutex.new
+
+       @priority_queues = Hash[
+         # Make sure that priority = nil sorts highest.
+         priorities.sort_by{|p| p || MAXIMUM_PRIORITY}.map do |p|
+           [p, PriorityQueue.new(priority: p, job_buffer: self)]
+         end
+       ].freeze
+     end
+
+     def push(*metajobs)
+       Que.internal_log(:job_buffer_push, self) do
+         {
+           maximum_size: maximum_size,
+           ids: metajobs.map(&:id),
+           current_queue: to_a,
+         }
+       end
+
+       sync do
+         return metajobs if _stopping?
+
+         @array.concat(metajobs).sort!
+
+         # Relying on the hash's contents being sorted, here.
+         priority_queues.reverse_each do |_, pq|
+           pq.populate do
+             _shift_job(pq.priority)
+           end
+         end
+
+         # If we passed the maximum buffer size, drop the lowest sort keys and
+         # return their ids to be unlocked.
+         overage = -_buffer_space
+         pop(overage) if overage > 0
+       end
+     end
+
+     def shift(priority = nil)
+       queue = priority_queues.fetch(priority) { raise Error, "not a permitted priority! #{priority}" }
+       queue.pop || shift_job(priority)
+     end
+
+     def shift_job(priority = nil)
+       sync { _shift_job(priority) }
+     end
+
+     def accept?(metajobs)
+       metajobs.sort!
+
+       sync do
+         return [] if _stopping?
+
+         start_index = _buffer_space
+         final_index = metajobs.length - 1
+
+         return metajobs if start_index > final_index
+         index_to_lose = @array.length - 1
+
+         start_index.upto(final_index) do |index|
+           if index_to_lose >= 0 && (metajobs[index] <=> @array[index_to_lose]) < 0
+             return metajobs if index == final_index
+             index_to_lose -= 1
+           else
+             return metajobs.slice(0...index)
+           end
+         end
+
+         []
+       end
+     end
+
+     def waiting_count
+       count = 0
+       priority_queues.each_value do |pq|
+         count += pq.waiting_count
+       end
+       count
+     end
+
+     def available_priorities
+       hash = {}
+       lowest_priority = true
+
+       priority_queues.reverse_each do |priority, pq|
+         count = pq.waiting_count
+
+         if lowest_priority
+           count += buffer_space
+           lowest_priority = false
+         end
+
+         hash[priority || MAXIMUM_PRIORITY] = count if count > 0
+       end
+
+       hash
+     end
+
+     def buffer_space
+       sync { _buffer_space }
+     end
+
+     def size
+       sync { _size }
+     end
+
+     def to_a
+       sync { @array.dup }
+     end
+
+     def stop
+       sync { @stop = true }
+       priority_queues.each_value(&:stop)
+     end
+
+     def clear
+       sync { pop(_size) }
+     end
+
+     def stopping?
+       sync { _stopping? }
+     end
+
+     def job_available?(priority)
+       (job = @array.first) && job.priority_sufficient?(priority)
+     end
+
+     private
+
+     def _buffer_space
+       maximum_size - _size
+     end
+
+     def pop(count)
+       @array.pop(count)
+     end
+
+     def _shift_job(priority)
+       if _stopping?
+         false
+       elsif (job = @array.first) && job.priority_sufficient?(priority)
+         @array.shift
+       end
+     end
+
+     def _size
+       @array.size
+     end
+
+     def _stopping?
+       !!@stop
+     end
+
+     def sync(&block)
+       @mutex.synchronize(&block)
+     end
+
+     # A queue object dedicated to a specific worker priority. It's basically a
+     # Queue object from the standard library, but it's able to reach into the
+     # JobBuffer's buffer in order to satisfy a pop.
+     class PriorityQueue
+       attr_reader :job_buffer, :priority, :mutex
+
+       def initialize(
+         job_buffer:,
+         priority:
+       )
+         @job_buffer = job_buffer
+         @priority = priority
+         @waiting = 0
+         @stopping = false
+         @items = [] # Items pending distribution to waiting threads.
+         @mutex = Mutex.new
+         @cv = ConditionVariable.new
+       end
+
+       def pop
+         sync do
+           loop do
+             if @stopping
+               return false
+             elsif item = @items.pop
+               return item
+             elsif job_buffer.job_available?(priority)
+               return false
+             end
+
+             @waiting += 1
+             @cv.wait(mutex)
+             @waiting -= 1
+           end
+         end
+       end
+
+       def stop
+         sync do
+           @stopping = true
+           @cv.broadcast
+         end
+       end
+
+       def populate
+         sync do
+           waiting_count.times do
+             job = yield
+             break if job.nil? # False would mean we're stopping.
+             _push(job)
+           end
+         end
+       end
+
+       def waiting_count
+         @waiting
+       end
+
+       private
+
+       def sync(&block)
+         mutex.synchronize(&block)
+       end
+
+       def _push(item)
+         Que.assert(waiting_count > 0)
+         @items << item
+         @cv.signal
+       end
+     end
+   end
+ end
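A hedged sketch of the buffer's contract (not part of the diff). Real items are Que::Metajob instances from data/lib/que/metajob.rb; since that class's API isn't shown here, the struct below is a hypothetical stand-in that supplies the three things JobBuffer relies on: sort order via <=>, an id, and priority_sufficient?:

require "que"

# Hypothetical stand-in for Que::Metajob, for illustration only.
StubMetajob = Struct.new(:priority, :run_at, :id) do
  include Comparable

  def <=>(other)
    [priority, run_at, id] <=> [other.priority, other.run_at, other.id]
  end

  # A worker polling at threshold p may take this job if the job is urgent
  # enough (lower numbers are more urgent; nil means "any priority").
  def priority_sufficient?(p)
    p.nil? || priority <= p
  end
end

now = Time.now
buffer = Que::JobBuffer.new(maximum_size: 8, priorities: [10, 30, nil])

buffer.push(StubMetajob.new(15, now, 1), StubMetajob.new(5, now, 2))

buffer.shift(10) # => the priority-5 metajob; the most urgent sort key goes first
buffer.shift(30) # => the priority-15 metajob, too low-priority for the 10-worker
buffer.stop      # blocked shift callers wake up and return false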