que 0.11.3 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. checksums.yaml +5 -5
  2. data/.github/workflows/tests.yml +51 -0
  3. data/.gitignore +2 -0
  4. data/.ruby-version +1 -0
  5. data/CHANGELOG.md +502 -97
  6. data/Dockerfile +20 -0
  7. data/LICENSE.txt +1 -1
  8. data/README.md +205 -59
  9. data/auto/dev +21 -0
  10. data/auto/pre-push-hook +30 -0
  11. data/auto/psql +9 -0
  12. data/auto/test +5 -0
  13. data/auto/test-postgres-14 +17 -0
  14. data/bin/que +8 -81
  15. data/docker-compose.yml +47 -0
  16. data/docs/README.md +881 -0
  17. data/lib/que/active_job/extensions.rb +114 -0
  18. data/lib/que/active_record/connection.rb +51 -0
  19. data/lib/que/active_record/model.rb +48 -0
  20. data/lib/que/command_line_interface.rb +259 -0
  21. data/lib/que/connection.rb +198 -0
  22. data/lib/que/connection_pool.rb +78 -0
  23. data/lib/que/job.rb +210 -103
  24. data/lib/que/job_buffer.rb +255 -0
  25. data/lib/que/job_methods.rb +176 -0
  26. data/lib/que/listener.rb +176 -0
  27. data/lib/que/locker.rb +507 -0
  28. data/lib/que/metajob.rb +47 -0
  29. data/lib/que/migrations/4/down.sql +48 -0
  30. data/lib/que/migrations/4/up.sql +267 -0
  31. data/lib/que/migrations/5/down.sql +73 -0
  32. data/lib/que/migrations/5/up.sql +76 -0
  33. data/lib/que/migrations/6/down.sql +8 -0
  34. data/lib/que/migrations/6/up.sql +8 -0
  35. data/lib/que/migrations/7/down.sql +5 -0
  36. data/lib/que/migrations/7/up.sql +13 -0
  37. data/lib/que/migrations.rb +37 -18
  38. data/lib/que/poller.rb +274 -0
  39. data/lib/que/rails/railtie.rb +12 -0
  40. data/lib/que/result_queue.rb +35 -0
  41. data/lib/que/sequel/model.rb +52 -0
  42. data/lib/que/utils/assertions.rb +62 -0
  43. data/lib/que/utils/constantization.rb +19 -0
  44. data/lib/que/utils/error_notification.rb +68 -0
  45. data/lib/que/utils/freeze.rb +20 -0
  46. data/lib/que/utils/introspection.rb +50 -0
  47. data/lib/que/utils/json_serialization.rb +21 -0
  48. data/lib/que/utils/logging.rb +79 -0
  49. data/lib/que/utils/middleware.rb +46 -0
  50. data/lib/que/utils/queue_management.rb +18 -0
  51. data/lib/que/utils/ruby2_keywords.rb +19 -0
  52. data/lib/que/utils/transactions.rb +34 -0
  53. data/lib/que/version.rb +5 -1
  54. data/lib/que/worker.rb +145 -149
  55. data/lib/que.rb +103 -159
  56. data/que.gemspec +17 -4
  57. data/scripts/docker-entrypoint +14 -0
  58. data/scripts/test +6 -0
  59. metadata +59 -95
  60. data/.rspec +0 -2
  61. data/.travis.yml +0 -17
  62. data/Gemfile +0 -24
  63. data/docs/advanced_setup.md +0 -106
  64. data/docs/customizing_que.md +0 -200
  65. data/docs/error_handling.md +0 -47
  66. data/docs/inspecting_the_queue.md +0 -114
  67. data/docs/logging.md +0 -50
  68. data/docs/managing_workers.md +0 -80
  69. data/docs/migrating.md +0 -30
  70. data/docs/multiple_queues.md +0 -27
  71. data/docs/shutting_down_safely.md +0 -7
  72. data/docs/using_plain_connections.md +0 -41
  73. data/docs/using_sequel.md +0 -31
  74. data/docs/writing_reliable_jobs.md +0 -117
  75. data/lib/generators/que/install_generator.rb +0 -24
  76. data/lib/generators/que/templates/add_que.rb +0 -13
  77. data/lib/que/adapters/active_record.rb +0 -54
  78. data/lib/que/adapters/base.rb +0 -127
  79. data/lib/que/adapters/connection_pool.rb +0 -16
  80. data/lib/que/adapters/pg.rb +0 -21
  81. data/lib/que/adapters/pond.rb +0 -16
  82. data/lib/que/adapters/sequel.rb +0 -20
  83. data/lib/que/railtie.rb +0 -16
  84. data/lib/que/rake_tasks.rb +0 -59
  85. data/lib/que/sql.rb +0 -152
  86. data/spec/adapters/active_record_spec.rb +0 -152
  87. data/spec/adapters/connection_pool_spec.rb +0 -22
  88. data/spec/adapters/pg_spec.rb +0 -41
  89. data/spec/adapters/pond_spec.rb +0 -22
  90. data/spec/adapters/sequel_spec.rb +0 -57
  91. data/spec/gemfiles/Gemfile1 +0 -18
  92. data/spec/gemfiles/Gemfile2 +0 -18
  93. data/spec/spec_helper.rb +0 -118
  94. data/spec/support/helpers.rb +0 -19
  95. data/spec/support/jobs.rb +0 -35
  96. data/spec/support/shared_examples/adapter.rb +0 -37
  97. data/spec/support/shared_examples/multi_threaded_adapter.rb +0 -46
  98. data/spec/travis.rb +0 -23
  99. data/spec/unit/connection_spec.rb +0 -14
  100. data/spec/unit/customization_spec.rb +0 -251
  101. data/spec/unit/enqueue_spec.rb +0 -245
  102. data/spec/unit/helper_spec.rb +0 -12
  103. data/spec/unit/logging_spec.rb +0 -101
  104. data/spec/unit/migrations_spec.rb +0 -84
  105. data/spec/unit/pool_spec.rb +0 -365
  106. data/spec/unit/run_spec.rb +0 -14
  107. data/spec/unit/states_spec.rb +0 -50
  108. data/spec/unit/stats_spec.rb +0 -46
  109. data/spec/unit/transaction_spec.rb +0 -36
  110. data/spec/unit/work_spec.rb +0 -407
  111. data/spec/unit/worker_spec.rb +0 -167
  112. data/tasks/benchmark.rb +0 -3
  113. data/tasks/rspec.rb +0 -14
  114. data/tasks/safe_shutdown.rb +0 -67
data/lib/que/locker.rb ADDED
@@ -0,0 +1,507 @@
+ # frozen_string_literal: true
+
+ # The Locker class encapsulates a thread that is listening/polling for new
+ # jobs in the DB, locking them, passing their primary keys to workers, then
+ # cleaning up by unlocking them once the workers are done.
+
+ require 'set'
+
+ module Que
+   Listener::MESSAGE_FORMATS[:job_available] =
+     {
+       queue: String,
+       id: Integer,
+       run_at: TIME_REGEX,
+       priority: Integer,
+     }
+
+   SQL[:clean_lockers] =
+     %{
+       DELETE FROM public.que_lockers
+       WHERE pid = pg_backend_pid()
+       OR NOT EXISTS (SELECT 1 FROM pg_stat_activity WHERE pid = public.que_lockers.pid)
+     }
+
+   SQL[:register_locker] =
+     %{
+       INSERT INTO public.que_lockers (pid, worker_count, worker_priorities, ruby_pid, ruby_hostname, listening, queues, job_schema_version)
+       VALUES (pg_backend_pid(), $1::integer, $2::integer[], $3::integer, $4::text, $5::boolean, $6::text[], $7::integer)
+     }
+
+   class Locker
+     attr_reader :thread, :workers, :job_buffer, :locks, :queues, :poll_interval
+
+     MESSAGE_RESOLVERS = {}
+     RESULT_RESOLVERS = {}
+
+     MESSAGE_RESOLVERS[:job_available] =
+       -> (messages) {
+         metajobs = messages.map { |key| Metajob.new(key) }
+         push_jobs(lock_jobs(job_buffer.accept?(metajobs)))
+       }
+
+     RESULT_RESOLVERS[:job_finished] =
+       -> (messages) { finish_jobs(messages.map{|m| m.fetch(:metajob)}) }
+
+     DEFAULT_POLL_INTERVAL = 5.0
+     DEFAULT_WAIT_PERIOD = 50
+     DEFAULT_MAXIMUM_BUFFER_SIZE = 8
+     DEFAULT_WORKER_PRIORITIES = [10, 30, 50, nil, nil, nil].freeze
+
+     def initialize(
+       queues: [Que.default_queue],
+       connection_url: nil,
+       listen: true,
+       poll: true,
+       poll_interval: DEFAULT_POLL_INTERVAL,
+       wait_period: DEFAULT_WAIT_PERIOD,
+       maximum_buffer_size: DEFAULT_MAXIMUM_BUFFER_SIZE,
+       worker_priorities: DEFAULT_WORKER_PRIORITIES,
+       on_worker_start: nil
+     )
+
+       # Sanity-check all our arguments, since some users may instantiate Locker
+       # directly.
+       Que.assert [TrueClass, FalseClass], listen
+       Que.assert [TrueClass, FalseClass], poll
+
+       Que.assert Numeric, poll_interval
+       Que.assert Numeric, wait_period
+
+       Que.assert Array, worker_priorities
+       worker_priorities.each { |p| Que.assert([Integer, NilClass], p) }
+
+       # We use a JobBuffer to track jobs and pass them to workers, and a
+       # ResultQueue to receive messages from workers.
+       @job_buffer = JobBuffer.new(
+         maximum_size: maximum_buffer_size,
+         priorities: worker_priorities.uniq,
+       )
+
+       @result_queue = ResultQueue.new
+
+       @stop = false
+
+       Que.internal_log :locker_instantiate, self do
+         {
+           queues: queues,
+           listen: listen,
+           poll: poll,
+           poll_interval: poll_interval,
+           wait_period: wait_period,
+           maximum_buffer_size: maximum_buffer_size,
+           worker_priorities: worker_priorities,
+         }
+       end
+
+       # Local cache of which advisory locks are held by this connection.
+       @locks = Set.new
+
+       @poll_interval = poll_interval
+
+       if queues.is_a?(Hash)
+         @queue_names = queues.keys
+         @queues = queues.transform_values do |interval|
+           interval || poll_interval
+         end
+       else
+         @queue_names = queues
+         @queues = queues.map do |queue_name|
+           [queue_name, poll_interval]
+         end.to_h
+       end
+
+       @wait_period = wait_period.to_f / 1000 # Milliseconds to seconds.
+
+       @workers =
+         worker_priorities.map do |priority|
+           Worker.new(
+             priority: priority,
+             job_buffer: @job_buffer,
+             result_queue: @result_queue,
+             start_callback: on_worker_start,
+           )
+         end
+
+       # To prevent race conditions, let every worker get into a ready state
+       # before starting up the locker thread.
+       loop do
+         break if job_buffer.waiting_count == workers.count
+         sleep 0.001
+       end
+
+       # If we weren't passed a specific connection_url, borrow a connection from
+       # the pool and derive the connection string from it.
+       connection_args =
+         if connection_url
+           uri = URI.parse(connection_url)
+
+           opts =
+             {
+               host: uri.host,
+               user: uri.user,
+               password: uri.password,
+               port: uri.port || 5432,
+               dbname: uri.path[1..-1],
+             }
+
+           if uri.query
+             opts.merge!(Hash[uri.query.split("&").map{|s| s.split('=')}.map{|a,b| [a.to_sym, b]}])
+           end
+
+           opts
+         else
+           Que.pool.checkout do |conn|
+             c = conn.wrapped_connection
+
+             {
+               host: c.host,
+               user: c.user,
+               password: c.pass,
+               port: c.port,
+               dbname: c.db,
+             }
+           end
+         end
+
+       @connection = Que::Connection.wrap(PG::Connection.open(connection_args))
+
+       @thread =
+         Thread.new do
+           # An error causing this thread to exit is a bug in Que, which we want
+           # to know about ASAP, so propagate the error if it happens.
+           Thread.current.abort_on_exception = true
+
+           # Give this thread priority, so it can promptly respond to NOTIFYs.
+           Thread.current.priority = 1
+
+           begin
+             unless connection_args.has_key?(:application_name)
+               @connection.execute(
+                 "SELECT set_config('application_name', $1, false)",
+                 ["Que Locker: #{@connection.backend_pid}"]
+               )
+             end
+
+             Poller.setup(@connection)
+
+             @listener =
+               if listen
+                 Listener.new(connection: @connection)
+               end
+
+             @pollers =
+               if poll
+                 @queues.map do |queue_name, interval|
+                   Poller.new(
+                     connection: @connection,
+                     queue: queue_name,
+                     poll_interval: interval,
+                   )
+                 end
+               end
+
+             work_loop
+           ensure
+             @connection.wrapped_connection.close
+           end
+         end
+     end
+
+     def stop!
+       stop; wait_for_stop
+     end
+
+     def stop
+       @job_buffer.stop
+       @stop = true
+     end
+
+     def stopping?
+       @stop
+     end
+
+     def wait_for_stop
+       @thread.join
+     end
+
+     private
+
+     attr_reader :connection, :pollers
+
+     def work_loop
+       Que.log(
+         level: :debug,
+         event: :locker_start,
+         queues: @queue_names,
+       )
+
+       Que.internal_log :locker_start, self do
+         {
+           backend_pid: connection.backend_pid,
+           worker_priorities: workers.map(&:priority),
+           pollers: pollers && pollers.map { |p| [p.queue, p.poll_interval] }
+         }
+       end
+
+       begin
+         @listener.listen if @listener
+
+         startup
+
+         {} while cycle
+
+         Que.log(
+           level: :debug,
+           event: :locker_stop,
+         )
+
+         shutdown
+       ensure
+         connection.execute :clean_lockers
+
+         @listener.unlisten if @listener
+       end
+     end
+
+     def startup
+       # A previous locker that didn't exit cleanly may have left behind
+       # a bad locker record, so clean up before registering.
+       connection.execute :clean_lockers
+       connection.execute :register_locker, [
+         @workers.count,
+         "{#{@workers.map(&:priority).map{|p| p || 'NULL'}.join(',')}}",
+         Process.pid,
+         CURRENT_HOSTNAME,
+         !!@listener,
+         "{\"#{@queue_names.join('","')}\"}",
+         Que.job_schema_version,
+       ]
+     end
+
+     def cycle
+       # Poll at the start of a cycle, so that when the worker starts up we can
+       # load up the queue with jobs immediately.
+       poll
+
+       # If we got the stop call while we were polling, break before going to
+       # sleep.
+       return if @stop
+
+       # The main sleeping part of the cycle. If this is a listening locker, this
+       # is where we wait for notifications.
+       wait
+
+       # Manage any job output we got while we were sleeping.
+       handle_results
+
+       # If we haven't gotten the stop signal, cycle again.
+       !@stop
+     end
+
+     def shutdown
+       unlock_jobs(@job_buffer.clear)
+       wait_for_shutdown
+       handle_results
+     end
+
+     def wait_for_shutdown
+       @workers.each(&:wait_until_stopped)
+     end
+
+     def poll
+       # Only poll when there are pollers to use (that is, when polling is
+       # enabled).
+       return unless pollers
+
+       # Figure out what job priorities we have to fill.
+       priorities = job_buffer.available_priorities
+
+       # Only poll when there are workers ready for jobs.
+       return if priorities.empty?
+
+       all_metajobs = []
+
+       pollers.each do |poller|
+         Que.internal_log(:locker_polling, self) {
+           {
+             priorities: priorities,
+             held_locks: @locks.to_a,
+             queue: poller.queue,
+           }
+         }
+
+         if metajobs = poller.poll(priorities: priorities, held_locks: @locks)
+           metajobs.sort!
+           all_metajobs.concat(metajobs)
+
+           # Update the desired priorities list to take the priorities that we
+           # just retrieved into account.
+           metajobs.each do |metajob|
+             job_priority = metajob.job.fetch(:priority)
+
+             priorities.each do |priority, count|
+               if job_priority <= priority
+                 new_priority = count - 1
+
+                 if new_priority <= 0
+                   priorities.delete(priority)
+                 else
+                   priorities[priority] = new_priority
+                 end
+
+                 break
+               end
+             end
+           end
+
+           break if priorities.empty?
+         end
+       end
+
+       all_metajobs.each { |metajob| mark_id_as_locked(metajob.id) }
+       push_jobs(all_metajobs)
+     end
+
+     def wait
+       if l = @listener
+         l.wait_for_grouped_messages(@wait_period).each do |type, messages|
+           if resolver = MESSAGE_RESOLVERS[type]
+             instance_exec messages, &resolver
+           else
+             raise Error, "Unexpected message type: #{type.inspect}"
+           end
+         end
+       else
+         sleep(@wait_period)
+       end
+     end
+
+     def handle_results
+       messages_by_type =
+         @result_queue.clear.group_by{|r| r.fetch(:message_type)}
+
+       messages_by_type.each do |type, messages|
+         if resolver = RESULT_RESOLVERS[type]
+           instance_exec messages, &resolver
+         else
+           raise Error, "Unexpected result type: #{type.inspect}"
+         end
+       end
+     end
+
+     def lock_jobs(metajobs)
+       metajobs.reject! { |m| @locks.include?(m.id) }
+       return metajobs if metajobs.empty?
+
+       ids = metajobs.map { |m| m.id.to_i }
+
+       Que.internal_log :locker_locking, self do
+         {
+           backend_pid: connection.backend_pid,
+           ids: ids,
+         }
+       end
+
+       materalize_cte = connection.server_version >= 12_00_00
+
+       jobs =
+         connection.execute \
+           <<-SQL
+             WITH jobs AS #{materalize_cte ? 'MATERIALIZED' : ''} (SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')}))
+             SELECT * FROM jobs WHERE pg_try_advisory_lock(id)
+           SQL
+
+       jobs_by_id = {}
+
+       jobs.each do |job|
+         id = job.fetch(:id)
+         mark_id_as_locked(id)
+         jobs_by_id[id] = job
+       end
+
+       metajobs.keep_if do |metajob|
+         if job = jobs_by_id[metajob.id]
+           metajob.set_job(job)
+           true
+         else
+           false
+         end
+       end
+     end
+
+     def push_jobs(metajobs)
+       return if metajobs.empty?
+
+       # First check that the jobs are all still visible/available in the DB.
+       ids = metajobs.map(&:id)
+
+       verified_ids =
+         connection.execute(
+           <<-SQL
+             SELECT id
+             FROM public.que_jobs
+             WHERE finished_at IS NULL
+               AND expired_at IS NULL
+               AND id IN (#{ids.join(', ')})
+           SQL
+         ).map{|h| h[:id]}.to_set
+
+       good, bad = metajobs.partition{|mj| verified_ids.include?(mj.id)}
+
+       # Need to unlock any low-importance jobs the new ones may displace.
+       if displaced = @job_buffer.push(*good)
+         bad.concat(displaced)
+       end
+
+       unlock_jobs(bad)
+     end
+
+     def finish_jobs(metajobs)
+       unlock_jobs(metajobs)
+     end
+
+     def unlock_jobs(metajobs)
+       return if metajobs.empty?
+
+       # Unclear how untrusted input would get passed to this method, but since
+       # we need string interpolation here, make sure we only have integers.
+       ids = metajobs.map { |job| job.id.to_i }
+
+       Que.internal_log :locker_unlocking, self do
+         {
+           backend_pid: connection.backend_pid,
+           ids: ids,
+         }
+       end
+
+       values = ids.join('), (')
+
+       results =
+         connection.execute \
+           "SELECT pg_advisory_unlock(v.i) FROM (VALUES (#{values})) v (i)"
+
+       results.each do |result|
+         Que.assert(result.fetch(:pg_advisory_unlock)) do
+           [
+             "Tried to unlock a job we hadn't locked!",
+             results.inspect,
+             ids.inspect,
+           ].join(' ')
+         end
+       end
+
+       ids.each do |id|
+         Que.assert(@locks.delete?(id)) do
+           "Tried to remove a local lock that didn't exist!: #{id}"
+         end
+       end
+     end
+
+     def mark_id_as_locked(id)
+       Que.assert(@locks.add?(id)) do
+         "Tried to lock a job that was already locked: #{id}"
+       end
+     end
+   end
+ end
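
The comments above describe the Locker's lifecycle: it listens/polls for jobs, locks them, hands their primary keys to workers, and unlocks them once the workers are done. A minimal usage sketch, assuming a configured Que connection and using only the constructor arguments and defaults shown in this file:

    # Hypothetical illustration, not part of the gem's own code.
    locker = Que::Locker.new(
      queues:            [Que.default_queue],         # or a Hash of queue name => poll interval
      listen:            true,                        # receive job_available NOTIFYs
      poll:              true,                        # also poll, so scheduled/missed jobs are picked up
      poll_interval:     5.0,                         # seconds (DEFAULT_POLL_INTERVAL)
      wait_period:       50,                          # milliseconds (DEFAULT_WAIT_PERIOD)
      worker_priorities: [10, 30, 50, nil, nil, nil], # nil = worker takes jobs of any priority
    )

    # Later, e.g. from a signal handler: stop buffering jobs, wait for the
    # workers to finish, and unlock anything still held.
    locker.stop!
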
data/lib/que/metajob.rb ADDED
@@ -0,0 +1,47 @@
+ # frozen_string_literal: true
+
+ # A thin wrapper around a job's data that lets us do things like sort easily and
+ # make sure that run_at is in the format we want.
+
+ module Que
+   class Metajob
+     SORT_KEYS = [:priority, :run_at, :id].freeze
+
+     attr_reader :job
+
+     def initialize(job)
+       set_job(job)
+     end
+
+     def set_job(job)
+       if (run_at = job.fetch(:run_at)).is_a?(Time)
+         job[:run_at] = run_at.utc.iso8601(6)
+       end
+
+       @job = job
+     end
+
+     def id
+       job.fetch(:id)
+     end
+
+     def <=>(other)
+       k1 = job
+       k2 = other.job
+
+       SORT_KEYS.each do |key|
+         value1 = k1.fetch(key)
+         value2 = k2.fetch(key)
+
+         return -1 if value1 < value2
+         return 1 if value1 > value2
+       end
+
+       0
+     end
+
+     def priority_sufficient?(threshold)
+       threshold.nil? || job.fetch(:priority) <= threshold
+     end
+   end
+ end
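
Metajob#<=> orders jobs by priority, then run_at, then id, which is what the locker and job buffer rely on to keep the most urgent work first. A small illustration with hypothetical job hashes (the keys match SORT_KEYS above):

    # Hypothetical data, not part of the gem's own code.
    a = Que::Metajob.new({priority: 10, run_at: Time.utc(2024, 1, 1), id: 2})
    b = Que::Metajob.new({priority: 10, run_at: Time.utc(2024, 1, 1), id: 1})
    c = Que::Metajob.new({priority: 50, run_at: Time.utc(2024, 1, 1), id: 3})

    [a, b, c].sort.map(&:id)    # => [1, 2, 3]; lower priority value sorts first, ties broken by run_at, then id
    c.priority_sufficient?(30)  # => false; a worker limited to priorities <= 30 would pass over this job
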
data/lib/que/migrations/4/down.sql ADDED
@@ -0,0 +1,48 @@
+ ALTER TABLE que_jobs RESET (fillfactor);
+
+ ALTER TABLE que_jobs DROP CONSTRAINT que_jobs_pkey;
+ DROP INDEX que_poll_idx;
+ DROP INDEX que_jobs_data_gin_idx;
+
+ DROP TRIGGER que_job_notify ON que_jobs;
+ DROP FUNCTION que_job_notify();
+ DROP TRIGGER que_state_notify ON que_jobs;
+ DROP FUNCTION que_state_notify();
+ DROP FUNCTION que_determine_job_state(que_jobs);
+ DROP TABLE que_lockers;
+
+ DROP TABLE que_values;
+ DROP INDEX que_jobs_args_gin_idx;
+
+ ALTER TABLE que_jobs RENAME COLUMN id TO job_id;
+ ALTER SEQUENCE que_jobs_id_seq RENAME TO que_jobs_job_id_seq;
+
+ ALTER TABLE que_jobs RENAME COLUMN last_error_message TO last_error;
+
+ DELETE FROM que_jobs WHERE (finished_at IS NOT NULL OR expired_at IS NOT NULL);
+
+ ALTER TABLE que_jobs
+   DROP CONSTRAINT error_length,
+   DROP CONSTRAINT queue_length,
+   DROP CONSTRAINT job_class_length,
+   DROP CONSTRAINT valid_args,
+   DROP COLUMN finished_at,
+   DROP COLUMN expired_at,
+   ALTER args TYPE JSON using args::json;
+
+ UPDATE que_jobs
+ SET
+   queue = CASE queue WHEN 'default' THEN '' ELSE queue END,
+   last_error = last_error || coalesce(E'\n' || last_error_backtrace, '');
+
+ ALTER TABLE que_jobs
+   DROP COLUMN data,
+   DROP COLUMN last_error_backtrace,
+   ALTER COLUMN args SET NOT NULL,
+   ALTER COLUMN args SET DEFAULT '[]',
+   ALTER COLUMN queue SET DEFAULT '';
+
+ ALTER TABLE que_jobs
+   ADD PRIMARY KEY (queue, priority, run_at, job_id);
+
+ DROP FUNCTION que_validate_tags(jsonb);
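
This file reverts schema version 4 to the version-3 layout: it restores the job_id and last_error column names and the old primary key, and drops the lockers table, triggers, and helper functions. Migration files like this are not normally run by hand; a sketch of how they are typically applied, assuming the Que::Migrations helper shipped in this release:

    # Hypothetical illustration, not part of the gem's own code.
    # Bring the que_jobs schema up to the version bundled with this gem.
    Que::Migrations.migrate!(version: Que::Migrations::CURRENT_VERSION)

    # Migrating back down to version 3 executes the 4/down.sql shown above.
    Que::Migrations.migrate!(version: 3)
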