que 0.11.3 → 2.2.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (114)
  1. checksums.yaml +5 -5
  2. data/.github/workflows/tests.yml +51 -0
  3. data/.gitignore +2 -0
  4. data/.ruby-version +1 -0
  5. data/CHANGELOG.md +502 -97
  6. data/Dockerfile +20 -0
  7. data/LICENSE.txt +1 -1
  8. data/README.md +205 -59
  9. data/auto/dev +21 -0
  10. data/auto/pre-push-hook +30 -0
  11. data/auto/psql +9 -0
  12. data/auto/test +5 -0
  13. data/auto/test-postgres-14 +17 -0
  14. data/bin/que +8 -81
  15. data/docker-compose.yml +47 -0
  16. data/docs/README.md +881 -0
  17. data/lib/que/active_job/extensions.rb +114 -0
  18. data/lib/que/active_record/connection.rb +51 -0
  19. data/lib/que/active_record/model.rb +48 -0
  20. data/lib/que/command_line_interface.rb +259 -0
  21. data/lib/que/connection.rb +198 -0
  22. data/lib/que/connection_pool.rb +78 -0
  23. data/lib/que/job.rb +210 -103
  24. data/lib/que/job_buffer.rb +255 -0
  25. data/lib/que/job_methods.rb +176 -0
  26. data/lib/que/listener.rb +176 -0
  27. data/lib/que/locker.rb +507 -0
  28. data/lib/que/metajob.rb +47 -0
  29. data/lib/que/migrations/4/down.sql +48 -0
  30. data/lib/que/migrations/4/up.sql +267 -0
  31. data/lib/que/migrations/5/down.sql +73 -0
  32. data/lib/que/migrations/5/up.sql +76 -0
  33. data/lib/que/migrations/6/down.sql +8 -0
  34. data/lib/que/migrations/6/up.sql +8 -0
  35. data/lib/que/migrations/7/down.sql +5 -0
  36. data/lib/que/migrations/7/up.sql +13 -0
  37. data/lib/que/migrations.rb +37 -18
  38. data/lib/que/poller.rb +274 -0
  39. data/lib/que/rails/railtie.rb +12 -0
  40. data/lib/que/result_queue.rb +35 -0
  41. data/lib/que/sequel/model.rb +52 -0
  42. data/lib/que/utils/assertions.rb +62 -0
  43. data/lib/que/utils/constantization.rb +19 -0
  44. data/lib/que/utils/error_notification.rb +68 -0
  45. data/lib/que/utils/freeze.rb +20 -0
  46. data/lib/que/utils/introspection.rb +50 -0
  47. data/lib/que/utils/json_serialization.rb +21 -0
  48. data/lib/que/utils/logging.rb +79 -0
  49. data/lib/que/utils/middleware.rb +46 -0
  50. data/lib/que/utils/queue_management.rb +18 -0
  51. data/lib/que/utils/ruby2_keywords.rb +19 -0
  52. data/lib/que/utils/transactions.rb +34 -0
  53. data/lib/que/version.rb +5 -1
  54. data/lib/que/worker.rb +145 -149
  55. data/lib/que.rb +103 -159
  56. data/que.gemspec +17 -4
  57. data/scripts/docker-entrypoint +14 -0
  58. data/scripts/test +6 -0
  59. metadata +59 -95
  60. data/.rspec +0 -2
  61. data/.travis.yml +0 -17
  62. data/Gemfile +0 -24
  63. data/docs/advanced_setup.md +0 -106
  64. data/docs/customizing_que.md +0 -200
  65. data/docs/error_handling.md +0 -47
  66. data/docs/inspecting_the_queue.md +0 -114
  67. data/docs/logging.md +0 -50
  68. data/docs/managing_workers.md +0 -80
  69. data/docs/migrating.md +0 -30
  70. data/docs/multiple_queues.md +0 -27
  71. data/docs/shutting_down_safely.md +0 -7
  72. data/docs/using_plain_connections.md +0 -41
  73. data/docs/using_sequel.md +0 -31
  74. data/docs/writing_reliable_jobs.md +0 -117
  75. data/lib/generators/que/install_generator.rb +0 -24
  76. data/lib/generators/que/templates/add_que.rb +0 -13
  77. data/lib/que/adapters/active_record.rb +0 -54
  78. data/lib/que/adapters/base.rb +0 -127
  79. data/lib/que/adapters/connection_pool.rb +0 -16
  80. data/lib/que/adapters/pg.rb +0 -21
  81. data/lib/que/adapters/pond.rb +0 -16
  82. data/lib/que/adapters/sequel.rb +0 -20
  83. data/lib/que/railtie.rb +0 -16
  84. data/lib/que/rake_tasks.rb +0 -59
  85. data/lib/que/sql.rb +0 -152
  86. data/spec/adapters/active_record_spec.rb +0 -152
  87. data/spec/adapters/connection_pool_spec.rb +0 -22
  88. data/spec/adapters/pg_spec.rb +0 -41
  89. data/spec/adapters/pond_spec.rb +0 -22
  90. data/spec/adapters/sequel_spec.rb +0 -57
  91. data/spec/gemfiles/Gemfile1 +0 -18
  92. data/spec/gemfiles/Gemfile2 +0 -18
  93. data/spec/spec_helper.rb +0 -118
  94. data/spec/support/helpers.rb +0 -19
  95. data/spec/support/jobs.rb +0 -35
  96. data/spec/support/shared_examples/adapter.rb +0 -37
  97. data/spec/support/shared_examples/multi_threaded_adapter.rb +0 -46
  98. data/spec/travis.rb +0 -23
  99. data/spec/unit/connection_spec.rb +0 -14
  100. data/spec/unit/customization_spec.rb +0 -251
  101. data/spec/unit/enqueue_spec.rb +0 -245
  102. data/spec/unit/helper_spec.rb +0 -12
  103. data/spec/unit/logging_spec.rb +0 -101
  104. data/spec/unit/migrations_spec.rb +0 -84
  105. data/spec/unit/pool_spec.rb +0 -365
  106. data/spec/unit/run_spec.rb +0 -14
  107. data/spec/unit/states_spec.rb +0 -50
  108. data/spec/unit/stats_spec.rb +0 -46
  109. data/spec/unit/transaction_spec.rb +0 -36
  110. data/spec/unit/work_spec.rb +0 -407
  111. data/spec/unit/worker_spec.rb +0 -167
  112. data/tasks/benchmark.rb +0 -3
  113. data/tasks/rspec.rb +0 -14
  114. data/tasks/safe_shutdown.rb +0 -67
@@ -0,0 +1,267 @@
1
+ ALTER TABLE que_jobs SET (fillfactor = 90);
2
+ ALTER TABLE que_jobs RENAME COLUMN last_error TO last_error_message;
3
+ ALTER TABLE que_jobs RENAME COLUMN job_id TO id;
4
+ ALTER TABLE que_jobs RENAME COLUMN args TO old_args;
5
+ ALTER SEQUENCE que_jobs_job_id_seq RENAME TO que_jobs_id_seq;
6
+
7
+ ALTER TABLE que_jobs
8
+ ADD COLUMN last_error_backtrace text,
9
+ ADD COLUMN finished_at timestamptz,
10
+ ADD COLUMN expired_at timestamptz,
11
+ ADD COLUMN args JSONB,
12
+ ADD COLUMN data JSONB;
13
+
14
+ ALTER TABLE que_jobs DROP CONSTRAINT que_jobs_pkey;
15
+
16
+ UPDATE que_jobs
17
+ SET
18
+ queue = CASE queue WHEN '' THEN 'default' ELSE queue END,
19
+ last_error_backtrace =
20
+ CASE
21
+ WHEN last_error_message ~ '\n'
22
+ THEN left(regexp_replace(last_error_message, '^[^\n]+\n', ''), 10000)
23
+ ELSE
24
+ NULL
25
+ END,
26
+ last_error_message = left(substring(last_error_message from '^[^\n]+'), 500),
27
+ args =
28
+ CASE json_typeof(old_args)
29
+ WHEN 'array' THEN old_args::jsonb
30
+ ELSE jsonb_build_array(old_args)
31
+ END,
32
+ data = '{}'::jsonb;
33
+
34
+ CREATE FUNCTION que_validate_tags(tags_array jsonb) RETURNS boolean AS $$
35
+ SELECT bool_and(
36
+ jsonb_typeof(value) = 'string'
37
+ AND
38
+ char_length(value::text) <= 100
39
+ )
40
+ FROM jsonb_array_elements(tags_array)
41
+ $$
42
+ LANGUAGE SQL;
43
+
44
+ -- Now that we're done rewriting data, add new indexes.
45
+ CREATE INDEX que_poll_idx ON que_jobs (queue, priority, run_at, id) WHERE (finished_at IS NULL AND expired_at IS NULL);
46
+ CREATE INDEX que_jobs_data_gin_idx ON que_jobs USING gin (data jsonb_path_ops);
47
+ CREATE INDEX que_jobs_args_gin_idx ON que_jobs USING gin (args jsonb_path_ops);
48
+
49
+ ALTER TABLE que_jobs
50
+ ADD PRIMARY KEY (id),
51
+ DROP COLUMN old_args,
52
+ ALTER COLUMN queue SET DEFAULT 'default',
53
+ ALTER COLUMN args SET DEFAULT '[]',
54
+ ALTER COLUMN args SET NOT NULL,
55
+ ALTER COLUMN data SET DEFAULT '{}',
56
+ ALTER COLUMN data SET NOT NULL,
57
+ ADD CONSTRAINT queue_length CHECK (
58
+ char_length(queue) <= 100
59
+ ),
60
+ ADD CONSTRAINT job_class_length CHECK (
61
+ char_length(
62
+ CASE job_class
63
+ WHEN 'ActiveJob::QueueAdapters::QueAdapter::JobWrapper' THEN
64
+ args->0->>'job_class'
65
+ ELSE
66
+ job_class
67
+ END
68
+ ) <= 200
69
+ ),
70
+ ADD CONSTRAINT valid_args CHECK (
71
+ (jsonb_typeof(args) = 'array')
72
+ ),
73
+ ADD CONSTRAINT valid_data CHECK (
74
+ (jsonb_typeof(data) = 'object')
75
+ AND
76
+ (
77
+ (NOT data ? 'tags')
78
+ OR
79
+ (
80
+ (jsonb_typeof(data->'tags') = 'array')
81
+ AND
82
+ (jsonb_array_length(data->'tags') <= 5)
83
+ AND
84
+ (public.que_validate_tags(data->'tags'))
85
+ )
86
+ )
87
+ ),
88
+ ADD CONSTRAINT error_length CHECK (
89
+ (char_length(last_error_message) <= 500) AND
90
+ (char_length(last_error_backtrace) <= 10000)
91
+ );
92
+
93
+ -- This is somewhat heretical, but we're going to need some more flexible
94
+ -- storage to support various features without requiring a ton of migrations,
95
+ -- which would be a lot of hassle for users. Hopefully this will be used smartly
96
+ -- and sparingly (famous last words).
97
+ CREATE TABLE que_values (
98
+ key text PRIMARY KEY,
99
+ value jsonb NOT NULL DEFAULT '{}',
100
+ CONSTRAINT valid_value CHECK (jsonb_typeof(value) = 'object')
101
+ )
102
+ WITH (FILLFACTOR=90);
103
+
104
+ CREATE UNLOGGED TABLE que_lockers (
105
+ pid integer NOT NULL CONSTRAINT que_lockers_pkey PRIMARY KEY,
106
+ worker_count integer NOT NULL,
107
+ worker_priorities integer[] NOT NULL,
108
+ ruby_pid integer NOT NULL,
109
+ ruby_hostname text NOT NULL,
110
+ queues text[] NOT NULL,
111
+ listening boolean NOT NULL,
112
+
113
+ CONSTRAINT valid_worker_priorities CHECK (
114
+ (array_ndims(worker_priorities) = 1)
115
+ AND
116
+ (array_length(worker_priorities, 1) IS NOT NULL) -- Doesn't do zero.
117
+ ),
118
+
119
+ CONSTRAINT valid_queues CHECK (
120
+ (array_ndims(queues) = 1)
121
+ AND
122
+ (array_length(queues, 1) IS NOT NULL) -- Doesn't do zero.
123
+ )
124
+ );
125
+
126
+ CREATE FUNCTION que_job_notify() RETURNS trigger AS $$
127
+ DECLARE
128
+ locker_pid integer;
129
+ sort_key json;
130
+ BEGIN
131
+ -- Don't do anything if the job is scheduled for a future time.
132
+ IF NEW.run_at IS NOT NULL AND NEW.run_at > now() THEN
133
+ RETURN null;
134
+ END IF;
135
+
136
+ -- Pick a locker to notify of the job's insertion, weighted by their number
137
+ -- of workers. Should bounce pseudorandomly between lockers on each
138
+ -- invocation, hence the md5-ordering, but still touch each one equally,
139
+ -- hence the modulo using the job_id.
140
+ SELECT pid
141
+ INTO locker_pid
142
+ FROM (
143
+ SELECT *, last_value(row_number) OVER () + 1 AS count
144
+ FROM (
145
+ SELECT *, row_number() OVER () - 1 AS row_number
146
+ FROM (
147
+ SELECT *
148
+ FROM public.que_lockers ql, generate_series(1, ql.worker_count) AS id
149
+ WHERE
150
+ listening AND
151
+ queues @> ARRAY[NEW.queue]
152
+ ORDER BY md5(pid::text || id::text)
153
+ ) t1
154
+ ) t2
155
+ ) t3
156
+ WHERE NEW.id % count = row_number;
157
+
158
+ IF locker_pid IS NOT NULL THEN
159
+ -- There's a size limit to what can be broadcast via LISTEN/NOTIFY, so
160
+ -- rather than throw errors when someone enqueues a big job, just
161
+ -- broadcast the most pertinent information, and let the locker query for
162
+ -- the record after it's taken the lock. The worker will have to hit the
163
+ -- DB in order to make sure the job is still visible anyway.
164
+ SELECT row_to_json(t)
165
+ INTO sort_key
166
+ FROM (
167
+ SELECT
168
+ 'job_available' AS message_type,
169
+ NEW.queue AS queue,
170
+ NEW.priority AS priority,
171
+ NEW.id AS id,
172
+ -- Make sure we output timestamps as UTC ISO 8601
173
+ to_char(NEW.run_at AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS run_at
174
+ ) t;
175
+
176
+ PERFORM pg_notify('que_listener_' || locker_pid::text, sort_key::text);
177
+ END IF;
178
+
179
+ RETURN null;
180
+ END
181
+ $$
182
+ LANGUAGE plpgsql;
183
+
184
+ CREATE TRIGGER que_job_notify
185
+ AFTER INSERT ON que_jobs
186
+ FOR EACH ROW
187
+ EXECUTE PROCEDURE public.que_job_notify();
188
+
189
+ CREATE FUNCTION que_determine_job_state(job public.que_jobs) RETURNS text AS $$
190
+ SELECT
191
+ CASE
192
+ WHEN job.expired_at IS NOT NULL THEN 'expired'
193
+ WHEN job.finished_at IS NOT NULL THEN 'finished'
194
+ WHEN job.error_count > 0 THEN 'errored'
195
+ WHEN job.run_at > CURRENT_TIMESTAMP THEN 'scheduled'
196
+ ELSE 'ready'
197
+ END
198
+ $$
199
+ LANGUAGE SQL;
200
+
201
+ CREATE FUNCTION que_state_notify() RETURNS trigger AS $$
202
+ DECLARE
203
+ row record;
204
+ message json;
205
+ previous_state text;
206
+ current_state text;
207
+ BEGIN
208
+ IF TG_OP = 'INSERT' THEN
209
+ previous_state := 'nonexistent';
210
+ current_state := public.que_determine_job_state(NEW);
211
+ row := NEW;
212
+ ELSIF TG_OP = 'DELETE' THEN
213
+ previous_state := public.que_determine_job_state(OLD);
214
+ current_state := 'nonexistent';
215
+ row := OLD;
216
+ ELSIF TG_OP = 'UPDATE' THEN
217
+ previous_state := public.que_determine_job_state(OLD);
218
+ current_state := public.que_determine_job_state(NEW);
219
+
220
+ -- If the state didn't change, short-circuit.
221
+ IF previous_state = current_state THEN
222
+ RETURN null;
223
+ END IF;
224
+
225
+ row := NEW;
226
+ ELSE
227
+ RAISE EXCEPTION 'Unrecognized TG_OP: %', TG_OP;
228
+ END IF;
229
+
230
+ SELECT row_to_json(t)
231
+ INTO message
232
+ FROM (
233
+ SELECT
234
+ 'job_change' AS message_type,
235
+ row.id AS id,
236
+ row.queue AS queue,
237
+
238
+ coalesce(row.data->'tags', '[]'::jsonb) AS tags,
239
+
240
+ to_char(row.run_at AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS run_at,
241
+ to_char(now() AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS time,
242
+
243
+ CASE row.job_class
244
+ WHEN 'ActiveJob::QueueAdapters::QueAdapter::JobWrapper' THEN
245
+ coalesce(
246
+ row.args->0->>'job_class',
247
+ 'ActiveJob::QueueAdapters::QueAdapter::JobWrapper'
248
+ )
249
+ ELSE
250
+ row.job_class
251
+ END AS job_class,
252
+
253
+ previous_state AS previous_state,
254
+ current_state AS current_state
255
+ ) t;
256
+
257
+ PERFORM pg_notify('que_state', message::text);
258
+
259
+ RETURN null;
260
+ END
261
+ $$
262
+ LANGUAGE plpgsql;
263
+
264
+ CREATE TRIGGER que_state_notify
265
+ AFTER INSERT OR UPDATE OR DELETE ON que_jobs
266
+ FOR EACH ROW
267
+ EXECUTE PROCEDURE public.que_state_notify();
@@ -0,0 +1,73 @@
1
+ DROP TRIGGER que_job_notify ON que_jobs;
2
+ DROP FUNCTION que_job_notify();
3
+
4
+ DROP INDEX que_poll_idx_with_job_schema_version;
5
+
6
+ ALTER TABLE que_jobs
7
+ DROP COLUMN job_schema_version;
8
+
9
+ ALTER TABLE que_lockers
10
+ DROP COLUMN job_schema_version;
11
+
12
+ CREATE FUNCTION que_job_notify() RETURNS trigger AS $$
13
+ DECLARE
14
+ locker_pid integer;
15
+ sort_key json;
16
+ BEGIN
17
+ -- Don't do anything if the job is scheduled for a future time.
18
+ IF NEW.run_at IS NOT NULL AND NEW.run_at > now() THEN
19
+ RETURN null;
20
+ END IF;
21
+
22
+ -- Pick a locker to notify of the job's insertion, weighted by their number
23
+ -- of workers. Should bounce pseudorandomly between lockers on each
24
+ -- invocation, hence the md5-ordering, but still touch each one equally,
25
+ -- hence the modulo using the job_id.
26
+ SELECT pid
27
+ INTO locker_pid
28
+ FROM (
29
+ SELECT *, last_value(row_number) OVER () + 1 AS count
30
+ FROM (
31
+ SELECT *, row_number() OVER () - 1 AS row_number
32
+ FROM (
33
+ SELECT *
34
+ FROM public.que_lockers ql, generate_series(1, ql.worker_count) AS id
35
+ WHERE
36
+ listening AND
37
+ queues @> ARRAY[NEW.queue]
38
+ ORDER BY md5(pid::text || id::text)
39
+ ) t1
40
+ ) t2
41
+ ) t3
42
+ WHERE NEW.id % count = row_number;
43
+
44
+ IF locker_pid IS NOT NULL THEN
45
+ -- There's a size limit to what can be broadcast via LISTEN/NOTIFY, so
46
+ -- rather than throw errors when someone enqueues a big job, just
47
+ -- broadcast the most pertinent information, and let the locker query for
48
+ -- the record after it's taken the lock. The worker will have to hit the
49
+ -- DB in order to make sure the job is still visible anyway.
50
+ SELECT row_to_json(t)
51
+ INTO sort_key
52
+ FROM (
53
+ SELECT
54
+ 'job_available' AS message_type,
55
+ NEW.queue AS queue,
56
+ NEW.priority AS priority,
57
+ NEW.id AS id,
58
+ -- Make sure we output timestamps as UTC ISO 8601
59
+ to_char(NEW.run_at AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS run_at
60
+ ) t;
61
+
62
+ PERFORM pg_notify('que_listener_' || locker_pid::text, sort_key::text);
63
+ END IF;
64
+
65
+ RETURN null;
66
+ END
67
+ $$
68
+ LANGUAGE plpgsql;
69
+
70
+ CREATE TRIGGER que_job_notify
71
+ AFTER INSERT ON que_jobs
72
+ FOR EACH ROW
73
+ EXECUTE PROCEDURE public.que_job_notify();
@@ -0,0 +1,76 @@
1
+ DROP TRIGGER que_job_notify ON que_jobs;
2
+ DROP FUNCTION que_job_notify();
3
+
4
+ ALTER TABLE que_jobs
5
+ ADD COLUMN job_schema_version INTEGER DEFAULT 1;
6
+
7
+ ALTER TABLE que_lockers
8
+ ADD COLUMN job_schema_version INTEGER DEFAULT 1;
9
+
10
+ CREATE INDEX que_poll_idx_with_job_schema_version
11
+ ON que_jobs (job_schema_version, queue, priority, run_at, id)
12
+ WHERE (finished_at IS NULL AND expired_at IS NULL);
13
+
14
+ CREATE FUNCTION que_job_notify() RETURNS trigger AS $$
15
+ DECLARE
16
+ locker_pid integer;
17
+ sort_key json;
18
+ BEGIN
19
+ -- Don't do anything if the job is scheduled for a future time.
20
+ IF NEW.run_at IS NOT NULL AND NEW.run_at > now() THEN
21
+ RETURN null;
22
+ END IF;
23
+
24
+ -- Pick a locker to notify of the job's insertion, weighted by their number
25
+ -- of workers. Should bounce pseudorandomly between lockers on each
26
+ -- invocation, hence the md5-ordering, but still touch each one equally,
27
+ -- hence the modulo using the job_id.
28
+ SELECT pid
29
+ INTO locker_pid
30
+ FROM (
31
+ SELECT *, last_value(row_number) OVER () + 1 AS count
32
+ FROM (
33
+ SELECT *, row_number() OVER () - 1 AS row_number
34
+ FROM (
35
+ SELECT *
36
+ FROM public.que_lockers ql, generate_series(1, ql.worker_count) AS id
37
+ WHERE
38
+ listening AND
39
+ queues @> ARRAY[NEW.queue] AND
40
+ ql.job_schema_version = NEW.job_schema_version
41
+ ORDER BY md5(pid::text || id::text)
42
+ ) t1
43
+ ) t2
44
+ ) t3
45
+ WHERE NEW.id % count = row_number;
46
+
47
+ IF locker_pid IS NOT NULL THEN
48
+ -- There's a size limit to what can be broadcast via LISTEN/NOTIFY, so
49
+ -- rather than throw errors when someone enqueues a big job, just
50
+ -- broadcast the most pertinent information, and let the locker query for
51
+ -- the record after it's taken the lock. The worker will have to hit the
52
+ -- DB in order to make sure the job is still visible anyway.
53
+ SELECT row_to_json(t)
54
+ INTO sort_key
55
+ FROM (
56
+ SELECT
57
+ 'job_available' AS message_type,
58
+ NEW.queue AS queue,
59
+ NEW.priority AS priority,
60
+ NEW.id AS id,
61
+ -- Make sure we output timestamps as UTC ISO 8601
62
+ to_char(NEW.run_at AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS run_at
63
+ ) t;
64
+
65
+ PERFORM pg_notify('que_listener_' || locker_pid::text, sort_key::text);
66
+ END IF;
67
+
68
+ RETURN null;
69
+ END
70
+ $$
71
+ LANGUAGE plpgsql;
72
+
73
+ CREATE TRIGGER que_job_notify
74
+ AFTER INSERT ON que_jobs
75
+ FOR EACH ROW
76
+ EXECUTE PROCEDURE public.que_job_notify();
@@ -0,0 +1,8 @@
1
+ DROP INDEX que_jobs_kwargs_gin_idx;
2
+ ALTER TABLE que_jobs DROP COLUMN kwargs;
3
+
4
+ ALTER INDEX que_poll_idx RENAME TO que_poll_idx_with_job_schema_version;
5
+ CREATE INDEX que_poll_idx ON que_jobs (queue, priority, run_at, id) WHERE (finished_at IS NULL AND expired_at IS NULL);
6
+
7
+ ALTER TABLE que_jobs ALTER COLUMN job_schema_version SET DEFAULT 1;
8
+ ALTER TABLE que_jobs ALTER COLUMN job_schema_version DROP NOT NULL;
@@ -0,0 +1,8 @@
1
+ ALTER TABLE que_jobs ADD COLUMN kwargs JSONB NOT NULL DEFAULT '{}';
2
+ CREATE INDEX que_jobs_kwargs_gin_idx ON que_jobs USING gin (kwargs jsonb_path_ops);
3
+
4
+ DROP INDEX que_poll_idx;
5
+ ALTER INDEX que_poll_idx_with_job_schema_version RENAME TO que_poll_idx;
6
+
7
+ ALTER TABLE que_jobs ALTER COLUMN job_schema_version DROP DEFAULT;
8
+ ALTER TABLE que_jobs ALTER COLUMN job_schema_version SET NOT NULL;
@@ -0,0 +1,5 @@
1
+ DROP TRIGGER que_job_notify ON que_jobs;
2
+ CREATE TRIGGER que_job_notify
3
+ AFTER INSERT ON que_jobs
4
+ FOR EACH ROW
5
+ EXECUTE PROCEDURE public.que_job_notify();
@@ -0,0 +1,13 @@
1
+ DROP TRIGGER que_job_notify ON que_jobs;
2
+ CREATE TRIGGER que_job_notify
3
+ AFTER INSERT ON que_jobs
4
+ FOR EACH ROW
5
+ WHEN (NOT coalesce(current_setting('que.skip_notify', true), '') = 'true')
6
+ EXECUTE PROCEDURE public.que_job_notify();
7
+
8
+ DROP TRIGGER que_state_notify ON que_jobs;
9
+ CREATE TRIGGER que_state_notify
10
+ AFTER INSERT OR UPDATE OR DELETE ON que_jobs
11
+ FOR EACH ROW
12
+ WHEN (NOT coalesce(current_setting('que.skip_notify', true), '') = 'true')
13
+ EXECUTE PROCEDURE public.que_state_notify();
@@ -3,28 +3,32 @@
3
3
  module Que
4
4
  module Migrations
5
5
  # In order to ship a schema change, add the relevant up and down sql files
6
- # to the migrations directory, and bump the version both here and in the
7
- # add_que generator template.
8
- CURRENT_VERSION = 3
6
+ # to the migrations directory, and bump the version here.
7
+ CURRENT_VERSION = 7
9
8
 
10
9
  class << self
11
- def migrate!(options = {:version => CURRENT_VERSION})
10
+ def migrate!(version:)
12
11
  Que.transaction do
13
- version = options[:version]
12
+ current = db_version
14
13
 
15
- if (current = db_version) == version
14
+ if current == version
16
15
  return
17
16
  elsif current < version
18
- direction = 'up'
17
+ direction = :up
19
18
  steps = ((current + 1)..version).to_a
20
19
  elsif current > version
21
- direction = 'down'
20
+ direction = :down
22
21
  steps = ((version + 1)..current).to_a.reverse
23
22
  end
24
23
 
25
24
  steps.each do |step|
26
- sql = File.read("#{File.dirname(__FILE__)}/migrations/#{step}/#{direction}.sql")
27
- Que.execute(sql)
25
+ filename = [
26
+ File.dirname(__FILE__),
27
+ 'migrations',
28
+ step,
29
+ direction,
30
+ ].join('/') << '.sql'
31
+ Que.execute(File.read(filename))
28
32
  end
29
33
 
30
34
  set_db_version(version)
@@ -32,24 +36,39 @@ module Que
32
36
  end
33
37
 
34
38
  def db_version
35
- result = Que.execute <<-SQL
36
- SELECT relname, description
37
- FROM pg_class
38
- LEFT JOIN pg_description ON pg_description.objoid = pg_class.oid
39
- WHERE relname = 'que_jobs'
40
- SQL
39
+ result =
40
+ Que.execute <<-SQL
41
+ SELECT relname, description
42
+ FROM pg_class
43
+ LEFT JOIN pg_description ON pg_description.objoid = pg_class.oid
44
+ WHERE relname = 'que_jobs'
45
+ SQL
41
46
 
42
47
  if result.none?
43
48
  # No table in the database at all.
44
49
  0
45
50
  elsif (d = result.first[:description]).nil?
46
- # There's a table, it was just created before the migration system existed.
47
- 1
51
+ # The table exists but the version comment is missing
52
+ _raise_db_version_comment_missing_error
48
53
  else
49
54
  d.to_i
50
55
  end
51
56
  end
52
57
 
58
+ # The que_jobs table could be missing the schema version comment either due to:
59
+ # - Being created before the migration system existed; or
60
+ # - A bug in Rails schema dump in some versions of Rails
61
+ # The former is the case on Que versions prior to v0.5.0 (2014-01-14). Upgrading directly from there is unsupported, so we just raise in all cases where the comment is missing.
62
+ def _raise_db_version_comment_missing_error
63
+ raise Error, <<~ERROR
64
+ Cannot determine Que DB schema version.
65
+
66
+ The que_jobs table is missing its comment recording the Que DB schema version. This is likely due to a bug in Rails schema dump in Rails 7 versions prior to 7.0.3, omitting comments - see https://github.com/que-rb/que/issues/363. Please determine the appropriate schema version from your migrations and record it manually by running the following SQL (replacing version as appropriate):
67
+
68
+ COMMENT ON TABLE que_jobs IS 'version';
69
+ ERROR
70
+ end
71
+
53
72
  def set_db_version(version)
54
73
  i = version.to_i
55
74
  Que.execute "COMMENT ON TABLE que_jobs IS '#{i}'" unless i.zero?