que 0.11.3 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114) hide show
  1. checksums.yaml +5 -5
  2. data/.github/workflows/tests.yml +51 -0
  3. data/.gitignore +2 -0
  4. data/.ruby-version +1 -0
  5. data/CHANGELOG.md +502 -97
  6. data/Dockerfile +20 -0
  7. data/LICENSE.txt +1 -1
  8. data/README.md +205 -59
  9. data/auto/dev +21 -0
  10. data/auto/pre-push-hook +30 -0
  11. data/auto/psql +9 -0
  12. data/auto/test +5 -0
  13. data/auto/test-postgres-14 +17 -0
  14. data/bin/que +8 -81
  15. data/docker-compose.yml +47 -0
  16. data/docs/README.md +881 -0
  17. data/lib/que/active_job/extensions.rb +114 -0
  18. data/lib/que/active_record/connection.rb +51 -0
  19. data/lib/que/active_record/model.rb +48 -0
  20. data/lib/que/command_line_interface.rb +259 -0
  21. data/lib/que/connection.rb +198 -0
  22. data/lib/que/connection_pool.rb +78 -0
  23. data/lib/que/job.rb +210 -103
  24. data/lib/que/job_buffer.rb +255 -0
  25. data/lib/que/job_methods.rb +176 -0
  26. data/lib/que/listener.rb +176 -0
  27. data/lib/que/locker.rb +507 -0
  28. data/lib/que/metajob.rb +47 -0
  29. data/lib/que/migrations/4/down.sql +48 -0
  30. data/lib/que/migrations/4/up.sql +267 -0
  31. data/lib/que/migrations/5/down.sql +73 -0
  32. data/lib/que/migrations/5/up.sql +76 -0
  33. data/lib/que/migrations/6/down.sql +8 -0
  34. data/lib/que/migrations/6/up.sql +8 -0
  35. data/lib/que/migrations/7/down.sql +5 -0
  36. data/lib/que/migrations/7/up.sql +13 -0
  37. data/lib/que/migrations.rb +37 -18
  38. data/lib/que/poller.rb +274 -0
  39. data/lib/que/rails/railtie.rb +12 -0
  40. data/lib/que/result_queue.rb +35 -0
  41. data/lib/que/sequel/model.rb +52 -0
  42. data/lib/que/utils/assertions.rb +62 -0
  43. data/lib/que/utils/constantization.rb +19 -0
  44. data/lib/que/utils/error_notification.rb +68 -0
  45. data/lib/que/utils/freeze.rb +20 -0
  46. data/lib/que/utils/introspection.rb +50 -0
  47. data/lib/que/utils/json_serialization.rb +21 -0
  48. data/lib/que/utils/logging.rb +79 -0
  49. data/lib/que/utils/middleware.rb +46 -0
  50. data/lib/que/utils/queue_management.rb +18 -0
  51. data/lib/que/utils/ruby2_keywords.rb +19 -0
  52. data/lib/que/utils/transactions.rb +34 -0
  53. data/lib/que/version.rb +5 -1
  54. data/lib/que/worker.rb +145 -149
  55. data/lib/que.rb +103 -159
  56. data/que.gemspec +17 -4
  57. data/scripts/docker-entrypoint +14 -0
  58. data/scripts/test +6 -0
  59. metadata +59 -95
  60. data/.rspec +0 -2
  61. data/.travis.yml +0 -17
  62. data/Gemfile +0 -24
  63. data/docs/advanced_setup.md +0 -106
  64. data/docs/customizing_que.md +0 -200
  65. data/docs/error_handling.md +0 -47
  66. data/docs/inspecting_the_queue.md +0 -114
  67. data/docs/logging.md +0 -50
  68. data/docs/managing_workers.md +0 -80
  69. data/docs/migrating.md +0 -30
  70. data/docs/multiple_queues.md +0 -27
  71. data/docs/shutting_down_safely.md +0 -7
  72. data/docs/using_plain_connections.md +0 -41
  73. data/docs/using_sequel.md +0 -31
  74. data/docs/writing_reliable_jobs.md +0 -117
  75. data/lib/generators/que/install_generator.rb +0 -24
  76. data/lib/generators/que/templates/add_que.rb +0 -13
  77. data/lib/que/adapters/active_record.rb +0 -54
  78. data/lib/que/adapters/base.rb +0 -127
  79. data/lib/que/adapters/connection_pool.rb +0 -16
  80. data/lib/que/adapters/pg.rb +0 -21
  81. data/lib/que/adapters/pond.rb +0 -16
  82. data/lib/que/adapters/sequel.rb +0 -20
  83. data/lib/que/railtie.rb +0 -16
  84. data/lib/que/rake_tasks.rb +0 -59
  85. data/lib/que/sql.rb +0 -152
  86. data/spec/adapters/active_record_spec.rb +0 -152
  87. data/spec/adapters/connection_pool_spec.rb +0 -22
  88. data/spec/adapters/pg_spec.rb +0 -41
  89. data/spec/adapters/pond_spec.rb +0 -22
  90. data/spec/adapters/sequel_spec.rb +0 -57
  91. data/spec/gemfiles/Gemfile1 +0 -18
  92. data/spec/gemfiles/Gemfile2 +0 -18
  93. data/spec/spec_helper.rb +0 -118
  94. data/spec/support/helpers.rb +0 -19
  95. data/spec/support/jobs.rb +0 -35
  96. data/spec/support/shared_examples/adapter.rb +0 -37
  97. data/spec/support/shared_examples/multi_threaded_adapter.rb +0 -46
  98. data/spec/travis.rb +0 -23
  99. data/spec/unit/connection_spec.rb +0 -14
  100. data/spec/unit/customization_spec.rb +0 -251
  101. data/spec/unit/enqueue_spec.rb +0 -245
  102. data/spec/unit/helper_spec.rb +0 -12
  103. data/spec/unit/logging_spec.rb +0 -101
  104. data/spec/unit/migrations_spec.rb +0 -84
  105. data/spec/unit/pool_spec.rb +0 -365
  106. data/spec/unit/run_spec.rb +0 -14
  107. data/spec/unit/states_spec.rb +0 -50
  108. data/spec/unit/stats_spec.rb +0 -46
  109. data/spec/unit/transaction_spec.rb +0 -36
  110. data/spec/unit/work_spec.rb +0 -407
  111. data/spec/unit/worker_spec.rb +0 -167
  112. data/tasks/benchmark.rb +0 -3
  113. data/tasks/rspec.rb +0 -14
  114. data/tasks/safe_shutdown.rb +0 -67
data/lib/que/poller.rb ADDED
@@ -0,0 +1,274 @@
1
# frozen_string_literal: true

module Que
  # Polls the que_jobs table for ready jobs and advisory-locks them so no
  # other locker can work them concurrently. One Poller is created per
  # (connection, queue) pair.
  class Poller
    # The following SQL statement locks a batch of jobs using a Postgres
    # recursive CTE [1].
    #
    # As noted by the Postgres documentation, it may be slightly easier to
    # think about this expression as iteration rather than recursion, despite
    # the `RECURSION` nomenclature defined by the SQL standards committee.
    # Recursion is used here so that jobs in the table can be iterated
    # one-by-one until a lock can be acquired, where a non-recursive `SELECT`
    # would have the undesirable side-effect of locking jobs unnecessarily.
    # For example, the following might lock more than five jobs during
    # execution:
    #
    #   SELECT (j).*, pg_try_advisory_lock((j).id) AS locked
    #   FROM public.que_jobs AS j
    #   LIMIT 5;
    #
    # The CTE will initially produce an "anchor" from the non-recursive term
    # (i.e. before the `UNION`), and then use it as the contents of the
    # working table as it continues to iterate through `que_jobs` looking for
    # locks. The jobs table has an index on (priority, run_at, id) which
    # allows it to walk the jobs table in a stable manner. As noted above, the
    # recursion examines/locks one job at a time. Every time the recursive
    # entry runs, its output becomes the new contents of the working table,
    # and what was previously in the working table is appended to the final
    # result set. For more information on the basic workings of recursive
    # CTEs, see http://www.postgresql.org/docs/devel/static/queries-with.html
    #
    # The polling query is provided a JSONB hash of desired job priorities.
    # For example, if the locker has three workers free that can work a
    # priority less than 5, and two workers free that can work a priority less
    # than 10, the provided priority document is `{"5":3,"10":2}`. The query
    # uses this information to decide what jobs to lock - if only high-
    # priority workers were available, it wouldn't make sense to retrieve low-
    # priority jobs.
    #
    # As each job is retrieved from the table, it is passed to
    # lock_and_update_priorities() (which, for future flexibility, we define
    # as a temporary function attached to the connection rather than embedding
    # permanently into the DB schema). lock_and_update_priorities() attempts
    # to lock the given job and, if it is able to, updates the priorities
    # document to reflect that a job was available for that given priority.
    # When the priorities document is emptied (all the counts of desired jobs
    # for the various priorities have reached zero and been removed), the
    # recursive query returns an empty result and the recursion breaks. This
    # also happens if there aren't enough appropriate jobs in the jobs table.
    #
    # Also note the use of JOIN LATERAL to combine the job data with the
    # output of lock_and_update_priorities(). The naive approach would be to
    # write the SELECT as `SELECT (j).*, (lock_and_update_priorities(..., j)).*`,
    # but the asterisk-expansion of the latter composite row causes the function
    # to be evaluated twice, and to thereby take the advisory lock twice,
    # which complicates the later unlocking step.
    #
    # Thanks to RhodiumToad in #postgresql for help with the original
    # (simpler) version of the recursive job lock CTE.

    SQL[:poll_jobs] =
      %{
        WITH RECURSIVE jobs AS (
          SELECT
            (j).*,
            l.locked,
            l.remaining_priorities
          FROM (
            SELECT j
            FROM public.que_jobs AS j
            WHERE queue = $1::text
              AND job_schema_version = #{Que.job_schema_version}
              AND NOT id = ANY($2::bigint[])
              AND priority <= pg_temp.que_highest_remaining_priority($3::jsonb)
              AND run_at <= now()
              AND finished_at IS NULL AND expired_at IS NULL
            ORDER BY priority, run_at, id
            LIMIT 1
          ) AS t1
          JOIN LATERAL (SELECT * FROM pg_temp.lock_and_update_priorities($3::jsonb, j)) AS l ON true
          UNION ALL (
            SELECT
              (j).*,
              l.locked,
              l.remaining_priorities
            FROM (
              SELECT
                remaining_priorities,
                (
                  SELECT j
                  FROM public.que_jobs AS j
                  WHERE queue = $1::text
                    AND job_schema_version = #{Que.job_schema_version}
                    AND NOT id = ANY($2::bigint[])
                    AND priority <= pg_temp.que_highest_remaining_priority(jobs.remaining_priorities)
                    AND run_at <= now()
                    AND finished_at IS NULL AND expired_at IS NULL
                    AND (priority, run_at, id) >
                      (jobs.priority, jobs.run_at, jobs.id)
                  ORDER BY priority, run_at, id
                  LIMIT 1
                ) AS j

              FROM jobs
              WHERE jobs.id IS NOT NULL AND jobs.remaining_priorities != '{}'::jsonb
              LIMIT 1
            ) AS t1
            JOIN LATERAL (SELECT * FROM pg_temp.lock_and_update_priorities(remaining_priorities, j)) AS l ON true
          )
        )
        SELECT *
        FROM jobs
        WHERE locked
      }

    attr_reader \
      :connection,
      :queue,
      :poll_interval,
      :last_polled_at,
      :last_poll_satisfied

    # connection    - the Que::Connection this poller issues its queries on.
    # queue         - the queue name (String) this poller watches.
    # poll_interval - seconds between polls when the last poll didn't return
    #                 a full batch; may be nil (see #poll_interval_elapsed?).
    def initialize(
      connection:,
      queue:,
      poll_interval:
    )
      @connection = connection
      @queue = queue
      @poll_interval = poll_interval
      @last_polled_at = nil
      @last_poll_satisfied = nil

      Que.internal_log :poller_instantiate, self do
        {
          backend_pid: connection.backend_pid,
          queue: queue,
          poll_interval: poll_interval,
        }
      end
    end

    # Run one poll against the DB.
    #
    # priorities - Hash mapping priority cutoffs to the number of jobs wanted
    #              at each cutoff (serialized into the JSONB doc described
    #              above).
    # held_locks - collection of job ids whose advisory locks are already
    #              held by this process; excluded from the poll.
    #
    # Returns nil when polling is currently suppressed (see #should_poll?),
    # otherwise an array of Metajob instances for the newly locked jobs.
    def poll(
      priorities:,
      held_locks:
    )

      return unless should_poll?

      jobs =
        connection.execute_prepared(
          :poll_jobs,
          [
            @queue,
            # Serialize the held ids as a Postgres bigint[] literal.
            "{#{held_locks.to_a.join(',')}}",
            JSON.dump(priorities),
          ]
        )

      @last_polled_at = Time.now
      @last_poll_satisfied = poll_satisfied?(priorities, jobs)

      Que.internal_log :poller_polled, self do
        {
          queue: @queue,
          locked: jobs.count,
          priorities: priorities,
          held_locks: held_locks.to_a,
          newly_locked: jobs.map { |key| key.fetch(:id) },
        }
      end

      jobs.map! { |job| Metajob.new(job) }
    end

    # Whether a poll should be issued right now. We poll immediately when we
    # have no history yet or the previous poll returned a full batch (more
    # jobs are probably waiting); otherwise we wait out the poll interval.
    def should_poll?
      # Never polled before?
      last_poll_satisfied.nil? ||
      # Plenty of jobs were available last time?
      last_poll_satisfied == true ||
      poll_interval_elapsed?
    end

    # True when more than poll_interval seconds have passed since the last
    # poll. Returns nil (falsy) when no poll_interval is configured.
    def poll_interval_elapsed?
      return unless interval = poll_interval
      (Time.now - last_polled_at) > interval
    end

    class << self
      # Manage some temporary infrastructure (specific to the connection) that
      # we'll use for polling. These could easily be created permanently in a
      # migration, but that'd require another migration if we wanted to tweak
      # them later.

      # Install the pg_temp type/functions the poll query relies on. Must be
      # run once per connection before the poll query is used.
      def setup(connection)
        connection.execute <<-SQL
          -- Temporary composite type we need for our queries to work.
          CREATE TYPE pg_temp.que_query_result AS (
            locked boolean,
            remaining_priorities jsonb
          );

          CREATE FUNCTION pg_temp.lock_and_update_priorities(priorities jsonb, job que_jobs)
          RETURNS pg_temp.que_query_result
          AS $$
            WITH
              -- Take the lock in a CTE because we want to use the result
              -- multiple times while only taking the lock once.
              lock_taken AS (
                SELECT pg_try_advisory_lock((job).id) AS taken
              ),
              relevant AS (
                SELECT priority, count
                FROM (
                  SELECT
                    key::smallint AS priority,
                    value::text::integer AS count
                  FROM jsonb_each(priorities)
                ) t1
                WHERE priority >= (job).priority
                ORDER BY priority ASC
                LIMIT 1
              )
            SELECT
              (SELECT taken FROM lock_taken), -- R
              CASE (SELECT taken FROM lock_taken)
              WHEN false THEN
                -- Simple case - we couldn't lock the job, so don't update the
                -- priorities hash.
                priorities
              WHEN true THEN
                CASE count
                WHEN 1 THEN
                  -- Remove the priority from the JSONB doc entirely, rather
                  -- than leaving a zero entry in it.
                  priorities - priority::text
                ELSE
                  -- Decrement the value in the JSONB doc.
                  jsonb_set(
                    priorities,
                    ARRAY[priority::text],
                    to_jsonb(count - 1)
                  )
                END
              END
            FROM relevant
          $$
          STABLE
          LANGUAGE SQL;

          CREATE FUNCTION pg_temp.que_highest_remaining_priority(priorities jsonb) RETURNS smallint AS $$
            SELECT max(key::smallint) FROM jsonb_each(priorities)
          $$
          STABLE
          LANGUAGE SQL;
        SQL
      end

      # Drop the temporary infrastructure installed by .setup.
      def cleanup(connection)
        connection.execute <<-SQL
          DROP FUNCTION pg_temp.que_highest_remaining_priority(jsonb);
          DROP FUNCTION pg_temp.lock_and_update_priorities(jsonb, que_jobs);
          DROP TYPE pg_temp.que_query_result;
        SQL
      end
    end

    private

    # Did the poll return as many jobs as were wanted at the lowest priority
    # (the numerically largest key)? If so, more jobs are probably available
    # and the next poll shouldn't wait for the interval.
    def poll_satisfied?(priorities, jobs)
      lowest_priority = priorities.keys.max
      jobs.count >= priorities[lowest_priority]
    end
  end
end
@@ -0,0 +1,12 @@
1
# frozen_string_literal: true

module Que
  module Rails
    # Rails integration: exposes Que through `config.que` and wires up
    # sensible defaults when the Railtie file is loaded.
    class Railtie < ::Rails::Railtie
      config.que = Que

      # Resolve the logger lazily via a proc — Rails.logger may not be set
      # up yet at the time this file is loaded.
      Que.logger = proc { ::Rails.logger }
      # Use ActiveRecord's connection pool when ActiveRecord is present.
      Que.connection = ::ActiveRecord if defined? ::ActiveRecord
    end
  end
end
@@ -0,0 +1,35 @@
1
# frozen_string_literal: true

# A thread-safe queue that holds ids for jobs that have been worked. Allows
# appending single/retrieving all ids in a thread-safe fashion.

module Que
  class ResultQueue
    def initialize
      @mutex = Mutex.new
      @array = []
    end

    # Append a single item. Safe to call from multiple threads.
    def push(item)
      sync { @array << item }
    end

    # Atomically empty the queue, returning the removed items in insertion
    # order.
    def clear
      sync { @array.pop(@array.size) }
    end

    # A snapshot copy of the current contents.
    def to_a
      sync { @array.dup }
    end

    # Number of items currently held.
    def length
      sync { @array.length }
    end

    private

    # Run the given block while holding the queue's mutex.
    def sync
      @mutex.synchronize { yield }
    end
  end
end
@@ -0,0 +1,52 @@
1
# frozen_string_literal: true

::Sequel.extension :pg_json_ops

module Que
  module Sequel
    QUALIFIED_TABLE = ::Sequel.qualify(:public, :que_jobs)

    # A Sequel model wrapping the que_jobs table, with dataset subsets for
    # each job state and finders for common lookups.
    class Model < ::Sequel::Model(QUALIFIED_TABLE)
      dataset_module do
        # Conditions describing each of the non-ready job states.
        conditions = {
          errored:   QUALIFIED_TABLE[:error_count] > 0,
          expired:   QUALIFIED_TABLE[:expired_at] !~ nil,
          finished:  QUALIFIED_TABLE[:finished_at] !~ nil,
          scheduled: QUALIFIED_TABLE[:run_at] > ::Sequel::CURRENT_TIMESTAMP,
        }

        # Define paired subsets, e.g. Model.errored / Model.not_errored.
        conditions.each do |name, condition|
          subset name, condition
          subset :"not_#{name}", ~condition
        end

        # A job is ready iff none of the above conditions hold.
        subset :ready,     conditions.values.map(&:~).inject(:&)
        subset :not_ready, conditions.values.        inject(:|)

        # Jobs of the given class. Also matches ActiveJob-wrapped jobs whose
        # wrapped class name (stored as the first element of args) matches —
        # note `&` binds tighter than `|`, so this is A | (B & C).
        def by_job_class(job_class)
          job_class = job_class.name if job_class.is_a?(Class)
          where(
            (QUALIFIED_TABLE[:job_class] =~ job_class) |
            (QUALIFIED_TABLE[:job_class] =~ "ActiveJob::QueueAdapters::QueAdapter::JobWrapper") &
            (QUALIFIED_TABLE[:args].pg_jsonb[0].get_text("job_class") =~ job_class)
          )
        end

        # Jobs in the given queue.
        def by_queue(queue)
          where(QUALIFIED_TABLE[:queue] => queue)
        end

        # Jobs whose data->'tags' JSONB array contains the given tag.
        def by_tag(tag)
          where(QUALIFIED_TABLE[:data].pg_jsonb.contains(JSON.dump(tags: [tag])))
        end

        # Jobs whose args and kwargs JSONB documents contain the given
        # values (JSONB containment, so a subset match).
        def by_args(*args, **kwargs)
          where(
            QUALIFIED_TABLE[:args].pg_jsonb.contains(JSON.dump(args)) &
            QUALIFIED_TABLE[:kwargs].pg_jsonb.contains(JSON.dump(kwargs))
          )
        end
      end
    end
  end
end
@@ -0,0 +1,62 @@
1
# frozen_string_literal: true

# Assertion helpers. Que has a fair amount of internal state, and there's no
# telling what users will try to throw at it, so for ease of debugging issues
# it makes sense to sanity-check frequently.

module Que
  module Utils
    module Assertions
      class AssertionFailed < Error; end

      # Assert that a condition holds, returning the checked object on
      # success and raising AssertionFailed otherwise. A block, if given,
      # supplies the failure message.
      def assert(*args)
        expected, actual, ok = _check_assertion_args(*args)
        return actual if ok

        message =
          if block_given?
            yield.to_s
          elsif expected
            "Expected #{expected.inspect}, got #{actual.inspect}!"
          else
            "Assertion failed!"
          end

        # Pass `caller` so this helper frame doesn't clutter the backtrace.
        raise AssertionFailed, message, caller
      end

      # Non-raising variant: returns true/false for the same argument forms
      # that #assert accepts.
      def assert?(*args)
        _check_assertion_args(*args).last ? true : false
      end

      private

      # Want to support:
      #   assert(x)                       # Truthiness.
      #   assert(thing, other)            # Trip-equals.
      #   assert([thing1, thing2], other) # Multiple Trip-equals.
      #
      # The `second = (second_omitted = true; nil)` default lets us tell an
      # explicitly-passed nil apart from an omitted argument.
      def _check_assertion_args(first, second = (second_omitted = true; nil))
        expected, actual =
          second_omitted ? [nil, first] : [first, second]

        ok =
          if second_omitted
            actual
          elsif expected.is_a?(Array)
            expected.any? { |candidate| candidate === actual }
          else
            expected === actual
          end

        [expected, actual, ok]
      end
    end
  end
end
@@ -0,0 +1,19 @@
1
# frozen_string_literal: true

module Que
  module Utils
    module Constantization
      # Resolve a fully-qualified constant name (e.g. "Foo::Bar") to the
      # constant itself. Delegates to String#constantize when ActiveSupport
      # provides it; otherwise walks the name segments from Object.
      def constantize(string)
        Que.assert String, string

        return string.constantize if string.respond_to?(:constantize)

        # Leading "::" produces an empty segment — drop it before walking.
        string.split('::').reject(&:empty?).inject(Object, &:const_get)
      end
    end
  end
end
@@ -0,0 +1,68 @@
1
# frozen_string_literal: true

module Que
  module Utils
    module ErrorNotification
      # User-configurable callable to be invoked when Que hits an error.
      attr_accessor :error_notifier

      # Invoke the configured error_notifier with the given arguments,
      # trimming them to the callable's arity when it's fixed. Never raises:
      # a failing notifier is logged and nil is returned.
      def notify_error(*args)
        Que.internal_log(:error_notification_attempted) { {args: args.inspect} }

        notifier = error_notifier
        return unless notifier

        # A negative arity means the callable takes a splat — pass
        # everything. Otherwise only pass as many args as it accepts.
        arity = notifier.arity
        args = args.first(arity) if arity >= 0

        notifier.call(*args)
      rescue => error
        Que.log(
          event: :error_notifier_failed,
          level: :error,
          message: "error_notifier callable raised an error",

          error_class: error.class.name,
          error_message: error.message,
          error_backtrace: error.backtrace,
        )
        nil
      end

      ASYNC_QUEUE = Queue.new
      MAX_QUEUE_SIZE = 5

      # Helper method to notify errors asynchronously. For use in
      # high-priority code, where we don't want to be held up by whatever I/O
      # the error notification proc contains. Returns true if the error was
      # enqueued, false if the queue was full.
      def notify_error_async(*args)
        # The size check and the push aren't synchronized, so the queue can
        # briefly exceed MAX_QUEUE_SIZE — harmless, since the cap only exists
        # to prevent unbounded growth in pathological cases.
        return false if ASYNC_QUEUE.size >= MAX_QUEUE_SIZE

        ASYNC_QUEUE.push(args)
        # Puma raises some ugly warnings if you start up a new thread in the
        # background during initialization, so start the async
        # error-reporting thread lazily.
        async_error_thread
        true
      end

      # Lazily start (and memoize) the background thread that drains
      # ASYNC_QUEUE through Que.notify_error.
      def async_error_thread
        CONFIG_MUTEX.synchronize do
          @async_error_thread ||=
            Thread.new do
              Thread.current.abort_on_exception = true
              loop { Que.notify_error(*ASYNC_QUEUE.pop) }
            end
        end
      end
    end
  end
end
@@ -0,0 +1,20 @@
1
# frozen_string_literal: true

# Helper method for recursively freezing a data structure.

module Que
  module Utils
    module Freeze
      # Freezes `thing` in place and, for Arrays and Hashes, every nested
      # element, key, and value. Returns the (now frozen) input.
      def recursively_freeze(thing)
        if thing.is_a?(Hash)
          thing.each do |key, value|
            recursively_freeze(key)
            recursively_freeze(value)
          end
        elsif thing.is_a?(Array)
          thing.each { |element| recursively_freeze(element) }
        end

        thing.freeze
      end
    end
  end
end
@@ -0,0 +1,50 @@
1
# frozen_string_literal: true

# Tools for introspecting the state of the job queue.

module Que
  module Utils
    module Introspection
      # Per-job-class aggregates over unfinished jobs: total count, how many
      # are currently advisory-locked (being worked), error counts, and the
      # oldest run_at. The subquery reassembles the 64-bit advisory lock id
      # from the two 32-bit halves pg_locks exposes (classid/objid).
      SQL[:job_stats] =
        %{
          SELECT job_class,
            count(*) AS count,
            count(locks.id) AS count_working,
            sum((error_count > 0)::int) AS count_errored,
            max(error_count) AS highest_error_count,
            min(run_at) AS oldest_run_at
          FROM public.que_jobs
          LEFT JOIN (
            SELECT (classid::bigint << 32) + objid::bigint AS id
            FROM pg_locks
            WHERE locktype = 'advisory'
          ) locks USING (id)
          WHERE finished_at IS NULL AND expired_at IS NULL
          GROUP BY job_class
          ORDER BY count(*) DESC
        }

      # Returns one row (hash) per job class with the stats described above.
      def job_stats
        execute :job_stats
      end

      # Jobs that are currently advisory-locked, joined with the que_lockers
      # row of the process holding the lock (hostname/pid of the worker).
      SQL[:job_states] =
        %{
          SELECT que_jobs.*,
            pg.ruby_hostname,
            pg.ruby_pid
          FROM public.que_jobs
          JOIN (
            SELECT (classid::bigint << 32) + objid::bigint AS id, que_lockers.*
            FROM pg_locks
            JOIN public.que_lockers USING (pid)
            WHERE locktype = 'advisory'
          ) pg USING (id)
        }

      # Returns one row (hash) per currently-locked job.
      def job_states
        execute :job_states
      end
    end
  end
end
@@ -0,0 +1,21 @@
1
# frozen_string_literal: true

# Logic for serializing to/from JSON. We assume that the standard library's
# JSON module is good enough for our purposes.

require 'json'

module Que
  module Utils
    module JSONSerialization
      # Serialize a Ruby object to a JSON string.
      def serialize_json(object)
        JSON.dump(object)
      end

      # Parse a JSON string into Ruby data with symbolized keys.
      def deserialize_json(json)
        # Allowing `create_additions` would be a security vulnerability.
        JSON.parse(json, symbolize_names: true, create_additions: false)
      end
    end
  end
end
@@ -0,0 +1,79 @@
1
# frozen_string_literal: true

# Tools for logging from Que.

module Que
  module Utils
    module Logging
      # `logger` is the user-facing logger (an object or a callable returning
      # one); `internal_logger` receives Que's own instrumentation.
      attr_accessor :logger, :internal_logger
      attr_writer :log_formatter

      # Log a public event. `event` must be a Symbol; any extra keyword data
      # is merged into the default payload. Formatter errors are caught and
      # reported through the logger itself, so logging never raises.
      def log(event:, level: :info, **extra)
        data = _default_log_data
        data[:event] = Que.assert(Symbol, event)
        data.merge!(extra)

        if l = get_logger
          begin
            # The formatter may return nil/false to suppress the message.
            if output = log_formatter.call(data)
              l.send level, output
            end
          rescue => e
            msg =
              "Error raised from Que.log_formatter proc:" +
              " #{e.class}: #{e.message}\n#{e.backtrace}"

            l.error(msg)
          end
        end
      end

      # Logging method used specifically to instrument Que's internals. There's
      # usually not an internal logger set up, so this method is generally a no-
      # op unless the specs are running or someone turns on internal logging so
      # we can debug an issue. The block supplying extra data is only invoked
      # when an internal logger is configured.
      def internal_log(event, object = nil)
        if l = get_logger(internal: true)
          data = _default_log_data

          data[:internal_event] = Que.assert(Symbol, event)
          data[:object_id] = object.object_id if object
          # NOTE(review): Time#iso8601 requires 'time' to have been loaded
          # elsewhere — confirm against the gem's main require file.
          data[:t] = Time.now.utc.iso8601(6)

          additional = Que.assert(Hash, yield)

          # Make sure that none of our log contents accidentally overwrite our
          # default data contents.
          expected_length = data.length + additional.length
          data.merge!(additional)
          Que.assert(expected_length == data.length) do
            "Bad internal logging keys in: #{additional.keys.inspect}"
          end

          l.info(JSON.dump(data))
        end
      end

      # Resolve the configured logger. A callable is invoked on every call,
      # so loggers can be swapped at runtime (e.g. `proc { Rails.logger }`).
      def get_logger(internal: false)
        if l = internal ? internal_logger : logger
          l.respond_to?(:call) ? l.call : l
        end
      end

      # Formatter for public log output; defaults to JSON.
      def log_formatter
        @log_formatter ||= JSON.method(:dump)
      end

      private

      # Fields included in every log line: library tag, host, process, and
      # thread, so interleaved output from multiple workers can be untangled.
      def _default_log_data
        {
          lib: :que,
          hostname: CURRENT_HOSTNAME,
          pid: Process.pid,
          thread: Thread.current.object_id,
        }
      end
    end
  end
end