que 1.0.0.beta4 → 1.4.0

data/lib/que/job.rb CHANGED
@@ -12,7 +12,7 @@ module Que
  SQL[:insert_job] =
  %{
  INSERT INTO public.que_jobs
- (queue, priority, run_at, job_class, args, data)
+ (queue, priority, run_at, job_class, args, data, job_schema_version)
  VALUES
  (
  coalesce($1, 'default')::text,
@@ -20,7 +20,8 @@ module Que
  coalesce($3, now())::timestamptz,
  $4::text,
  coalesce($5, '[]')::jsonb,
- coalesce($6, '{}')::jsonb
+ coalesce($6, '{}')::jsonb,
+ #{Que.job_schema_version}
  )
  RETURNING *
  }
@@ -57,22 +58,18 @@ module Que

  def enqueue(
  *args,
- queue: nil,
- priority: nil,
- run_at: nil,
- job_class: nil,
- tags: nil,
+ job_options: {},
  **arg_opts
  )
-
+ arg_opts, job_options = _extract_job_options(arg_opts, job_options.dup)
  args << arg_opts if arg_opts.any?

- if tags
- if tags.length > MAXIMUM_TAGS_COUNT
- raise Que::Error, "Can't enqueue a job with more than #{MAXIMUM_TAGS_COUNT} tags! (passed #{tags.length})"
+ if job_options[:tags]
+ if job_options[:tags].length > MAXIMUM_TAGS_COUNT
+ raise Que::Error, "Can't enqueue a job with more than #{MAXIMUM_TAGS_COUNT} tags! (passed #{job_options[:tags].length})"
  end

- tags.each do |tag|
+ job_options[:tags].each do |tag|
  if tag.length > MAXIMUM_TAG_LENGTH
  raise Que::Error, "Can't enqueue a job with a tag longer than 100 characters! (\"#{tag}\")"
  end
@@ -80,13 +77,13 @@ module Que
  end

  attrs = {
- queue: queue || resolve_que_setting(:queue) || Que.default_queue,
- priority: priority || resolve_que_setting(:priority),
- run_at: run_at || resolve_que_setting(:run_at),
+ queue: job_options[:queue] || resolve_que_setting(:queue) || Que.default_queue,
+ priority: job_options[:priority] || resolve_que_setting(:priority),
+ run_at: job_options[:run_at] || resolve_que_setting(:run_at),
  args: Que.serialize_json(args),
- data: tags ? Que.serialize_json(tags: tags) : "{}",
+ data: job_options[:tags] ? Que.serialize_json(tags: job_options[:tags]) : "{}",
  job_class: \
- job_class || name ||
+ job_options[:job_class] || name ||
  raise(Error, "Can't enqueue an anonymous subclass of Que::Job"),
  }

@@ -139,6 +136,27 @@ module Que
  end
  end
  end
+
+ def _extract_job_options(arg_opts, job_options)
+ deprecated_job_option_names = []
+
+ %i[queue priority run_at job_class tags].each do |option_name|
+ next unless arg_opts.key?(option_name) && job_options[option_name].nil?
+
+ job_options[option_name] = arg_opts.delete(option_name)
+ deprecated_job_option_names << option_name
+ end
+
+ _log_job_options_deprecation(deprecated_job_option_names)
+
+ [arg_opts, job_options]
+ end
+
+ def _log_job_options_deprecation(deprecated_job_option_names)
+ return unless deprecated_job_option_names.any?
+
+ warn "Passing job options like (#{deprecated_job_option_names.join(', ')}) to `JobClass.enqueue` as top level keyword args has been deprecated and will be removed in version 2.0. Please wrap job options in an explicit `job_options` keyword arg instead."
+ end
  end

  # Set up some defaults.
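How this lands for callers, as a hedged sketch (the ProcessPayment class and its arguments are illustrative, not part of the gem; the behaviour follows from the enqueue signature and deprecation warning above):

  class ProcessPayment < Que::Job
    def run(payment_id)
      # job body goes here
    end
  end

  # Deprecated as of this release: job options passed as top-level keyword args.
  # Still accepted, but triggers the warning added in _log_job_options_deprecation.
  ProcessPayment.enqueue(42, queue: "billing", priority: 10)

  # Preferred form: wrap job options in an explicit job_options hash so they
  # can never collide with the job's own keyword arguments.
  ProcessPayment.enqueue(42, job_options: { queue: "billing", priority: 10 })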
@@ -6,7 +6,7 @@

  module Que
  class JobBuffer
- attr_reader :maximum_size, :minimum_size, :priority_queues
+ attr_reader :maximum_size, :priority_queues

  # Since we use a mutex, which is not reentrant, we have to be a little
  # careful to not call a method that locks the mutex when we've already
@@ -17,20 +17,11 @@ module Que

  def initialize(
  maximum_size:,
- minimum_size:,
  priorities:
  )
  @maximum_size = Que.assert(Integer, maximum_size)
  Que.assert(maximum_size >= 0) { "maximum_size for a JobBuffer must be at least zero!" }

- @minimum_size = Que.assert(Integer, minimum_size)
- Que.assert(minimum_size >= 0) { "minimum_size for a JobBuffer must be at least zero!" }
-
- Que.assert(minimum_size <= maximum_size) do
- "minimum buffer size (#{minimum_size}) is " \
- "greater than the maximum buffer size (#{maximum_size})!"
- end
-
  @stop = false
  @array = []
  @mutex = Mutex.new
@@ -59,10 +50,8 @@ module Que

  # Relying on the hash's contents being sorted, here.
  priority_queues.reverse_each do |_, pq|
- pq.waiting_count.times do
- job = _shift_job(pq.priority)
- break if job.nil? # False would mean we're stopping.
- pq.push(job)
+ pq.populate do
+ _shift_job(pq.priority)
  end
  end

@@ -75,7 +64,7 @@ module Que

  def shift(priority = nil)
  queue = priority_queues.fetch(priority) { raise Error, "not a permitted priority! #{priority}" }
- queue.pop
+ queue.pop || shift_job(priority)
  end

  def shift_job(priority = nil)
@@ -158,6 +147,10 @@ module Que
  sync { _stopping? }
  end

+ def job_available?(priority)
+ (job = @array.first) && job.priority_sufficient?(priority)
+ end
+
  private

  def _buffer_space
@@ -210,15 +203,14 @@ module Que
  def pop
  sync do
  loop do
- return false if @stopping
-
- if item = @items.pop
+ if @stopping
+ return false
+ elsif item = @items.pop
  return item
+ elsif job_buffer.job_available?(priority)
+ return false
  end

- job = job_buffer.shift_job(priority)
- return job unless job.nil? # False means we're stopping.
-
  @waiting += 1
  @cv.wait(mutex)
  @waiting -= 1
@@ -226,18 +218,20 @@ module Que
  end
  end

- def push(item)
+ def stop
  sync do
- Que.assert(waiting_count > 0)
- @items << item
- @cv.signal
+ @stopping = true
+ @cv.broadcast
  end
  end

- def stop
+ def populate
  sync do
- @stopping = true
- @cv.broadcast
+ waiting_count.times do
+ job = yield
+ break if job.nil? # False would mean we're stopping.
+ _push(job)
+ end
  end
  end

@@ -250,6 +244,12 @@ module Que
  def sync(&block)
  mutex.synchronize(&block)
  end
+
+ def _push(item)
+ Que.assert(waiting_count > 0)
+ @items << item
+ @cv.signal
+ end
  end
  end
  end
data/lib/que/locker.rb CHANGED
@@ -24,12 +24,12 @@ module Que

  SQL[:register_locker] =
  %{
- INSERT INTO public.que_lockers (pid, worker_count, worker_priorities, ruby_pid, ruby_hostname, listening, queues)
- VALUES (pg_backend_pid(), $1::integer, $2::integer[], $3::integer, $4::text, $5::boolean, $6::text[])
+ INSERT INTO public.que_lockers (pid, worker_count, worker_priorities, ruby_pid, ruby_hostname, listening, queues, job_schema_version)
+ VALUES (pg_backend_pid(), $1::integer, $2::integer[], $3::integer, $4::text, $5::boolean, $6::text[], $7::integer)
  }

  class Locker
- attr_reader :thread, :workers, :job_buffer, :locks
+ attr_reader :thread, :workers, :job_buffer, :locks, :queues, :poll_interval

  MESSAGE_RESOLVERS = {}
  RESULT_RESOLVERS = {}
@@ -45,7 +45,6 @@ module Que

  DEFAULT_POLL_INTERVAL = 5.0
  DEFAULT_WAIT_PERIOD = 50
- DEFAULT_MINIMUM_BUFFER_SIZE = 2
  DEFAULT_MAXIMUM_BUFFER_SIZE = 8
  DEFAULT_WORKER_PRIORITIES = [10, 30, 50, nil, nil, nil].freeze

@@ -57,7 +56,6 @@ module Que
  poll_interval: DEFAULT_POLL_INTERVAL,
  wait_period: DEFAULT_WAIT_PERIOD,
  maximum_buffer_size: DEFAULT_MAXIMUM_BUFFER_SIZE,
- minimum_buffer_size: DEFAULT_MINIMUM_BUFFER_SIZE,
  worker_priorities: DEFAULT_WORKER_PRIORITIES,
  on_worker_start: nil
  )
@@ -77,7 +75,6 @@ module Que
  # ResultQueue to receive messages from workers.
  @job_buffer = JobBuffer.new(
  maximum_size: maximum_buffer_size,
- minimum_size: minimum_buffer_size,
  priorities: worker_priorities.uniq,
  )

@@ -93,7 +90,6 @@ module Que
  poll_interval: poll_interval,
  wait_period: wait_period,
  maximum_buffer_size: maximum_buffer_size,
- minimum_buffer_size: minimum_buffer_size,
  worker_priorities: worker_priorities,
  }
  end
@@ -101,7 +97,20 @@ module Que
  # Local cache of which advisory locks are held by this connection.
  @locks = Set.new

- @queue_names = queues.is_a?(Hash) ? queues.keys : queues
+ @poll_interval = poll_interval
+
+ if queues.is_a?(Hash)
+ @queue_names = queues.keys
+ @queues = queues.transform_values do |interval|
+ interval || poll_interval
+ end
+ else
+ @queue_names = queues
+ @queues = queues.map do |queue_name|
+ [queue_name, poll_interval]
+ end.to_h
+ end
+
  @wait_period = wait_period.to_f / 1000 # Milliseconds to seconds.

  @workers =
@@ -183,11 +192,11 @@ module Que

  @pollers =
  if poll
- queues.map do |queue, interval|
+ @queues.map do |queue_name, interval|
  Poller.new(
  connection: @connection,
- queue: queue,
- poll_interval: interval || poll_interval,
+ queue: queue_name,
+ poll_interval: interval,
  )
  end
  end
@@ -266,6 +275,7 @@ module Que
  CURRENT_HOSTNAME,
  !!@listener,
  "{\"#{@queue_names.join('","')}\"}",
+ Que.job_schema_version,
  ]
  end

@@ -393,10 +403,12 @@ module Que
  }
  end

+ materalize_cte = connection.server_version >= 12_00_00
+
  jobs =
  connection.execute \
  <<-SQL
- WITH jobs AS (SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')}))
+ WITH jobs AS #{materalize_cte ? 'MATERIALIZED' : ''} (SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')}))
  SELECT * FROM jobs WHERE pg_try_advisory_lock(id)
  SQL
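The queue handling above normalizes a Hash of queue names to per-queue poll intervals, with nil falling back to the locker-wide poll_interval. A hedged sketch of what that looks like from the caller's side (queue names and numbers are illustrative, and it assumes the locker is constructed with the queues: keyword used above):

  locker = Que::Locker.new(
    queues: { "default" => nil, "critical" => 0.5 },
    poll_interval: 5.0,
  )

  # The new attr_readers expose the normalized values.
  locker.queues        # => { "default" => 5.0, "critical" => 0.5 }
  locker.poll_interval # => 5.0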
@@ -146,7 +146,9 @@ CREATE FUNCTION que_job_notify() RETURNS trigger AS $$
  FROM (
  SELECT *
  FROM public.que_lockers ql, generate_series(1, ql.worker_count) AS id
- WHERE listening AND queues @> ARRAY[NEW.queue]
+ WHERE
+ listening AND
+ queues @> ARRAY[NEW.queue]
  ORDER BY md5(pid::text || id::text)
  ) t1
  ) t2
@@ -0,0 +1,73 @@
+ DROP TRIGGER que_job_notify ON que_jobs;
+ DROP FUNCTION que_job_notify();
+
+ DROP INDEX que_poll_idx_with_job_schema_version;
+
+ ALTER TABLE que_jobs
+ DROP COLUMN job_schema_version;
+
+ ALTER TABLE que_lockers
+ DROP COLUMN job_schema_version;
+
+ CREATE FUNCTION que_job_notify() RETURNS trigger AS $$
+ DECLARE
+ locker_pid integer;
+ sort_key json;
+ BEGIN
+ -- Don't do anything if the job is scheduled for a future time.
+ IF NEW.run_at IS NOT NULL AND NEW.run_at > now() THEN
+ RETURN null;
+ END IF;
+
+ -- Pick a locker to notify of the job's insertion, weighted by their number
+ -- of workers. Should bounce pseudorandomly between lockers on each
+ -- invocation, hence the md5-ordering, but still touch each one equally,
+ -- hence the modulo using the job_id.
+ SELECT pid
+ INTO locker_pid
+ FROM (
+ SELECT *, last_value(row_number) OVER () + 1 AS count
+ FROM (
+ SELECT *, row_number() OVER () - 1 AS row_number
+ FROM (
+ SELECT *
+ FROM public.que_lockers ql, generate_series(1, ql.worker_count) AS id
+ WHERE
+ listening AND
+ queues @> ARRAY[NEW.queue]
+ ORDER BY md5(pid::text || id::text)
+ ) t1
+ ) t2
+ ) t3
+ WHERE NEW.id % count = row_number;
+
+ IF locker_pid IS NOT NULL THEN
+ -- There's a size limit to what can be broadcast via LISTEN/NOTIFY, so
+ -- rather than throw errors when someone enqueues a big job, just
+ -- broadcast the most pertinent information, and let the locker query for
+ -- the record after it's taken the lock. The worker will have to hit the
+ -- DB in order to make sure the job is still visible anyway.
+ SELECT row_to_json(t)
+ INTO sort_key
+ FROM (
+ SELECT
+ 'job_available' AS message_type,
+ NEW.queue AS queue,
+ NEW.priority AS priority,
+ NEW.id AS id,
+ -- Make sure we output timestamps as UTC ISO 8601
+ to_char(NEW.run_at AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS run_at
+ ) t;
+
+ PERFORM pg_notify('que_listener_' || locker_pid::text, sort_key::text);
+ END IF;
+
+ RETURN null;
+ END
+ $$
+ LANGUAGE plpgsql;
+
+ CREATE TRIGGER que_job_notify
+ AFTER INSERT ON que_jobs
+ FOR EACH ROW
+ EXECUTE PROCEDURE public.que_job_notify();
@@ -0,0 +1,76 @@
+ DROP TRIGGER que_job_notify ON que_jobs;
+ DROP FUNCTION que_job_notify();
+
+ ALTER TABLE que_jobs
+ ADD COLUMN job_schema_version INTEGER DEFAULT 1;
+
+ ALTER TABLE que_lockers
+ ADD COLUMN job_schema_version INTEGER DEFAULT 1;
+
+ CREATE INDEX que_poll_idx_with_job_schema_version
+ ON que_jobs (job_schema_version, queue, priority, run_at, id)
+ WHERE (finished_at IS NULL AND expired_at IS NULL);
+
+ CREATE FUNCTION que_job_notify() RETURNS trigger AS $$
+ DECLARE
+ locker_pid integer;
+ sort_key json;
+ BEGIN
+ -- Don't do anything if the job is scheduled for a future time.
+ IF NEW.run_at IS NOT NULL AND NEW.run_at > now() THEN
+ RETURN null;
+ END IF;
+
+ -- Pick a locker to notify of the job's insertion, weighted by their number
+ -- of workers. Should bounce pseudorandomly between lockers on each
+ -- invocation, hence the md5-ordering, but still touch each one equally,
+ -- hence the modulo using the job_id.
+ SELECT pid
+ INTO locker_pid
+ FROM (
+ SELECT *, last_value(row_number) OVER () + 1 AS count
+ FROM (
+ SELECT *, row_number() OVER () - 1 AS row_number
+ FROM (
+ SELECT *
+ FROM public.que_lockers ql, generate_series(1, ql.worker_count) AS id
+ WHERE
+ listening AND
+ queues @> ARRAY[NEW.queue] AND
+ ql.job_schema_version = NEW.job_schema_version
+ ORDER BY md5(pid::text || id::text)
+ ) t1
+ ) t2
+ ) t3
+ WHERE NEW.id % count = row_number;
+
+ IF locker_pid IS NOT NULL THEN
+ -- There's a size limit to what can be broadcast via LISTEN/NOTIFY, so
+ -- rather than throw errors when someone enqueues a big job, just
+ -- broadcast the most pertinent information, and let the locker query for
+ -- the record after it's taken the lock. The worker will have to hit the
+ -- DB in order to make sure the job is still visible anyway.
+ SELECT row_to_json(t)
+ INTO sort_key
+ FROM (
+ SELECT
+ 'job_available' AS message_type,
+ NEW.queue AS queue,
+ NEW.priority AS priority,
+ NEW.id AS id,
+ -- Make sure we output timestamps as UTC ISO 8601
+ to_char(NEW.run_at AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS.US"Z"') AS run_at
+ ) t;
+
+ PERFORM pg_notify('que_listener_' || locker_pid::text, sort_key::text);
+ END IF;
+
+ RETURN null;
+ END
+ $$
+ LANGUAGE plpgsql;
+
+ CREATE TRIGGER que_job_notify
+ AFTER INSERT ON que_jobs
+ FOR EACH ROW
+ EXECUTE PROCEDURE public.que_job_notify();
@@ -4,7 +4,7 @@ module Que
  module Migrations
  # In order to ship a schema change, add the relevant up and down sql files
  # to the migrations directory, and bump the version here.
- CURRENT_VERSION = 4
+ CURRENT_VERSION = 5

  class << self
  def migrate!(version:)
@@ -28,7 +28,6 @@ module Que
  step,
  direction,
  ].join('/') << '.sql'
-
  Que.execute(File.read(filename))
  end
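Bumping CURRENT_VERSION means existing databases need the schema migration before 1.4.0 lockers and pollers (which filter on job_schema_version) will see their jobs. A minimal sketch using the migrate! API shown above; how it gets invoked (Rails migration, rake task, deploy hook) is up to the application:

  # Moves the que schema to the current version (5), adding the
  # job_schema_version columns and the que_poll_idx_with_job_schema_version
  # index; run once per database before booting 1.4.0 workers.
  Que::Migrations.migrate!(version: Que::Migrations::CURRENT_VERSION)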
data/lib/que/poller.rb CHANGED
@@ -68,6 +68,7 @@ module Que
  SELECT j
  FROM public.que_jobs AS j
  WHERE queue = $1::text
+ AND job_schema_version = #{Que.job_schema_version}
  AND NOT id = ANY($2::bigint[])
  AND priority <= pg_temp.que_highest_remaining_priority($3::jsonb)
  AND run_at <= now()
@@ -88,6 +89,7 @@ module Que
  SELECT j
  FROM public.que_jobs AS j
  WHERE queue = $1::text
+ AND job_schema_version = #{Que.job_schema_version}
  AND NOT id = ANY($2::bigint[])
  AND priority <= pg_temp.que_highest_remaining_priority(jobs.remaining_priorities)
  AND run_at <= now()
@@ -144,8 +146,6 @@ module Que

  return unless should_poll?

- expected_count = priorities.inject(0){|s,(_,c)| s + c}
-
  jobs =
  connection.execute_prepared(
  :poll_jobs,
@@ -157,7 +157,7 @@ module Que
  )

  @last_polled_at = Time.now
- @last_poll_satisfied = expected_count == jobs.count
+ @last_poll_satisfied = poll_satisfied?(priorities, jobs)

  Que.internal_log :poller_polled, self do
  {
@@ -263,5 +263,12 @@ module Que
  SQL
  end
  end
+
+ private
+
+ def poll_satisfied?(priorities, jobs)
+ lowest_priority = priorities.keys.max
+ jobs.count >= priorities[lowest_priority]
+ end
  end
  end
data/lib/que/version.rb CHANGED
@@ -1,5 +1,9 @@
  # frozen_string_literal: true

  module Que
- VERSION = '1.0.0.beta4'
+ VERSION = '1.4.0'
+
+ def self.job_schema_version
+ 1
+ end
  end
data/lib/que/worker.rb CHANGED
@@ -54,10 +54,17 @@ module Que
  private

  def work_loop
- # Blocks until a job of the appropriate priority is available. If the
- # queue is shutting down this will return nil, which breaks the loop and
+ # Blocks until a job of the appropriate priority is available.
+ # `fetch_next_metajob` normally returns a job to be processed.
+ # If the queue is shutting down it will return false, which breaks the loop and
  # lets the thread finish.
- while metajob = fetch_next_metajob
+ while (metajob = fetch_next_metajob) != false
+ # If metajob is nil instead of false, we've hit a rare race condition where
+ # there was a job in the buffer when the worker code checked, but the job was
+ # picked up by the time we got around to shifting it off the buffer.
+ # Letting this case go unhandled leads to worker threads exiting prematurely, so
+ # we check explicitly and continue the loop.
+ next if metajob.nil?
  id = metajob.id

  Que.internal_log(:worker_received_job, self) { {id: id} }
@@ -130,6 +137,7 @@ module Que
  error: {
  class: error.class.to_s,
  message: error.message,
+ backtrace: (error.backtrace || []).join("\n").slice(0, 10000),
  },
  )

@@ -157,7 +165,7 @@ module Que
  Que.execute :set_error, [
  delay,
  "#{error.class}: #{error.message}".slice(0, 500),
- error.backtrace.join("\n").slice(0, 10000),
+ (error.backtrace || []).join("\n").slice(0, 10000),
  job.fetch(:id),
  ]
  end
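For context on the (error.backtrace || []) guard: a Ruby exception only carries a backtrace once it has actually been raised, so an exception object that was merely constructed reports nil, and calling join on that nil would raise a NoMethodError in the old code. A standalone illustration (not taken from the gem):

  err = StandardError.new("boom")
  err.backtrace                                    # => nil, since it was never raised
  (err.backtrace || []).join("\n").slice(0, 10000) # => "" rather than a NoMethodError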
data/lib/que.rb CHANGED
@@ -31,6 +31,8 @@ module Que
  require_relative 'que/utils/queue_management'
  require_relative 'que/utils/transactions'

+ require_relative 'que/version'
+
  require_relative 'que/connection'
  require_relative 'que/connection_pool'
  require_relative 'que/job_methods'
@@ -41,7 +43,6 @@ module Que
  require_relative 'que/migrations'
  require_relative 'que/poller'
  require_relative 'que/result_queue'
- require_relative 'que/version'
  require_relative 'que/worker'

  class << self
@@ -0,0 +1,14 @@
+ #!/bin/bash
+
+ set -Eeuo pipefail
+
+ # For using your own dotfiles within the Docker container
+ if [ -f /.docker-rc.d/.docker-bashrc ]; then
+ echo "source /.docker-rc.d/.docker-bashrc" >> ~/.bashrc
+ fi
+
+ gem list -i -e bundler -v "$RUBY_BUNDLER_VERSION" >/dev/null || gem install bundler -v "$RUBY_BUNDLER_VERSION"
+
+ bundle check --dry-run || bundle install
+
+ exec "${@-bash}"
data/scripts/test ADDED
@@ -0,0 +1,5 @@
+ #!/bin/bash
+
+ set -Eeuo pipefail
+
+ bundle exec rake spec "$@"