que 0.1.0 → 0.2.0
This diff compares the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +25 -0
- data/Gemfile +15 -1
- data/README.md +2 -2
- data/Rakefile +1 -411
- data/lib/que/job.rb +6 -5
- data/lib/que/version.rb +1 -1
- data/lib/que/worker.rb +7 -2
- data/que.gemspec +1 -7
- data/spec/spec_helper.rb +1 -0
- data/spec/{connection_spec.rb → unit/connection_spec.rb} +0 -0
- data/spec/{helper_spec.rb → unit/helper_spec.rb} +4 -0
- data/spec/{pool_spec.rb → unit/pool_spec.rb} +19 -9
- data/spec/{queue_spec.rb → unit/queue_spec.rb} +0 -0
- data/spec/{work_spec.rb → unit/work_spec.rb} +1 -1
- data/spec/{worker_spec.rb → unit/worker_spec.rb} +0 -0
- data/tasks/benchmark.rb +93 -0
- data/tasks/benchmark_queues.rb +398 -0
- data/tasks/rspec.rb +12 -0
- metadata +30 -110
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f76b4a74e17281ea1007a2c313e146bcda43e1fb
+  data.tar.gz: 6ff1046d4cf009062327e79fb9be628032b252ce
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 319ea6bea73cdd606b4c3103beee120564aa29976255b2eb14992029b2f13768e33164b5134ad5e0b39a038086380294f02d7226a5c2e1f5d092ef32ef03a2e0
+  data.tar.gz: 44144ce58a99d38ced64a4e6259bafa66c7d6373ad125844efc27f45c990c764ad1e04e8e1e1b33f834e9b9be6ae3e812f2cfd3f9ce65303d19a070070b63832
data/CHANGELOG.md
ADDED
@@ -0,0 +1,25 @@
+### 0.2.0 (2013-11-30)
+
+* Officially support JRuby 1.7.5+. Earlier versions may work.
+
+  JRuby support requires the use of the `jruby-pg` gem, though that gem seems to currently be incompatible with ActiveRecord, so the ActiveRecord adapter specs don't pass (or even run). It works fine with Sequel and the other adapters, though.
+
+* Officially support Rubinius 2.1.1+. Earlier versions may work.
+
+* Use `multi_json` so we always use the fastest JSON parser available. (BukhariH)
+
+* :sync mode now ignores scheduled jobs (jobs queued with a specific run_at).
+
+### 0.1.0 (2013-11-18)
+
+* Initial public release, after a test-driven rewrite.
+
+  Officially support Ruby 2.0.0 and Postgres 9.2+.
+
+  Also support ActiveRecord and bare PG::Connections, in or out of a ConnectionPool.
+
+  Added a Railtie for easier setup with Rails, as well as a migration generator.
+
+### 0.0.1 (2013-11-07)
+
+* Copy-pasted from an app of mine. Very Sequel-specific. Nobody look at it, let's pretend it never happened.
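A note on the `multi_json` entry above: multi_json is a thin wrapper that delegates to whatever JSON backend is available and falls back to the standard library. The snippet below is a generic illustration of that API, not code taken from Que itself:

    require 'multi_json'

    # MultiJson picks the fastest backend it can find (Oj, Yajl, the stdlib
    # json, ...) and exposes one generic interface on top of it.
    payload = MultiJson.dump('job_class' => 'MyJob', 'args' => [1, {}])
    args    = MultiJson.load(payload)

    MultiJson.adapter # => the backend class that was selected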
data/Gemfile
CHANGED
@@ -1,4 +1,18 @@
 source 'https://rubygems.org'
 
-
+group :development, :test do
+  gem 'rake'
+
+  gem 'activerecord', :require => nil
+  gem 'sequel', :require => nil
+  gem 'connection_pool', :require => nil
+  gem 'pg', :require => nil, :platform => :ruby
+  gem 'pg_jruby', :require => nil, :platform => :jruby
+end
+
+group :test do
+  gem 'rspec', '~> 2.14.1'
+  gem 'pry'
+end
+
 gemspec
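Since every gem in the development/test group above is declared with `:require => nil`, `Bundler.require` will not load any of them automatically; the code that needs an adapter has to require it explicitly. A hypothetical sketch of that explicit loading (not the gem's actual spec helper):

    require 'bundler/setup'

    # These gems are marked :require => nil in the Gemfile, so Bundler.require
    # skips them and they are loaded by hand only where they're needed.
    require 'sequel'
    require 'connection_pool'
    require 'pg' # MRI driver; on JRuby the Gemfile installs pg_jruby instead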
data/README.md
CHANGED
@@ -18,7 +18,7 @@ Que's secondary goal is performance. It won't be able to match the speed or thro
 
 The rakefile includes a benchmark that tries to compare the performance and concurrency of Que's locking mechanism to that of DelayedJob and QueueClassic. On my i5 quad-core laptop, the results are along the lines of:
 
-    ~/que $ rake
+    ~/que $ rake benchmark_queues
     Benchmarking 1000 jobs, 10 workers and synchronous_commit = on...
     Benchmarking delayed_job... 1000 jobs in 30.086127964 seconds = 33 jobs per second
     Benchmarking queue_classic... 1000 jobs in 19.642309724 seconds = 51 jobs per second
@@ -27,7 +27,7 @@ The rakefile includes a benchmark that tries to compare the performance and conc
 
 Or, minus the I/O limitations of my 5400 rpm hard drive:
 
-    ~/que $ SYNCHRONOUS_COMMIT=off rake
+    ~/que $ SYNCHRONOUS_COMMIT=off rake benchmark_queues
     Benchmarking 1000 jobs, 10 workers and synchronous_commit = off...
     Benchmarking delayed_job... 1000 jobs in 4.906474583 seconds = 204 jobs per second
     Benchmarking queue_classic... 1000 jobs in 1.587542394 seconds = 630 jobs per second
data/Rakefile
CHANGED
@@ -1,413 +1,3 @@
 require 'bundler/gem_tasks'
 
-require 'rspec/core/rake_task'
-
-RSpec::Core::RakeTask.new :default do |spec|
-  spec.pattern = './spec/**/*_spec.rb'
-end
-
-# Shortcut to skip the adapter specs, and run only with the basic PG
-# connection. I use this occasionally to make sure ActiveRecord isn't loaded,
-# so any accidental Rails-isms are caught.
-RSpec::Core::RakeTask.new :pg do |spec|
-  spec.pattern = './spec/*_spec.rb'
-end
-
-task :benchmark do
-  # The following is a somewhat simplistic benchmark (aren't they all) meant
-  # to compare the speed and concurrency of the locking mechanisms used by Que
-  # (standard and lateral queries), DelayedJob and QueueClassic - it does this
-  # by simply sending the raw queries that each system sends during operation.
-
-  # It is NOT meant to benchmark the overall performance of each system (which
-  # would include the time each spends working in Ruby), but to see which one
-  # supports the highest concurrency under load, assuming that there will be
-  # many workers and that Postgres will be the bottleneck. I'm unsure how
-  # useful it is for this, but it's a start.
-
-  JOB_COUNT          = (ENV['JOB_COUNT'] || 1000).to_i
-  WORKER_COUNT       = (ENV['WORKER_COUNT'] || 10).to_i
-  SYNCHRONOUS_COMMIT = ENV['SYNCHRONOUS_COMMIT'] || 'on'
-
-  require 'uri'
-  require 'pg'
-  require 'connection_pool'
-
-  uri = URI.parse ENV["DATABASE_URL"] || "postgres://postgres:@localhost/que-test"
-
-  new_connection = proc do
-    PG::Connection.open :host     => uri.host,
-                        :user     => uri.user,
-                        :password => uri.password,
-                        :port     => uri.port || 5432,
-                        :dbname   => uri.path[1..-1]
-  end
-
-  pg = new_connection.call
-
-
-
-  # Necessary setup, mostly for QueueClassic. I apologize for this - I hope your editor supports code folding.
-  pg.async_exec <<-SQL
-    SET SESSION client_min_messages = 'WARNING';
-
-    -- Que table.
-    DROP TABLE IF EXISTS que_jobs;
-    CREATE TABLE que_jobs
-    (
-      priority    integer     NOT NULL DEFAULT 1,
-      run_at      timestamptz NOT NULL DEFAULT now(),
-      job_id      bigserial   NOT NULL,
-      job_class   text        NOT NULL,
-      args        json        NOT NULL DEFAULT '[]'::json,
-      error_count integer     NOT NULL DEFAULT 0,
-      last_error  text,
-
-      CONSTRAINT que_jobs_pkey PRIMARY KEY (priority, run_at, job_id)
-    );
-
-    DROP TABLE IF EXISTS que_lateral_jobs;
-    CREATE TABLE que_lateral_jobs
-    (
-      priority    integer     NOT NULL DEFAULT 1,
-      run_at      timestamptz NOT NULL DEFAULT now(),
-      job_id      bigserial   NOT NULL,
-      job_class   text        NOT NULL,
-      args        json        NOT NULL DEFAULT '[]'::json,
-      error_count integer     NOT NULL DEFAULT 0,
-      last_error  text,
-
-      CONSTRAINT que_lateral_jobs_pkey PRIMARY KEY (priority, run_at, job_id)
-    );
-
-    DROP TABLE IF EXISTS delayed_jobs;
-    -- DelayedJob table.
-    CREATE TABLE delayed_jobs
-    (
-      id serial NOT NULL,
-      priority integer NOT NULL DEFAULT 0,
-      attempts integer NOT NULL DEFAULT 0,
-      handler text NOT NULL,
-      last_error text,
-      run_at timestamp without time zone,
-      locked_at timestamp without time zone,
-      failed_at timestamp without time zone,
-      locked_by character varying(255),
-      queue character varying(255),
-      created_at timestamp without time zone,
-      updated_at timestamp without time zone,
-      CONSTRAINT delayed_jobs_pkey PRIMARY KEY (id)
-    );
-    ALTER TABLE delayed_jobs
-      OWNER TO postgres;
-
-    CREATE INDEX delayed_jobs_priority
-      ON delayed_jobs
-      USING btree
-      (priority, run_at);
-
-
-
-    -- QueueClassic table and functions.
-    DROP FUNCTION IF EXISTS lock_head(tname varchar);
-    DROP FUNCTION IF EXISTS lock_head(q_name varchar, top_boundary integer);
-    DROP FUNCTION IF EXISTS queue_classic_notify() cascade;
-    DROP TABLE IF EXISTS queue_classic_jobs;
-
-    CREATE TABLE queue_classic_jobs (
-      id bigserial PRIMARY KEY,
-      q_name text not null check (length(q_name) > 0),
-      method text not null check (length(method) > 0),
-      args text not null,
-      locked_at timestamptz
-    );
-
-    alter table queue_classic_jobs alter column args type json using (args::json);
-
-    create function queue_classic_notify() returns trigger as $$ begin
-      perform pg_notify(new.q_name, '');
-      return null;
-    end $$ language plpgsql;
-
-    create trigger queue_classic_notify
-    after insert on queue_classic_jobs
-    for each row
-    execute procedure queue_classic_notify();
-
-    CREATE INDEX idx_qc_on_name_only_unlocked ON queue_classic_jobs (q_name, id) WHERE locked_at IS NULL;
-
-    CREATE OR REPLACE FUNCTION lock_head(q_name varchar, top_boundary integer)
-    RETURNS SETOF queue_classic_jobs AS $$
-    DECLARE
-      unlocked bigint;
-      relative_top integer;
-      job_count integer;
-    BEGIN
-      -- The purpose is to release contention for the first spot in the table.
-      -- The select count(*) is going to slow down dequeue performance but allow
-      -- for more workers. Would love to see some optimization here...
-
-      EXECUTE 'SELECT count(*) FROM '
-        || '(SELECT * FROM queue_classic_jobs WHERE q_name = '
-        || quote_literal(q_name)
-        || ' LIMIT '
-        || quote_literal(top_boundary)
-        || ') limited'
-      INTO job_count;
-
-      SELECT TRUNC(random() * (top_boundary - 1))
-      INTO relative_top;
-
-      IF job_count < top_boundary THEN
-        relative_top = 0;
-      END IF;
-
-      LOOP
-        BEGIN
-          EXECUTE 'SELECT id FROM queue_classic_jobs '
-            || ' WHERE locked_at IS NULL'
-            || ' AND q_name = '
-            || quote_literal(q_name)
-            || ' ORDER BY id ASC'
-            || ' LIMIT 1'
-            || ' OFFSET ' || quote_literal(relative_top)
-            || ' FOR UPDATE NOWAIT'
-          INTO unlocked;
-          EXIT;
-        EXCEPTION
-          WHEN lock_not_available THEN
-            -- do nothing. loop again and hope we get a lock
-        END;
-      END LOOP;
-
-      RETURN QUERY EXECUTE 'UPDATE queue_classic_jobs '
-        || ' SET locked_at = (CURRENT_TIMESTAMP)'
-        || ' WHERE id = $1'
-        || ' AND locked_at is NULL'
-        || ' RETURNING *'
-      USING unlocked;
-
-      RETURN;
-    END;
-    $$ LANGUAGE plpgsql;
-
-    CREATE OR REPLACE FUNCTION lock_head(tname varchar)
-    RETURNS SETOF queue_classic_jobs AS $$
-    BEGIN
-      RETURN QUERY EXECUTE 'SELECT * FROM lock_head($1,10)' USING tname;
-    END;
-    $$ LANGUAGE plpgsql;
-
-
-
-
-
-    INSERT INTO que_jobs (job_class, args, priority)
-    SELECT 'Que::Job', ('[' || i || ',{}]')::json, 1
-    FROM generate_Series(1,#{JOB_COUNT}) AS i;
-
-    INSERT INTO que_lateral_jobs (job_class, args, priority)
-    SELECT 'Que::Job', ('[' || i || ',{}]')::json, 1
-    FROM generate_Series(1,#{JOB_COUNT}) AS i;
-
-    INSERT INTO delayed_jobs (handler, run_at, created_at, updated_at)
-    SELECT '--- !ruby/struct:NewsletterJob\ntext: lorem ipsum...\nemails: blah@blah.com\n', now(), now(), now()
-    FROM generate_Series(1,#{JOB_COUNT}) AS i;
-
-    INSERT INTO queue_classic_jobs (q_name, method, args)
-    SELECT 'default', 'Kernel.puts', '["hello world"]'
-    FROM generate_Series(1,#{JOB_COUNT}) AS i;
-
-
-
-
-    -- Necessary tables and functions made, now stock them with jobs and analyze.
-    ANALYZE;
-  SQL
-
-
-  queries = {
-    :que => (
-      <<-SQL
-        WITH RECURSIVE cte AS (
-          SELECT (job).*, pg_try_advisory_lock((job).job_id) AS locked
-          FROM (
-            SELECT job
-            FROM que_jobs AS job
-            WHERE run_at <= now()
-            ORDER BY priority, run_at, job_id
-            LIMIT 1
-          ) AS t1
-          UNION ALL (
-            SELECT (job).*, pg_try_advisory_lock((job).job_id) AS locked
-            FROM (
-              SELECT (
-                SELECT job
-                FROM que_jobs AS job
-                WHERE run_at <= now() AND (priority, run_at, job_id) > (cte.priority, cte.run_at, cte.job_id)
-                ORDER BY priority, run_at, job_id
-                LIMIT 1
-              ) AS job
-              FROM cte
-              WHERE NOT cte.locked
-              LIMIT 1
-            ) AS t1
-          )
-        )
-        SELECT job_id, priority, run_at, args, job_class, error_count
-        FROM cte
-        WHERE locked
-        LIMIT 1
-      SQL
-    ),
-    :que_lateral => (
-      <<-SQL
-        WITH RECURSIVE cte AS (
-          SELECT *, pg_try_advisory_lock(s.job_id) AS locked
-          FROM (
-            SELECT *
-            FROM que_lateral_jobs
-            WHERE run_at <= now()
-            ORDER BY priority, run_at, job_id
-            LIMIT 1
-          ) s
-          UNION ALL (
-            SELECT j.*, pg_try_advisory_lock(j.job_id) AS locked
-            FROM (
-              SELECT *
-              FROM cte
-              WHERE NOT locked
-            ) t,
-            LATERAL (
-              SELECT *
-              FROM que_lateral_jobs
-              WHERE run_at <= now()
-              AND (priority, run_at, job_id) > (t.priority, t.run_at, t.job_id)
-              ORDER BY priority, run_at, job_id
-              LIMIT 1
-            ) j
-          )
-        )
-        SELECT *
-        FROM cte
-        WHERE locked
-        LIMIT 1
-      SQL
-    ),
-    :delayed_job => (
-      # From delayed_job_active_record
-      <<-SQL
-        UPDATE delayed_jobs
-        SET locked_at = now(),
-            locked_by = $1::text
-        WHERE id IN (
-          SELECT id
-          FROM delayed_jobs
-          WHERE (
-            (run_at <= now() AND (locked_at IS NULL OR locked_at < now()) OR locked_by = $1) AND failed_at IS NULL
-          )
-          ORDER BY priority ASC, run_at ASC
-          LIMIT 1
-          FOR UPDATE
-        )
-        RETURNING *
-      SQL
-    )
-  }
-
-  connections = WORKER_COUNT.times.map do
-    conn = new_connection.call
-    conn.async_exec "SET SESSION synchronous_commit = #{SYNCHRONOUS_COMMIT}"
-    queries.each do |name, sql|
-      conn.prepare(name.to_s, sql)
-    end
-    conn
-  end
-
-
-
-  # Track the ids that are worked, to make sure they're all hit.
-  $results = {
-    :delayed_job => [],
-    :queue_classic => [],
-    :que => [],
-    :que_lateral => []
-  }
-
-  def work_job(type, conn)
-    case type
-    when :delayed_job
-      return unless r = conn.exec_prepared("delayed_job", [conn.object_id]).first
-      $results[type] << r['id']
-      conn.async_exec "DELETE FROM delayed_jobs WHERE id = $1", [r['id']]
-
-    when :queue_classic
-      return unless r = conn.async_exec("SELECT * FROM lock_head($1, $2)", ['default', 9]).first
-      $results[type] << r['id']
-      conn.async_exec "DELETE FROM queue_classic_jobs WHERE id = $1", [r['id']]
-
-    when :que
-      begin
-        return unless r = conn.exec_prepared("que").first
-        # Have to double-check that the job is valid, as explained at length in Que::Job.work.
-        return true unless conn.async_exec("SELECT * FROM que_jobs WHERE priority = $1 AND run_at = $2 AND job_id = $3", [r['priority'], r['run_at'], r['job_id']]).first
-        conn.async_exec "DELETE FROM que_jobs WHERE priority = $1 AND run_at = $2 AND job_id = $3", [r['priority'], r['run_at'], r['job_id']]
-        $results[type] << r['job_id']
-      ensure
-        conn.async_exec "SELECT pg_advisory_unlock_all()" if r
-      end
-
-    when :que_lateral
-      begin
-        return unless r = conn.exec_prepared("que_lateral").first
-        return true unless conn.async_exec("SELECT * FROM que_lateral_jobs WHERE priority = $1 AND run_at = $2 AND job_id = $3", [r['priority'], r['run_at'], r['job_id']]).first
-        conn.async_exec "DELETE FROM que_lateral_jobs WHERE priority = $1 AND run_at = $2 AND job_id = $3", [r['priority'], r['run_at'], r['job_id']]
-        $results[type] << r['job_id']
-      ensure
-        conn.async_exec "SELECT pg_advisory_unlock_all()" if r
-      end
-
-    end
-  end
-
-  puts "Benchmarking #{JOB_COUNT} jobs, #{WORKER_COUNT} workers and synchronous_commit = #{SYNCHRONOUS_COMMIT}..."
-
-  {
-    :delayed_job => :delayed_jobs,
-    :queue_classic => :queue_classic_jobs,
-    :que => :que_jobs,
-    :que_lateral => :que_lateral_jobs
-  }.each do |type, table|
-    print "Benchmarking #{type}... "
-    start = Time.now
-
-    threads = connections.map do |conn|
-      Thread.new do
-        loop do
-          begin
-            break unless work_job(type, conn)
-          rescue
-            # DelayedJob deadlocks sometimes.
-          end
-        end
-      end
-    end
-
-    threads.each &:join
-    time = Time.now - start
-    puts "#{JOB_COUNT} jobs in #{time} seconds = #{(JOB_COUNT / time).round} jobs per second"
-
-
-    # These checks are commented out because I can't seem to get DelayedJob to
-    # pass them (Que and QueueClassic don't have the same problem). It seems
-    # to repeat some jobs multiple times on every run, and its run times are
-    # also highly variable.
-
-    # worked = $results[type].map(&:to_i).sort
-    # puts "Jobs worked more than once! #{worked.inspect}" unless worked == worked.uniq
-    # puts "Jobs worked less than once! #{worked.inspect}" unless worked.length == JOB_COUNT
-
-    puts "Jobs left in DB" unless pg.async_exec("SELECT count(*) FROM #{table}").first['count'].to_i == 0
-    puts "Advisory locks left over!" if pg.async_exec("SELECT * FROM pg_locks WHERE locktype = 'advisory'").first
-  end
-end
+Dir["./tasks/*.rb"].sort.each &method(:require)
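The replacement Rakefile simply loads every file under tasks/ (the new data/tasks/benchmark.rb, data/tasks/benchmark_queues.rb and data/tasks/rspec.rb listed in the summary above). As a rough illustration of what such a file contains, here is a minimal sketch of a task file that this loop would pick up; it mirrors the default RSpec task removed above rather than reproducing the gem's actual tasks/rspec.rb:

    # tasks/rspec.rb -- illustrative sketch only
    require 'rspec/core/rake_task'

    # Default task: run the whole spec suite, as the removed Rakefile did.
    RSpec::Core::RakeTask.new :default do |spec|
      spec.pattern = './spec/**/*_spec.rb'
    end

Because the Rakefile pulls these files in with a plain `require`, each task file is ordinary Ruby loaded exactly once, so the tasks behave as if they were still defined inline.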