perfectqueue 0.8.54 → 0.9.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: ed8791f086ecef1178ae6d001dc4addeef32a14f
- data.tar.gz: 342eb4a9e95c7a5fdcc29c909f1f7a99c1e236ea
+ metadata.gz: 12beb7e5ec6143447d1fbf9a973fa8cb9305c5a0
+ data.tar.gz: 782ca440e9554af177adaf7607b8306a57b0612c
  SHA512:
- metadata.gz: 4ca28812f77cfad84f2f4198de80093b41b46ae26f8ebf0fc13fe8054c785e7e24f766baf64839af1a4c0fadc8f38073e5b1abd7c962ad97a32f859a2b5b237e
- data.tar.gz: 13f80b70c03b3a43538f7f687869db8918eb5a319ce7fc2503963d21a0f4fd5fa3f3a1d0163f6e19424c0001061f2b049e80287261fd230a3628e5cae1fd4f76
+ metadata.gz: b6a1a9955411bf1c8fc34be3ad1c3116550107f7b5bd02c56bcd84553716979e73500ef006651beef88614d04bbf8699dc9b8a5b54aee8bb202488f11a55fac6
+ data.tar.gz: d628b629c1538834cb2b6233ca4bb82410800a31386ebc011913e51b1780e92fd47c7597ce67539038e59303a1867ebf3d8423078001bcc7ffe62c48a397636e
data/.travis.yml CHANGED
@@ -1,13 +1,9 @@
  rvm:
- - 2.3.8
- - 2.4.6
- - 2.5.5
- - 2.6.3
+ - 2.1.10
+ - 2.2.5
+ - 2.3.1
  - ruby-head

- services:
- - mysql
-
  script: "bundle exec rake spec"

  before_script:
data/ChangeLog CHANGED
@@ -1,23 +1,10 @@
- == 2020-09-10 version 0.8.54
+ == 2016-09-16 version 0.9.0

- * Enhance logging on inter-process communication (#68)
-
- == 2019-10-10 version 0.8.53
-
- * Extract max_retry_count as config parameter (#66)
-
- == 2019-07-31 version 0.8.52
-
- * Add sleep lock retrying with exponential-backoff (#64)
-
- == 2016-10-24 version 0.8.51
-
- * Retry on temporary DatabaseConnectionError (#59)
-
- == 2016-08-18 version 0.8.50
-
- * Decrease GET_LOCK timeout from 60 to 10 seconds
- * Add 0.5 to 30 seconds of sleep between GET_LOCK retrying
+ * Use UPDATE first strategy (#15)
+ * owner column is added for the table: migration is required
+ * AcquiredTask#timeout is the current value rather than at acquiring
+ * cancel_request is removed
+ * prefetch_break_types is removed

  == 2016-08-02 version 0.8.49

data/README.md CHANGED
@@ -46,10 +46,6 @@ Queue#[](key) #=> #<Task>
  # chack the existance of the task
  Task#exists?

- # request to cancel a task
- # (actual behavior depends on the worker program)
- Task#cancel_request!
-
  # force finish a task
  # be aware that worker programs can't detect it
  Task#force_finish!
@@ -64,8 +60,6 @@ TaskError
  # Workers may get these errors:
  #

- CancelRequestedError < TaskError
-
  AlreadyFinishedError < TaskError

  PreemptedError < TaskError
@@ -206,7 +200,6 @@ Usage: perfectqueue [options] <command>
  commands:
  list Show list of tasks
  submit <key> <type> <data> Submit a new task
- cancel_request <key> Cancel request
  force_finish <key> Force finish a task
  run <class> Run a worker process
  init Initialize a backend database
@@ -243,10 +236,6 @@ options for run:
  k3 system_task waiting 2012-05-18 14:04:02 -0700 2012-05-22 15:04:02 -0700 {"task_id"=>32, "type"=>"system_task"}
  3 entries.

- ### cancel a tasks
-
- $ perfectqueue cancel_request k1
-
  ### force finish a tasks

  $ perfectqueue force_finish k2
data/circle.yml ADDED
@@ -0,0 +1,7 @@
+ machine:
+ ruby:
+ version: 2.2.2
+
+ database:
+ pre:
+ - mysql -e 'create database perfectqueue_test;'
@@ -4,6 +4,7 @@ require_relative 'rdb_compat'

  module PerfectQueue::Backend
  class RDBBackend
+ MAX_RETRY = ::PerfectQueue::Backend::RDBCompatBackend::MAX_RETRY
  DELETE_OFFSET = ::PerfectQueue::Backend::RDBCompatBackend::DELETE_OFFSET
  class Token < Struct.new(:key)
  end
@@ -20,9 +21,6 @@ module PerfectQueue::Backend
  host: u.host,
  port: u.port ? u.port.to_i : 3306
  }
- @pq_connect_timeout = config.fetch(:pq_connect_timeout, 20)
- @max_retry_count = config.fetch(:max_retry_count, 10)
- options[:connect_timeout] = config.fetch(:connect_timeout, 3)
  options[:sslca] = config[:sslca] if config[:sslca]
  db_name = u.path.split('/')[1]
  @db = Sequel.mysql2(db_name, options)
@@ -34,7 +32,6 @@ module PerfectQueue::Backend
  end

  attr_reader :db
- attr_reader :max_retry_count

  def submit(id, data, time=Process.clock_gettime(Process::CLOCK_REALTIME, :second), resource=nil, max_running=nil)
  connect {
@@ -57,31 +54,23 @@ module PerfectQueue::Backend
  end

  private
- def connect
- tmax = Process.clock_gettime(Process::CLOCK_REALTIME, :second) + @pq_connect_timeout
+ def connect(&block)
  @mutex.synchronize do
  retry_count = 0
  begin
- yield
- rescue Sequel::DatabaseConnectionError
- if (retry_count += 1) < @max_retry_count && tmax > Process.clock_gettime(Process::CLOCK_REALTIME, :second)
- STDERR.puts "#{$!}\n retrying."
- sleep 2
- retry
- end
- STDERR.puts "#{$!}\n abort."
- raise
+ block.call
  rescue
  # workaround for "Mysql2::Error: Deadlock found when trying to get lock; try restarting transaction" error
  if $!.to_s.include?('try restarting transaction')
- err = $!.backtrace.map{|bt| " #{bt}" }.unshift($!).join("\n")
+ err = ([$!] + $!.backtrace.map {|bt| " #{bt}" }).join("\n")
  retry_count += 1
- if retry_count < @max_retry_count
- STDERR.puts "#{err}\n retrying."
+ if retry_count < MAX_RETRY
+ STDERR.puts err + "\n retrying."
  sleep 0.5
  retry
+ else
+ STDERR.puts err + "\n abort."
  end
- STDERR.puts "#{err}\n abort."
  end
  raise
  ensure
@@ -33,17 +33,12 @@ module PerfectQueue
  DELETE_OFFSET = 10_0000_0000
  EVENT_HORIZON = 13_0000_0000 # 2011-03-13 07:06:40 UTC

- LOCK_RETRY_INITIAL_INTERVAL = 0.5
- LOCK_RETRY_MAX_INTERVAL = 30
-
  class Token < Struct.new(:key)
  end

  def initialize(client, config)
  super

- @pq_connect_timeout = config.fetch(:pq_connect_timeout, 20)
- @max_retry_count = config.fetch(:max_retry_count, 10)
  url = config[:url]
  @table = config[:table]
  unless @table
@@ -51,9 +46,7 @@ module PerfectQueue
  end

  if /\Amysql2:/i =~ url
- options = {max_connections: 1, sslca: config[:sslca]}
- options[:connect_timeout] = config.fetch(:connect_timeout, 3)
- @db = Sequel.connect(url, options)
+ @db = Sequel.connect(url, {max_connections: 1, sslca: config[:sslca]})
  if config.fetch(:use_connection_pooling, nil) != nil
  @use_connection_pooling = !!config[:use_connection_pooling]
  else
@@ -61,14 +54,11 @@ module PerfectQueue
  end
  @table_lock = lambda {
  locked = nil
- interval = LOCK_RETRY_INITIAL_INTERVAL
  loop do
  @db.fetch("SELECT GET_LOCK('#{@table}', #{LOCK_WAIT_TIMEOUT}) locked") do |row|
  locked = true if row[:locked] == 1
  end
  break if locked
- sleep interval
- interval = [interval * 2, LOCK_RETRY_MAX_INTERVAL].min
  end
  }
  @table_unlock = lambda {
@@ -85,35 +75,57 @@ module PerfectQueue
  # connection test
  }

+ # MySQL's CONNECTION_ID() is a 64bit unsigned integer from the
+ # server's internal thread ID counter. It is unique while the MySQL
+ # server is running.
+ # https://bugs.mysql.com/bug.php?id=19806
+ #
+ # An acquired task is marked with next_timeout and CONNECTION_ID().
+ # Therefore while alive_time is not changed and we don't restart
+ # the server in 1 second, they won't conflict.
  if config[:disable_resource_limit]
+ @update_sql = <<SQL
+ UPDATE `#{@table}`
+ JOIN (
+ SELECT id
+ FROM `#{@table}` FORCE INDEX (`index_#{@table}_on_timeout`)
+ WHERE #{EVENT_HORIZON} < timeout AND timeout <= :now
+ ORDER BY timeout ASC
+ LIMIT :max_acquire FOR UPDATE) AS t1 USING(id)
+ SET timeout=:next_timeout, owner=CONNECTION_ID()
+ SQL
  @sql = <<SQL
  SELECT id, timeout, data, created_at, resource
- FROM `#{@table}`
- WHERE #{EVENT_HORIZON} < timeout AND timeout <= ? AND timeout <= ?
- AND created_at IS NOT NULL
- ORDER BY timeout ASC
- LIMIT ?
+ FROM `#{@table}`
+ WHERE timeout = ? AND owner = CONNECTION_ID()
  SQL
  else
+ @update_sql = <<SQL
+ UPDATE `#{@table}`
+ JOIN (
+ SELECT id, IFNULL(max_running, 1) / (IFNULL(running, 0) + 1) AS weight
+ FROM `#{@table}`
+ LEFT JOIN (
+ SELECT resource, COUNT(1) AS running
+ FROM `#{@table}` AS t1
+ WHERE timeout > :now AND resource IS NOT NULL
+ GROUP BY resource
+ FOR UPDATE
+ ) AS t2 USING(resource)
+ WHERE #{EVENT_HORIZON} < timeout AND timeout <= :now AND IFNULL(max_running - running, 1) > 0
+ ORDER BY weight DESC, timeout ASC
+ LIMIT :max_acquire
+ FOR UPDATE
+ ) AS t3 USING (id)
+ SET timeout = :next_timeout, owner = CONNECTION_ID()
+ SQL
  @sql = <<SQL
- SELECT id, timeout, data, created_at, resource, max_running, IFNULL(max_running, 1) / (IFNULL(running, 0) + 1) AS weight
- FROM `#{@table}`
- LEFT JOIN (
- SELECT resource AS res, COUNT(1) AS running
- FROM `#{@table}` AS T
- WHERE timeout > ? AND created_at IS NOT NULL AND resource IS NOT NULL
- GROUP BY resource
- ) AS R ON resource = res
- WHERE #{EVENT_HORIZON} < timeout AND timeout <= ?
- AND created_at IS NOT NULL
- AND (max_running-running IS NULL OR max_running-running > 0)
- ORDER BY weight DESC, timeout ASC
- LIMIT ?
+ SELECT id, timeout, data, created_at, resource, max_running
+ FROM `#{@table}`
+ WHERE timeout = ? AND owner = CONNECTION_ID()
  SQL
  end

- @prefetch_break_types = config[:prefetch_break_types] || []
-
  @cleanup_interval = config[:cleanup_interval] || DEFAULT_DELETE_INTERVAL
  # If cleanup_interval > max_request_per_child / max_acquire,
  # some processes won't run DELETE query.
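The comment block in the hunk above describes the new "UPDATE first" acquisition: one UPDATE stamps due rows with this connection's CONNECTION_ID() and the new timeout, then a SELECT reads back only the rows carrying that stamp. The following is a stripped-down illustration of the same pattern, not the gem's code: it assumes a hypothetical tasks table with id, data, timeout and owner columns, an environment variable DATABASE_URL, and a single-connection Sequel pool (as the backend itself uses), so that both statements run on the same connection.

    require 'sequel'

    # One connection only, so both statements see the same CONNECTION_ID().
    db = Sequel.connect(ENV.fetch('DATABASE_URL'), max_connections: 1)

    now          = Time.now.to_i
    next_timeout = now + 300   # e.g. an alive_time of 300 seconds
    max_acquire  = 5

    # Step 1: claim up to max_acquire due rows by stamping them with this
    # connection's ID and pushing their timeout forward.
    claimed = db[
      "UPDATE `tasks` JOIN (" \
      "  SELECT id FROM `tasks` WHERE timeout <= ? ORDER BY timeout ASC LIMIT ? FOR UPDATE" \
      ") AS t1 USING(id) SET timeout = ?, owner = CONNECTION_ID()",
      now, max_acquire, next_timeout
    ].update

    # Step 2: read back exactly the rows this connection stamped in step 1.
    if claimed > 0
      db.fetch("SELECT id, data FROM `tasks` WHERE timeout = ? AND owner = CONNECTION_ID()",
               next_timeout) {|row| p row }
    end

Because the SELECT filters on owner = CONNECTION_ID() and the freshly written timeout, concurrent workers on other connections never read back each other's rows, which is what lets 0.9.0 drop the old "SELECT then UPDATE ... id IN (...)" step.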
@@ -122,10 +134,10 @@ SQL
  end

  attr_reader :db
- attr_reader :max_retry_count

  KEEPALIVE = 10
- LOCK_WAIT_TIMEOUT = 10
+ MAX_RETRY = 10
+ LOCK_WAIT_TIMEOUT = 60
  DEFAULT_DELETE_INTERVAL = 20

  def init_database(options)
@@ -139,6 +151,8 @@ SQL
  created_at INT,
  resource VARCHAR(255),
  max_running INT,
+ /* CONNECTION_ID() can be 64bit: https://bugs.mysql.com/bug.php?id=19806 */
+ owner BIGINT(21) UNSIGNED NOT NULL DEFAULT 0,
  PRIMARY KEY (id)
  )
  SQL
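The ChangeLog notes that the owner column "is added for the table: migration is required", but the gem does not ship a migration for existing tables; only init_database (above) creates the column for new ones. A minimal migration sketch, assuming a Sequel connection, a hypothetical PERFECTQUEUE_URL environment variable, and a queue table named perfectqueue (substitute your configured :table); the column definition copies the CREATE TABLE change just shown:

    require 'sequel'

    # Hypothetical connection URL and table name; adjust both to your setup.
    db = Sequel.connect(ENV.fetch('PERFECTQUEUE_URL'))  # e.g. mysql2://user:pass@host/db
    table = 'perfectqueue'

    # Same definition as the new init_database column:
    #   owner BIGINT(21) UNSIGNED NOT NULL DEFAULT 0
    db.run("ALTER TABLE `#{table}` ADD COLUMN owner BIGINT(21) UNSIGNED NOT NULL DEFAULT 0")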
@@ -229,7 +243,7 @@ SQL
  t0 = nil

  if @cleanup_interval_count <= 0
- connect { # TODO: HERE should be still connect_locked ?
+ connect {
  t0=Process.clock_gettime(Process::CLOCK_MONOTONIC)
  @db["DELETE FROM `#{@table}` WHERE timeout <= ?", now-DELETE_OFFSET].delete
  @cleanup_interval_count = @cleanup_interval
@@ -239,34 +253,18 @@ SQL

  connect_locked {
  t0=Process.clock_gettime(Process::CLOCK_MONOTONIC)
+ n = @db[@update_sql, next_timeout: next_timeout, now: now, max_acquire: max_acquire].update
+ if n <= 0
+ return nil
+ end
+
  tasks = []
- @db.fetch(@sql, now, now, max_acquire) {|row|
+ @db.fetch(@sql, next_timeout) {|row|
  attributes = create_attributes(nil, row)
  task_token = Token.new(row[:id])
  task = AcquiredTask.new(@client, row[:id], attributes, task_token)
  tasks.push task
-
- if @prefetch_break_types.include?(attributes[:type])
- break
- end
  }
-
- if tasks.empty?
- return nil
- end
-
- sql = "UPDATE `#{@table}` FORCE INDEX (PRIMARY) SET timeout=? WHERE timeout <= ? AND id IN ("
- params = [sql, next_timeout, now]
- tasks.each {|t| params << t.key }
- sql << (1..tasks.size).map { '?' }.join(',')
- sql << ") AND created_at IS NOT NULL"
-
- n = @db[*params].update
- if n != tasks.size
- # NOTE table lock doesn't work. error!
- return nil
- end
-
  @cleanup_interval_count -= 1

  return tasks
@@ -275,18 +273,6 @@ SQL
  STDERR.puts "PQ:acquire from #{@table}:%6f sec (%d tasks)" % [Process.clock_gettime(Process::CLOCK_MONOTONIC)-t0,tasks.size] if tasks
  end

- # => nil
- def cancel_request(key, options)
- # created_at=0 means cancel_requested
- connect {
- n = @db["UPDATE `#{@table}` SET created_at=0 WHERE id=? AND created_at IS NOT NULL", key].update
- if n <= 0
- raise AlreadyFinishedError, "task key=#{key} does not exist or already finished."
- end
- }
- nil
- end
-
  def force_finish(key, retention_time, options)
  finish(Token.new(key), retention_time, options)
  end
@@ -343,7 +329,7 @@ SQL
  end

  protected
- def connect_locked
+ def connect_locked(&block)
  connect {
  locked = false

@@ -353,7 +339,7 @@ SQL
  locked = true
  end

- return yield
+ return block.call
  ensure
  if @use_connection_pooling && locked
  @table_unlock.call
@@ -362,31 +348,22 @@ SQL
  }
  end

- def connect
+ def connect(&block)
  now = Time.now.to_i
- tmax = now + @pq_connect_timeout
  @mutex.synchronize do
  # keepalive_timeout
  @db.disconnect if now - @last_time > KEEPALIVE

  count = 0
  begin
- yield
+ block.call
  @last_time = now
- rescue Sequel::DatabaseConnectionError
- if (count += 1) < @max_retry_count && tmax > Time.now.to_i
- STDERR.puts "#{$!}\n retrying."
- sleep 2
- retry
- end
- STDERR.puts "#{$!}\n abort."
- raise
  rescue
  # workaround for "Mysql2::Error: Deadlock found when trying to get lock; try restarting transaction" error
  if $!.to_s.include?('try restarting transaction')
  err = ([$!] + $!.backtrace.map {|bt| " #{bt}" }).join("\n")
  count += 1
- if count < @max_retry_count
+ if count < MAX_RETRY
  STDERR.puts err + "\n retrying."
  sleep rand
  retry
@@ -415,8 +392,6 @@ SQL
  if row[:created_at] === nil
  created_at = nil # unknown creation time
  status = TaskStatus::FINISHED
- elsif row[:created_at] <= 0
- status = TaskStatus::CANCEL_REQUESTED
  elsif now && row[:timeout] < now
  created_at = row[:created_at]
  status = TaskStatus::WAITING
@@ -69,11 +69,6 @@ module PerfectQueue
  @backend.acquire(alive_time, max_acquire, options)
  end

- # :message => nil
- def cancel_request(key, options={})
- @backend.cancel_request(key, options)
- end
-
  def force_finish(key, options={})
  retention_time = options[:retention_time] || @retention_time
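With cancel_request removed from the backend, the queue API above, the CLI, and the README's error list, 0.8.x callers have to change when upgrading. A rough sketch of the only queue-level substitute; queue is assumed to be an already-opened PerfectQueue queue and 'k1' an existing task key (both hypothetical here):

    # 0.8.x code like this no longer works in 0.9.0:
    #   queue['k1'].cancel_request!
    #
    # The closest queue-level replacement is a forced finish; as the README
    # warns, worker programs cannot detect it, so cooperative cancellation
    # now has to live in your own worker/task logic.
    queue['k1'].force_finish!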