upperkut 0.7.4 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.circleci/config.yml +1 -1
- data/CHANGELOG.md +13 -0
- data/Dockerfile +7 -0
- data/Gemfile +1 -1
- data/Gemfile.lock +27 -27
- data/Makefile +4 -0
- data/README.md +2 -2
- data/docker-compose.yml +18 -0
- data/lib/upperkut.rb +1 -1
- data/lib/upperkut/cli.rb +3 -2
- data/lib/upperkut/item.rb +22 -0
- data/lib/upperkut/manager.rb +20 -14
- data/lib/upperkut/processor.rb +41 -35
- data/lib/upperkut/strategies/base.rb +14 -0
- data/lib/upperkut/strategies/buffered_queue.rb +118 -21
- data/lib/upperkut/strategies/priority_queue.rb +31 -11
- data/lib/upperkut/strategies/scheduled_queue.rb +44 -38
- data/lib/upperkut/util.rb +48 -8
- data/lib/upperkut/version.rb +1 -1
- data/lib/upperkut/worker.rb +0 -1
- data/lib/upperkut/worker_thread.rb +37 -0
- data/upperkut.gemspec +1 -1
- metadata +10 -7
- data/lib/upperkut/batch_execution.rb +0 -42
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: edf84d6612f4c9577cfe53e578e16983e3936863379c7d4c1041e9c92637f867
+  data.tar.gz: ce0ad624fe65306bb8fe818b8018acfe519326cbb744ef9af0519b26fd25faed
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3f6325ce299c9af7c50c9891acee2e771131d002c27b27fe6e245227d73ce177afd65451116daf7a5af5fc5e546b8b52b796ea82abf05bb8eae88966a16efa46
+  data.tar.gz: 482e700d0cd87b0f6de61c528ef42d7c57bbc96afce8d5d6b1593420a4fa9308f6be668cc8c183fc98b9347a6a33800be9df5bebc3da98ac7a673f2204440cc2
data/.circleci/config.yml
CHANGED
data/CHANGELOG.md
CHANGED
@@ -1,7 +1,20 @@
 # Upperkut changes
 
+1.0.x
+-------
+- Add docker
+- Fix to_underscore bug for ruby 2.7.2 #83 @andrehjr;
+
+
+0.8.x
+--------
+- Added exponential backoff when push_items #57
+- Introducing Item to avoid losing enqueued at and report wrong latency
+  metrics #56 thanks to @jeangnc
+
 0.7.x
 ---------
+- Fix logging timeout message #54 by @jeanmatheussouto
 - Add handle_error method #44
 - Added Datahog Middleware (#42)
 - Added Priority Queue (#39) thanks to @jeangnc and @jeanmatheussouto
data/Dockerfile
ADDED
data/Gemfile
CHANGED
data/Gemfile.lock
CHANGED
@@ -1,41 +1,41 @@
 PATH
   remote: .
   specs:
-    upperkut (0.7.4)
+    upperkut (1.0.2)
       connection_pool (~> 2.2, >= 2.2.2)
       redis (>= 4.1.0, < 5.0.0)
 
 GEM
   remote: https://rubygems.org/
   specs:
-    coderay (1.1.
-    connection_pool (2.2.
-    diff-lcs (1.
-    docile (1.3.
+    coderay (1.1.3)
+    connection_pool (2.2.3)
+    diff-lcs (1.4.4)
+    docile (1.3.2)
     fivemat (1.3.7)
-    json (2.
-    method_source (0.
-    pry (0.
-      coderay (~> 1.1
-      method_source (~>
-    rake (
-    redis (4.
-    rspec (3.
-      rspec-core (~> 3.
-      rspec-expectations (~> 3.
-      rspec-mocks (~> 3.
-    rspec-core (3.
-      rspec-support (~> 3.
-    rspec-expectations (3.
+    json (2.3.1)
+    method_source (1.0.0)
+    pry (0.13.1)
+      coderay (~> 1.1)
+      method_source (~> 1.0)
+    rake (13.0.1)
+    redis (4.2.5)
+    rspec (3.10.0)
+      rspec-core (~> 3.10.0)
+      rspec-expectations (~> 3.10.0)
+      rspec-mocks (~> 3.10.0)
+    rspec-core (3.10.0)
+      rspec-support (~> 3.10.0)
+    rspec-expectations (3.10.0)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.
-    rspec-mocks (3.
+      rspec-support (~> 3.10.0)
+    rspec-mocks (3.10.0)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.
-    rspec-support (3.
+      rspec-support (~> 3.10.0)
+    rspec-support (3.10.0)
     rspec_junit_formatter (0.4.1)
       rspec-core (>= 2, < 4, != 2.12.0)
-    simplecov (0.
+    simplecov (0.17.1)
       docile (~> 1.1)
       json (>= 1.8, < 3)
       simplecov-html (~> 0.10.0)
@@ -48,11 +48,11 @@ DEPENDENCIES
   bundler (>= 1.16)
   fivemat
   pry
-  rake (~>
+  rake (~> 13.0)
   rspec (~> 3.0)
   rspec_junit_formatter
-  simplecov
+  simplecov (< 0.18)
   upperkut!
 
 BUNDLED WITH
-   1.
+   2.1.4
data/Makefile
ADDED
data/README.md
CHANGED
@@ -42,7 +42,7 @@ Or install it yourself as:
 
 2) Start pushing items;
 ```ruby
-
+MyWorker.push_items(
   [
     {
       'id' => SecureRandom.uuid,
@@ -80,7 +80,7 @@ Or install it yourself as:
 2) Start pushing items with `timestamp` parameter;
 ```ruby
 # timestamp is 'Thu, 10 May 2019 23:43:58 GMT'
-
+MyWorker.push_items(
   [
     {
       'timestamp' => '1557531838',
data/docker-compose.yml
ADDED
@@ -0,0 +1,18 @@
+services:
+  gem:
+    build: .
+    volumes:
+      - .:/code
+    environment:
+      - REDIS_URL=redis://redis:6379
+    depends_on:
+      - redis
+  redis:
+    image: redis:5.0.4-alpine
+    command: redis-server --save "" --appendonly yes --appendfsync everysec
+    ports:
+      - 6379:6379
+    volumes:
+      - redis-data:/data
+volumes:
+  redis-data:
data/lib/upperkut.rb
CHANGED
data/lib/upperkut/cli.rb
CHANGED
@@ -57,12 +57,13 @@ module Upperkut
         handle_signal(signal)
       end
     rescue Interrupt
+      timeout = Integer(ENV['UPPERKUT_TIMEOUT'] || 8)
       @logger.info(
-
+        "Stopping managers, wait for #{timeout} seconds and them kill processors"
       )
 
       manager.stop
-      sleep(
+      sleep(timeout)
       manager.kill
       exit(0)
     end
data/lib/upperkut/item.rb
ADDED
@@ -0,0 +1,22 @@
+require 'securerandom'
+
+module Upperkut
+  class Item
+    attr_reader :id, :body, :enqueued_at
+
+    def initialize(id:, body:, enqueued_at: nil)
+      @id = id
+      @body = body
+      @enqueued_at = enqueued_at || Time.now.utc.to_i
+      @nacked = false
+    end
+
+    def nack
+      @nacked = true
+    end
+
+    def nacked?
+      @nacked
+    end
+  end
+end
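The Item class above is what workers receive in `#perform` from 1.0 on: each element keeps the payload in `item.body` plus an `id` and the original `enqueued_at`, and calling `item.nack` flags it for re-queueing once the batch finishes (see the Processor changes below). A minimal usage sketch; the `WebhookWorker` name and `deliver` method are hypothetical, and `include Upperkut::Worker` is the worker mixin assumed from the gem's README:

```ruby
require 'upperkut'

class WebhookWorker
  include Upperkut::Worker # assumed worker mixin, not shown in this diff

  def perform(items)
    items.each do |item|
      begin
        deliver(item.body) # application code, not part of upperkut
      rescue StandardError
        # flag only this item; the strategy re-enqueues nacked items after perform
        item.nack
      end
    end
  end

  private

  def deliver(body)
    raise 'missing url' unless body['url'] # placeholder delivery logic
  end
end
```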
data/lib/upperkut/manager.rb
CHANGED
@@ -1,44 +1,50 @@
 require_relative 'core_ext'
-require_relative '
+require_relative 'worker_thread'
+require_relative 'logging'
 require_relative 'worker'
 
 module Upperkut
   class Manager
     attr_accessor :worker
-    attr_reader :stopped, :logger, :concurrency
+    attr_reader :stopped, :logger, :concurrency
 
     def initialize(opts = {})
       self.worker = opts.fetch(:worker).constantize
       @concurrency = opts.fetch(:concurrency, 1)
-      @logger = opts.fetch(:logger,
+      @logger = opts.fetch(:logger, Logging.logger)
 
       @stopped = false
-      @
+      @threads = []
     end
 
     def run
       @concurrency.times do
-
-        @processors << processor
-        processor.run
+        spawn_thread
       end
     end
 
     def stop
       @stopped = true
+      @threads.each(&:stop)
     end
 
     def kill
-      @
+      @threads.each(&:kill)
    end
 
-    def notify_killed_processor(
-      @
-
+    def notify_killed_processor(thread)
+      @threads.delete(thread)
+      spawn_thread unless @stopped
+    end
+
+    private
+
+    def spawn_thread
+      processor = Processor.new(worker, logger)
 
-
-      @
-
+      thread = WorkerThread.new(self, processor)
+      @threads << thread
+      thread.run
     end
   end
 end
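Manager now supervises WorkerThread/Processor pairs instead of raw processors. A sketch of driving it directly, using the option keys read in `#initialize` above (`:worker` is constantized from a String; the worker name is hypothetical):

```ruby
require 'upperkut'

manager = Upperkut::Manager.new(worker: 'WebhookWorker', concurrency: 2)
manager.run   # spawns two WorkerThreads, each looping Processor#blocking_process

sleep 60      # let the processors drain batches for a while

manager.stop  # flips @stopped and asks each processor loop to exit
manager.kill  # raises Upperkut::Shutdown inside any thread still running
```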
data/lib/upperkut/processor.rb
CHANGED
@@ -1,58 +1,64 @@
-require_relative '
+require_relative 'logging'
 
 module Upperkut
   class Processor
-    def initialize(
-      @
-      @
-      @
-      @
-
-      @sleeping_time = 0
+    def initialize(worker, logger = Logging.logger)
+      @worker = worker
+      @strategy = worker.strategy
+      @worker_instance = worker.new
+      @logger = logger
     end
 
-    def
-
-
-
-
-
-        action: :processor_killed,
-        reason: e,
-        stacktrace: e.backtrace
-      )
-
-      @manager.notify_killed_processor(self)
-    end
+    def process
+      items = @worker.fetch_items.freeze
+      return unless items.any?
+
+      @worker.server_middlewares.invoke(@worker, items) do
+        @worker_instance.perform(items)
       end
-    end
 
-
-
+      nacked_items, pending_ack_items = items.partition(&:nacked?)
+      @strategy.nack(nacked_items) if nacked_items.any?
+      @strategy.ack(pending_ack_items) if pending_ack_items.any?
+    rescue StandardError => error
+      @logger.error(
+        action: :handle_execution_error,
+        ex: error.to_s,
+        backtrace: error.backtrace.join("\n"),
+        item_size: Array(items).size
+      )
 
-
-
+      if items
+        if @worker_instance.respond_to?(:handle_error)
+          @worker_instance.handle_error(error, items)
+          return
+        end
+
+        @strategy.nack(items)
+      end
+
+      raise error
     end
 
-    def
+    def blocking_process
+      sleeping_time = 0
 
-    def process
       loop do
-
+        break if @stopped
 
         if @strategy.process?
-
-
+          sleeping_time = 0
+          process
           next
         end
 
-
-        @logger.debug(sleeping_time:
+        sleeping_time += sleep(@worker.setup.polling_interval)
+        @logger.debug(sleeping_time: sleeping_time)
       end
     end
 
-    def
-
+    def stop
+      @stopped = true
     end
   end
 end
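When `#perform` raises, the rescue above logs the failure and then either nacks the whole batch or, if the worker defines `#handle_error`, hands it the error plus the Item batch and returns without re-enqueueing anything itself. A hedged sketch of such a handler; the worker name, the `Importer` call and the choice to re-push the full batch are illustrative, not from the diff:

```ruby
class ImportWorker
  include Upperkut::Worker # assumed worker mixin

  def perform(items)
    Importer.bulk_insert(items.map(&:body)) # Importer is hypothetical application code
  end

  def handle_error(error, items)
    Upperkut::Logging.logger.warn(
      action: :batch_failed,
      ex: error.to_s,
      item_size: items.size
    )

    # Re-push the same Item objects: Util#normalize_items passes Items through,
    # so enqueued_at (and the latency metric) is preserved across the retry.
    self.class.push_items(items)
  end
end
```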
data/lib/upperkut/strategies/base.rb
CHANGED
@@ -24,6 +24,20 @@ module Upperkut
         raise NotImplementedError
       end
 
+      # Public: Confirms that items have been processed successfully.
+      #
+      # items - The Array of items do be confirmed.
+      def ack(_items)
+        raise NotImplementedError
+      end
+
+      # Public: Informs that items have been not processed successfully and therefore must be re-processed.
+      #
+      # items - The Array of items do be unacknowledged.
+      def nack(_items)
+        raise NotImplementedError
+      end
+
       # Public: Tells when to execute the event processing,
       # when this condition is met so the events are dispatched to
       # the worker.
data/lib/upperkut/strategies/buffered_queue.rb
CHANGED
@@ -7,13 +7,65 @@ module Upperkut
     class BufferedQueue < Upperkut::Strategies::Base
       include Upperkut::Util
 
+      DEQUEUE_ITEMS = %(
+        local key = KEYS[1]
+        local waiting_ack_key = KEYS[2]
+        local batch_size = ARGV[1]
+        local current_timestamp = ARGV[2]
+        local expired_ack_timestamp = ARGV[3] + 1
+
+        -- move expired items back to the queue
+        local expired_ack_items = redis.call("ZRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+        if table.getn(expired_ack_items) > 0 then
+          redis.call("ZREMRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+          for i, item in ipairs(expired_ack_items) do
+            redis.call("RPUSH", key, item)
+          end
+        end
+
+        -- now fetch a batch
+        local items = redis.call("LRANGE", key, 0, batch_size - 1)
+        for i, item in ipairs(items) do
+          redis.call("ZADD", waiting_ack_key, current_timestamp + tonumber('0.' .. i), item)
+        end
+        redis.call("LTRIM", key, batch_size, -1)
+
+        return items
+      ).freeze
+
+      ACK_ITEMS = %(
+        local waiting_ack_key = KEYS[1]
+        local items = ARGV
+
+        for i, item in ipairs(items) do
+          redis.call("ZREM", waiting_ack_key, item)
+        end
+      ).freeze
+
+      NACK_ITEMS = %(
+        local key = KEYS[1]
+        local waiting_ack_key = KEYS[2]
+        local items = ARGV
+
+        for i, item in ipairs(items) do
+          redis.call("ZREM", waiting_ack_key, item)
+          redis.call("RPUSH", key, item)
+        end
+      ).freeze
+
       attr_reader :options
 
       def initialize(worker, options = {})
         @options = options
         @redis_options = options.fetch(:redis, {})
-        @worker
-
+        @worker = worker
+
+        @ack_wait_limit = options.fetch(
+          :ack_wait_limit,
+          Integer(ENV['UPPERKUT_ACK_WAIT_LIMIT'] || 120)
+        )
+
+        @max_wait = options.fetch(
           :max_wait,
           Integer(ENV['UPPERKUT_MAX_WAIT'] || 20)
         )
@@ -27,7 +79,7 @@ module Upperkut
       end
 
       def push_items(items = [])
-        items =
+        items = normalize_items(items)
         return false if items.empty?
 
         redis do |conn|
@@ -38,12 +90,12 @@ module Upperkut
       end
 
       def fetch_items
-
+        batch_size = [@batch_size, size].min
 
         items = redis do |conn|
-          conn.
-
-
+          conn.eval(DEQUEUE_ITEMS,
+                    keys: [key, processing_key],
+                    argv: [batch_size, Time.now.utc.to_i, Time.now.utc.to_i - @ack_wait_limit])
         end
 
         decode_json_items(items)
@@ -53,11 +105,24 @@ module Upperkut
         redis { |conn| conn.del(key) }
       end
 
-      def
-        {
-
-
-
+      def ack(items)
+        raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+        redis do |conn|
+          conn.eval(ACK_ITEMS,
+                    keys: [processing_key],
+                    argv: encode_json_items(items))
+        end
+      end
+
+      def nack(items)
+        raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+        redis do |conn|
+          conn.eval(NACK_ITEMS,
+                    keys: [key, processing_key],
+                    argv: encode_json_items(items))
+        end
       end
 
       def process?
@@ -72,38 +137,70 @@ module Upperkut
         end
       end
 
+      def metrics
+        current_latency = latency
+
+        {
+          'latency' => current_latency,
+          'oldest_unacked_item_age' => oldest_item_age(current_latency),
+          'size' => size
+        }
+      end
+
       private
 
       def key
         "upperkut:buffers:#{to_underscore(@worker.name)}"
       end
 
+      def processing_key
+        "#{key}:processing"
+      end
+
       def fulfill_condition?(buff_size)
         return false if buff_size.zero?
 
         buff_size >= @batch_size || @waiting_time >= @max_wait
       end
 
-      def
-        redis do |conn|
-          conn.
+      def oldest_item_age(current_latency)
+        oldest_processing_item = redis do |conn|
+          items = conn.zrange(processing_key, 0, 0)
+          decode_json_items(items).first
         end
+
+        oldest_processing_age = if oldest_processing_item
+                                  now = Time.now.to_f
+                                  now - oldest_processing_item.enqueued_at.to_f
+                                else
+                                  0
+                                end
+
+        [current_latency, oldest_processing_age].max
       end
 
       def latency
-
-
-        return 0 unless
+        items = redis { |conn| conn.lrange(key, 0, 0) }
+        first_item = decode_json_items(items).first
+        return 0 unless first_item
 
         now = Time.now.to_f
-        now -
+        now - first_item.enqueued_at.to_f
+      end
+
+      def size
+        redis do |conn|
+          conn.llen(key)
+        end
       end
 
       def redis
         raise ArgumentError, 'requires a block' unless block_given?
 
-
-
+        retry_block do
+          redis_pool.with do |conn|
+            yield conn
+          end
         end
       end
 
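BufferedQueue now runs an at-least-once cycle: `fetch_items` moves a batch into the `:processing` sorted set, `ack` removes entries, and `nack` (or an `ack_wait_limit` expiry handled by `DEQUEUE_ITEMS`) pushes them back onto the list. A configuration sketch reusing the hypothetical `ImportWorker` from the earlier example; `ack_wait_limit` and `max_wait` are the options read in `#initialize` above, while `batch_size` and `redis` are assumed from the rest of the class and may differ in the released gem:

```ruby
require 'upperkut/strategies/buffered_queue'

strategy = Upperkut::Strategies::BufferedQueue.new(
  ImportWorker,                    # hypothetical worker class from the sketch above
  batch_size: 500,                 # max items handed to perform per fetch (assumed option)
  max_wait: 20,                    # seconds before a partial batch is processed anyway
  ack_wait_limit: 120,             # seconds an unacked in-flight item waits before re-queueing
  redis: { url: ENV['REDIS_URL'] } # assumed to accept Redis options or a ConnectionPool
)
```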
data/lib/upperkut/strategies/priority_queue.rb
CHANGED
@@ -1,3 +1,7 @@
+require 'upperkut/util'
+require 'upperkut/redis_pool'
+require 'upperkut/strategies/base'
+
 module Upperkut
   module Strategies
     # Public: Queue that prevent a single tenant from taking over.
@@ -23,10 +27,13 @@ module Upperkut
       # processing time.
       ENQUEUE_ITEM = %(
         local increment = 1
-        local
-        local
+        local checkpoint_key = KEYS[1]
+        local counter_key = KEYS[2]
+        local score_key = KEYS[3]
+        local queue_key = KEYS[4]
+        local current_checkpoint = tonumber(redis.call("GET", checkpoint_key)) or 0
+        local current_counter = tonumber(redis.call("INCR", counter_key))
         local current_score = tonumber(redis.call("GET", score_key)) or 0
-        local queue_key = KEYS[3]
         local next_score = nil
 
         if current_score >= current_checkpoint then
@@ -36,7 +43,7 @@ module Upperkut
         end
 
         redis.call("SETEX", score_key, #{ONE_DAY_IN_SECONDS}, next_score)
-        redis.call("ZADD", queue_key, next_score, ARGV[1])
+        redis.call("ZADD", queue_key, next_score + tonumber('0.' .. current_counter), ARGV[1])
 
         return next_score
       ).freeze
@@ -90,7 +97,7 @@ module Upperkut
      #
      # Returns true when success, raise when error.
      def push_items(items = [])
-        items =
+        items = normalize_items(items)
        return false if items.empty?
 
        redis do |conn|
@@ -98,13 +105,14 @@ module Upperkut
          priority_key = @priority_key.call(item)
          score_key = "#{queue_key}:#{priority_key}:score"
 
-          keys = [
+          keys = [checkpoint_key,
+                  counter_key,
                  score_key,
                  queue_key]
 
          conn.eval(ENQUEUE_ITEM,
                    keys: keys,
-                    argv: [encode_json_items(
+                    argv: [encode_json_items(item)])
        end
      end
 
@@ -119,7 +127,7 @@ module Upperkut
 
        items = redis do |conn|
          conn.eval(DEQUEUE_ITEM,
-                    keys: [
+                    keys: [checkpoint_key, queue_key],
                    argv: [batch_size])
        end
 
@@ -131,6 +139,12 @@ module Upperkut
        redis { |conn| conn.del(queue_key) }
      end
 
+      def ack(_items); end
+
+      def nack(items)
+        push_items(items)
+      end
+
      # Public: Tells when to execute the event processing,
      # when this condition is met so the events are dispatched to
      # the worker.
@@ -155,10 +169,14 @@ module Upperkut
 
      private
 
-      def
+      def checkpoint_key
        "#{queue_key}:checkpoint"
      end
 
+      def counter_key
+        "#{queue_key}:counter"
+      end
+
      def queue_key
        "upperkut:priority_queue:#{to_underscore(@worker.name)}"
      end
@@ -178,8 +196,10 @@ module Upperkut
      def redis
        raise ArgumentError, 'requires a block' unless block_given?
 
-
-
+        retry_block do
+          redis_pool.with do |conn|
+            yield conn
+          end
        end
      end
 
data/lib/upperkut/strategies/scheduled_queue.rb
CHANGED
@@ -28,19 +28,24 @@ module Upperkut
 
       def initialize(worker, options = {})
         @options = options
-
-        @redis_pool = setup_redis_pool
+        @redis_options = @options.fetch(:redis, {})
         @worker = worker
+
+        @batch_size = @options.fetch(
+          :batch_size,
+          Integer(ENV['UPPERKUT_BATCH_SIZE'] || 1000)
+        )
       end
 
       def push_items(items = [])
-        items =
+        items = normalize_items(items)
         return false if items.empty?
 
         redis do |conn|
           items.each do |item|
-            ensure_timestamp_attr(item)
-
+            schedule_item = ensure_timestamp_attr(item)
+            timestamp = schedule_item.body['timestamp']
+            conn.zadd(key, timestamp, encode_json_items(schedule_item))
           end
         end
 
@@ -66,6 +71,12 @@ module Upperkut
         redis { |conn| conn.del(key) }
       end
 
+      def ack(_items); end
+
+      def nack(items)
+        push_items(items)
+      end
+
       def metrics
         {
           'latency' => latency,
@@ -82,12 +93,17 @@ module Upperkut
 
       private
 
-      def
-        @
+      def key
+        "upperkut:queued:#{to_underscore(@worker.name)}"
+      end
 
-
-
-
+      def ensure_timestamp_attr(item)
+        return item if item.body.key?('timestamp')
+
+        Item.new(
+          id: item.id,
+          body: item.body.merge('timestamp' => Time.now.utc.to_i),
+          enqueued_at: item.enqueued_at
         )
       end
 
@@ -110,46 +126,36 @@ module Upperkut
 
       def latency
         now = Time.now.utc
-
-        job = nil
+        timestamp = now.to_f
 
-        redis do |conn|
-
-
+        item = redis do |conn|
+          item = conn.zrangebyscore(key, '-inf', timestamp.to_s, limit: [0, 1]).first
+          decode_json_items([item]).first
         end
 
-        return
-
-        now_timestamp - job['body'].fetch('timestamp', now).to_f
-      end
-
-      def setup_redis_pool
-        return @redis_options if @redis_options.is_a?(ConnectionPool)
+        return timestamp - item.body['timestamp'].to_f if item
 
-
+        0
       end
 
       def redis
         raise ArgumentError, 'requires a block' unless block_given?
 
-
-
+        retry_block do
+          redis_pool.with do |conn|
+            yield conn
+          end
        end
      end
 
-      def
-
-
-
-
-
-
-
-      def encode_json_item(item)
-        JSON.generate(
-          'enqueued_at' => Time.now.utc.to_i,
-          'body' => item
-        )
+      def redis_pool
+        @redis_pool ||= begin
+          if @redis_options.is_a?(ConnectionPool)
+            @redis_options
+          else
+            RedisPool.new(@redis_options).create
+          end
+        end
      end
    end
  end
data/lib/upperkut/util.rb
CHANGED
@@ -1,9 +1,10 @@
 require 'json'
+require 'upperkut/item'
 
 module Upperkut
   module Util
     def to_underscore(object)
-      klass_name = object
+      klass_name = object.dup
       klass_name.gsub!(/::/, '_')
       klass_name.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2')
       klass_name.gsub!(/([a-z\d])([A-Z])/, '\1_\2')
@@ -12,22 +13,61 @@ module Upperkut
       klass_name
     end
 
+    # Public:
+    # Normalize hash and hash arrays into a hash of Items.
+    # An Item object contains metadata, for example the timestamp from the moment it was enqueued,
+    # that we need to carry through multiple execution tries.
+    #
+    # When the execution fails, we need to schedule the whole batch for retry, and scheduling
+    # an Item will make Upperkut understand that we're not dealing with a new batch,
+    # so metrics like latency will increase.
+    def normalize_items(items)
+      items = [items] unless items.is_a?(Array)
+
+      items.map do |item|
+        next item if item.is_a?(Item)
+
+        Item.new(id: SecureRandom.uuid, body: item)
+      end
+    end
+
     def encode_json_items(items)
-      items = items
+      items = [items] unless items.is_a?(Array)
+
+      items.map do |item|
         JSON.generate(
-          '
-          'body' =>
+          'id' => item.id,
+          'body' => item.body,
+          'enqueued_at' => item.enqueued_at
         )
       end
     end
 
     def decode_json_items(items)
-      items.
-
+      items.each_with_object([]) do |item_json, memo|
+        next unless item_json
+
+        hash = JSON.parse(item_json)
+        id, body, enqueued_at = hash.values_at('id', 'body', 'enqueued_at')
+        memo << Item.new(id: id, body: body, enqueued_at: enqueued_at)
       end
+    end
+
+    def retry_block(retries_limit = 3, base_sleep = 2)
+      retries = 0
 
-
-
+      begin
+        yield
+      rescue StandardError => err
+        if retries < retries_limit
+          retries += 1
+          sleep_time = base_sleep**retries
+          Kernel.sleep(sleep_time)
+          retry
+        end
+
+        raise err
+      end
     end
   end
 end
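`retry_block` is the exponential backoff mentioned in the 0.8.x changelog entry: it retries the given block up to `retries_limit` times, sleeping `base_sleep**retries` seconds between attempts, and re-raises once the limit is hit. A small self-contained sketch of the timing (the `FlakyCall` class is illustrative):

```ruby
require 'upperkut/util'

class FlakyCall
  include Upperkut::Util

  def attempt
    calls = 0
    # sleeps 2, 4 and 8 seconds after the first three failures,
    # then succeeds on the fourth attempt
    retry_block(3, 2) do
      calls += 1
      raise 'still failing' if calls < 4

      :ok
    end
  end
end

FlakyCall.new.attempt # => :ok after roughly 2 + 4 + 8 seconds of sleeping
```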
data/lib/upperkut/version.rb
CHANGED
data/lib/upperkut/worker.rb
CHANGED
data/lib/upperkut/worker_thread.rb
ADDED
@@ -0,0 +1,37 @@
+require_relative 'processor'
+
+module Upperkut
+  class WorkerThread
+    def initialize(manager, processor)
+      @manager = manager
+      @processor = processor
+    end
+
+    def run
+      @thread ||= Thread.new do
+        begin
+          @processor.blocking_process
+        rescue Exception => e
+          @manager.logger.debug(
+            action: :processor_killed,
+            reason: e,
+            stacktrace: e.backtrace
+          )
+
+          @manager.notify_killed_processor(self)
+        end
+      end
+    end
+
+    def stop
+      @processor.stop
+    end
+
+    def kill
+      return unless @thread
+
+      @thread.raise Upperkut::Shutdown
+      @thread.value # wait
+    end
+  end
+end
data/upperkut.gemspec
CHANGED
@@ -24,6 +24,6 @@ Gem::Specification.new do |spec|
   spec.add_dependency 'connection_pool', '~> 2.2', '>= 2.2.2'
   spec.add_dependency 'redis', '>= 4.1.0', '< 5.0.0'
   spec.add_development_dependency 'bundler', '>= 1.16'
-  spec.add_development_dependency 'rake', '~>
+  spec.add_development_dependency 'rake', '~> 13.0'
   spec.add_development_dependency 'rspec', '~> 3.0'
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: upperkut
 version: !ruby/object:Gem::Version
-  version: 0.7.4
+  version: 1.0.2
 platform: ruby
 authors:
 - Nando Sousa
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2021-01-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: connection_pool
@@ -70,14 +70,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '
+        version: '13.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: '
+        version: '13.0'
 - !ruby/object:Gem::Dependency
   name: rspec
   requirement: !ruby/object:Gem::Requirement
@@ -106,20 +106,23 @@ files:
 - ".rspec"
 - CHANGELOG.md
 - CODE_OF_CONDUCT.md
+- Dockerfile
 - Gemfile
 - Gemfile.lock
 - LICENSE.txt
+- Makefile
 - README.md
 - Rakefile
 - bin/upperkut
+- docker-compose.yml
 - examples/basic.rb
 - examples/priority_worker.rb
 - examples/scheduled_worker.rb
 - examples/with_middlewares.rb
 - lib/upperkut.rb
-- lib/upperkut/batch_execution.rb
 - lib/upperkut/cli.rb
 - lib/upperkut/core_ext.rb
+- lib/upperkut/item.rb
 - lib/upperkut/logging.rb
 - lib/upperkut/manager.rb
 - lib/upperkut/middleware.rb
@@ -135,6 +138,7 @@ files:
 - lib/upperkut/util.rb
 - lib/upperkut/version.rb
 - lib/upperkut/worker.rb
+- lib/upperkut/worker_thread.rb
 - upperkut.gemspec
 homepage: http://shipit.resultadosdigitais.com.br/open-source/
 licenses:
@@ -155,8 +159,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-
-rubygems_version: 2.7.8
+rubygems_version: 3.1.4
 signing_key:
 specification_version: 4
 summary: Batch background processing tool
data/lib/upperkut/batch_execution.rb
REMOVED
@@ -1,42 +0,0 @@
-require_relative 'logging'
-
-module Upperkut
-  class BatchExecution
-    include Upperkut::Util
-
-    def initialize(worker, logger = Upperkut::Logging.logger)
-      @worker = worker
-      @logger = logger
-    end
-
-    def execute
-      worker_instance = @worker.new
-      items = @worker.fetch_items.freeze
-
-      items_body = items.collect do |item|
-        item['body']
-      end
-
-      @worker.server_middlewares.invoke(@worker, items) do
-        worker_instance.perform(items_body.dup)
-      end
-    rescue StandardError => error
-      @logger.info(
-        action: :requeue,
-        ex: error,
-        item_size: items_body.size
-      )
-
-      @logger.error(error.backtrace.join("\n"))
-
-      if worker_instance.respond_to?(:handle_error)
-        worker_instance.handle_error(error, items_body)
-        return
-      else
-        @worker.push_items(items_body)
-      end
-
-      raise error
-    end
-  end
-end