upperkut 0.7.4 → 1.0.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: d7c6cb99c0744e428ea946a58b2aa5b1065bb5abdd37487cd01caeb5589ecec1
- data.tar.gz: 9d560415137c6e60d9588a625a5f79446af88dedfd492a32e03ccea73fc45008
+ metadata.gz: edf84d6612f4c9577cfe53e578e16983e3936863379c7d4c1041e9c92637f867
+ data.tar.gz: ce0ad624fe65306bb8fe818b8018acfe519326cbb744ef9af0519b26fd25faed
  SHA512:
- metadata.gz: cafd45d08f7677f4c8b9624fe97303e9d0365f7d466467a6e52d152392ac272a635cf572f304ba811f2340dfe83406474de468aa5e4a1f90dd6c12e58dd76e26
- data.tar.gz: 8e365782ec521ee2370b5f8fefcf2b38bf6ff610f66dd6392887fa38fc544e03bb80fa02467e6d5a7aebf2be486a39b87de8f61a7fa96a6e0d43249730b4c3e5
+ metadata.gz: 3f6325ce299c9af7c50c9891acee2e771131d002c27b27fe6e245227d73ce177afd65451116daf7a5af5fc5e546b8b52b796ea82abf05bb8eae88966a16efa46
+ data.tar.gz: 482e700d0cd87b0f6de61c528ef42d7c57bbc96afce8d5d6b1593420a4fa9308f6be668cc8c183fc98b9347a6a33800be9df5bebc3da98ac7a673f2204440cc2
data/.circleci/config.yml CHANGED
@@ -6,7 +6,7 @@ version: 2
  jobs:
  build:
  docker:
- - image: circleci/ruby:2.4.4
+ - image: circleci/ruby:2.7.2
  environment:
  CC_TEST_REPORTER_ID: 03ab83a772148a577d29d4acf438d7ebdc95c632224122d0ba8dbb291eedebe6
  COVERAGE: true
data/CHANGELOG.md CHANGED
@@ -1,7 +1,20 @@
  # Upperkut changes

+ 1.0.x
+ -------
+ - Add docker
+ - Fix to_underscore bug for ruby 2.7.2 #83 @andrehjr;
+
+
+ 0.8.x
+ --------
+ - Added exponential backoff when push_items #57
+ - Introducing Item to avoid losing enqueued at and report wrong latency
+   metrics #56 thanks to @jeangnc
+
  0.7.x
  ---------
+ - Fix logging timeout message #54 by @jeanmatheussouto
  - Add handle_error method #44
  - Added Datahog Middleware (#42)
  - Added Priority Queue (#39) thanks to @jeangnc and @jeanmatheussouto
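
The `Fix to_underscore bug for ruby 2.7.2` entry corresponds to the `lib/upperkut/util.rb` change further down (`klass_name = object.dup`). A minimal sketch of what the `.dup` buys, assuming the failing input was a frozen class-name string; the worker class here is illustrative:

```ruby
class MyWorker; end  # illustrative worker

# gsub! mutates its receiver, so it raises FrozenError on a frozen string.
frozen_name = MyWorker.name.freeze
# frozen_name.gsub!(/([a-z\d])([A-Z])/, '\1_\2')  # => FrozenError

# The 1.0.x helper works on a copy instead:
name = MyWorker.name.dup
name.gsub!(/([a-z\d])([A-Z])/, '\1_\2')
name.downcase  # => "my_worker"
```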
data/Dockerfile ADDED
@@ -0,0 +1,7 @@
+ FROM ruby:2.7.2
+
+ WORKDIR /code
+ COPY . .
+
+ RUN gem install bundler
+ RUN bundle install
data/Gemfile CHANGED
@@ -8,4 +8,4 @@ gemspec
  gem 'fivemat'
  gem 'pry'
  gem 'rspec_junit_formatter'
- gem 'simplecov', require: false
+ gem 'simplecov', '< 0.18', require: false
data/Gemfile.lock CHANGED
@@ -1,41 +1,41 @@
  PATH
  remote: .
  specs:
- upperkut (0.7.4)
+ upperkut (1.0.2)
  connection_pool (~> 2.2, >= 2.2.2)
  redis (>= 4.1.0, < 5.0.0)

  GEM
  remote: https://rubygems.org/
  specs:
- coderay (1.1.2)
- connection_pool (2.2.2)
- diff-lcs (1.3)
- docile (1.3.1)
+ coderay (1.1.3)
+ connection_pool (2.2.3)
+ diff-lcs (1.4.4)
+ docile (1.3.2)
  fivemat (1.3.7)
- json (2.2.0)
- method_source (0.9.2)
- pry (0.12.2)
- coderay (~> 1.1.0)
- method_source (~> 0.9.0)
- rake (10.5.0)
- redis (4.1.0)
- rspec (3.8.0)
- rspec-core (~> 3.8.0)
- rspec-expectations (~> 3.8.0)
- rspec-mocks (~> 3.8.0)
- rspec-core (3.8.0)
- rspec-support (~> 3.8.0)
- rspec-expectations (3.8.3)
+ json (2.3.1)
+ method_source (1.0.0)
+ pry (0.13.1)
+ coderay (~> 1.1)
+ method_source (~> 1.0)
+ rake (13.0.1)
+ redis (4.2.5)
+ rspec (3.10.0)
+ rspec-core (~> 3.10.0)
+ rspec-expectations (~> 3.10.0)
+ rspec-mocks (~> 3.10.0)
+ rspec-core (3.10.0)
+ rspec-support (~> 3.10.0)
+ rspec-expectations (3.10.0)
  diff-lcs (>= 1.2.0, < 2.0)
- rspec-support (~> 3.8.0)
- rspec-mocks (3.8.0)
+ rspec-support (~> 3.10.0)
+ rspec-mocks (3.10.0)
  diff-lcs (>= 1.2.0, < 2.0)
- rspec-support (~> 3.8.0)
- rspec-support (3.8.0)
+ rspec-support (~> 3.10.0)
+ rspec-support (3.10.0)
  rspec_junit_formatter (0.4.1)
  rspec-core (>= 2, < 4, != 2.12.0)
- simplecov (0.16.1)
+ simplecov (0.17.1)
  docile (~> 1.1)
  json (>= 1.8, < 3)
  simplecov-html (~> 0.10.0)
@@ -48,11 +48,11 @@ DEPENDENCIES
  bundler (>= 1.16)
  fivemat
  pry
- rake (~> 10.0)
+ rake (~> 13.0)
  rspec (~> 3.0)
  rspec_junit_formatter
- simplecov
+ simplecov (< 0.18)
  upperkut!

  BUNDLED WITH
- 1.17.2
+ 2.1.4
data/Makefile ADDED
@@ -0,0 +1,4 @@
+ bash:
+ docker-compose run gem bash
+ specs:
+ docker-compose run gem bundle exec rspec
data/README.md CHANGED
@@ -42,7 +42,7 @@ Or install it yourself as:

  2) Start pushing items;
  ```ruby
- Myworker.push_items(
+ MyWorker.push_items(
  [
  {
  'id' => SecureRandom.uuid,
@@ -80,7 +80,7 @@ Or install it yourself as:
  2) Start pushing items with `timestamp` parameter;
  ```ruby
  # timestamp is 'Thu, 10 May 2019 23:43:58 GMT'
- Myworker.push_items(
+ MyWorker.push_items(
  [
  {
  'timestamp' => '1557531838',
data/docker-compose.yml ADDED
@@ -0,0 +1,18 @@
+ services:
+ gem:
+ build: .
+ volumes:
+ - .:/code
+ environment:
+ - REDIS_URL=redis://redis:6379
+ depends_on:
+ - redis
+ redis:
+ image: redis:5.0.4-alpine
+ command: redis-server --save "" --appendonly yes --appendfsync everysec
+ ports:
+ - 6379:6379
+ volumes:
+ - redis-data:/data
+ volumes:
+ redis-data:
data/lib/upperkut.rb CHANGED
@@ -58,7 +58,7 @@ module Upperkut

  def self.default
  new.tap do |config|
- config.polling_interval = Integer(ENV['UPPERKUT_POLLING_INTERVAL'] || 5)
+ config.polling_interval = Float(ENV['UPPERKUT_POLLING_INTERVAL'] || 5)
  end
  end

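The `Integer(...)` → `Float(...)` switch above lets `UPPERKUT_POLLING_INTERVAL` carry sub-second values. A small sketch of the difference (the env value is illustrative):

```ruby
ENV['UPPERKUT_POLLING_INTERVAL'] = '0.5'  # illustrative value

# 0.7.x behaviour: Integer('0.5') raises ArgumentError, so only whole
# seconds were accepted.
# Integer(ENV['UPPERKUT_POLLING_INTERVAL'] || 5)  # => ArgumentError

# 1.0.x behaviour: fractional intervals parse and are valid for sleep.
polling_interval = Float(ENV['UPPERKUT_POLLING_INTERVAL'] || 5)  # => 0.5
sleep(polling_interval)
```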
data/lib/upperkut/cli.rb CHANGED
@@ -57,12 +57,13 @@ module Upperkut
  handle_signal(signal)
  end
  rescue Interrupt
+ timeout = Integer(ENV['UPPERKUT_TIMEOUT'] || 8)
  @logger.info(
- 'Stopping managers, wait for 5 seconds and them kill processors'
+ "Stopping managers, wait for #{timeout} seconds and them kill processors"
  )

  manager.stop
- sleep(Integer(ENV['UPPERKUT_TIMEOUT'] || 8))
+ sleep(timeout)
  manager.kill
  exit(0)
  end
data/lib/upperkut/item.rb ADDED
@@ -0,0 +1,22 @@
+ require 'securerandom'
+
+ module Upperkut
+ class Item
+ attr_reader :id, :body, :enqueued_at
+
+ def initialize(id:, body:, enqueued_at: nil)
+ @id = id
+ @body = body
+ @enqueued_at = enqueued_at || Time.now.utc.to_i
+ @nacked = false
+ end
+
+ def nack
+ @nacked = true
+ end
+
+ def nacked?
+ @nacked
+ end
+ end
+ end
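
A short usage sketch of the new `Item` value object above (the body payload is illustrative):

```ruby
require 'upperkut/item'
require 'securerandom'

item = Upperkut::Item.new(
  id: SecureRandom.uuid,
  body: { 'event' => 'signup' }  # illustrative payload
)

item.enqueued_at  # defaults to Time.now.utc.to_i when not supplied
item.nacked?      # => false

# Mark the item as not acknowledged; the processor later routes nacked
# items to the strategy's nack (re-queue) path instead of acking them.
item.nack
item.nacked?      # => true
```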
data/lib/upperkut/manager.rb CHANGED
@@ -1,44 +1,50 @@
  require_relative 'core_ext'
- require_relative 'processor'
+ require_relative 'worker_thread'
+ require_relative 'logging'
  require_relative 'worker'

  module Upperkut
  class Manager
  attr_accessor :worker
- attr_reader :stopped, :logger, :concurrency, :processors
+ attr_reader :stopped, :logger, :concurrency

  def initialize(opts = {})
  self.worker = opts.fetch(:worker).constantize
  @concurrency = opts.fetch(:concurrency, 1)
- @logger = opts.fetch(:logger, Upperkut::Logging.logger)
+ @logger = opts.fetch(:logger, Logging.logger)

  @stopped = false
- @processors = []
+ @threads = []
  end

  def run
  @concurrency.times do
- processor = Processor.new(self)
- @processors << processor
- processor.run
+ spawn_thread
  end
  end

  def stop
  @stopped = true
+ @threads.each(&:stop)
  end

  def kill
- @processors.each(&:kill)
+ @threads.each(&:kill)
  end

- def notify_killed_processor(processor)
- @processors.delete(processor)
- return if @stopped
+ def notify_killed_processor(thread)
+ @threads.delete(thread)
+ spawn_thread unless @stopped
+ end
+
+ private
+
+ def spawn_thread
+ processor = Processor.new(worker, logger)

- processor = Processor.new(self)
- @processors << processor
- processor.run
+ thread = WorkerThread.new(self, processor)
+ @threads << thread
+ thread.run
  end
  end
  end
data/lib/upperkut/processor.rb CHANGED
@@ -1,58 +1,64 @@
- require_relative 'batch_execution'
+ require_relative 'logging'

  module Upperkut
  class Processor
- def initialize(manager)
- @manager = manager
- @worker = @manager.worker
- @logger = @manager.logger
- @strategy = @worker.strategy
-
- @sleeping_time = 0
+ def initialize(worker, logger = Logging.logger)
+ @worker = worker
+ @strategy = worker.strategy
+ @worker_instance = worker.new
+ @logger = logger
  end

- def run
- @thread ||= Thread.new do
- begin
- process
- rescue Exception => e
- @logger.debug(
- action: :processor_killed,
- reason: e,
- stacktrace: e.backtrace
- )
-
- @manager.notify_killed_processor(self)
- end
+ def process
+ items = @worker.fetch_items.freeze
+ return unless items.any?
+
+ @worker.server_middlewares.invoke(@worker, items) do
+ @worker_instance.perform(items)
  end
- end

- def kill
- return unless @thread
+ nacked_items, pending_ack_items = items.partition(&:nacked?)
+ @strategy.nack(nacked_items) if nacked_items.any?
+ @strategy.ack(pending_ack_items) if pending_ack_items.any?
+ rescue StandardError => error
+ @logger.error(
+ action: :handle_execution_error,
+ ex: error.to_s,
+ backtrace: error.backtrace.join("\n"),
+ item_size: Array(items).size
+ )

- @thread.raise Upperkut::Shutdown
- @thread.value # wait
+ if items
+ if @worker_instance.respond_to?(:handle_error)
+ @worker_instance.handle_error(error, items)
+ return
+ end
+
+ @strategy.nack(items)
+ end
+
+ raise error
  end

- private
+ def blocking_process
+ sleeping_time = 0

- def process
  loop do
- next if @manager.stopped
+ break if @stopped

  if @strategy.process?
- @sleeping_time = 0
- process_batch
+ sleeping_time = 0
+ process
  next
  end

- @sleeping_time += sleep(@worker.setup.polling_interval)
- @logger.debug(sleeping_time: @sleeping_time)
+ sleeping_time += sleep(@worker.setup.polling_interval)
+ @logger.debug(sleeping_time: sleeping_time)
  end
  end

- def process_batch
- BatchExecution.new(@worker, @logger).execute
+ def stop
+ @stopped = true
  end
  end
  end
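
The rewritten `Processor#process` above acks everything that was not explicitly nacked, and, on an unrescued error, hands the whole batch to the worker's `handle_error` when one is defined (otherwise it nacks the batch and re-raises). A hedged sketch of a worker using both hooks; the class and its payload handling are assumptions, not part of this diff:

```ruby
class BillingWorker
  include Upperkut::Worker  # assumed, as in the gem's README

  def perform(items)
    items.each do |item|
      charge(item.body)
    rescue StandardError
      item.nack  # only this item goes back to the queue; the rest get acked
    end
  end

  # Optional hook: when defined, Processor calls it after an unrescued
  # error instead of nacking the whole batch and re-raising.
  def handle_error(error, items)
    # e.g. forward `items` to a dead-letter store and report `error`
  end

  private

  def charge(_body); end  # placeholder for the real work
end
```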
data/lib/upperkut/strategies/base.rb CHANGED
@@ -24,6 +24,20 @@ module Upperkut
  raise NotImplementedError
  end

+ # Public: Confirms that items have been processed successfully.
+ #
+ # items - The Array of items do be confirmed.
+ def ack(_items)
+ raise NotImplementedError
+ end
+
+ # Public: Informs that items have been not processed successfully and therefore must be re-processed.
+ #
+ # items - The Array of items do be unacknowledged.
+ def nack(_items)
+ raise NotImplementedError
+ end
+
  # Public: Tells when to execute the event processing,
  # when this condition is met so the events are dispatched to
  # the worker.
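
With `ack` and `nack` now part of the strategy contract above, custom strategies have to implement both. A minimal sketch of the simplest compliant behaviour, mirroring what the bundled priority and scheduled queues do further down (no-op ack, re-enqueue on nack); the class is hypothetical and the other required methods are omitted:

```ruby
require 'upperkut/strategies/base'

module Upperkut
  module Strategies
    # Hypothetical strategy sketch: push_items, fetch_items, etc. omitted.
    class MyQueue < Base
      def ack(_items); end  # nothing to clean up after a successful batch

      def nack(items)
        push_items(items)   # rejected items simply go back to the queue
      end
    end
  end
end
```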
data/lib/upperkut/strategies/buffered_queue.rb CHANGED
@@ -7,13 +7,65 @@ module Upperkut
  class BufferedQueue < Upperkut::Strategies::Base
  include Upperkut::Util

+ DEQUEUE_ITEMS = %(
+ local key = KEYS[1]
+ local waiting_ack_key = KEYS[2]
+ local batch_size = ARGV[1]
+ local current_timestamp = ARGV[2]
+ local expired_ack_timestamp = ARGV[3] + 1
+
+ -- move expired items back to the queue
+ local expired_ack_items = redis.call("ZRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+ if table.getn(expired_ack_items) > 0 then
+ redis.call("ZREMRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+ for i, item in ipairs(expired_ack_items) do
+ redis.call("RPUSH", key, item)
+ end
+ end
+
+ -- now fetch a batch
+ local items = redis.call("LRANGE", key, 0, batch_size - 1)
+ for i, item in ipairs(items) do
+ redis.call("ZADD", waiting_ack_key, current_timestamp + tonumber('0.' .. i), item)
+ end
+ redis.call("LTRIM", key, batch_size, -1)
+
+ return items
+ ).freeze
+
+ ACK_ITEMS = %(
+ local waiting_ack_key = KEYS[1]
+ local items = ARGV
+
+ for i, item in ipairs(items) do
+ redis.call("ZREM", waiting_ack_key, item)
+ end
+ ).freeze
+
+ NACK_ITEMS = %(
+ local key = KEYS[1]
+ local waiting_ack_key = KEYS[2]
+ local items = ARGV
+
+ for i, item in ipairs(items) do
+ redis.call("ZREM", waiting_ack_key, item)
+ redis.call("RPUSH", key, item)
+ end
+ ).freeze
+
  attr_reader :options

  def initialize(worker, options = {})
  @options = options
  @redis_options = options.fetch(:redis, {})
- @worker = worker
- @max_wait = options.fetch(
+ @worker = worker
+
+ @ack_wait_limit = options.fetch(
+ :ack_wait_limit,
+ Integer(ENV['UPPERKUT_ACK_WAIT_LIMIT'] || 120)
+ )
+
+ @max_wait = options.fetch(
  :max_wait,
  Integer(ENV['UPPERKUT_MAX_WAIT'] || 20)
  )
@@ -27,7 +79,7 @@ module Upperkut
  end

  def push_items(items = [])
- items = [items] if items.is_a?(Hash)
+ items = normalize_items(items)
  return false if items.empty?

  redis do |conn|
@@ -38,12 +90,12 @@ module Upperkut
  end

  def fetch_items
- stop = [@batch_size, size].min
+ batch_size = [@batch_size, size].min

  items = redis do |conn|
- conn.multi do
- stop.times { conn.lpop(key) }
- end
+ conn.eval(DEQUEUE_ITEMS,
+ keys: [key, processing_key],
+ argv: [batch_size, Time.now.utc.to_i, Time.now.utc.to_i - @ack_wait_limit])
  end

  decode_json_items(items)
@@ -53,11 +105,24 @@ module Upperkut
  redis { |conn| conn.del(key) }
  end

- def metrics
- {
- 'latency' => latency,
- 'size' => size
- }
+ def ack(items)
+ raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+ redis do |conn|
+ conn.eval(ACK_ITEMS,
+ keys: [processing_key],
+ argv: encode_json_items(items))
+ end
+ end
+
+ def nack(items)
+ raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+ redis do |conn|
+ conn.eval(NACK_ITEMS,
+ keys: [key, processing_key],
+ argv: encode_json_items(items))
+ end
  end

  def process?
@@ -72,38 +137,70 @@ module Upperkut
  end
  end

+ def metrics
+ current_latency = latency
+
+ {
+ 'latency' => current_latency,
+ 'oldest_unacked_item_age' => oldest_item_age(current_latency),
+ 'size' => size
+ }
+ end
+
  private

  def key
  "upperkut:buffers:#{to_underscore(@worker.name)}"
  end

+ def processing_key
+ "#{key}:processing"
+ end
+
  def fulfill_condition?(buff_size)
  return false if buff_size.zero?

  buff_size >= @batch_size || @waiting_time >= @max_wait
  end

- def size
- redis do |conn|
- conn.llen(key)
+ def oldest_item_age(current_latency)
+ oldest_processing_item = redis do |conn|
+ items = conn.zrange(processing_key, 0, 0)
+ decode_json_items(items).first
  end
+
+ oldest_processing_age = if oldest_processing_item
+ now = Time.now.to_f
+ now - oldest_processing_item.enqueued_at.to_f
+ else
+ 0
+ end
+
+ [current_latency, oldest_processing_age].max
  end

  def latency
- item = redis { |conn| conn.lrange(key, 0, 0) }
- item = decode_json_items(item).first
- return 0 unless item
+ items = redis { |conn| conn.lrange(key, 0, 0) }
+ first_item = decode_json_items(items).first
+ return 0 unless first_item

  now = Time.now.to_f
- now - item.fetch('enqueued_at', Time.now).to_f
+ now - first_item.enqueued_at.to_f
+ end
+
+ def size
+ redis do |conn|
+ conn.llen(key)
+ end
  end

  def redis
  raise ArgumentError, 'requires a block' unless block_given?

- redis_pool.with do |conn|
- yield conn
+ retry_block do
+ redis_pool.with do |conn|
+ yield conn
+ end
  end
  end

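The `DEQUEUE_ITEMS`/`ACK_ITEMS`/`NACK_ITEMS` scripts above give `BufferedQueue` at-least-once delivery: fetched items are parked in the `...:processing` sorted set until acked, and anything older than `ack_wait_limit` is pushed back onto the list by the next fetch. A sketch of tuning those knobs when instantiating the strategy directly (the worker class and values are illustrative):

```ruby
require 'upperkut/strategies/buffered_queue'

class MyWorker; end  # illustrative stand-in

strategy = Upperkut::Strategies::BufferedQueue.new(
  MyWorker,
  ack_wait_limit: 300,  # seconds before an unacked item is re-queued (default 120)
  max_wait: 20          # seconds to wait before processing a partial batch
)

# The same limits can come from the environment instead:
#   UPPERKUT_ACK_WAIT_LIMIT=300 UPPERKUT_MAX_WAIT=20
```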
data/lib/upperkut/strategies/priority_queue.rb CHANGED
@@ -1,3 +1,7 @@
+ require 'upperkut/util'
+ require 'upperkut/redis_pool'
+ require 'upperkut/strategies/base'
+
  module Upperkut
  module Strategies
  # Public: Queue that prevent a single tenant from taking over.
@@ -23,10 +27,13 @@ module Upperkut
  # processing time.
  ENQUEUE_ITEM = %(
  local increment = 1
- local current_checkpoint = tonumber(redis.call("GET", KEYS[1])) or 0
- local score_key = KEYS[2]
+ local checkpoint_key = KEYS[1]
+ local counter_key = KEYS[2]
+ local score_key = KEYS[3]
+ local queue_key = KEYS[4]
+ local current_checkpoint = tonumber(redis.call("GET", checkpoint_key)) or 0
+ local current_counter = tonumber(redis.call("INCR", counter_key))
  local current_score = tonumber(redis.call("GET", score_key)) or 0
- local queue_key = KEYS[3]
  local next_score = nil

  if current_score >= current_checkpoint then
@@ -36,7 +43,7 @@ module Upperkut
  end

  redis.call("SETEX", score_key, #{ONE_DAY_IN_SECONDS}, next_score)
- redis.call("ZADD", queue_key, next_score, ARGV[1])
+ redis.call("ZADD", queue_key, next_score + tonumber('0.' .. current_counter), ARGV[1])

  return next_score
  ).freeze
@@ -90,7 +97,7 @@ module Upperkut
  #
  # Returns true when success, raise when error.
  def push_items(items = [])
- items = [items] if items.is_a?(Hash)
+ items = normalize_items(items)
  return false if items.empty?

  redis do |conn|
@@ -98,13 +105,14 @@ module Upperkut
  priority_key = @priority_key.call(item)
  score_key = "#{queue_key}:#{priority_key}:score"

- keys = [queue_checkpoint_key,
+ keys = [checkpoint_key,
+ counter_key,
  score_key,
  queue_key]

  conn.eval(ENQUEUE_ITEM,
  keys: keys,
- argv: [encode_json_items([item])])
+ argv: [encode_json_items(item)])
  end
  end

@@ -119,7 +127,7 @@ module Upperkut

  items = redis do |conn|
  conn.eval(DEQUEUE_ITEM,
- keys: [queue_checkpoint_key, queue_key],
+ keys: [checkpoint_key, queue_key],
  argv: [batch_size])
  end

@@ -131,6 +139,12 @@ module Upperkut
  redis { |conn| conn.del(queue_key) }
  end

+ def ack(_items); end
+
+ def nack(items)
+ push_items(items)
+ end
+
  # Public: Tells when to execute the event processing,
  # when this condition is met so the events are dispatched to
  # the worker.
@@ -155,10 +169,14 @@ module Upperkut

  private

- def queue_checkpoint_key
+ def checkpoint_key
  "#{queue_key}:checkpoint"
  end

+ def counter_key
+ "#{queue_key}:counter"
+ end
+
  def queue_key
  "upperkut:priority_queue:#{to_underscore(@worker.name)}"
  end
@@ -178,8 +196,10 @@ module Upperkut
  def redis
  raise ArgumentError, 'requires a block' unless block_given?

- redis_pool.with do |conn|
- yield conn
+ retry_block do
+ redis_pool.with do |conn|
+ yield conn
+ end
  end
  end

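The new `counter_key` above exists so that two items landing on the same `next_score` keep their insertion order inside the sorted set: the counter is appended as a decimal fraction of the score. A tiny plain-Ruby rendering of the Lua arithmetic (`next_score + tonumber('0.' .. current_counter)`):

```ruby
def scored(next_score, counter)
  next_score + "0.#{counter}".to_f  # mirrors the Lua expression
end

scored(7, 1)   # => 7.1
scored(7, 2)   # => 7.2   (same base score, later item sorts after the earlier one)
scored(7, 12)  # => 7.12  (the fraction is positional, so 7.12 still sorts before 7.2)
```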
data/lib/upperkut/strategies/scheduled_queue.rb CHANGED
@@ -28,19 +28,24 @@ module Upperkut

  def initialize(worker, options = {})
  @options = options
- initialize_options
- @redis_pool = setup_redis_pool
+ @redis_options = @options.fetch(:redis, {})
  @worker = worker
+
+ @batch_size = @options.fetch(
+ :batch_size,
+ Integer(ENV['UPPERKUT_BATCH_SIZE'] || 1000)
+ )
  end

  def push_items(items = [])
- items = [items] if items.is_a?(Hash)
+ items = normalize_items(items)
  return false if items.empty?

  redis do |conn|
  items.each do |item|
- ensure_timestamp_attr(item)
- conn.zadd(key, item['timestamp'], encode_json_item(item))
+ schedule_item = ensure_timestamp_attr(item)
+ timestamp = schedule_item.body['timestamp']
+ conn.zadd(key, timestamp, encode_json_items(schedule_item))
  end
  end

@@ -66,6 +71,12 @@ module Upperkut
  redis { |conn| conn.del(key) }
  end

+ def ack(_items); end
+
+ def nack(items)
+ push_items(items)
+ end
+
  def metrics
  {
  'latency' => latency,
@@ -82,12 +93,17 @@ module Upperkut

  private

- def initialize_options
- @redis_options = @options.fetch(:redis, {})
+ def key
+ "upperkut:queued:#{to_underscore(@worker.name)}"
+ end

- @batch_size = @options.fetch(
- :batch_size,
- Integer(ENV['UPPERKUT_BATCH_SIZE'] || 1000)
+ def ensure_timestamp_attr(item)
+ return item if item.body.key?('timestamp')
+
+ Item.new(
+ id: item.id,
+ body: item.body.merge('timestamp' => Time.now.utc.to_i),
+ enqueued_at: item.enqueued_at
  )
  end

@@ -110,46 +126,36 @@ module Upperkut

  def latency
  now = Time.now.utc
- now_timestamp = now.to_f
- job = nil
+ timestamp = now.to_f

- redis do |conn|
- job = conn.zrangebyscore(key, '-inf'.freeze, now_timestamp.to_s, limit: [0, 1]).first
- job = decode_json_items([job]).first
+ item = redis do |conn|
+ item = conn.zrangebyscore(key, '-inf', timestamp.to_s, limit: [0, 1]).first
+ decode_json_items([item]).first
  end

- return 0 unless job
-
- now_timestamp - job['body'].fetch('timestamp', now).to_f
- end
-
- def setup_redis_pool
- return @redis_options if @redis_options.is_a?(ConnectionPool)
+ return timestamp - item.body['timestamp'].to_f if item

- RedisPool.new(options.fetch(:redis, {})).create
+ 0
  end

  def redis
  raise ArgumentError, 'requires a block' unless block_given?

- @redis_pool.with do |conn|
- yield conn
+ retry_block do
+ redis_pool.with do |conn|
+ yield conn
+ end
  end
  end

- def key
- "upperkut:queued:#{to_underscore(@worker.name)}"
- end
-
- def ensure_timestamp_attr(item)
- item['timestamp'] = Time.now.utc.to_i unless item.key?('timestamp')
- end
-
- def encode_json_item(item)
- JSON.generate(
- 'enqueued_at' => Time.now.utc.to_i,
- 'body' => item
- )
+ def redis_pool
+ @redis_pool ||= begin
+ if @redis_options.is_a?(ConnectionPool)
+ @redis_options
+ else
+ RedisPool.new(@redis_options).create
+ end
+ end
  end
  end
  end
data/lib/upperkut/util.rb CHANGED
@@ -1,9 +1,10 @@
  require 'json'
+ require 'upperkut/item'

  module Upperkut
  module Util
  def to_underscore(object)
- klass_name = object
+ klass_name = object.dup
  klass_name.gsub!(/::/, '_')
  klass_name.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2')
  klass_name.gsub!(/([a-z\d])([A-Z])/, '\1_\2')
@@ -12,22 +13,61 @@ module Upperkut
  klass_name
  end

+ # Public:
+ # Normalize hash and hash arrays into a hash of Items.
+ # An Item object contains metadata, for example the timestamp from the moment it was enqueued,
+ # that we need to carry through multiple execution tries.
+ #
+ # When the execution fails, we need to schedule the whole batch for retry, and scheduling
+ # an Item will make Upperkut understand that we're not dealing with a new batch,
+ # so metrics like latency will increase.
+ def normalize_items(items)
+ items = [items] unless items.is_a?(Array)
+
+ items.map do |item|
+ next item if item.is_a?(Item)
+
+ Item.new(id: SecureRandom.uuid, body: item)
+ end
+ end
+
  def encode_json_items(items)
- items = items.collect do |i|
+ items = [items] unless items.is_a?(Array)
+
+ items.map do |item|
  JSON.generate(
- 'enqueued_at' => Time.now.to_i,
- 'body' => i
+ 'id' => item.id,
+ 'body' => item.body,
+ 'enqueued_at' => item.enqueued_at
  )
  end
  end

  def decode_json_items(items)
- items.collect! do |i|
- JSON.parse(i) if i
+ items.each_with_object([]) do |item_json, memo|
+ next unless item_json
+
+ hash = JSON.parse(item_json)
+ id, body, enqueued_at = hash.values_at('id', 'body', 'enqueued_at')
+ memo << Item.new(id: id, body: body, enqueued_at: enqueued_at)
  end
+ end
+
+ def retry_block(retries_limit = 3, base_sleep = 2)
+ retries = 0

- items.compact!
- items
+ begin
+ yield
+ rescue StandardError => err
+ if retries < retries_limit
+ retries += 1
+ sleep_time = base_sleep**retries
+ Kernel.sleep(sleep_time)
+ retry
+ end
+
+ raise err
+ end
  end
  end
  end
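
Two of the helpers added above carry the changelog entries directly: `normalize_items` wraps plain hashes in `Item`s, and `retry_block` is the exponential backoff now wrapped around Redis access (sleeps of 2, 4 and 8 seconds by default before re-raising). A short usage sketch:

```ruby
require 'upperkut/util'

class Helper
  include Upperkut::Util
end

helper = Helper.new

items = helper.normalize_items([{ 'id' => 1 }, { 'id' => 2 }])
items.first          # an Upperkut::Item with a generated id and enqueued_at set

helper.retry_block do
  # transient work; retried up to 3 times, sleeping 2, 4 and then 8 seconds
  raise 'flaky' if rand < 0.1
end
```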
data/lib/upperkut/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Upperkut
- VERSION = '0.7.4'.freeze
+ VERSION = '1.0.2'.freeze
  end
data/lib/upperkut/worker.rb CHANGED
@@ -1,7 +1,6 @@
  require 'forwardable'
  require 'upperkut/strategies/buffered_queue'
  require 'upperkut/middleware'
- require 'upperkut/util'
  require 'upperkut'

  module Upperkut
data/lib/upperkut/worker_thread.rb ADDED
@@ -0,0 +1,37 @@
+ require_relative 'processor'
+
+ module Upperkut
+ class WorkerThread
+ def initialize(manager, processor)
+ @manager = manager
+ @processor = processor
+ end
+
+ def run
+ @thread ||= Thread.new do
+ begin
+ @processor.blocking_process
+ rescue Exception => e
+ @manager.logger.debug(
+ action: :processor_killed,
+ reason: e,
+ stacktrace: e.backtrace
+ )
+
+ @manager.notify_killed_processor(self)
+ end
+ end
+ end
+
+ def stop
+ @processor.stop
+ end
+
+ def kill
+ return unless @thread
+
+ @thread.raise Upperkut::Shutdown
+ @thread.value # wait
+ end
+ end
+ end
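
`WorkerThread` above now owns the thread lifecycle, while `Processor` only knows how to process batches; the `Manager` spawns one `WorkerThread` per unit of concurrency and replaces any that die. A hedged boot sketch of how the pieces fit together (the worker name and signal handling are illustrative; the real CLI adds a timeout and a kill step):

```ruby
require 'upperkut/manager'

# Manager constantizes the worker name and spawns `concurrency`
# WorkerThreads, each looping in Processor#blocking_process.
manager = Upperkut::Manager.new(worker: 'MyWorker', concurrency: 2)
manager.run

# Asking the manager to stop flips the processors' @stopped flag, so each
# thread exits its loop after the batch it is currently working on.
trap('INT') { manager.stop }

sleep  # keep the main thread alive while the worker threads run
```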
data/upperkut.gemspec CHANGED
@@ -24,6 +24,6 @@ Gem::Specification.new do |spec|
  spec.add_dependency 'connection_pool', '~> 2.2', '>= 2.2.2'
  spec.add_dependency 'redis', '>= 4.1.0', '< 5.0.0'
  spec.add_development_dependency 'bundler', '>= 1.16'
- spec.add_development_dependency 'rake', '~> 10.0'
+ spec.add_development_dependency 'rake', '~> 13.0'
  spec.add_development_dependency 'rspec', '~> 3.0'
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: upperkut
  version: !ruby/object:Gem::Version
- version: 0.7.4
+ version: 1.0.2
  platform: ruby
  authors:
  - Nando Sousa
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2019-05-06 00:00:00.000000000 Z
+ date: 2021-01-04 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: connection_pool
@@ -70,14 +70,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '10.0'
+ version: '13.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '10.0'
+ version: '13.0'
  - !ruby/object:Gem::Dependency
  name: rspec
  requirement: !ruby/object:Gem::Requirement
@@ -106,20 +106,23 @@ files:
  - ".rspec"
  - CHANGELOG.md
  - CODE_OF_CONDUCT.md
+ - Dockerfile
  - Gemfile
  - Gemfile.lock
  - LICENSE.txt
+ - Makefile
  - README.md
  - Rakefile
  - bin/upperkut
+ - docker-compose.yml
  - examples/basic.rb
  - examples/priority_worker.rb
  - examples/scheduled_worker.rb
  - examples/with_middlewares.rb
  - lib/upperkut.rb
- - lib/upperkut/batch_execution.rb
  - lib/upperkut/cli.rb
  - lib/upperkut/core_ext.rb
+ - lib/upperkut/item.rb
  - lib/upperkut/logging.rb
  - lib/upperkut/manager.rb
  - lib/upperkut/middleware.rb
@@ -135,6 +138,7 @@ files:
  - lib/upperkut/util.rb
  - lib/upperkut/version.rb
  - lib/upperkut/worker.rb
+ - lib/upperkut/worker_thread.rb
  - upperkut.gemspec
  homepage: http://shipit.resultadosdigitais.com.br/open-source/
  licenses:
@@ -155,8 +159,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubyforge_project:
- rubygems_version: 2.7.8
+ rubygems_version: 3.1.4
  signing_key:
  specification_version: 4
  summary: Batch background processing tool
data/lib/upperkut/batch_execution.rb DELETED
@@ -1,42 +0,0 @@
- require_relative 'logging'
-
- module Upperkut
- class BatchExecution
- include Upperkut::Util
-
- def initialize(worker, logger = Upperkut::Logging.logger)
- @worker = worker
- @logger = logger
- end
-
- def execute
- worker_instance = @worker.new
- items = @worker.fetch_items.freeze
-
- items_body = items.collect do |item|
- item['body']
- end
-
- @worker.server_middlewares.invoke(@worker, items) do
- worker_instance.perform(items_body.dup)
- end
- rescue StandardError => error
- @logger.info(
- action: :requeue,
- ex: error,
- item_size: items_body.size
- )
-
- @logger.error(error.backtrace.join("\n"))
-
- if worker_instance.respond_to?(:handle_error)
- worker_instance.handle_error(error, items_body)
- return
- else
- @worker.push_items(items_body)
- end
-
- raise error
- end
- end
- end