upperkut 0.7.2 → 1.0.0.rc

@@ -8,12 +8,12 @@ module Upperkut
       size: 2, # pool related option
       connect_timeout: 0.2,
       read_timeout: 5.0,
-      write_timeout: 0.5,
-      url: ENV['REDIS_URL']
+      write_timeout: 0.5
     }.freeze
 
     def initialize(options)
-      @options = DEFAULT_OPTIONS.merge(options)
+      @options = DEFAULT_OPTIONS.merge(url: ENV['REDIS_URL'])
+                                .merge(options)
 
       # Extract pool related options
       @size = @options.delete(:size)
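
The net effect of this hunk (which appears to be Upperkut's RedisPool setup) is that ENV['REDIS_URL'] is no longer frozen into DEFAULT_OPTIONS; it is merged in at initialization and can therefore be overridden by an explicit :url option. A minimal sketch of the resulting precedence, with made-up URLs:

    # Hypothetical values, for illustration only.
    ENV['REDIS_URL'] = 'redis://default-host:6379/0'

    # No explicit :url -> the pool falls back to ENV['REDIS_URL'].
    Upperkut::RedisPool.new({})

    # Explicit :url -> wins over the environment variable, because
    # DEFAULT_OPTIONS.merge(url: ENV['REDIS_URL']).merge(options)
    # applies the caller's options last.
    Upperkut::RedisPool.new(url: 'redis://custom-host:6379/1')
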
@@ -24,6 +24,20 @@ module Upperkut
         raise NotImplementedError
       end
 
+      # Public: Confirms that items have been processed successfully.
+      #
+      # items - The Array of items to be confirmed.
+      def ack(_items)
+        raise NotImplementedError
+      end
+
+      # Public: Informs that items have not been processed successfully and therefore must be re-processed.
+      #
+      # items - The Array of items to be unacknowledged.
+      def nack(_items)
+        raise NotImplementedError
+      end
+
       # Public: Tells when to execute the event processing;
       # when this condition is met, the events are dispatched to
       # the worker.
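
Strategies::Base now declares ack and nack alongside the existing abstract methods, so custom strategies are expected to implement both. A hypothetical subclass sketch (MyStrategy and its comments are placeholders, not part of the gem):

    class MyStrategy < Upperkut::Strategies::Base
      def ack(items)
        # Confirm the batch: drop the items from the strategy's "in flight" storage.
      end

      def nack(items)
        # Reject the batch: put the items back so they get processed again.
      end
    end
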
@@ -7,14 +7,65 @@ module Upperkut
     class BufferedQueue < Upperkut::Strategies::Base
       include Upperkut::Util
 
+      DEQUEUE_ITEMS = %(
+        local key = KEYS[1]
+        local waiting_ack_key = KEYS[2]
+        local batch_size = ARGV[1]
+        local current_timestamp = ARGV[2]
+        local expired_ack_timestamp = ARGV[3] + 1
+
+        -- move expired items back to the queue
+        local expired_ack_items = redis.call("ZRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+        if table.getn(expired_ack_items) > 0 then
+          redis.call("ZREMRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+          for i, item in ipairs(expired_ack_items) do
+            redis.call("RPUSH", key, item)
+          end
+        end
+
+        -- now fetch a batch
+        local items = redis.call("LRANGE", key, 0, batch_size - 1)
+        for i, item in ipairs(items) do
+          redis.call("ZADD", waiting_ack_key, current_timestamp + tonumber('0.' .. i), item)
+        end
+        redis.call("LTRIM", key, batch_size, -1)
+
+        return items
+      ).freeze
+
+      ACK_ITEMS = %(
+        local waiting_ack_key = KEYS[1]
+        local items = ARGV
+
+        for i, item in ipairs(items) do
+          redis.call("ZREM", waiting_ack_key, item)
+        end
+      ).freeze
+
+      NACK_ITEMS = %(
+        local key = KEYS[1]
+        local waiting_ack_key = KEYS[2]
+        local items = ARGV
+
+        for i, item in ipairs(items) do
+          redis.call("ZREM", waiting_ack_key, item)
+          redis.call("RPUSH", key, item)
+        end
+      ).freeze
+
       attr_reader :options
 
       def initialize(worker, options = {})
         @options = options
         @redis_options = options.fetch(:redis, {})
-        @redis_pool = setup_redis_pool
-        @worker = worker
-        @max_wait = options.fetch(
+        @worker = worker
+
+        @ack_wait_limit = options.fetch(
+          :ack_wait_limit,
+          Integer(ENV['UPPERKUT_ACK_WAIT_LIMIT'] || 120)
+        )
+
+        @max_wait = options.fetch(
           :max_wait,
           Integer(ENV['UPPERKUT_MAX_WAIT'] || 20)
         )
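
The DEQUEUE_ITEMS script gives BufferedQueue at-least-once delivery: fetched items are parked in a waiting-ack sorted set scored by fetch time, and anything older than :ack_wait_limit (120 seconds by default, or UPPERKUT_ACK_WAIT_LIMIT) is pushed back onto the main list the next time a batch is fetched. A hedged configuration sketch; MyWorker is a placeholder and only the option name comes from the diff:

    # Re-deliver unacknowledged items after 60 seconds instead of the 120-second default.
    strategy = Upperkut::Strategies::BufferedQueue.new(
      MyWorker,
      ack_wait_limit: 60 # seconds an item may stay unacked before being requeued
    )
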
@@ -28,23 +79,23 @@ module Upperkut
       end
 
       def push_items(items = [])
-        items = [items] if items.is_a?(Hash)
+        items = normalize_items(items)
         return false if items.empty?
 
         redis do |conn|
-          conn.rpush(key, encode_json_items(items))
+          conn.rpush(key, items.map(&:to_json))
         end
 
         true
       end
 
       def fetch_items
-        stop = [@batch_size, size].min
+        batch_size = [@batch_size, size].min
 
         items = redis do |conn|
-          conn.multi do
-            stop.times { conn.lpop(key) }
-          end
+          conn.eval(DEQUEUE_ITEMS,
+                    keys: [key, processing_key],
+                    argv: [batch_size, Time.now.utc.to_i, Time.now.utc.to_i - @ack_wait_limit])
         end
 
         decode_json_items(items)
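
push_items now runs incoming items through normalize_items (from Upperkut::Util) instead of the old ad-hoc Hash check, and fetch_items moves a batch into the processing set atomically via the Lua script instead of issuing batched LPOPs. An illustrative push, assuming normalize_items still accepts either a single hash or an array of hashes as the previous code did:

    strategy.push_items('tenant' => 'acme', 'event' => 'signup')         # single event
    strategy.push_items([{ 'event' => 'click' }, { 'event' => 'view' }]) # batch of events
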
@@ -54,11 +105,24 @@ module Upperkut
         redis { |conn| conn.del(key) }
       end
 
-      def metrics
-        {
-          'latency' => latency,
-          'size' => size
-        }
+      def ack(items)
+        raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+        redis do |conn|
+          conn.eval(ACK_ITEMS,
+                    keys: [processing_key],
+                    argv: items.map(&:to_json))
+        end
+      end
+
+      def nack(items)
+        raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+        redis do |conn|
+          conn.eval(NACK_ITEMS,
+                    keys: [key, processing_key],
+                    argv: items.map(&:to_json))
+        end
       end
 
       def process?
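
ack and nack operate on the exact Item objects returned by fetch_items; each item is re-serialized with to_json so it matches the member stored in the processing sorted set, and anything that is not an Item raises ArgumentError. A hypothetical processing loop (handle is a placeholder):

    items = strategy.fetch_items
    begin
      items.each { |item| handle(item) }
      strategy.ack(items)   # drops the items from the processing set
    rescue StandardError
      strategy.nack(items)  # drops them from the processing set and pushes them back onto the queue
      raise
    end
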
@@ -73,45 +137,81 @@ module Upperkut
         end
       end
 
+      def metrics
+        current_latency = latency
+
+        {
+          'latency' => current_latency,
+          'oldest_unacked_item_age' => oldest_item_age(current_latency),
+          'size' => size
+        }
+      end
+
       private
 
+      def key
+        "upperkut:buffers:#{to_underscore(@worker.name)}"
+      end
+
+      def processing_key
+        "#{key}:processing"
+      end
+
       def fulfill_condition?(buff_size)
         return false if buff_size.zero?
 
         buff_size >= @batch_size || @waiting_time >= @max_wait
       end
 
-      def size
-        redis do |conn|
-          conn.llen(key)
+      def oldest_item_age(current_latency)
+        oldest_processing_item = redis do |conn|
+          items = conn.zrange(processing_key, 0, 0)
+          decode_json_items(items).first
         end
+
+        oldest_processing_age = if oldest_processing_item
+                                  now = Time.now.to_f
+                                  now - oldest_processing_item.enqueued_at.to_f
+                                else
+                                  0
+                                end
+
+        [current_latency, oldest_processing_age].max
       end
 
       def latency
-        item = redis { |conn| conn.lrange(key, 0, 0) }
-        item = decode_json_items(item).first
-        return 0 unless item
+        items = redis { |conn| conn.lrange(key, 0, 0) }
+        first_item = decode_json_items(items).first
+        return 0 unless first_item
 
         now = Time.now.to_f
-        now - item.fetch('enqueued_at', Time.now).to_f
+        now - first_item.enqueued_at.to_f
       end
 
-      def setup_redis_pool
-        return @redis_options if @redis_options.is_a?(ConnectionPool)
-
-        RedisPool.new(options.fetch(:redis, {})).create
+      def size
+        redis do |conn|
+          conn.llen(key)
+        end
       end
 
       def redis
         raise ArgumentError, 'requires a block' unless block_given?
 
-        @redis_pool.with do |conn|
-          yield conn
+        retry_block do
+          redis_pool.with do |conn|
+            yield conn
+          end
         end
       end
 
-      def key
-        "upperkut:buffers:#{to_underscore(@worker.name)}"
+      def redis_pool
+        @redis_pool ||= begin
+          if @redis_options.is_a?(ConnectionPool)
+            @redis_options
+          else
+            RedisPool.new(@redis_options).create
+          end
+        end
       end
     end
   end
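
metrics now also reports how long the oldest unacknowledged item has been waiting, taking the larger of the queue latency and the age of the oldest entry in the processing set. Illustrative output only; the numbers are made up:

    strategy.metrics
    # => { 'latency' => 0.8, 'oldest_unacked_item_age' => 42.3, 'size' => 1300 }
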
@@ -0,0 +1,217 @@
+require 'upperkut/util'
+require 'upperkut/redis_pool'
+require 'upperkut/strategies/base'
+
+module Upperkut
+  module Strategies
+    # Public: Queue that prevents a single tenant from taking over.
+    class PriorityQueue < Upperkut::Strategies::Base
+      include Upperkut::Util
+
+      ONE_DAY_IN_SECONDS = 86400
+
+      # The logic is as follows:
+      #
+      # We keep the last score used for each tenant key. A tenant_key is
+      # a tenant's unique id. To calculate the next_score we use
+      # max(current_tenant_score, current_global_score) + increment. We store
+      # the queue in a sorted set using the next_score as the ordering key.
+      # If one tenant sends lots of messages, that tenant ends up with lots
+      # of messages in the queue spaced by increment. If another tenant then
+      # sends a message, since its previous_tenant_score is lower than the
+      # first tenant's, it will be inserted before it in the queue.
+      #
+      # In other words, the idea of this queue is not to allow a tenant
+      # that sends a lot of messages to dominate processing, and to give
+      # tenants that send few messages a fair share of
+      # processing time.
+      ENQUEUE_ITEM = %(
+        local increment = 1
+        local checkpoint_key = KEYS[1]
+        local counter_key = KEYS[2]
+        local score_key = KEYS[3]
+        local queue_key = KEYS[4]
+        local current_checkpoint = tonumber(redis.call("GET", checkpoint_key)) or 0
+        local current_counter = tonumber(redis.call("INCR", counter_key))
+        local current_score = tonumber(redis.call("GET", score_key)) or 0
+        local next_score = nil
+
+        if current_score >= current_checkpoint then
+          next_score = current_score + increment
+        else
+          next_score = current_checkpoint + increment
+        end
+
+        redis.call("SETEX", score_key, #{ONE_DAY_IN_SECONDS}, next_score)
+        redis.call("ZADD", queue_key, next_score + tonumber('0.' .. current_counter), ARGV[1])
+
+        return next_score
+      ).freeze
+
+      # Uses ZPOP* functions available only on Redis 5.0.0+
+      DEQUEUE_ITEM = %(
+        local checkpoint_key = KEYS[1]
+        local queue_key = KEYS[2]
+        local batch_size = ARGV[1]
+        local popped_items = redis.call("ZPOPMIN", queue_key, batch_size)
+        local items = {}
+        local last_score = 0
+
+        for i, v in ipairs(popped_items) do
+          if i % 2 == 1 then
+            table.insert(items, v)
+          else
+            last_score = v
+          end
+        end
+
+        redis.call("SETEX", checkpoint_key, 86400, last_score)
+        return items
+      ).freeze
+
+      def initialize(worker, options)
+        @worker = worker
+        @options = options
+        @priority_key = options.fetch(:priority_key)
+        @redis_options = options.fetch(:redis, {})
+
+        @max_wait = options.fetch(
+          :max_wait,
+          Integer(ENV['UPPERKUT_MAX_WAIT'] || 20)
+        )
+
+        @batch_size = options.fetch(
+          :batch_size,
+          Integer(ENV['UPPERKUT_BATCH_SIZE'] || 1000)
+        )
+
+        @waiting_time = 0
+
+        raise ArgumentError, 'Invalid priority_key. ' \
+          'Must be a lambda' unless @priority_key.respond_to?(:call)
+      end
+
+      # Public: Ingests the event into the strategy.
+      #
+      # items - The Array of items to be inserted.
+      #
+      # Returns true on success, raises on error.
+      def push_items(items = [])
+        items = normalize_items(items)
+        return false if items.empty?
+
+        redis do |conn|
+          items.each do |item|
+            priority_key = @priority_key.call(item)
+            score_key = "#{queue_key}:#{priority_key}:score"
+
+            keys = [checkpoint_key,
+                    counter_key,
+                    score_key,
+                    queue_key]
+
+            conn.eval(ENQUEUE_ITEM,
+                      keys: keys,
+                      argv: [item.to_json])
+          end
+        end
+
+        true
+      end
+
+      # Public: Retrieves events from the strategy.
+      #
+      # Returns an Array containing events as hashes.
+      def fetch_items
+        batch_size = [@batch_size, size].min
+
+        items = redis do |conn|
+          conn.eval(DEQUEUE_ITEM,
+                    keys: [checkpoint_key, queue_key],
+                    argv: [batch_size])
+        end
+
+        decode_json_items(items)
+      end
+
+      # Public: Clears all data related to the strategy.
+      def clear
+        redis { |conn| conn.del(queue_key) }
+      end
+
+      def ack(_items); end
+
+      def nack(items)
+        push_items(items)
+      end
+
+      # Public: Tells when to execute the event processing;
+      # when this condition is met, the events are dispatched to
+      # the worker.
+      def process?
+        if fulfill_condition?(size)
+          @waiting_time = 0
+          return true
+        end
+
+        @waiting_time += @worker.setup.polling_interval
+        false
+      end
+
+      # Public: Consolidated strategy metrics.
+      #
+      # Returns a hash containing metric names and values.
+      def metrics
+        {
+          'size' => size
+        }
+      end
+
+      private
+
+      def checkpoint_key
+        "#{queue_key}:checkpoint"
+      end
+
+      def counter_key
+        "#{queue_key}:counter"
+      end
+
+      def queue_key
+        "upperkut:priority_queue:#{to_underscore(@worker.name)}"
+      end
+
+      def fulfill_condition?(buff_size)
+        return false if buff_size.zero?
+
+        buff_size >= @batch_size || @waiting_time >= @max_wait
+      end
+
+      def size
+        redis do |conn|
+          conn.zcard(queue_key)
+        end
+      end
+
+      def redis
+        raise ArgumentError, 'requires a block' unless block_given?
+
+        retry_block do
+          redis_pool.with do |conn|
+            yield conn
+          end
+        end
+      end
+
+      def redis_pool
+        @redis_pool ||= begin
+          if @redis_options.is_a?(ConnectionPool)
+            @redis_options
+          else
+            RedisPool.new(@options.fetch(:redis, {})).create
+          end
+        end
+      end
+    end
+  end
+end
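
A hypothetical setup sketch for the new PriorityQueue: the required :priority_key lambda extracts the tenant identifier from each pushed item. MyWorker is a placeholder, and the lambda assumes the normalized item exposes its original payload via Item#body; adjust to however Upperkut::Item actually exposes it.

    strategy = Upperkut::Strategies::PriorityQueue.new(
      MyWorker,
      priority_key: ->(item) { item.body['tenant_id'] } # assumption: Item#body returns the payload hash
    )

    # Tenant "a" floods the queue while tenant "b" sends a single event.
    strategy.push_items(Array.new(100) { { 'tenant_id' => 'a', 'event' => 'ping' } })
    strategy.push_items('tenant_id' => 'b', 'event' => 'ping')

    # Because "b" scores from the current checkpoint rather than behind all of
    # "a"'s pending items, its event lands near "a"'s earliest entries and is
    # fetched in one of the next batches instead of after all 100.
    strategy.fetch_items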