upperkut 1.0.3 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +65 -0
  3. data/.codeclimate.yml +15 -0
  4. data/.github/dependabot.yml +12 -0
  5. data/.gitignore +12 -0
  6. data/.rspec +4 -0
  7. data/.rubocop.yml +73 -0
  8. data/CHANGELOG.md +45 -0
  9. data/CODE_OF_CONDUCT.md +74 -0
  10. data/Dockerfile +7 -0
  11. data/Gemfile +11 -0
  12. data/Gemfile.lock +61 -0
  13. data/LICENSE.txt +21 -0
  14. data/Makefile +4 -0
  15. data/README.md +162 -0
  16. data/Rakefile +6 -0
  17. data/catalog-info.yaml +15 -0
  18. data/docker-compose.yml +18 -0
  19. data/examples/basic.rb +12 -0
  20. data/examples/priority_worker.rb +21 -0
  21. data/examples/scheduled_worker.rb +19 -0
  22. data/examples/with_middlewares.rb +42 -0
  23. data/lib/upperkut/cli.rb +100 -0
  24. data/lib/upperkut/core_ext.rb +18 -0
  25. data/lib/upperkut/item.rb +22 -0
  26. data/lib/upperkut/logging.rb +36 -0
  27. data/lib/upperkut/manager.rb +50 -0
  28. data/lib/upperkut/middleware.rb +35 -0
  29. data/lib/upperkut/middlewares/datadog.rb +11 -0
  30. data/lib/upperkut/middlewares/new_relic.rb +23 -0
  31. data/lib/upperkut/middlewares/rollbar.rb +25 -0
  32. data/lib/upperkut/processor.rb +64 -0
  33. data/lib/upperkut/redis_pool.rb +29 -0
  34. data/lib/upperkut/strategies/base.rb +56 -0
  35. data/lib/upperkut/strategies/buffered_queue.rb +218 -0
  36. data/lib/upperkut/strategies/priority_queue.rb +217 -0
  37. data/lib/upperkut/strategies/scheduled_queue.rb +162 -0
  38. data/lib/upperkut/util.rb +73 -0
  39. data/lib/upperkut/version.rb +3 -0
  40. data/lib/upperkut/worker.rb +42 -0
  41. data/lib/upperkut/worker_thread.rb +37 -0
  42. data/lib/upperkut.rb +103 -0
  43. data/upperkut.gemspec +29 -0
  44. metadata +44 -2
data/lib/upperkut/strategies/buffered_queue.rb
@@ -0,0 +1,218 @@
+ require 'upperkut/util'
+ require 'upperkut/redis_pool'
+ require 'upperkut/strategies/base'
+
+ module Upperkut
+   module Strategies
+     class BufferedQueue < Upperkut::Strategies::Base
+       include Upperkut::Util
+
+       DEQUEUE_ITEMS = %(
+         local key = KEYS[1]
+         local waiting_ack_key = KEYS[2]
+         local batch_size = ARGV[1]
+         local current_timestamp = ARGV[2]
+         local expired_ack_timestamp = ARGV[3] + 1
+
+         -- move expired items back to the queue
+         local expired_ack_items = redis.call("ZRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+         if table.getn(expired_ack_items) > 0 then
+           redis.call("ZREMRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+           for i, item in ipairs(expired_ack_items) do
+             redis.call("RPUSH", key, item)
+           end
+         end
+
+         -- now fetch a batch
+         local items = redis.call("LRANGE", key, 0, batch_size - 1)
+         for i, item in ipairs(items) do
+           redis.call("ZADD", waiting_ack_key, current_timestamp + tonumber('0.' .. i), item)
+         end
+         redis.call("LTRIM", key, batch_size, -1)
+
+         return items
+       ).freeze
+
+       ACK_ITEMS = %(
+         local waiting_ack_key = KEYS[1]
+         local items = ARGV
+
+         for i, item in ipairs(items) do
+           redis.call("ZREM", waiting_ack_key, item)
+         end
+       ).freeze
+
+       NACK_ITEMS = %(
+         local key = KEYS[1]
+         local waiting_ack_key = KEYS[2]
+         local items = ARGV
+
+         for i, item in ipairs(items) do
+           redis.call("ZREM", waiting_ack_key, item)
+           redis.call("RPUSH", key, item)
+         end
+       ).freeze
+
+       attr_reader :options
+
+       def initialize(worker, options = {})
+         @options = options
+         @redis_options = options.fetch(:redis, {})
+         @worker = worker
+
+         @ack_wait_limit = options.fetch(
+           :ack_wait_limit,
+           Integer(ENV['UPPERKUT_ACK_WAIT_LIMIT'] || 120)
+         )
+
+         @max_wait = options.fetch(
+           :max_wait,
+           Integer(ENV['UPPERKUT_MAX_WAIT'] || 20)
+         )
+
+         @batch_size = options.fetch(
+           :batch_size,
+           Integer(ENV['UPPERKUT_BATCH_SIZE'] || 1000)
+         )
+
+         @waiting_time = 0
+       end
+
+       def push_items(items = [])
+         items = normalize_items(items)
+         return false if items.empty?
+
+         redis do |conn|
+           conn.rpush(key, encode_json_items(items))
+         end
+
+         true
+       end
+
+       def fetch_items
+         batch_size = [@batch_size, size].min
+
+         items = redis do |conn|
+           conn.eval(DEQUEUE_ITEMS,
+                     keys: [key, processing_key],
+                     argv: [batch_size, Time.now.utc.to_i, Time.now.utc.to_i - @ack_wait_limit])
+         end
+
+         decode_json_items(items)
+       end
+
+       def clear
+         redis { |conn| conn.del(key) }
+       end
+
+       def ack(items)
+         raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+         redis do |conn|
+           conn.eval(ACK_ITEMS,
+                     keys: [processing_key],
+                     argv: encode_json_items(items))
+         end
+       end
+
+       def nack(items)
+         raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+         redis do |conn|
+           conn.eval(NACK_ITEMS,
+                     keys: [key, processing_key],
+                     argv: encode_json_items(items))
+         end
+       end
+
+       def process?
+         buff_size = size
+
+         if fulfill_condition?(buff_size)
+           @waiting_time = 0
+           return true
+         else
+           @waiting_time += @worker.setup.polling_interval
+           return false
+         end
+       end
+
+       def metrics
+         current_latency = latency
+
+         {
+           'latency' => current_latency,
+           'oldest_unacked_item_age' => oldest_item_age(current_latency),
+           'size' => size
+         }
+       end
+
+       private
+
+       def key
+         "upperkut:buffers:#{to_underscore(@worker.name)}"
+       end
+
+       def processing_key
+         "#{key}:processing"
+       end
+
+       def fulfill_condition?(buff_size)
+         return false if buff_size.zero?
+
+         buff_size >= @batch_size || @waiting_time >= @max_wait
+       end
+
+       def oldest_item_age(current_latency)
+         oldest_processing_item = redis do |conn|
+           items = conn.zrange(processing_key, 0, 0)
+           decode_json_items(items).first
+         end
+
+         oldest_processing_age = if oldest_processing_item
+                                   now = Time.now.to_f
+                                   now - oldest_processing_item.enqueued_at.to_f
+                                 else
+                                   0
+                                 end
+
+         [current_latency, oldest_processing_age].max
+       end
+
+       def latency
+         items = redis { |conn| conn.lrange(key, 0, 0) }
+         first_item = decode_json_items(items).first
+         return 0 unless first_item
+
+         now = Time.now.to_f
+         now - first_item.enqueued_at.to_f
+       end
+
+       def size
+         redis do |conn|
+           conn.llen(key)
+         end
+       end
+
+       def redis
+         raise ArgumentError, 'requires a block' unless block_given?
+
+         retry_block do
+           redis_pool.with do |conn|
+             yield conn
+           end
+         end
+       end
+
+       def redis_pool
+         @redis_pool ||= begin
+           if @redis_options.is_a?(ConnectionPool)
+             @redis_options
+           else
+             RedisPool.new(@redis_options).create
+           end
+         end
+       end
+     end
+   end
+ end
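
For context, here is a minimal usage sketch of the BufferedQueue strategy above; it is not part of the diff. The `setup_upperkut` and `push_items` class API comes from data/lib/upperkut/worker.rb further down, while the instance-level `perform(batch)` hook is assumed from the gem's README and examples, which are listed in this release but not shown in this section.

require 'upperkut'
require 'upperkut/worker'
require 'upperkut/strategies/buffered_queue'

class EventsWorker
  include Upperkut::Worker

  setup_upperkut do |config|
    # Flush when 400 items accumulate or after 10 seconds of waiting,
    # and give unacked items 120 seconds before they return to the queue.
    config.strategy = Upperkut::Strategies::BufferedQueue.new(
      self,
      batch_size: 400,
      max_wait: 10,
      ack_wait_limit: 120
    )
  end

  # Assumed hook: the processor hands the worker a batch of Upperkut::Item.
  def perform(batch)
    batch.each { |item| puts item.body }
  end
end

EventsWorker.push_items([{ 'id' => 1 }, { 'id' => 2 }])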
data/lib/upperkut/strategies/priority_queue.rb
@@ -0,0 +1,217 @@
+ require 'upperkut/util'
+ require 'upperkut/redis_pool'
+ require 'upperkut/strategies/base'
+
+ module Upperkut
+   module Strategies
+     # Public: Queue that prevents a single tenant from taking over.
+     class PriorityQueue < Upperkut::Strategies::Base
+       include Upperkut::Util
+
+       ONE_DAY_IN_SECONDS = 86400
+
+       # The logic is as follows:
+       #
+       # We keep the last score used for each tenant key, where a tenant_key
+       # is a tenant's unique id. To calculate the next_score we use
+       # max(current_tenant_score, current_global_score) + increment, and we
+       # store the queue in a sorted set using next_score as the ordering key.
+       # If one tenant sends lots of messages, that tenant ends up with lots
+       # of messages in the queue, spaced by increment. If another tenant then
+       # sends a message, since its previous tenant score is lower than the
+       # first tenant's, it is inserted before them in the queue.
+       #
+       # In other words, the idea of this queue is to keep a tenant that
+       # sends a lot of messages from dominating processing, giving tenants
+       # that send only a few messages a fair share of
+       # processing time.
+       ENQUEUE_ITEM = %(
+         local increment = 1
+         local checkpoint_key = KEYS[1]
+         local counter_key = KEYS[2]
+         local score_key = KEYS[3]
+         local queue_key = KEYS[4]
+         local current_checkpoint = tonumber(redis.call("GET", checkpoint_key)) or 0
+         local current_counter = tonumber(redis.call("INCR", counter_key))
+         local current_score = tonumber(redis.call("GET", score_key)) or 0
+         local next_score = nil
+
+         if current_score >= current_checkpoint then
+           next_score = current_score + increment
+         else
+           next_score = current_checkpoint + increment
+         end
+
+         redis.call("SETEX", score_key, #{ONE_DAY_IN_SECONDS}, next_score)
+         redis.call("ZADD", queue_key, next_score + tonumber('0.' .. current_counter), ARGV[1])
+
+         return next_score
+       ).freeze
+
+       # Uses ZPOP* functions available only on redis 5.0.0+
+       DEQUEUE_ITEM = %(
+         local checkpoint_key = KEYS[1]
+         local queue_key = KEYS[2]
+         local batch_size = ARGV[1]
+         local popped_items = redis.call("ZPOPMIN", queue_key, batch_size)
+         local items = {}
+         local last_score = 0
+
+         for i, v in ipairs(popped_items) do
+           if i % 2 == 1 then
+             table.insert(items, v)
+           else
+             last_score = v
+           end
+         end
+
+         redis.call("SETEX", checkpoint_key, 86400, last_score)
+         return items
+       ).freeze
+
+       def initialize(worker, options)
+         @worker = worker
+         @options = options
+         @priority_key = options.fetch(:priority_key)
+         @redis_options = options.fetch(:redis, {})
+
+         @max_wait = options.fetch(
+           :max_wait,
+           Integer(ENV['UPPERKUT_MAX_WAIT'] || 20)
+         )
+
+         @batch_size = options.fetch(
+           :batch_size,
+           Integer(ENV['UPPERKUT_BATCH_SIZE'] || 1000)
+         )
+
+         @waiting_time = 0
+
+         raise ArgumentError, 'Invalid priority_key. ' \
+                              'Must be a lambda' unless @priority_key.respond_to?(:call)
+       end
+
+       # Public: Ingests the events into the strategy.
+       #
+       # items - The Array of items to be inserted.
+       #
+       # Returns true on success, raises on error.
+       def push_items(items = [])
+         items = normalize_items(items)
+         return false if items.empty?
+
+         redis do |conn|
+           items.each do |item|
+             priority_key = @priority_key.call(item)
+             score_key = "#{queue_key}:#{priority_key}:score"
+
+             keys = [checkpoint_key,
+                     counter_key,
+                     score_key,
+                     queue_key]
+
+             conn.eval(ENQUEUE_ITEM,
+                       keys: keys,
+                       argv: [encode_json_items(item)])
+           end
+         end
+
+         true
+       end
+
+       # Public: Retrieves events from the strategy.
+       #
+       # Returns an Array containing events as hashes.
+       def fetch_items
+         batch_size = [@batch_size, size].min
+
+         items = redis do |conn|
+           conn.eval(DEQUEUE_ITEM,
+                     keys: [checkpoint_key, queue_key],
+                     argv: [batch_size])
+         end
+
+         decode_json_items(items)
+       end
+
+       # Public: Clears all data related to the strategy.
+       def clear
+         redis { |conn| conn.del(queue_key) }
+       end
+
+       def ack(_items); end
+
+       def nack(items)
+         push_items(items)
+       end
+
+       # Public: Tells whether it is time to process; when this
+       # condition is met, the buffered events are dispatched to
+       # the worker.
+       def process?
+         if fulfill_condition?(size)
+           @waiting_time = 0
+           return true
+         end
+
+         @waiting_time += @worker.setup.polling_interval
+         false
+       end
+
+       # Public: Consolidated strategy metrics.
+       #
+       # Returns a hash containing metric names and values.
+       def metrics
+         {
+           'size' => size
+         }
+       end
+
+       private
+
+       def checkpoint_key
+         "#{queue_key}:checkpoint"
+       end
+
+       def counter_key
+         "#{queue_key}:counter"
+       end
+
+       def queue_key
+         "upperkut:priority_queue:#{to_underscore(@worker.name)}"
+       end
+
+       def fulfill_condition?(buff_size)
+         return false if buff_size.zero?
+
+         buff_size >= @batch_size || @waiting_time >= @max_wait
+       end
+
+       def size
+         redis do |conn|
+           conn.zcard(queue_key)
+         end
+       end
+
+       def redis
+         raise ArgumentError, 'requires a block' unless block_given?
+
+         retry_block do
+           redis_pool.with do |conn|
+             yield conn
+           end
+         end
+       end
+
+       def redis_pool
+         @redis_pool ||= begin
+           if @redis_options.is_a?(ConnectionPool)
+             @redis_options
+           else
+             RedisPool.new(@options.fetch(:redis, {})).create
+           end
+         end
+       end
+     end
+   end
+ end
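
A similar sketch for PriorityQueue, again not part of the diff. The `priority_key:` lambda is required by the constructor above and receives the normalized Upperkut::Item, so the tenant id is read from `item.body`; the DEQUEUE_ITEM script relies on ZPOPMIN, so Redis 5.0+ is needed. The `perform` hook is the same assumption as in the previous sketch.

require 'upperkut'
require 'upperkut/worker'
require 'upperkut/strategies/priority_queue'

class NotificationsWorker
  include Upperkut::Worker

  setup_upperkut do |config|
    config.strategy = Upperkut::Strategies::PriorityQueue.new(
      self,
      priority_key: ->(item) { item.body['tenant_id'] },
      batch_size: 200
    )
  end

  def perform(batch)
    batch.each { |item| puts "notify #{item.body['tenant_id']}" }
  end
end

# Tenant "a" floods the queue with 1,000 messages, tenant "b" sends one;
# "b" is still dequeued early because its score trails far behind "a"'s.
NotificationsWorker.push_items(Array.new(1_000) { { 'tenant_id' => 'a' } })
NotificationsWorker.push_items('tenant_id' => 'b')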
data/lib/upperkut/strategies/scheduled_queue.rb
@@ -0,0 +1,162 @@
+ require 'time'
+ require 'upperkut/util'
+ require 'upperkut/redis_pool'
+ require 'upperkut/strategies/base'
+
+ module Upperkut
+   module Strategies
+     # Public: Encapsulates the methods required to build a Scheduled Queue.
+     # Items are queued, but are only fetched at a specific point in time.
+     class ScheduledQueue < Upperkut::Strategies::Base
+       include Upperkut::Util
+
+       ZPOPBYRANGE = %(
+         local score_from = ARGV[1]
+         local score_to = ARGV[2]
+         local limit = ARGV[3]
+
+         local values = redis.call('zrangebyscore', KEYS[1], score_from, score_to, 'LIMIT', '0', limit)
+
+         if table.getn(values) > 0 then
+           redis.call('zrem', KEYS[1], unpack(values))
+         end
+
+         return values
+       ).freeze
+
+       attr_reader :options
+
+       def initialize(worker, options = {})
+         @options = options
+         @redis_options = @options.fetch(:redis, {})
+         @worker = worker
+
+         @batch_size = @options.fetch(
+           :batch_size,
+           Integer(ENV['UPPERKUT_BATCH_SIZE'] || 1000)
+         )
+       end
+
+       def push_items(items = [])
+         items = normalize_items(items)
+         return false if items.empty?
+
+         redis do |conn|
+           items.each do |item|
+             schedule_item = ensure_timestamp_attr(item)
+             timestamp = schedule_item.body['timestamp']
+             conn.zadd(key, timestamp, encode_json_items(schedule_item))
+           end
+         end
+
+         true
+       end
+
+       def fetch_items
+         args = {
+           value_from: '-inf'.freeze,
+           value_to: Time.now.utc.to_f.to_s,
+           limit: @batch_size
+         }
+         items = []
+
+         redis do |conn|
+           items = pop_values(conn, args)
+         end
+
+         decode_json_items(items)
+       end
+
+       def clear
+         redis { |conn| conn.del(key) }
+       end
+
+       def ack(_items); end
+
+       def nack(items)
+         push_items(items)
+       end
+
+       def metrics
+         {
+           'latency' => latency,
+           'size' => size
+         }
+       end
+
+       def process?
+         buff_size = size('-inf', Time.now.utc.to_i)
+         return true if fulfill_condition?(buff_size)
+
+         false
+       end
+
+       private
+
+       def key
+         "upperkut:queued:#{to_underscore(@worker.name)}"
+       end
+
+       def ensure_timestamp_attr(item)
+         return item if item.body.key?('timestamp')
+
+         Item.new(
+           id: item.id,
+           body: item.body.merge('timestamp' => Time.now.utc.to_i),
+           enqueued_at: item.enqueued_at
+         )
+       end
+
+       def pop_values(redis_client, args)
+         value_from = args[:value_from]
+         value_to = args[:value_to]
+         limit = args[:limit]
+         redis_client.eval(ZPOPBYRANGE, keys: [key], argv: [value_from, value_to, limit])
+       end
+
+       def fulfill_condition?(buff_size)
+         !buff_size.zero?
+       end
+
+       def size(min = '-inf', max = '+inf')
+         redis do |conn|
+           conn.zcount(key, min, max)
+         end
+       end
+
+       def latency
+         now = Time.now.utc
+         timestamp = now.to_f
+
+         item = redis do |conn|
+           item = conn.zrangebyscore(key, '-inf', timestamp.to_s, limit: [0, 1]).first
+           decode_json_items([item]).first
+         end
+
+         return timestamp - item.body['timestamp'].to_f if item
+
+         0
+       end
+
+       def redis
+         raise ArgumentError, 'requires a block' unless block_given?
+
+         retry_block do
+           redis_pool.with do |conn|
+             yield conn
+           end
+         end
+       end
+
+       def redis_pool
+         @redis_pool ||= begin
+           if @redis_options.is_a?(ConnectionPool)
+             @redis_options
+           else
+             RedisPool.new(@redis_options).create
+           end
+         end
+       end
+     end
+   end
+ end
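
And one for ScheduledQueue, not part of the diff: items are scored by the 'timestamp' attribute in their body, and fetch_items only returns items whose timestamp is already in the past; items pushed without a timestamp default to the current time and are eligible immediately. The `perform` hook remains the same assumption as above.

require 'upperkut'
require 'upperkut/worker'
require 'upperkut/strategies/scheduled_queue'

class ReminderWorker
  include Upperkut::Worker

  setup_upperkut do |config|
    config.strategy = Upperkut::Strategies::ScheduledQueue.new(self)
  end

  def perform(batch)
    batch.each { |item| puts "reminding user #{item.body['user_id']}" }
  end
end

# Becomes eligible for fetching one hour from now; until then it sits in
# the sorted set and is not dispatched to the worker.
ReminderWorker.push_items(
  'user_id' => 42,
  'timestamp' => Time.now.utc.to_i + 3600
)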
data/lib/upperkut/util.rb
@@ -0,0 +1,73 @@
+ require 'json'
+ require 'upperkut/item'
+
+ module Upperkut
+   module Util
+     def to_underscore(object)
+       klass_name = object.dup
+       klass_name.gsub!(/::/, '_')
+       klass_name.gsub!(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2')
+       klass_name.gsub!(/([a-z\d])([A-Z])/, '\1_\2')
+       klass_name.tr!('-', '_')
+       klass_name.downcase!
+       klass_name
+     end
+
+     # Public:
+     # Normalizes a hash or an array of hashes into an array of Items.
+     # An Item object carries metadata, for example the timestamp from the moment it was enqueued,
+     # that we need to keep across multiple execution attempts.
+     #
+     # When an execution fails, we need to schedule the whole batch for retry; scheduling
+     # an Item (rather than a bare hash) tells Upperkut that this is not a new batch,
+     # so metrics such as latency keep increasing.
+     def normalize_items(items)
+       items = [items] unless items.is_a?(Array)
+
+       items.map do |item|
+         next item if item.is_a?(Item)
+
+         Item.new(id: SecureRandom.uuid, body: item)
+       end
+     end
+
+     def encode_json_items(items)
+       items = [items] unless items.is_a?(Array)
+
+       items.map do |item|
+         JSON.generate(
+           'id' => item.id,
+           'body' => item.body,
+           'enqueued_at' => item.enqueued_at
+         )
+       end
+     end
+
+     def decode_json_items(items)
+       items.each_with_object([]) do |item_json, memo|
+         next unless item_json
+
+         hash = JSON.parse(item_json)
+         id, body, enqueued_at = hash.values_at('id', 'body', 'enqueued_at')
+         memo << Item.new(id: id, body: body, enqueued_at: enqueued_at)
+       end
+     end
+
+     def retry_block(retries_limit = 3, base_sleep = 2)
+       retries = 0
+
+       begin
+         yield
+       rescue StandardError => err
+         if retries < retries_limit
+           retries += 1
+           sleep_time = base_sleep**retries
+           Kernel.sleep(sleep_time)
+           retry
+         end
+
+         raise err
+       end
+     end
+   end
+ end
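
A quick sketch of Upperkut::Util in isolation; the including class below is made up for illustration. It shows a JSON round trip that preserves enqueued_at, plus the exponential backoff retry_block applies: it sleeps base_sleep**retries, i.e. 2, 4, then 8 seconds with the defaults, before re-raising.

require 'securerandom' # normalize_items uses SecureRandom.uuid
require 'upperkut/util'

class UtilProbe
  include Upperkut::Util
end

util = UtilProbe.new

items   = util.normalize_items([{ 'event' => 'signup' }]) # => [Upperkut::Item]
encoded = util.encode_json_items(items)                   # => array of JSON strings
decoded = util.decode_json_items(encoded)                 # enqueued_at carried over

util.retry_block(3, 2) do
  # A transient error raised here is retried after 2s, 4s and 8s,
  # then re-raised to the caller.
  decoded.first.body
end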
data/lib/upperkut/version.rb
@@ -0,0 +1,3 @@
+ module Upperkut
+   VERSION = '1.0.4'.freeze
+ end
data/lib/upperkut/worker.rb
@@ -0,0 +1,42 @@
+ require 'forwardable'
+ require 'upperkut/strategies/buffered_queue'
+ require 'upperkut/middleware'
+ require 'upperkut'
+
+ module Upperkut
+   module Worker
+     def self.included(base)
+       base.extend(ClassMethods)
+     end
+
+     module ClassMethods
+       extend Forwardable
+
+       def_delegators :setup, :strategy, :server_middlewares, :client_middlewares
+       def_delegators :strategy, :metrics, :clear
+
+       def push_items(items)
+         client_middlewares.invoke(self, items) do
+           strategy.push_items(items)
+         end
+       end
+
+       def fetch_items
+         strategy.fetch_items
+       end
+
+       def setup_upperkut
+         yield(setup) if block_given?
+       end
+
+       def setup
+         @config ||=
+           begin
+             config = Upperkut::Configuration.default.clone
+             config.strategy ||= Upperkut::Strategies::BufferedQueue.new(self)
+             config
+           end
+       end
+     end
+   end
+ end
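
Finally, because ClassMethods delegates :metrics and :clear to the strategy (the def_delegators above), queue introspection is available directly on the worker class. This brief sketch reuses the hypothetical EventsWorker from the BufferedQueue example; the metric values shown are illustrative, and the keys come from BufferedQueue#metrics in this diff.

EventsWorker.push_items([{ 'id' => 1 }, { 'id' => 2 }])

EventsWorker.metrics
# => { 'latency' => 0.4, 'oldest_unacked_item_age' => 0.4, 'size' => 2 }

EventsWorker.clear            # deletes the Redis list backing the buffer
EventsWorker.metrics['size']  # => 0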