upperkut 0.8.1 → 1.0.0.rc

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: b61a3728cde5e295de277abc22836653fc332640843bce69f3ca3832f830a1ec
- data.tar.gz: f781b92bd8923e67e3f26a207904ee4b80bf54104bc98364559d4924a5b3c5e2
+ metadata.gz: 497852c1c7edee902a15cb3978f2efaab5ffdd8fe5be6fa2ab3fc62e9de2b661
+ data.tar.gz: 39ed05ba320a38368c83a018216ab9db5ab26e760369f5b944e8ecacd24c1acc
  SHA512:
- metadata.gz: 961de37230a1db0d1c8162b9a7e25f69715f647f154b3b36e66aef2568d0620235fbeaa5b92bcfefdb5189bcee0b1de98220e0db6811ee7dc3477114bdea3758
- data.tar.gz: f509c2623813dd4132b1df11dc220de1082f441078a5ddc96b753fec9d47c87ea62851af45dd4257c60578e83b0647a36ef6a8f1a643a5748cbf58751206403d
+ metadata.gz: 7cccacbc6c943ca5ee5bab8bea75c67d0ba6bb039c732723f16ffb78cd98c85ee5873b04cfc485b8e3fa879a35bfe59f0aedb53ede87a620666aebbe40ef9b77
+ data.tar.gz: 3031919f600731f5fb2989ebe02055a1032bb3565ce471b74f6b29025b9f2417fc98a1302cc39ad6d0a05b1c4d641636cbff8d965bbd7194b5ae94f34a1096e4
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- upperkut (0.8.1)
+ upperkut (1.0.0.rc)
  connection_pool (~> 2.2, >= 2.2.2)
  redis (>= 4.1.0, < 5.0.0)

@@ -9,17 +9,17 @@ GEM
  remote: https://rubygems.org/
  specs:
  coderay (1.1.2)
- connection_pool (2.2.2)
+ connection_pool (2.2.3)
  diff-lcs (1.3)
  docile (1.3.2)
  fivemat (1.3.7)
  json (2.3.0)
  method_source (1.0.0)
- pry (0.13.0)
+ pry (0.13.1)
  coderay (~> 1.1)
  method_source (~> 1.0)
  rake (13.0.1)
- redis (4.1.3)
+ redis (4.2.1)
  rspec (3.9.0)
  rspec-core (~> 3.9.0)
  rspec-expectations (~> 3.9.0)
@@ -55,4 +55,4 @@ DEPENDENCIES
  upperkut!

  BUNDLED WITH
- 1.17.3
+ 2.1.4
data/README.md CHANGED
@@ -42,7 +42,7 @@ Or install it yourself as:

  2) Start pushing items;
  ```ruby
- Myworker.push_items(
+ MyWorker.push_items(
  [
  {
  'id' => SecureRandom.uuid,
@@ -80,7 +80,7 @@ Or install it yourself as:
  2) Start pushing items with `timestamp` parameter;
  ```ruby
  # timestamp is 'Thu, 10 May 2019 23:43:58 GMT'
- Myworker.push_items(
+ MyWorker.push_items(
  [
  {
  'timestamp' => '1557531838',
@@ -1,12 +1,16 @@
+ require 'securerandom'
+
  module Upperkut
  class Item
- attr_reader :enqueued_at
+ attr_reader :id, :body, :enqueued_at

- def initialize(body, enqueued_at = nil)
+ def initialize(body:, id: nil, enqueued_at: nil)
  raise ArgumentError, 'Body should be a Hash' unless body.is_a?(Hash)

  @body = body
+ @id = id || SecureRandom.uuid
  @enqueued_at = enqueued_at || Time.now.utc.to_i
+ @nacked = false
  end

  def [](key)
@@ -21,12 +25,17 @@ module Upperkut
  @body.key?(key)
  end

- def body
- @body
+ def nack
+ @nacked = true
+ end
+
+ def nacked?
+ @nacked
  end

  def to_json
  JSON.generate(
+ 'id' => @id,
  'body' => @body,
  'enqueued_at' => @enqueued_at
  )
@@ -34,7 +43,8 @@ module Upperkut

  def self.from_json(item_json)
  hash = JSON.parse(item_json)
- new(hash['body'], hash['enqueued_at'])
+ id, body, enqueued_at = hash.values_at('id', 'body', 'enqueued_at')
+ new(id: id, body: body, enqueued_at: enqueued_at)
  end
  end
  end
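For orientation, here is a minimal usage sketch of the reworked `Upperkut::Item`, based only on the diff above; the payload hash is illustrative. Items now carry an `id` (defaulting to a UUID), take keyword arguments, and can be flagged for redelivery with `nack`:

```ruby
require 'upperkut' # assumed entry point; it loads Upperkut::Item

# body must be a Hash; id and enqueued_at are filled in when omitted
item = Upperkut::Item.new(body: { 'event' => 'signup', 'user_id' => 42 })

item.id          # => a SecureRandom.uuid string
item.enqueued_at # => epoch seconds at build time
item.nacked?     # => false

# Flag the item so the strategy re-enqueues it instead of acking it
item.nack
item.nacked?     # => true

# JSON round-trip preserves id, body and enqueued_at (but not the nacked flag)
copy = Upperkut::Item.from_json(item.to_json)
copy.id == item.id # => true
```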
@@ -1,44 +1,50 @@
  require_relative 'core_ext'
- require_relative 'processor'
+ require_relative 'worker_thread'
+ require_relative 'logging'
  require_relative 'worker'

  module Upperkut
  class Manager
  attr_accessor :worker
- attr_reader :stopped, :logger, :concurrency, :processors
+ attr_reader :stopped, :logger, :concurrency

  def initialize(opts = {})
  self.worker = opts.fetch(:worker).constantize
  @concurrency = opts.fetch(:concurrency, 1)
- @logger = opts.fetch(:logger, Upperkut::Logging.logger)
+ @logger = opts.fetch(:logger, Logging.logger)

  @stopped = false
- @processors = []
+ @threads = []
  end

  def run
  @concurrency.times do
- processor = Processor.new(self)
- @processors << processor
- processor.run
+ spawn_thread
  end
  end

  def stop
  @stopped = true
+ @threads.each(&:stop)
  end

  def kill
- @processors.each(&:kill)
+ @threads.each(&:kill)
  end

- def notify_killed_processor(processor)
- @processors.delete(processor)
- return if @stopped
+ def notify_killed_processor(thread)
+ @threads.delete(thread)
+ spawn_thread unless @stopped
+ end
+
+ private
+
+ def spawn_thread
+ processor = Processor.new(worker, logger)

- processor = Processor.new(self)
- @processors << processor
- processor.run
+ thread = WorkerThread.new(self, processor)
+ @threads << thread
+ thread.run
  end
  end
  end
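The Manager now owns a pool of WorkerThread objects instead of raw processors. A hedged sketch of how it is driven (the `upperkut` CLI normally does this; the worker name and option values below are illustrative):

```ruby
require 'upperkut'

# :worker is a String because Manager calls constantize on it
manager = Upperkut::Manager.new(worker: 'MyWorker', concurrency: 2)

manager.run  # spawns one WorkerThread (wrapping a Processor) per unit of concurrency

# Graceful shutdown: mark the manager stopped and let each processor loop finish
manager.stop

# Hard shutdown: raise Upperkut::Shutdown inside each thread and wait for it
manager.kill
```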
@@ -1,58 +1,63 @@
- require_relative 'batch_execution'
+ require_relative 'logging'

  module Upperkut
  class Processor
- def initialize(manager)
- @manager = manager
- @worker = @manager.worker
- @logger = @manager.logger
- @strategy = @worker.strategy
-
- @sleeping_time = 0
+ def initialize(worker, logger = Logging.logger)
+ @worker = worker
+ @strategy = worker.strategy
+ @worker_instance = worker.new
+ @logger = logger
  end

- def run
- @thread ||= Thread.new do
- begin
- process
- rescue Exception => e
- @logger.debug(
- action: :processor_killed,
- reason: e,
- stacktrace: e.backtrace
- )
-
- @manager.notify_killed_processor(self)
- end
+ def process
+ items = @worker.fetch_items.freeze
+
+ @worker.server_middlewares.invoke(@worker, items) do
+ @worker_instance.perform(items)
  end
- end

- def kill
- return unless @thread
+ nacked_items, pending_ack_items = items.partition(&:nacked?)
+ @strategy.nack(nacked_items) if nacked_items.any?
+ @strategy.ack(pending_ack_items) if pending_ack_items.any?
+ rescue StandardError => error
+ @logger.error(
+ action: :handle_execution_error,
+ ex: error.to_s,
+ backtrace: error.backtrace.join("\n"),
+ item_size: Array(items).size
+ )
+
+ if items
+ if @worker_instance.respond_to?(:handle_error)
+ @worker_instance.handle_error(error, items)
+ return
+ end
+
+ @strategy.nack(items)
+ end

- @thread.raise Upperkut::Shutdown
- @thread.value # wait
+ raise error
  end

- private
+ def blocking_process
+ sleeping_time = 0

- def process
  loop do
- next if @manager.stopped
+ break if @stopped

  if @strategy.process?
- @sleeping_time = 0
- process_batch
+ sleeping_time = 0
+ process
  next
  end

- @sleeping_time += sleep(@worker.setup.polling_interval)
- @logger.debug(sleeping_time: @sleeping_time)
+ sleeping_time += sleep(@worker.setup.polling_interval)
+ @logger.debug(sleeping_time: sleeping_time)
  end
  end

- def process_batch
- BatchExecution.new(@worker, @logger).execute
+ def stop
+ @stopped = true
  end
  end
  end
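Processor#process keeps the old error contract: when `perform` raises, a worker that responds to `handle_error` receives the exception and the batch, and in that case the processor skips ack/nack entirely; otherwise the whole batch is nacked and the error re-raised. A hedged sketch of that hook — `deliver`, `DeliveryError` and `ErrorTracker` are illustrative, and the `include Upperkut::Worker` line assumes the usual setup from the gem's README:

```ruby
class MyWorker
  include Upperkut::Worker # assumed standard worker setup

  def perform(items)
    items.each do |item|
      begin
        deliver(item.body) # items are Upperkut::Item objects; the payload is item.body
      rescue DeliveryError
        item.nack # only this item is re-enqueued; the rest of the batch is acked
      end
    end
  end

  # Optional hook invoked when perform raises.
  # When it is defined the processor neither acks nor nacks, so with
  # BufferedQueue the items are redelivered once ack_wait_limit expires.
  def handle_error(error, items)
    ErrorTracker.notify(error, item_ids: items.map(&:id))
  end
end
```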
@@ -24,6 +24,20 @@ module Upperkut
  raise NotImplementedError
  end

+ # Public: Confirms that items have been processed successfully.
+ #
+ # items - The Array of items do be confirmed.
+ def ack(_items)
+ raise NotImplementedError
+ end
+
+ # Public: Informs that items have been not processed successfully and therefore must be re-processed.
+ #
+ # items - The Array of items do be unacknowledged.
+ def nack(_items)
+ raise NotImplementedError
+ end
+
  # Public: Tells when to execute the event processing,
  # when this condition is met so the events are dispatched to
  # the worker.
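Because ack and nack are now part of the strategy contract, custom strategies must implement both. A purely illustrative, duck-typed in-memory sketch of that contract (method names mirror those used elsewhere in this diff; a real strategy would subclass Upperkut::Strategies::Base and back itself with Redis):

```ruby
class InMemoryStrategy
  def initialize(batch_size: 10)
    @queue = []
    @batch_size = batch_size
  end

  def push_items(items)
    @queue.concat(Array(items))
  end

  def fetch_items
    @queue.shift(@batch_size)
  end

  def ack(_items)
    # nothing to do: fetch_items already removed them from @queue
  end

  def nack(items)
    # failed items go back to the front so they are retried first
    @queue.unshift(*items)
  end

  def process?
    !@queue.empty?
  end

  def clear
    @queue.clear
  end

  def metrics
    { 'size' => @queue.size }
  end
end
```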
@@ -7,13 +7,65 @@ module Upperkut
  class BufferedQueue < Upperkut::Strategies::Base
  include Upperkut::Util

+ DEQUEUE_ITEMS = %(
+ local key = KEYS[1]
+ local waiting_ack_key = KEYS[2]
+ local batch_size = ARGV[1]
+ local current_timestamp = ARGV[2]
+ local expired_ack_timestamp = ARGV[3] + 1
+
+ -- move expired items back to the queue
+ local expired_ack_items = redis.call("ZRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+ if table.getn(expired_ack_items) > 0 then
+ redis.call("ZREMRANGEBYSCORE", waiting_ack_key, 0, expired_ack_timestamp)
+ for i, item in ipairs(expired_ack_items) do
+ redis.call("RPUSH", key, item)
+ end
+ end
+
+ -- now fetch a batch
+ local items = redis.call("LRANGE", key, 0, batch_size - 1)
+ for i, item in ipairs(items) do
+ redis.call("ZADD", waiting_ack_key, current_timestamp + tonumber('0.' .. i), item)
+ end
+ redis.call("LTRIM", key, batch_size, -1)
+
+ return items
+ ).freeze
+
+ ACK_ITEMS = %(
+ local waiting_ack_key = KEYS[1]
+ local items = ARGV
+
+ for i, item in ipairs(items) do
+ redis.call("ZREM", waiting_ack_key, item)
+ end
+ ).freeze
+
+ NACK_ITEMS = %(
+ local key = KEYS[1]
+ local waiting_ack_key = KEYS[2]
+ local items = ARGV
+
+ for i, item in ipairs(items) do
+ redis.call("ZREM", waiting_ack_key, item)
+ redis.call("RPUSH", key, item)
+ end
+ ).freeze
+
  attr_reader :options

  def initialize(worker, options = {})
  @options = options
  @redis_options = options.fetch(:redis, {})
- @worker = worker
- @max_wait = options.fetch(
+ @worker = worker
+
+ @ack_wait_limit = options.fetch(
+ :ack_wait_limit,
+ Integer(ENV['UPPERKUT_ACK_WAIT_LIMIT'] || 120)
+ )
+
+ @max_wait = options.fetch(
  :max_wait,
  Integer(ENV['UPPERKUT_MAX_WAIT'] || 20)
  )
@@ -38,12 +90,12 @@ module Upperkut
  end

  def fetch_items
- stop = [@batch_size, size].min
+ batch_size = [@batch_size, size].min

  items = redis do |conn|
- conn.multi do
- stop.times { conn.lpop(key) }
- end
+ conn.eval(DEQUEUE_ITEMS,
+ keys: [key, processing_key],
+ argv: [batch_size, Time.now.utc.to_i, Time.now.utc.to_i - @ack_wait_limit])
  end

  decode_json_items(items)
@@ -53,11 +105,24 @@ module Upperkut
  redis { |conn| conn.del(key) }
  end

- def metrics
- {
- 'latency' => latency,
- 'size' => size
- }
+ def ack(items)
+ raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+ redis do |conn|
+ conn.eval(ACK_ITEMS,
+ keys: [processing_key],
+ argv: items.map(&:to_json))
+ end
+ end
+
+ def nack(items)
+ raise ArgumentError, 'Invalid item' unless items.all? { |item| item.is_a?(Item) }
+
+ redis do |conn|
+ conn.eval(NACK_ITEMS,
+ keys: [key, processing_key],
+ argv: items.map(&:to_json))
+ end
  end

  def process?
@@ -72,22 +137,46 @@ module Upperkut
  end
  end

+ def metrics
+ current_latency = latency
+
+ {
+ 'latency' => current_latency,
+ 'oldest_unacked_item_age' => oldest_item_age(current_latency),
+ 'size' => size
+ }
+ end
+
  private

  def key
  "upperkut:buffers:#{to_underscore(@worker.name)}"
  end

+ def processing_key
+ "#{key}:processing"
+ end
+
  def fulfill_condition?(buff_size)
  return false if buff_size.zero?

  buff_size >= @batch_size || @waiting_time >= @max_wait
  end

- def size
- redis do |conn|
- conn.llen(key)
+ def oldest_item_age(current_latency)
+ oldest_processing_item = redis do |conn|
+ items = conn.zrange(processing_key, 0, 0)
+ decode_json_items(items).first
  end
+
+ oldest_processing_age = if oldest_processing_item
+ now = Time.now.to_f
+ now - oldest_processing_item.enqueued_at.to_f
+ else
+ 0
+ end
+
+ [current_latency, oldest_processing_age].max
  end

  def latency
@@ -99,6 +188,12 @@ module Upperkut
  now - first_item.enqueued_at.to_f
  end

+ def size
+ redis do |conn|
+ conn.llen(key)
+ end
+ end
+
  def redis
  raise ArgumentError, 'requires a block' unless block_given?
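In practice the new scripts give BufferedQueue at-least-once semantics: fetched items are parked in a `<buffer>:processing` sorted set scored by dequeue time, and every fetch first moves anything older than `ack_wait_limit` (default 120 seconds, or `UPPERKUT_ACK_WAIT_LIMIT`) back onto the main list. A hedged configuration sketch; the worker class and the values are illustrative:

```ruby
strategy = Upperkut::Strategies::BufferedQueue.new(
  MyWorker,
  redis: { url: ENV['REDIS_URL'] }, # Redis connection options
  max_wait: 30,                     # seconds before flushing a partial batch
  ack_wait_limit: 300               # seconds an unacked item may sit in :processing
)

# Items that are fetched but never acked (crashed thread, handle_error path)
# reappear on the main buffer once ack_wait_limit has elapsed.
```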
@@ -27,10 +27,13 @@ module Upperkut
  # processing time.
  ENQUEUE_ITEM = %(
  local increment = 1
- local current_checkpoint = tonumber(redis.call("GET", KEYS[1])) or 0
- local score_key = KEYS[2]
+ local checkpoint_key = KEYS[1]
+ local counter_key = KEYS[2]
+ local score_key = KEYS[3]
+ local queue_key = KEYS[4]
+ local current_checkpoint = tonumber(redis.call("GET", checkpoint_key)) or 0
+ local current_counter = tonumber(redis.call("INCR", counter_key))
  local current_score = tonumber(redis.call("GET", score_key)) or 0
- local queue_key = KEYS[3]
  local next_score = nil

  if current_score >= current_checkpoint then
@@ -40,7 +43,7 @@ module Upperkut
  end

  redis.call("SETEX", score_key, #{ONE_DAY_IN_SECONDS}, next_score)
- redis.call("ZADD", queue_key, next_score, ARGV[1])
+ redis.call("ZADD", queue_key, next_score + tonumber('0.' .. current_counter), ARGV[1])

  return next_score
  ).freeze
@@ -102,7 +105,8 @@ module Upperkut
  priority_key = @priority_key.call(item)
  score_key = "#{queue_key}:#{priority_key}:score"

- keys = [queue_checkpoint_key,
+ keys = [checkpoint_key,
+ counter_key,
  score_key,
  queue_key]

@@ -123,7 +127,7 @@ module Upperkut

  items = redis do |conn|
  conn.eval(DEQUEUE_ITEM,
- keys: [queue_checkpoint_key, queue_key],
+ keys: [checkpoint_key, queue_key],
  argv: [batch_size])
  end

@@ -135,6 +139,12 @@ module Upperkut
  redis { |conn| conn.del(queue_key) }
  end

+ def ack(_items); end
+
+ def nack(items)
+ push_items(items)
+ end
+
  # Public: Tells when to execute the event processing,
  # when this condition is met so the events are dispatched to
  # the worker.
@@ -159,10 +169,14 @@

  private

- def queue_checkpoint_key
+ def checkpoint_key
  "#{queue_key}:checkpoint"
  end

+ def counter_key
+ "#{queue_key}:counter"
+ end
+
  def queue_key
  "upperkut:priority_queue:#{to_underscore(@worker.name)}"
  end
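The new counter key exists only as a tie-breaker: each enqueue appends a growing decimal fraction to the integer score (`next_score + tonumber('0.' .. current_counter)` above), so members that share the same priority score get distinct scores instead of falling back to Redis's lexicographic member ordering. A small Ruby illustration of the arithmetic, with made-up values:

```ruby
next_score      = 42        # integer score computed by the script
current_counter = 7         # value returned by INCR on the counter key

member_score = next_score + "0.#{current_counter}".to_f
# => 42.7 -- same priority bucket, but a unique score per enqueued item
```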
@@ -70,6 +70,12 @@ module Upperkut
  redis { |conn| conn.del(key) }
  end

+ def ack(_items); end
+
+ def nack(items)
+ push_items(items)
+ end
+
  def metrics
  {
  'latency' => latency,
@@ -27,7 +27,7 @@ module Upperkut
  items.map do |item|
  next item if item.is_a?(Item)

- Item.new(item)
+ Item.new(body: item)
  end
  end

@@ -1,3 +1,3 @@
  module Upperkut
- VERSION = '0.8.1'.freeze
+ VERSION = '1.0.0.rc'.freeze
  end
@@ -1,7 +1,6 @@
  require 'forwardable'
  require 'upperkut/strategies/buffered_queue'
  require 'upperkut/middleware'
- require 'upperkut/util'
  require 'upperkut'

  module Upperkut
@@ -0,0 +1,37 @@
+ require_relative 'processor'
+
+ module Upperkut
+ class WorkerThread
+ def initialize(manager, processor)
+ @manager = manager
+ @processor = processor
+ end
+
+ def run
+ @thread ||= Thread.new do
+ begin
+ @processor.blocking_process
+ rescue Exception => e
+ @manager.logger.debug(
+ action: :processor_killed,
+ reason: e,
+ stacktrace: e.backtrace
+ )
+
+ @manager.notify_killed_processor(self)
+ end
+ end
+ end
+
+ def stop
+ @processor.stop
+ end
+
+ def kill
+ return unless @thread
+
+ @thread.raise Upperkut::Shutdown
+ @thread.value # wait
+ end
+ end
+ end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: upperkut
  version: !ruby/object:Gem::Version
- version: 0.8.1
+ version: 1.0.0.rc
  platform: ruby
  authors:
  - Nando Sousa
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-04-02 00:00:00.000000000 Z
+ date: 2020-07-31 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: connection_pool
@@ -117,7 +117,6 @@ files:
  - examples/scheduled_worker.rb
  - examples/with_middlewares.rb
  - lib/upperkut.rb
- - lib/upperkut/batch_execution.rb
  - lib/upperkut/cli.rb
  - lib/upperkut/core_ext.rb
  - lib/upperkut/item.rb
@@ -136,6 +135,7 @@ files:
  - lib/upperkut/util.rb
  - lib/upperkut/version.rb
  - lib/upperkut/worker.rb
+ - lib/upperkut/worker_thread.rb
  - upperkut.gemspec
  homepage: http://shipit.resultadosdigitais.com.br/open-source/
  licenses:
@@ -152,11 +152,11 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: 2.2.2
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - ">"
  - !ruby/object:Gem::Version
- version: '0'
+ version: 1.3.1
  requirements: []
- rubygems_version: 3.0.6
+ rubygems_version: 3.1.2
  signing_key:
  specification_version: 4
  summary: Batch background processing tool
@@ -1,38 +0,0 @@
- require_relative 'logging'
-
- module Upperkut
- class BatchExecution
- include Upperkut::Util
-
- def initialize(worker, logger = Upperkut::Logging.logger)
- @worker = worker
- @logger = logger
- end
-
- def execute
- worker_instance = @worker.new
- items = @worker.fetch_items.freeze
- items_body = items.map(&:body)
-
- @worker.server_middlewares.invoke(@worker, items) do
- worker_instance.perform(items_body.dup)
- end
- rescue StandardError => error
- @logger.info(
- action: :requeue,
- ex: error,
- item_size: items.size
- )
-
- @logger.error(error.backtrace.join("\n"))
-
- if worker_instance.respond_to?(:handle_error)
- worker_instance.handle_error(error, items_body)
- return
- end
-
- @worker.push_items(items)
- raise error
- end
- end
- end