phobos 1.8.0 → 1.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rubocop.yml +26 -0
- data/.rubocop_common.yml +29 -0
- data/.rubocop_todo.yml +7 -0
- data/.rubosync.yml +2 -0
- data/CHANGELOG.md +4 -0
- data/Gemfile +2 -0
- data/README.md +11 -7
- data/Rakefile +5 -3
- data/bin/console +3 -1
- data/bin/phobos +1 -0
- data/examples/handler_saving_events_database.rb +4 -2
- data/examples/handler_using_async_producer.rb +3 -1
- data/examples/publishing_messages_without_consumer.rb +8 -4
- data/lib/phobos.rb +61 -31
- data/lib/phobos/actions/process_batch.rb +3 -1
- data/lib/phobos/actions/process_message.rb +54 -31
- data/lib/phobos/cli.rb +2 -1
- data/lib/phobos/cli/runner.rb +3 -3
- data/lib/phobos/cli/start.rb +17 -25
- data/lib/phobos/constants.rb +33 -0
- data/lib/phobos/deep_struct.rb +13 -14
- data/lib/phobos/echo_handler.rb +2 -0
- data/lib/phobos/errors.rb +2 -0
- data/lib/phobos/executor.rb +39 -42
- data/lib/phobos/handler.rb +7 -7
- data/lib/phobos/instrumentation.rb +4 -2
- data/lib/phobos/listener.rb +81 -74
- data/lib/phobos/log.rb +23 -0
- data/lib/phobos/producer.rb +18 -14
- data/lib/phobos/test.rb +2 -0
- data/lib/phobos/test/helper.rb +4 -4
- data/lib/phobos/version.rb +3 -1
- data/phobos.gemspec +21 -14
- metadata +44 -10
data/lib/phobos/cli.rb
CHANGED
data/lib/phobos/cli/runner.rb
CHANGED
@@ -1,8 +1,9 @@
+# frozen_string_literal: true
+
 module Phobos
   module CLI
     class Runner
-
-      SIGNALS = %i( INT TERM QUIT ).freeze
+      SIGNALS = [:INT, :TERM, :QUIT].freeze
 
       def initialize
         @signal_queue = []
@@ -42,7 +43,6 @@ module Phobos
        writer.write_nonblock('.')
        signal_queue << signal
      end
-
    end
  end
end
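The only material change in runner.rb is stylistic (`%i( INT TERM QUIT )` becomes an explicit symbol array), but the surrounding context shows the signal handling it feeds: the trap handler writes a byte to a pipe and pushes the signal onto a queue, the classic self-pipe pattern. A generic sketch of that pattern for reference; it is not the runner's actual implementation, and the local names below are illustrative.

```ruby
# Generic self-pipe signal handling, similar in spirit to Phobos::CLI::Runner.
reader, writer = IO.pipe
signal_queue = []

[:INT, :TERM, :QUIT].each do |signal|
  Signal.trap(signal) do
    # Traps must stay tiny: write one byte to wake the main loop, record the signal.
    writer.write_nonblock('.')
    signal_queue << signal
  end
end

# The main loop blocks on the pipe instead of polling.
loop do
  IO.select([reader])
  reader.read_nonblock(1)
  break unless signal_queue.empty?
end
puts "Received signals: #{signal_queue.inspect}"
```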
data/lib/phobos/cli/start.rb
CHANGED
@@ -1,17 +1,15 @@
+# frozen_string_literal: true
+
 require 'phobos/cli/runner'
 
 module Phobos
   module CLI
     class Start
       def initialize(options)
-        unless options[:skip_config]
-          @config_file = File.expand_path(options[:config])
-        end
+        @config_file = File.expand_path(options[:config]) unless options[:skip_config]
         @boot_file = File.expand_path(options[:boot])
 
-        if options[:listeners]
-          @listeners_file = File.expand_path(options[:listeners])
-        end
+        @listeners_file = File.expand_path(options[:listeners]) if options[:listeners]
       end
 
       def execute
@@ -22,9 +20,7 @@ module Phobos
           Phobos.configure(config_file)
         end
 
-        if listeners_file
-          Phobos.add_listeners(listeners_file)
-        end
+        Phobos.add_listeners(listeners_file) if listeners_file
 
         validate_listeners!
 
@@ -36,37 +32,33 @@ module Phobos
       attr_reader :config_file, :boot_file, :listeners_file
 
       def validate_config_file!
-        unless File.exist?(config_file)
-          Phobos::CLI.logger.error { Hash(message: "Config file not found (#{config_file})") }
-          exit(1)
-        end
+        File.exist?(config_file) || error_exit("Config file not found (#{config_file})")
       end
 
       def validate_listeners!
         Phobos.config.listeners.each do |listener|
-          handler_class = listener.handler
+          handler = listener.handler
 
-          begin
-            handler_class.constantize
-          rescue NameError
-            Phobos::CLI.logger.error { Hash(message: "Handler '#{handler_class}' not defined") }
-            exit(1)
-          end
+          Object.const_defined?(handler) || error_exit("Handler '#{handler}' not defined")
 
          delivery = listener.delivery
          if delivery.nil?
            Phobos::CLI.logger.warn do
-              Hash(message: "Delivery option should be specified, defaulting to 'batch' - specify this option to silence this message")
+              Hash(message: "Delivery option should be specified, defaulting to 'batch'"\
                            ' - specify this option to silence this message')
            end
          elsif !Listener::DELIVERY_OPTS.include?(delivery)
-            Phobos::CLI.logger.error do
-              Hash(message: "Invalid delivery option '#{delivery}'. Please specify one of: #{Listener::DELIVERY_OPTS.join(', ')}")
-            end
-            exit(1)
+            error_exit("Invalid delivery option '#{delivery}'. Please specify one of: "\
                       "#{Listener::DELIVERY_OPTS.join(', ')}")
          end
        end
      end
 
+      def error_exit(msg)
+        Phobos::CLI.logger.error { Hash(message: msg) }
+        exit(1)
+      end
+
       def load_boot_file
         load(boot_file) if File.exist?(boot_file)
       end
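The validate_listeners! rewrite above swaps ActiveSupport's constantize (which raises NameError and needed a rescue block) for Object.const_defined?, which returns a boolean and can short-circuit into the new error_exit helper. A small sketch of the difference, using a made-up handler class:

```ruby
require 'active_support/core_ext/string/inflections' # for String#constantize

module MyApp
  class MyHandler; end
end

# Module#const_defined? accepts a namespaced string and simply returns a boolean.
Object.const_defined?('MyApp::MyHandler') # => true
Object.const_defined?('MyApp::Missing')   # => false

# constantize resolves the constant but raises when it does not exist,
# which is why the old code wrapped it in begin/rescue NameError.
'MyApp::MyHandler'.constantize # => MyApp::MyHandler
begin
  'MyApp::Missing'.constantize
rescue NameError => e
  e.message # => "uninitialized constant MyApp::Missing"
end
```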
data/lib/phobos/constants.rb
ADDED
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module Phobos
+  module Constants
+    LOG_DATE_PATTERN = '%Y-%m-%dT%H:%M:%S:%L%zZ'
+
+    KAFKA_CONSUMER_OPTS = [
+      :session_timeout,
+      :offset_commit_interval,
+      :offset_commit_threshold,
+      :heartbeat_interval,
+      :offset_retention_time
+    ].freeze
+
+    LISTENER_OPTS = [
+      :handler,
+      :group_id,
+      :topic,
+      :min_bytes,
+      :max_wait_time,
+      :force_encoding,
+      :start_from_beginning,
+      :max_bytes_per_partition,
+      :backoff,
+      :delivery,
+      :session_timeout,
+      :offset_commit_interval,
+      :offset_commit_threshold,
+      :heartbeat_interval,
+      :offset_retention_time
+    ].freeze
+  end
+end
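The new Phobos::Constants module centralizes the option lists that Executor and Listener previously kept as their own frozen arrays; the executor and listener hunks in this diff use them to whitelist keys out of a larger config hash. A small sketch of that select pattern, with a made-up config hash and assuming `require 'phobos'` loads the new file:

```ruby
require 'phobos'

listener_config = {
  handler: 'MyHandler',
  group_id: 'my-group',
  topic: 'my-topic',
  delivery: 'message',
  max_concurrency: 4 # not in LISTENER_OPTS, so it gets filtered out
}

# Same pattern as Executor#initialize after this change:
configs = listener_config.select { |k| Phobos::Constants::LISTENER_OPTS.include?(k) }
configs.keys # => [:handler, :group_id, :topic, :delivery]
```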
data/lib/phobos/deep_struct.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 # Please use this with at least the same consideration as you would when using OpenStruct.
 # Right now we only use this to parse our internal configuration files. It is not meant to
 # be used on incoming data.
@@ -5,36 +7,33 @@ module Phobos
   class DeepStruct < OpenStruct
     # Based on
     # https://docs.omniref.com/ruby/2.3.0/files/lib/ostruct.rb#line=88
-    def initialize(hash=nil)
+    def initialize(hash = nil)
       @table = {}
       @hash_table = {}
 
-      if hash
-        hash.each_pair do |k, v|
-          k = k.to_sym
-          @table[k] = to_deep_struct(v)
-          @hash_table[k] = v
-        end
+      hash&.each_pair do |key, value|
+        key = key.to_sym
+        @table[key] = to_deep_struct(value)
+        @hash_table[key] = value
      end
    end
 
    def to_h
      @hash_table.dup
    end
-    alias_method :to_hash, :to_h
+    alias to_hash to_h
 
    private
 
-    def to_deep_struct(v)
-      case v
+    def to_deep_struct(value)
+      case value
      when Hash
-        self.class.new(v)
+        self.class.new(value)
      when Enumerable
-        v.map { |el| to_deep_struct(el) }
+        value.map { |el| to_deep_struct(el) }
      else
-        v
+        value
      end
    end
-    protected :to_deep_struct
  end
end
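The DeepStruct changes are cosmetic (safe navigation, descriptive block variables, `alias` instead of `alias_method`), so behaviour should be unchanged. A quick usage sketch based only on the methods visible in this hunk:

```ruby
require 'phobos'

config = Phobos::DeepStruct.new(
  'kafka' => { 'client_id' => 'phobos', 'brokers' => ['localhost:9092'] }
)

config.kafka.client_id # => "phobos" (nested hashes become DeepStructs)
config.kafka.brokers   # => ["localhost:9092"]
config.to_h            # => { kafka: { 'client_id' => 'phobos', 'brokers' => ['localhost:9092'] } }
Phobos::DeepStruct.new(nil).to_h # => {} (hash&.each_pair tolerates a nil argument)
```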
data/lib/phobos/echo_handler.rb
CHANGED
data/lib/phobos/errors.rb
CHANGED
data/lib/phobos/executor.rb
CHANGED
@@ -1,23 +1,9 @@
+# frozen_string_literal: true
+
 module Phobos
   class Executor
     include Phobos::Instrumentation
-
-    LISTENER_OPTS = %i(
-      handler
-      group_id
-      topic
-      min_bytes
-      max_wait_time
-      force_encoding
-      start_from_beginning
-      max_bytes_per_partition
-      backoff
-      delivery
-      session_timeout
-      offset_commit_interval
-      offset_commit_threshold
-      heartbeat_interval
-      offset_retention_time
-    ).freeze
+    include Phobos::Log
 
     def initialize
       @threads = Concurrent::Array.new
@@ -26,7 +12,7 @@ module Phobos
        listener_configs = config.to_hash.deep_symbolize_keys
        max_concurrency = listener_configs[:max_concurrency] || 1
        Array.new(max_concurrency).map do
-          configs = listener_configs.select { |k| LISTENER_OPTS.include?(k) }
+          configs = listener_configs.select { |k| Constants::LISTENER_OPTS.include?(k) }
          Phobos::Listener.new(configs.merge(handler: handler_class))
        end
      end
@@ -51,10 +37,17 @@ module Phobos
 
     def stop
       return if @signal_to_stop
+
       instrument('executor.stop') do
         @signal_to_stop = true
         @listeners.each(&:stop)
-        @threads.select(&:alive?).each { |thread| thread.wakeup rescue nil }
+        @threads.select(&:alive?).each do |thread|
+          begin
+            thread.wakeup
+          rescue StandardError
+            nil
+          end
+        end
         @thread_pool&.shutdown
         @thread_pool&.wait_for_termination
         Phobos.logger.info { Hash(message: 'Executor stopped') }
@@ -63,44 +56,48 @@ module Phobos
 
     private
 
-    def error_metadata(e)
+    def error_metadata(exception)
       {
-        exception_class: e.class.name,
-        exception_message: e.message,
-        backtrace: e.backtrace
+        exception_class: exception.class.name,
+        exception_message: exception.message,
+        backtrace: exception.backtrace
       }
     end
 
+    # rubocop:disable Lint/RescueException
     def run_listener(listener)
       retry_count = 0
-      backoff = listener.create_exponential_backoff
 
       begin
         listener.start
       rescue Exception => e
-
-        # When "listener#start" is interrupted it's safe to assume that the consumer
-        # and the kafka client were properly stopped, it's safe to call start
-        # again
-        #
-        interval = backoff.interval_at(retry_count).round(2)
-        metadata = {
-          listener_id: listener.id,
-          retry_count: retry_count,
-          waiting_time: interval
-        }.merge(error_metadata(e))
-
-        instrument('executor.retry_listener_error', metadata) do
-          Phobos.logger.error { Hash(message: "Listener crashed, waiting #{interval}s (#{e.message})").merge(metadata)}
-          sleep interval
-        end
-
+        handle_crashed_listener(listener, e, retry_count)
        retry_count += 1
        retry unless @signal_to_stop
      end
    rescue Exception => e
-      Phobos.logger.error { Hash(message: "Failed to run listener (#{e.message})").merge(error_metadata(e)) }
+      log_error("Failed to run listener (#{e.message})", error_metadata(e))
      raise e
    end
+    # rubocop:enable Lint/RescueException
+
+    # When "listener#start" is interrupted it's safe to assume that the consumer
+    # and the kafka client were properly stopped, it's safe to call start
+    # again
+    def handle_crashed_listener(listener, error, retry_count)
+      backoff = listener.create_exponential_backoff
+      interval = backoff.interval_at(retry_count).round(2)
+
+      metadata = {
+        listener_id: listener.id,
+        retry_count: retry_count,
+        waiting_time: interval
+      }.merge(error_metadata(error))
+
+      instrument('executor.retry_listener_error', metadata) do
+        log_error("Listener crashed, waiting #{interval}s (#{error.message})", metadata)
+        sleep interval
+      end
+    end
  end
end
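Several Phobos.logger.info/error calls in Executor (and in Listener further below) are replaced by log_info/log_error/log_debug from the new Phobos::Log mixin (data/lib/phobos/log.rb, +23 lines, not expanded in this diff). Judging only from the call sites, it wraps the old `Hash(message: ...).merge(metadata)` pattern; the module below is a hypothetical approximation inferred from those call sites, not the actual file contents:

```ruby
# Hypothetical approximation of Phobos::Log, inferred from call sites such as
# log_info('Listener started', listener_metadata) and log_error(msg, metadata).
module Phobos
  module Log
    def log_info(msg, metadata = {})
      Phobos.logger.info { Hash(message: msg).merge(metadata) }
    end

    def log_debug(msg, metadata = {})
      Phobos.logger.debug { Hash(message: msg).merge(metadata) }
    end

    def log_error(msg, metadata = {})
      Phobos.logger.error { Hash(message: msg).merge(metadata) }
    end
  end
end
```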
data/lib/phobos/handler.rb
CHANGED
@@ -1,27 +1,27 @@
+# frozen_string_literal: true
+
 module Phobos
   module Handler
     def self.included(base)
       base.extend(ClassMethods)
     end
 
-    def before_consume(payload, metadata)
+    def before_consume(payload, _metadata)
       payload
     end
 
-    def consume(payload, metadata)
+    def consume(_payload, _metadata)
       raise NotImplementedError
     end
 
-    def around_consume(payload, metadata)
+    def around_consume(_payload, _metadata)
       yield
     end
 
     module ClassMethods
-      def start(kafka_client)
-      end
+      def start(kafka_client); end
 
-      def stop
-      end
+      def stop; end
     end
   end
 end
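The Handler changes are purely stylistic (underscore-prefixed unused arguments, one-line no-op class methods), so the contract is unchanged: only #consume is mandatory. A minimal handler written against the module as shown above, assuming a String payload for the bytesize call:

```ruby
require 'phobos'

class MyHandler
  include Phobos::Handler

  # The module's default #consume raises NotImplementedError, so override it.
  def consume(payload, _metadata)
    Phobos.logger.info { Hash(message: "Consumed #{payload.bytesize} bytes") }
  end

  # Optional hooks inherited from Phobos::Handler:
  #   before_consume(payload, _metadata) -> payload
  #   around_consume(_payload, _metadata) { ... }
  #   MyHandler.start(kafka_client) / MyHandler.stop -> no-ops
end
```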
data/lib/phobos/instrumentation.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require 'active_support/notifications'
 
 module Phobos
@@ -15,8 +17,8 @@ module Phobos
     end
 
     def instrument(event, extra = {})
-      ActiveSupport::Notifications.instrument("#{NAMESPACE}.#{event}", extra) do |*args|
-        yield(*args) if block_given?
+      ActiveSupport::Notifications.instrument("#{NAMESPACE}.#{event}", extra) do |args|
+        yield(args) if block_given?
      end
    end
  end
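Phobos instrumentation sits on ActiveSupport::Notifications, and event names such as listener.start, executor.stop and the new listener.retry_aborted appear throughout this diff, published as "#{NAMESPACE}.#{event}". Subscribing is therefore plain ActiveSupport; a small sketch (the NAMESPACE value itself is not shown in this hunk, so the regexp below only keys off the event suffix):

```ruby
require 'active_support/notifications'

# Match the listener.* events regardless of the namespace prefix.
ActiveSupport::Notifications.subscribe(/listener\./) do |name, start, finish, _id, payload|
  duration_ms = ((finish - start) * 1000).round(1)
  puts "#{name} took #{duration_ms}ms payload=#{payload.inspect}"
end
```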
data/lib/phobos/listener.rb
CHANGED
@@ -1,30 +1,24 @@
+# frozen_string_literal: true
+
 module Phobos
+  # rubocop:disable Metrics/ParameterLists, Metrics/ClassLength
   class Listener
     include Phobos::Instrumentation
+    include Phobos::Log
 
-    KAFKA_CONSUMER_OPTS = %i(
-      session_timeout
-      offset_commit_interval
-      offset_commit_threshold
-      heartbeat_interval
-      offset_retention_time
-    ).freeze
-
-    DEFAULT_MAX_BYTES_PER_PARTITION = 1048576 # 1 MB
+    DEFAULT_MAX_BYTES_PER_PARTITION = 1_048_576 # 1 MB
     DELIVERY_OPTS = %w[batch message].freeze
 
     attr_reader :group_id, :topic, :id
     attr_reader :handler_class, :encoding
 
-    def initialize(handler:, group_id:, topic:,
-                   min_bytes: nil, max_wait_time: nil, force_encoding: nil,
-                   start_from_beginning: true, backoff: nil,
-                   delivery: 'batch',
-                   max_bytes_per_partition: DEFAULT_MAX_BYTES_PER_PARTITION,
+    # rubocop:disable Metrics/MethodLength
+    def initialize(handler:, group_id:, topic:, min_bytes: nil, max_wait_time: nil,
+                   force_encoding: nil, start_from_beginning: true, backoff: nil,
+                   delivery: 'batch', max_bytes_per_partition: DEFAULT_MAX_BYTES_PER_PARTITION,
                    session_timeout: nil, offset_commit_interval: nil,
                    heartbeat_interval: nil, offset_commit_threshold: nil,
-                   offset_retention_time: nil
-                   )
+                   offset_retention_time: nil)
      @id = SecureRandom.hex[0...6]
      @handler_class = handler
      @group_id = group_id
@@ -32,14 +26,11 @@ module Phobos
      @backoff = backoff
      @delivery = delivery.to_s
      @subscribe_opts = {
-        start_from_beginning: start_from_beginning,
-        max_bytes_per_partition: max_bytes_per_partition
+        start_from_beginning: start_from_beginning, max_bytes_per_partition: max_bytes_per_partition
      }
      @kafka_consumer_opts = compact(
-        session_timeout: session_timeout,
-        offset_commit_interval: offset_commit_interval,
-        heartbeat_interval: heartbeat_interval,
-        offset_retention_time: offset_retention_time,
+        session_timeout: session_timeout, offset_retention_time: offset_retention_time,
+        offset_commit_interval: offset_commit_interval, heartbeat_interval: heartbeat_interval,
        offset_commit_threshold: offset_commit_threshold
      )
      @encoding = Encoding.const_get(force_encoding.to_sym) if force_encoding
@@ -47,9 +38,59 @@ module Phobos
      @kafka_client = Phobos.create_kafka_client
      @producer_enabled = @handler_class.ancestors.include?(Phobos::Producer)
    end
+    # rubocop:enable Metrics/MethodLength
 
    def start
      @signal_to_stop = false
+
+      start_listener
+
+      begin
+        start_consumer_loop
+      rescue Kafka::ProcessingError, Phobos::AbortError
+        # Abort is an exception to prevent the consumer from committing the offset.
+        # Since "listener" had a message being retried while "stop" was called
+        # it's wise to not commit the batch offset to avoid data loss. This will
+        # cause some messages to be reprocessed
+        instrument('listener.retry_aborted', listener_metadata) do
+          log_info('Retry loop aborted, listener is shutting down', listener_metadata)
+        end
+      end
+    ensure
+      stop_listener
+    end
+
+    def stop
+      return if should_stop?
+
+      instrument('listener.stopping', listener_metadata) do
+        log_info('Listener stopping', listener_metadata)
+        @consumer&.stop
+        @signal_to_stop = true
+      end
+    end
+
+    def create_exponential_backoff
+      Phobos.create_exponential_backoff(@backoff)
+    end
+
+    def should_stop?
+      @signal_to_stop == true
+    end
+
+    def send_heartbeat_if_necessary
+      raise Phobos::AbortError if should_stop?
+
+      @consumer&.send_heartbeat_if_necessary
+    end
+
+    private
+
+    def listener_metadata
+      { listener_id: id, group_id: group_id, topic: topic, handler: handler_class.to_s }
+    end
+
+    def start_listener
      instrument('listener.start', listener_metadata) do
        @consumer = create_kafka_consumer
        @consumer.subscribe(topic, @subscribe_opts)
@@ -58,25 +99,14 @@ module Phobos
        # since "start" blocks a thread might be used to call it
        @handler_class.producer.configure_kafka_client(@kafka_client) if @producer_enabled
 
-        instrument('listener.start_handler', listener_metadata) { @handler_class.start(@kafka_client) }
-        Phobos.logger.info { Hash(message: 'Listener started').merge(listener_metadata) }
-      end
-
-      begin
-        @delivery == 'batch' ? consume_each_batch : consume_each_message
-
-      # Abort is an exception to prevent the consumer from committing the offset.
-      # Since "listener" had a message being retried while "stop" was called
-      # it's wise to not commit the batch offset to avoid data loss. This will
-      # cause some messages to be reprocessed
-      #
-      rescue Kafka::ProcessingError, Phobos::AbortError
-        instrument('listener.retry_aborted', listener_metadata) do
-          Phobos.logger.info({ message: 'Retry loop aborted, listener is shutting down' }.merge(listener_metadata))
+        instrument('listener.start_handler', listener_metadata) do
+          @handler_class.start(@kafka_client)
        end
+        log_info('Listener started', listener_metadata)
      end
+    end
 
-    ensure
+    def stop_listener
      instrument('listener.stop', listener_metadata) do
        instrument('listener.stop_handler', listener_metadata) { @handler_class.stop }
 
@@ -88,12 +118,14 @@ module Phobos
        end
 
        @kafka_client.close
-        if should_stop?
-          Phobos.logger.info { Hash(message: 'Listener stopped').merge(listener_metadata) }
-        end
+        log_info('Listener stopped', listener_metadata) if should_stop?
      end
    end
 
+    def start_consumer_loop
+      @delivery == 'batch' ? consume_each_batch : consume_each_message
+    end
+
    def consume_each_batch
      @consumer.each_batch(@message_processing_opts) do |batch|
        batch_processor = Phobos::Actions::ProcessBatch.new(
@@ -103,8 +135,8 @@ module Phobos
        )
 
        batch_processor.execute
-
-        return if should_stop?
+        log_debug('Committed offset', batch_processor.metadata)
+        return nil if should_stop?
      end
    end
 
@@ -117,41 +149,15 @@ module Phobos
        )
 
        message_processor.execute
-
-        return if should_stop?
-      end
-    end
-
-    def stop
-      return if should_stop?
-      instrument('listener.stopping', listener_metadata) do
-        Phobos.logger.info { Hash(message: 'Listener stopping').merge(listener_metadata) }
-        @consumer&.stop
-        @signal_to_stop = true
+        log_debug('Committed offset', message_processor.metadata)
+        return nil if should_stop?
      end
    end
 
-    def create_exponential_backoff
-      Phobos.create_exponential_backoff(@backoff)
-    end
-
-    def should_stop?
-      @signal_to_stop == true
-    end
-
-    def send_heartbeat_if_necessary
-      raise Phobos::AbortError if should_stop?
-      @consumer&.send_heartbeat_if_necessary
-    end
-
-    private
-
-    def listener_metadata
-      { listener_id: id, group_id: group_id, topic: topic, handler: handler_class.to_s }
-    end
-
    def create_kafka_consumer
-      configs = Phobos.config.consumer_hash.select { |k| KAFKA_CONSUMER_OPTS.include?(k) }
+      configs = Phobos.config.consumer_hash.select do |k|
+        Constants::KAFKA_CONSUMER_OPTS.include?(k)
+      end
      configs.merge!(@kafka_consumer_opts)
      @kafka_client.consumer({ group_id: group_id }.merge(configs))
    end
@@ -160,4 +166,5 @@ module Phobos
      hash.delete_if { |_, v| v.nil? }
    end
  end
+  # rubocop:enable Metrics/ParameterLists, Metrics/ClassLength
end
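The Listener keyword signature above is only re-wrapped for line length, so manual construction is unchanged (Phobos normally builds listeners from its configuration rather than by hand). A hedged example using just the keywords visible in this diff; it assumes Phobos.configure has already run, since #initialize calls Phobos.create_kafka_client, and MyHandler is the illustrative handler from earlier:

```ruby
require 'phobos'

listener = Phobos::Listener.new(
  handler: MyHandler,  # a class that includes Phobos::Handler
  group_id: 'my-group',
  topic: 'my-topic',
  delivery: 'message', # must be one of Phobos::Listener::DELIVERY_OPTS
  start_from_beginning: true,
  max_bytes_per_partition: Phobos::Listener::DEFAULT_MAX_BYTES_PER_PARTITION
)

listener.start # blocks the calling thread; call listener.stop from another thread
```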