phobos_temp_fork 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +7 -0
  2. data/.dockerignore +13 -0
  3. data/.env +1 -0
  4. data/.gitignore +16 -0
  5. data/.rspec +3 -0
  6. data/.rubocop.yml +26 -0
  7. data/.rubocop_common.yml +29 -0
  8. data/.rubocop_todo.yml +7 -0
  9. data/.rubosync.yml +2 -0
  10. data/.ruby-version +1 -0
  11. data/.travis.yml +37 -0
  12. data/CHANGELOG.md +170 -0
  13. data/Dockerfile +14 -0
  14. data/Gemfile +8 -0
  15. data/LICENSE.txt +176 -0
  16. data/README.md +699 -0
  17. data/Rakefile +8 -0
  18. data/bin/console +19 -0
  19. data/bin/phobos +10 -0
  20. data/bin/setup +8 -0
  21. data/config/phobos.yml.example +137 -0
  22. data/docker-compose.yml +28 -0
  23. data/examples/handler_saving_events_database.rb +51 -0
  24. data/examples/handler_using_async_producer.rb +17 -0
  25. data/examples/publishing_messages_without_consumer.rb +82 -0
  26. data/lib/phobos/actions/process_batch.rb +35 -0
  27. data/lib/phobos/actions/process_batch_inline.rb +61 -0
  28. data/lib/phobos/actions/process_message.rb +49 -0
  29. data/lib/phobos/batch_handler.rb +23 -0
  30. data/lib/phobos/batch_message.rb +21 -0
  31. data/lib/phobos/cli.rb +69 -0
  32. data/lib/phobos/cli/runner.rb +48 -0
  33. data/lib/phobos/cli/start.rb +71 -0
  34. data/lib/phobos/constants.rb +33 -0
  35. data/lib/phobos/deep_struct.rb +39 -0
  36. data/lib/phobos/echo_handler.rb +11 -0
  37. data/lib/phobos/errors.rb +6 -0
  38. data/lib/phobos/executor.rb +103 -0
  39. data/lib/phobos/handler.rb +23 -0
  40. data/lib/phobos/instrumentation.rb +25 -0
  41. data/lib/phobos/listener.rb +192 -0
  42. data/lib/phobos/log.rb +23 -0
  43. data/lib/phobos/processor.rb +67 -0
  44. data/lib/phobos/producer.rb +171 -0
  45. data/lib/phobos/test.rb +3 -0
  46. data/lib/phobos/test/helper.rb +29 -0
  47. data/lib/phobos/version.rb +5 -0
  48. data/lib/phobos_temp_fork.rb +175 -0
  49. data/logo.png +0 -0
  50. data/phobos.gemspec +69 -0
  51. data/phobos_boot.rb +31 -0
  52. data/utils/create-topic.sh +13 -0
  53. metadata +308 -0
data/lib/phobos/handler.rb ADDED
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ module Phobos
+   module Handler
+     def self.included(base)
+       base.extend(ClassMethods)
+     end
+
+     def consume(_payload, _metadata)
+       raise NotImplementedError
+     end
+
+     def around_consume(payload, metadata)
+       yield payload, metadata
+     end
+
+     module ClassMethods
+       def start(kafka_client); end
+
+       def stop; end
+     end
+   end
+ end
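
Note: Phobos::Handler defines the contract consumer classes opt into: consume is required (it raises NotImplementedError by default), while around_consume and the class-level start/stop hooks are optional. A minimal conforming handler might look like the sketch below (class name and body are illustrative, not part of this package):

    class MyHandler
      include Phobos::Handler

      # Called once per message; metadata carries topic, group_id,
      # listener_id and retry information alongside the payload
      def consume(payload, metadata)
        Phobos.logger.info(message: payload, topic: metadata[:topic])
      end
    end
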
data/lib/phobos/instrumentation.rb ADDED
@@ -0,0 +1,25 @@
+ # frozen_string_literal: true
+
+ require 'active_support/notifications'
+
+ module Phobos
+   module Instrumentation
+     NAMESPACE = 'phobos'
+
+     def self.subscribe(event)
+       ActiveSupport::Notifications.subscribe("#{NAMESPACE}.#{event}") do |*args|
+         yield ActiveSupport::Notifications::Event.new(*args) if block_given?
+       end
+     end
+
+     def self.unsubscribe(subscriber)
+       ActiveSupport::Notifications.unsubscribe(subscriber)
+     end
+
+     def instrument(event, extra = {})
+       ActiveSupport::Notifications.instrument("#{NAMESPACE}.#{event}", extra) do |args|
+         yield(args) if block_given?
+       end
+     end
+   end
+ end
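
Note: because all events go through ActiveSupport::Notifications under the phobos namespace, any event emitted elsewhere in this gem (e.g. listener.start in listener.rb below) can be observed via Phobos::Instrumentation.subscribe. A small sketch, with an illustrative handler body:

    Phobos::Instrumentation.subscribe('listener.start') do |event|
      # event is an ActiveSupport::Notifications::Event;
      # event.payload is the metadata hash passed to #instrument
      puts "listener started: #{event.payload[:listener_id]}"
    end
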
data/lib/phobos/listener.rb ADDED
@@ -0,0 +1,192 @@
+ # frozen_string_literal: true
+
+ module Phobos
+   # rubocop:disable Metrics/ParameterLists, Metrics/ClassLength
+   class Listener
+     include Phobos::Instrumentation
+     include Phobos::Log
+
+     DEFAULT_MAX_BYTES_PER_PARTITION = 1_048_576 # 1 MB
+     DELIVERY_OPTS = %w[batch message inline_batch].freeze
+
+     attr_reader :group_id, :topic, :id
+     attr_reader :handler_class, :encoding, :consumer
+
+     # rubocop:disable Metrics/MethodLength
+     def initialize(handler:, group_id:, topic:, min_bytes: nil, max_wait_time: nil,
+                    force_encoding: nil, start_from_beginning: true, backoff: nil,
+                    delivery: 'batch', max_bytes_per_partition: DEFAULT_MAX_BYTES_PER_PARTITION,
+                    session_timeout: nil, offset_commit_interval: nil,
+                    heartbeat_interval: nil, offset_commit_threshold: nil,
+                    offset_retention_time: nil)
+       @id = SecureRandom.hex[0...6]
+       @handler_class = handler
+       @group_id = group_id
+       @topic = topic
+       @backoff = backoff
+       @delivery = delivery.to_s
+       @subscribe_opts = {
+         start_from_beginning: start_from_beginning, max_bytes_per_partition: max_bytes_per_partition
+       }
+       @kafka_consumer_opts = compact(
+         session_timeout: session_timeout, offset_retention_time: offset_retention_time,
+         offset_commit_interval: offset_commit_interval, heartbeat_interval: heartbeat_interval,
+         offset_commit_threshold: offset_commit_threshold
+       )
+       @encoding = Encoding.const_get(force_encoding.to_sym) if force_encoding
+       @message_processing_opts = compact(min_bytes: min_bytes, max_wait_time: max_wait_time)
+       @kafka_client = Phobos.create_kafka_client(:consumer)
+       @producer_enabled = @handler_class.ancestors.include?(Phobos::Producer)
+     end
+     # rubocop:enable Metrics/MethodLength
+
+     def start
+       @signal_to_stop = false
+
+       start_listener
+
+       begin
+         start_consumer_loop
+       rescue Kafka::ProcessingError, Phobos::AbortError
+         # Abort is an exception to prevent the consumer from committing the offset.
+         # Since "listener" had a message being retried while "stop" was called
+         # it's wise to not commit the batch offset to avoid data loss. This will
+         # cause some messages to be reprocessed
+         instrument('listener.retry_aborted', listener_metadata) do
+           log_info('Retry loop aborted, listener is shutting down', listener_metadata)
+         end
+       end
+     ensure
+       stop_listener
+     end
+
+     def stop
+       return if should_stop?
+
+       instrument('listener.stopping', listener_metadata) do
+         log_info('Listener stopping', listener_metadata)
+         @consumer&.stop
+         @signal_to_stop = true
+       end
+     end
+
+     def create_exponential_backoff
+       Phobos.create_exponential_backoff(@backoff)
+     end
+
+     def should_stop?
+       @signal_to_stop == true
+     end
+
+     def send_heartbeat_if_necessary
+       raise Phobos::AbortError if should_stop?
+
+       @consumer&.send_heartbeat_if_necessary
+     end
+
+     private
+
+     def listener_metadata
+       { listener_id: id, group_id: group_id, topic: topic, handler: handler_class.to_s }
+     end
+
+     def start_listener
+       instrument('listener.start', listener_metadata) do
+         @consumer = create_kafka_consumer
+         @consumer.subscribe(topic, **@subscribe_opts)
+
+         # This is done here because the producer client is bound to the current thread and
+         # since "start" blocks a thread might be used to call it
+         @handler_class.producer.configure_kafka_client(@kafka_client) if @producer_enabled
+
+         instrument('listener.start_handler', listener_metadata) do
+           @handler_class.start(@kafka_client)
+         end
+         log_info('Listener started', listener_metadata)
+       end
+     end
+
+     def stop_listener
+       instrument('listener.stop', listener_metadata) do
+         instrument('listener.stop_handler', listener_metadata) { @handler_class.stop }
+
+         @consumer&.stop
+
+         if @producer_enabled
+           @handler_class.producer.async_producer_shutdown
+           @handler_class.producer.configure_kafka_client(nil)
+         end
+
+         @kafka_client.close
+         log_info('Listener stopped', listener_metadata) if should_stop?
+       end
+     end
+
+     def start_consumer_loop
+       # validate batch handling
+       case @delivery
+       when 'batch'
+         consume_each_batch
+       when 'inline_batch'
+         consume_each_batch_inline
+       else
+         consume_each_message
+       end
+     end
+
+     def consume_each_batch
+       @consumer.each_batch(**@message_processing_opts) do |batch|
+         batch_processor = Phobos::Actions::ProcessBatch.new(
+           listener: self,
+           batch: batch,
+           listener_metadata: listener_metadata
+         )
+
+         batch_processor.execute
+         log_debug('Committed offset', batch_processor.metadata)
+         return nil if should_stop?
+       end
+     end
+
+     def consume_each_batch_inline
+       @consumer.each_batch(**@message_processing_opts) do |batch|
+         batch_processor = Phobos::Actions::ProcessBatchInline.new(
+           listener: self,
+           batch: batch,
+           metadata: listener_metadata
+         )
+
+         batch_processor.execute
+         log_debug('Committed offset', batch_processor.metadata)
+         return nil if should_stop?
+       end
+     end
+
+     def consume_each_message
+       @consumer.each_message(**@message_processing_opts) do |message|
+         message_processor = Phobos::Actions::ProcessMessage.new(
+           listener: self,
+           message: message,
+           listener_metadata: listener_metadata
+         )
+
+         message_processor.execute
+         log_debug('Committed offset', message_processor.metadata)
+         return nil if should_stop?
+       end
+     end
+
+     def create_kafka_consumer
+       configs = Phobos.config.consumer_hash.select do |k|
+         Constants::KAFKA_CONSUMER_OPTS.include?(k)
+       end
+       configs.merge!(@kafka_consumer_opts)
+       @kafka_client.consumer(**{ group_id: group_id }.merge(configs))
+     end
+
+     def compact(hash)
+       hash.delete_if { |_, v| v.nil? }
+     end
+   end
+   # rubocop:enable Metrics/ParameterLists, Metrics/ClassLength
+ end
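
Note: listeners are normally built by Phobos::Executor from the listeners section of phobos.yml (see config/phobos.yml.example in the file list above), but the initializer can also be driven directly. A hedged sketch, assuming Phobos has already been configured and MyHandler includes Phobos::Handler:

    require 'phobos'

    Phobos.configure('config/phobos.yml')

    listener = Phobos::Listener.new(
      handler: MyHandler,     # a class that includes Phobos::Handler
      group_id: 'my-group-1',
      topic: 'my-topic',
      delivery: 'message'     # one of DELIVERY_OPTS: batch, message, inline_batch
    )

    listener.start            # blocks; call listener.stop from another thread
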
data/lib/phobos/log.rb ADDED
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ module Phobos
+   module Log
+     def log_info(msg, metadata = {})
+       LoggerHelper.log(:info, msg, metadata)
+     end
+
+     def log_debug(msg, metadata = {})
+       LoggerHelper.log(:debug, msg, metadata)
+     end
+
+     def log_error(msg, metadata)
+       LoggerHelper.log(:error, msg, metadata)
+     end
+   end
+
+   module LoggerHelper
+     def self.log(method, msg, metadata)
+       Phobos.logger.send(method, Hash(message: msg).merge(metadata))
+     end
+   end
+ end
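
Note: Phobos::Log is a thin structured-logging mixin over Phobos.logger; each helper merges the message string into the metadata hash. Illustrative use (class name and fields are not part of this package):

    class MyService
      include Phobos::Log

      def call
        # logs { message: 'processing started', service: 'MyService' }
        log_info('processing started', service: self.class.name)
      end
    end
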
data/lib/phobos/processor.rb ADDED
@@ -0,0 +1,67 @@
+ # frozen_string_literal: true
+
+ require 'active_support/concern'
+
+ module Phobos
+   module Processor
+     extend ActiveSupport::Concern
+     include Phobos::Instrumentation
+
+     MAX_SLEEP_INTERVAL = 3
+
+     def snooze(interval)
+       remaining_interval = interval
+
+       @listener.send_heartbeat_if_necessary
+
+       while remaining_interval.positive?
+         sleep [remaining_interval, MAX_SLEEP_INTERVAL].min
+         remaining_interval -= MAX_SLEEP_INTERVAL
+         @listener.send_heartbeat_if_necessary
+       end
+     end
+
+     private
+
+     def force_encoding(value)
+       @listener.encoding ? value&.force_encoding(@listener.encoding) : value
+     end
+
+     def handle_error(error, instrumentation_key, error_message)
+       error_hash = {
+         waiting_time: backoff_interval,
+         exception_class: error.class.name,
+         exception_message: error.message,
+         backtrace: error.backtrace
+       }
+
+       instrument(instrumentation_key, error_hash.merge(@metadata)) do
+         Phobos.logger.error do
+           { message: error_message }
+             .merge(error_hash)
+             .merge(@metadata)
+         end
+
+         snooze(backoff_interval)
+       end
+
+       increment_retry_count
+     end
+
+     def retry_count
+       @metadata[:retry_count]
+     end
+
+     def increment_retry_count
+       @metadata[:retry_count] = retry_count + 1
+     end
+
+     def backoff
+       @backoff ||= @listener.create_exponential_backoff
+     end
+
+     def backoff_interval
+       backoff.interval_at(retry_count).round(2)
+     end
+   end
+ end
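
Note: the key design point of snooze is that it never sleeps more than MAX_SLEEP_INTERVAL (3s) at a stretch, sending a heartbeat between slices so long retry backoffs don't get the consumer evicted from its group. A worked illustration (intervals are illustrative; create_exponential_backoff wraps the exponential-backoff gem):

    # snooze(10) unfolds as:
    #   heartbeat, sleep 3, heartbeat, sleep 3, heartbeat,
    #   sleep 3, heartbeat, sleep 1, heartbeat
    #
    # backoff_interval grows with retry_count:
    require 'exponential_backoff'

    backoff = ExponentialBackoff.new(1.0, 25.6)       # min/max seconds, illustrative
    (0..4).each { |n| puts backoff.interval_at(n) }   # 1.0, 2.0, 4.0, 8.0, 16.0
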
data/lib/phobos/producer.rb ADDED
@@ -0,0 +1,171 @@
+ # frozen_string_literal: true
+
+ module Phobos
+   module Producer
+     def self.included(base)
+       base.extend(Phobos::Producer::ClassMethods)
+     end
+
+     def producer
+       Phobos::Producer::PublicAPI.new(self)
+     end
+
+     class PublicAPI
+       def initialize(host_obj)
+         @host_obj = host_obj
+       end
+
+       def publish(topic:, payload:, key: nil, partition_key: nil, headers: nil)
+         class_producer.publish(topic: topic,
+                                payload: payload,
+                                key: key,
+                                partition_key: partition_key,
+                                headers: headers)
+       end
+
+       def async_publish(topic:, payload:, key: nil, partition_key: nil, headers: nil)
+         class_producer.async_publish(topic: topic,
+                                      payload: payload,
+                                      key: key,
+                                      partition_key: partition_key,
+                                      headers: headers)
+       end
+
+       # @param messages [Array(Hash(:topic, :payload, :key, :headers))]
+       #   e.g.: [
+       #     { topic: 'A', payload: 'message-1', key: '1', headers: { foo: 'bar' } },
+       #     { topic: 'B', payload: 'message-2', key: '2', headers: { foo: 'bar' } }
+       #   ]
+       #
+       def publish_list(messages)
+         class_producer.publish_list(messages)
+       end
+
+       def async_publish_list(messages)
+         class_producer.async_publish_list(messages)
+       end
+
+       private
+
+       def class_producer
+         @host_obj.class.producer
+       end
+     end
+
+     module ClassMethods
+       def producer
+         Phobos::Producer::ClassMethods::PublicAPI.new
+       end
+
+       class PublicAPI
+         NAMESPACE = :phobos_producer_store
+         ASYNC_PRODUCER_PARAMS = [:max_queue_size, :delivery_threshold, :delivery_interval].freeze
+         INTERNAL_PRODUCER_PARAMS = [:persistent_connections].freeze
+
+         # This method configures the kafka client used with publish operations
+         # performed by the host class
+         #
+         # @param kafka_client [Kafka::Client]
+         #
+         def configure_kafka_client(kafka_client)
+           async_producer_shutdown
+           producer_store[:kafka_client] = kafka_client
+         end
+
+         def kafka_client
+           producer_store[:kafka_client]
+         end
+
+         def create_sync_producer
+           client = kafka_client || configure_kafka_client(Phobos.create_kafka_client(:producer))
+           sync_producer = client.producer(**regular_configs)
+           if Phobos.config.producer_hash[:persistent_connections]
+             producer_store[:sync_producer] = sync_producer
+           end
+           sync_producer
+         end
+
+         def sync_producer
+           producer_store[:sync_producer]
+         end
+
+         def sync_producer_shutdown
+           sync_producer&.shutdown
+           producer_store[:sync_producer] = nil
+         end
+
+         def publish(topic:, payload:, key: nil, partition_key: nil, headers: nil)
+           publish_list([{ topic: topic, payload: payload, key: key,
+                           partition_key: partition_key, headers: headers }])
+         end
+
+         def publish_list(messages)
+           producer = sync_producer || create_sync_producer
+           produce_messages(producer, messages)
+           producer.deliver_messages
+         ensure
+           producer&.shutdown unless Phobos.config.producer_hash[:persistent_connections]
+         end
+
+         def create_async_producer
+           client = kafka_client || configure_kafka_client(Phobos.create_kafka_client(:producer))
+           async_producer = client.async_producer(**async_configs)
+           producer_store[:async_producer] = async_producer
+         end
+
+         def async_producer
+           producer_store[:async_producer]
+         end
+
+         def async_publish(topic:, payload:, key: nil, partition_key: nil, headers: nil)
+           async_publish_list([{ topic: topic, payload: payload, key: key,
+                                 partition_key: partition_key, headers: headers }])
+         end
+
+         def async_publish_list(messages)
+           producer = async_producer || create_async_producer
+           produce_messages(producer, messages)
+           producer.deliver_messages unless async_automatic_delivery?
+         end
+
+         def async_producer_shutdown
+           async_producer&.deliver_messages
+           async_producer&.shutdown
+           producer_store[:async_producer] = nil
+         end
+
+         def regular_configs
+           Phobos.config.producer_hash
+                 .reject { |k, _| ASYNC_PRODUCER_PARAMS.include?(k) }
+                 .reject { |k, _| INTERNAL_PRODUCER_PARAMS.include?(k) }
+         end
+
+         def async_configs
+           Phobos.config.producer_hash
+                 .reject { |k, _| INTERNAL_PRODUCER_PARAMS.include?(k) }
+         end
+
+         private
+
+         def produce_messages(producer, messages)
+           messages.each do |message|
+             partition_key = message[:partition_key] || message[:key]
+             producer.produce(message[:payload], topic: message[:topic],
+                                                 key: message[:key],
+                                                 headers: message[:headers],
+                                                 partition_key: partition_key)
+           end
+         end
+
+         def async_automatic_delivery?
+           async_configs.fetch(:delivery_threshold, 0).positive? ||
+             async_configs.fetch(:delivery_interval, 0).positive?
+         end
+
+         def producer_store
+           Thread.current[NAMESPACE] ||= {}
+         end
+       end
+     end
+   end
+ end
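
Note: including Phobos::Producer gives both instance-level (producer) and class-level (MyProducer.producer) access to the same per-thread producer store (Thread.current[:phobos_producer_store]). A minimal sketch of the two publish paths (class name and topics are illustrative):

    class MyProducer
      include Phobos::Producer
    end

    # Synchronous: delivers before returning; the producer is shut down
    # afterwards unless persistent_connections is set in the producer config
    MyProducer.producer.publish(topic: 'test', payload: 'message-1', key: 'k1')

    # Asynchronous: goes through ruby-kafka's async producer; flush and
    # discard the per-thread producer before the thread exits
    MyProducer.producer.async_publish(topic: 'test', payload: 'message-2')
    MyProducer.producer.async_producer_shutdown
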