deimos-ruby 1.24.2 → 2.0.0.pre.alpha1

Files changed (120)
  1. checksums.yaml +4 -4
  2. data/.rubocop_todo.yml +0 -17
  3. data/.tool-versions +1 -0
  4. data/CHANGELOG.md +5 -0
  5. data/README.md +287 -498
  6. data/deimos-ruby.gemspec +4 -4
  7. data/docs/CONFIGURATION.md +133 -226
  8. data/docs/UPGRADING.md +237 -0
  9. data/lib/deimos/active_record_consume/batch_consumption.rb +29 -28
  10. data/lib/deimos/active_record_consume/mass_updater.rb +59 -4
  11. data/lib/deimos/active_record_consume/message_consumption.rb +15 -21
  12. data/lib/deimos/active_record_consumer.rb +36 -21
  13. data/lib/deimos/active_record_producer.rb +28 -9
  14. data/lib/deimos/backends/base.rb +4 -35
  15. data/lib/deimos/backends/kafka.rb +6 -22
  16. data/lib/deimos/backends/kafka_async.rb +6 -22
  17. data/lib/deimos/backends/{db.rb → outbox.rb} +13 -9
  18. data/lib/deimos/config/configuration.rb +116 -379
  19. data/lib/deimos/consume/batch_consumption.rb +24 -124
  20. data/lib/deimos/consume/message_consumption.rb +36 -63
  21. data/lib/deimos/consumer.rb +16 -75
  22. data/lib/deimos/ext/consumer_route.rb +35 -0
  23. data/lib/deimos/ext/producer_middleware.rb +94 -0
  24. data/lib/deimos/ext/producer_route.rb +22 -0
  25. data/lib/deimos/ext/redraw.rb +29 -0
  26. data/lib/deimos/ext/routing_defaults.rb +72 -0
  27. data/lib/deimos/ext/schema_route.rb +70 -0
  28. data/lib/deimos/kafka_message.rb +2 -2
  29. data/lib/deimos/kafka_source.rb +2 -7
  30. data/lib/deimos/kafka_topic_info.rb +1 -1
  31. data/lib/deimos/logging.rb +71 -0
  32. data/lib/deimos/message.rb +2 -11
  33. data/lib/deimos/metrics/datadog.rb +40 -1
  34. data/lib/deimos/metrics/provider.rb +4 -4
  35. data/lib/deimos/producer.rb +39 -116
  36. data/lib/deimos/railtie.rb +6 -0
  37. data/lib/deimos/schema_backends/avro_base.rb +21 -21
  38. data/lib/deimos/schema_backends/avro_schema_registry.rb +1 -2
  39. data/lib/deimos/schema_backends/avro_validation.rb +2 -2
  40. data/lib/deimos/schema_backends/base.rb +19 -12
  41. data/lib/deimos/schema_backends/mock.rb +6 -1
  42. data/lib/deimos/schema_backends/plain.rb +47 -0
  43. data/lib/deimos/schema_class/base.rb +2 -2
  44. data/lib/deimos/schema_class/enum.rb +1 -1
  45. data/lib/deimos/schema_class/record.rb +2 -2
  46. data/lib/deimos/test_helpers.rb +95 -320
  47. data/lib/deimos/tracing/provider.rb +6 -6
  48. data/lib/deimos/transcoder.rb +88 -0
  49. data/lib/deimos/utils/db_poller/base.rb +16 -14
  50. data/lib/deimos/utils/db_poller/state_based.rb +3 -3
  51. data/lib/deimos/utils/db_poller/time_based.rb +4 -4
  52. data/lib/deimos/utils/db_poller.rb +1 -1
  53. data/lib/deimos/utils/deadlock_retry.rb +1 -1
  54. data/lib/deimos/utils/{db_producer.rb → outbox_producer.rb} +16 -47
  55. data/lib/deimos/utils/schema_class.rb +0 -7
  56. data/lib/deimos/version.rb +1 -1
  57. data/lib/deimos.rb +79 -26
  58. data/lib/generators/deimos/{db_backend_generator.rb → outbox_backend_generator.rb} +4 -4
  59. data/lib/generators/deimos/schema_class_generator.rb +0 -1
  60. data/lib/generators/deimos/v2/templates/karafka.rb.tt +149 -0
  61. data/lib/generators/deimos/v2_generator.rb +193 -0
  62. data/lib/tasks/deimos.rake +5 -7
  63. data/spec/active_record_batch_consumer_association_spec.rb +22 -13
  64. data/spec/active_record_batch_consumer_spec.rb +84 -65
  65. data/spec/active_record_consume/batch_consumption_spec.rb +10 -10
  66. data/spec/active_record_consume/batch_slicer_spec.rb +12 -12
  67. data/spec/active_record_consume/mass_updater_spec.rb +137 -0
  68. data/spec/active_record_consumer_spec.rb +29 -13
  69. data/spec/active_record_producer_spec.rb +36 -26
  70. data/spec/backends/base_spec.rb +0 -23
  71. data/spec/backends/kafka_async_spec.rb +1 -3
  72. data/spec/backends/kafka_spec.rb +1 -3
  73. data/spec/backends/{db_spec.rb → outbox_spec.rb} +14 -20
  74. data/spec/batch_consumer_spec.rb +66 -116
  75. data/spec/consumer_spec.rb +53 -147
  76. data/spec/deimos_spec.rb +10 -126
  77. data/spec/kafka_source_spec.rb +19 -52
  78. data/spec/karafka/karafka.rb +69 -0
  79. data/spec/karafka_config/karafka_spec.rb +97 -0
  80. data/spec/logging_spec.rb +25 -0
  81. data/spec/message_spec.rb +9 -9
  82. data/spec/producer_spec.rb +112 -254
  83. data/spec/rake_spec.rb +1 -3
  84. data/spec/schema_backends/avro_validation_spec.rb +1 -1
  85. data/spec/schemas/com/my-namespace/MySchemaWithTitle.avsc +22 -0
  86. data/spec/snapshots/consumers-no-nest.snap +49 -0
  87. data/spec/snapshots/consumers.snap +49 -0
  88. data/spec/snapshots/consumers_and_producers-no-nest.snap +49 -0
  89. data/spec/snapshots/consumers_and_producers.snap +49 -0
  90. data/spec/snapshots/consumers_circular-no-nest.snap +49 -0
  91. data/spec/snapshots/consumers_circular.snap +49 -0
  92. data/spec/snapshots/consumers_complex_types-no-nest.snap +49 -0
  93. data/spec/snapshots/consumers_complex_types.snap +49 -0
  94. data/spec/snapshots/consumers_nested-no-nest.snap +49 -0
  95. data/spec/snapshots/consumers_nested.snap +49 -0
  96. data/spec/snapshots/namespace_folders.snap +49 -0
  97. data/spec/snapshots/namespace_map.snap +49 -0
  98. data/spec/snapshots/producers_with_key-no-nest.snap +49 -0
  99. data/spec/snapshots/producers_with_key.snap +49 -0
  100. data/spec/spec_helper.rb +61 -29
  101. data/spec/utils/db_poller_spec.rb +49 -39
  102. data/spec/utils/{db_producer_spec.rb → outbox_producer_spec.rb} +17 -184
  103. metadata +58 -67
  104. data/lib/deimos/batch_consumer.rb +0 -7
  105. data/lib/deimos/config/phobos_config.rb +0 -163
  106. data/lib/deimos/instrumentation.rb +0 -95
  107. data/lib/deimos/monkey_patches/phobos_cli.rb +0 -35
  108. data/lib/deimos/utils/inline_consumer.rb +0 -158
  109. data/lib/deimos/utils/lag_reporter.rb +0 -186
  110. data/lib/deimos/utils/schema_controller_mixin.rb +0 -129
  111. data/spec/config/configuration_spec.rb +0 -321
  112. data/spec/kafka_listener_spec.rb +0 -55
  113. data/spec/phobos.bad_db.yml +0 -73
  114. data/spec/phobos.yml +0 -77
  115. data/spec/utils/inline_consumer_spec.rb +0 -31
  116. data/spec/utils/lag_reporter_spec.rb +0 -76
  117. data/spec/utils/platform_schema_validation_spec.rb +0 -0
  118. data/spec/utils/schema_controller_mixin_spec.rb +0 -84
  119. /data/lib/generators/deimos/{db_backend → outbox_backend}/templates/migration +0 -0
  120. /data/lib/generators/deimos/{db_backend → outbox_backend}/templates/rails3_migration +0 -0
data/lib/deimos/utils/db_poller/base.rb CHANGED
@@ -11,6 +11,7 @@ module Deimos
       # Base poller class for retrieving and publishing messages.
       class Base

+        FATAL_CODES = %i(invalid_msg_size msg_size_too_large)
         # @return [Integer]
         BATCH_SIZE = 1000

@@ -58,14 +59,14 @@ module Deimos
           if Deimos.config.producers.backend == :kafka_async
             Deimos.config.producers.backend = :kafka
           end
-          Deimos.config.logger.info('Starting...')
+          Deimos::Logging.log_info('Starting...')
           @signal_to_stop = false
           ActiveRecord::Base.connection.reconnect! unless ActiveRecord::Base.connection.open_transactions.positive?

           retrieve_poll_info
           loop do
             if @signal_to_stop
-              Deimos.config.logger.info('Shutting down')
+              Deimos::Logging.log_info('Shutting down')
               break
             end
             process_updates if should_run?
@@ -95,7 +96,7 @@ module Deimos
         # Stop the poll.
         # @return [void]
         def stop
-          Deimos.config.logger.info('Received signal to stop')
+          Deimos::Logging.log_info('Received signal to stop')
           @signal_to_stop = true
         end

@@ -111,9 +112,9 @@ module Deimos
         # @param span [Object]
         # @return [Boolean]
        def handle_message_too_large(exception, batch, status, span)
-          Deimos.config.logger.error("Error publishing through DB Poller: #{exception.message}")
+          Deimos::Logging.log_error("Error publishing through DB Poller: #{exception.message}")
          if @config.skip_too_large_messages
-            Deimos.config.logger.error("Skipping messages #{batch.map(&:id).join(', ')} since they are too large")
+            Deimos::Logging.log_error("Skipping messages #{batch.map(&:id).join(', ')} since they are too large")
            Deimos.config.tracer&.set_error(span, exception)
            status.batches_errored += 1
            true
@@ -137,21 +138,22 @@ module Deimos
           process_batch(batch)
           Deimos.config.tracer&.finish(span)
           status.batches_processed += 1
-        rescue Kafka::BufferOverflow, Kafka::MessageSizeTooLarge,
-               Kafka::RecordListTooLarge => e
-          retry unless handle_message_too_large(e, batch, status, span)
-        rescue Kafka::Error => e # keep trying till it fixes itself
-          Deimos.config.logger.error("Error publishing through DB Poller: #{e.message}")
-          sleep(0.5)
-          retry
+        rescue WaterDrop::Errors::ProduceManyError => e
+          if FATAL_CODES.include?(e.cause.try(:code))
+            retry unless handle_message_too_large(e, batch, status, span)
+          else
+            Deimos::Logging.log_error("Error publishing through DB Poller: #{e.message}")
+            sleep(0.5)
+            retry
+          end
         rescue StandardError => e
-          Deimos.config.logger.error("Error publishing through DB poller: #{e.message}}")
+          Deimos::Logging.log_error("Error publishing through DB poller: #{e.message}}")
           if @config.retries.nil? || retries < @config.retries
             retries += 1
             sleep(0.5)
             retry
           else
-            Deimos.config.logger.error('Retries exceeded, moving on to next batch')
+            Deimos::Logging.log_error('Retries exceeded, moving on to next batch')
             Deimos.config.tracer&.set_error(span, e)
             status.batches_errored += 1
             return false
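
With ruby-kafka gone, produce failures inside process_batch now surface as WaterDrop::Errors::ProduceManyError wrapping an rdkafka error, and the poller branches on the wrapped cause's error code rather than on exception class. A minimal sketch of that dispatch pattern, with a hypothetical publish_batch standing in for process_batch:

    FATAL_CODES = %i(invalid_msg_size msg_size_too_large)

    begin
      publish_batch(batch) # hypothetical stand-in for process_batch(batch)
    rescue WaterDrop::Errors::ProduceManyError => e
      if FATAL_CODES.include?(e.cause.try(:code))
        # Oversized payloads are fatal: retrying cannot shrink them.
        skip_or_raise(e, batch) # hypothetical; the real code calls handle_message_too_large
      else
        sleep(0.5) # transient broker error: back off and retry the same batch
        retry
      end
    end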

data/lib/deimos/utils/db_poller/state_based.rb CHANGED
@@ -10,14 +10,14 @@ module Deimos
         # Send messages for updated data.
         # @return [void]
         def process_updates
-          Deimos.config.logger.info("Polling #{log_identifier}")
+          Deimos::Logging.log_info("Polling #{log_identifier}")
           status = PollStatus.new(0, 0, 0)
           first_batch = true

           # poll_query gets all the relevant data from the database, as defined
           # by the producer itself.
           loop do
-            Deimos.config.logger.debug("Polling #{log_identifier}, batch #{status.current_batch}")
+            Deimos::Logging.log_debug("Polling #{log_identifier}, batch #{status.current_batch}")
             batch = fetch_results.to_a
             break if batch.empty?

@@ -29,7 +29,7 @@ module Deimos
           # If there were no results at all, we update last_sent so that we still get a wait
           # before the next poll.
           @info.touch(:last_sent) if first_batch
-          Deimos.config.logger.info("Poll #{log_identifier} complete (#{status.report}")
+          Deimos::Logging.log_info("Poll #{log_identifier} complete (#{status.report}")
         end

         # @return [ActiveRecord::Relation]

data/lib/deimos/utils/db_poller/time_based.rb CHANGED
@@ -28,14 +28,14 @@ module Deimos
         def process_updates
           time_from = @config.full_table ? Time.new(0) : @info.last_sent.in_time_zone
           time_to = Time.zone.now - @config.delay_time
-          Deimos.config.logger.info("Polling #{log_identifier} from #{time_from} to #{time_to}")
+          Deimos::Logging.log_info("Polling #{log_identifier} from #{time_from} to #{time_to}")
           status = PollStatus.new(0, 0, 0)
           first_batch = true

           # poll_query gets all the relevant data from the database, as defined
           # by the producer itself.
           loop do
-            Deimos.config.logger.debug("Polling #{log_identifier}, batch #{status.current_batch}")
+            Deimos::Logging.log_debug("Polling #{log_identifier}, batch #{status.current_batch}")
             batch = fetch_results(time_from, time_to).to_a
             break if batch.empty?

@@ -47,14 +47,14 @@ module Deimos
           # If there were no results at all, we update last_sent so that we still get a wait
           # before the next poll.
           @info.touch(:last_sent) if first_batch
-          Deimos.config.logger.info("Poll #{log_identifier} complete at #{time_to} (#{status.report})")
+          Deimos::Logging.log_info("Poll #{log_identifier} complete at #{time_to} (#{status.report})")
         end

         # @param time_from [ActiveSupport::TimeWithZone]
         # @param time_to [ActiveSupport::TimeWithZone]
         # @return [ActiveRecord::Relation]
         def fetch_results(time_from, time_to)
-          id = self.producer_classes.first.config[:record_class].primary_key
+          id = self.producer_classes.first.record_class.primary_key
           quoted_timestamp = ActiveRecord::Base.connection.quote_column_name(@config.timestamp_column)
           quoted_id = ActiveRecord::Base.connection.quote_column_name(id)
           @resource_class.poll_query(time_from: time_from,
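
fetch_results also stops digging into the producer's config hash: the record class now comes from a class-level reader. Assuming the v2 reader pairs with the existing record_class macro, a poller-backed producer still declares it the same way (Widget is an illustrative model):

    class MyProducer < Deimos::ActiveRecordProducer
      record_class Widget # the poller now calls MyProducer.record_class directly
    end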

data/lib/deimos/utils/db_poller.rb CHANGED
@@ -16,7 +16,7 @@ module Deimos
       end
       executor = Sigurd::Executor.new(pollers,
                                       sleep_seconds: 5,
-                                      logger: Deimos.config.logger)
+                                      logger: Karafka.logger)
       signal_handler = Sigurd::SignalHandler.new(executor)
       signal_handler.run!
     end

data/lib/deimos/utils/deadlock_retry.rb CHANGED
@@ -45,7 +45,7 @@ module Deimos
         # Reraise if all retries exhausted
         raise if count <= 0

-        Deimos.config.logger.warn(
+        Deimos::Logging.log_warn(
          message: 'Deadlock encountered when trying to execute query. '\
                   "Retrying. #{count} attempt(s) remaining",
          tags: tags
4
4
  module Utils
5
5
  # Class which continually polls the kafka_messages table
6
6
  # in the database and sends Kafka messages.
7
- class DbProducer
8
- include Phobos::Producer
7
+ class OutboxProducer
9
8
  attr_accessor :id, :current_topic
10
9
 
11
10
  # @return [Integer]
@@ -14,17 +13,19 @@ module Deimos
       DELETE_BATCH_SIZE = 10
       # @return [Integer]
       MAX_DELETE_ATTEMPTS = 3
+      # @return [Array<Symbol>]
+      FATAL_CODES = %i(invalid_msg_size msg_size_too_large)

       # @param logger [Logger]
       def initialize(logger=Logger.new(STDOUT))
         @id = SecureRandom.uuid
         @logger = logger
-        @logger.push_tags("DbProducer #{@id}") if @logger.respond_to?(:push_tags)
+        @logger.push_tags("OutboxProducer #{@id}") if @logger.respond_to?(:push_tags)
       end

       # @return [FigTree]
       def config
-        Deimos.config.db_producer
+        Deimos.config.outbox
       end

       # Start the poll.
@@ -82,7 +83,6 @@ module Deimos
       rescue StandardError => e
         @logger.error("Error processing messages for topic #{@current_topic}: #{e.class.name}: #{e.message} #{e.backtrace.join("\n")}")
         KafkaTopicInfo.register_error(@current_topic, @id)
-        shutdown_producer
       end

       # Process a single batch in a topic.
@@ -94,24 +94,23 @@ module Deimos
         batch_size = messages.size
         compacted_messages = compact_messages(messages)
         log_messages(compacted_messages)
-        Deimos.instrument('db_producer.produce', topic: @current_topic, messages: compacted_messages) do
+        Karafka.monitor.instrument('deimos.outbox.produce', topic: @current_topic, messages: compacted_messages) do
           begin
-            produce_messages(compacted_messages.map(&:phobos_message))
-          rescue Kafka::BufferOverflow, Kafka::MessageSizeTooLarge, Kafka::RecordListTooLarge => e
-            delete_messages(messages)
-            @logger.error('Message batch too large, deleting...')
-            begin
-              @logger.error(Deimos::KafkaMessage.decoded(messages))
-            rescue StandardError => logging_exception # rubocop:disable Naming/RescuedExceptionsVariableName
-              @logger.error("Large message details logging failure: #{logging_exception.message}")
-            ensure
+            produce_messages(compacted_messages.map(&:karafka_message))
+          rescue WaterDrop::Errors::ProduceManyError => e
+            if FATAL_CODES.include?(e.cause.try(:code))
+              @logger.error('Message batch too large, deleting...')
+              delete_messages(messages)
               raise e
+            else
+              Deimos.log_error("Got error #{e.cause.class.name} when publishing #{batch_size} messages, retrying...")
+              retry
             end
           end
         end
         delete_messages(messages)
         Deimos.config.metrics&.increment(
-          'db_producer.process',
+          'outbox.process',
           tags: %W(topic:#{@current_topic}),
           by: messages.size
         )
@@ -197,16 +196,6 @@ module Deimos
         end
       end

-      # Shut down the sync producer if we have to. Phobos will automatically
-      # create a new one. We should call this if the producer can be in a bad
-      # state and e.g. we need to clear the buffer.
-      # @return [void]
-      def shutdown_producer
-        if self.class.producer.respond_to?(:sync_producer_shutdown) # Phobos 1.8.3
-          self.class.producer.sync_producer_shutdown
-        end
-      end
-
       # Produce messages in batches, reducing the size 1/10 if the batch is too
       # large. Does not retry batches of messages that have already been sent.
       # @param batch [Array<Hash>]
@@ -217,30 +206,10 @@ module Deimos
       begin
         batch[current_index..-1].in_groups_of(batch_size, false).each do |group|
           @logger.debug("Publishing #{group.size} messages to #{@current_topic}")
-          producer.publish_list(group)
-          Deimos.config.metrics&.increment(
-            'publish',
-            tags: %W(status:success topic:#{@current_topic}),
-            by: group.size
-          )
+          Karafka.producer.produce_many_sync(group)
           current_index += group.size
           @logger.info("Sent #{group.size} messages to #{@current_topic}")
         end
-      rescue Kafka::BufferOverflow, Kafka::MessageSizeTooLarge,
-             Kafka::RecordListTooLarge => e
-        if batch_size == 1
-          shutdown_producer
-          raise
-        end
-
-        @logger.error("Got error #{e.class.name} when publishing #{batch.size} in groups of #{batch_size}, retrying...")
-        batch_size = if batch_size < 10
-                       1
-                     else
-                       (batch_size / 10).to_i
-                     end
-        shutdown_producer
-        retry
       end
     end
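
The hand-rolled batch splitting, per-group metrics, and producer restarts are gone; produce_messages now hands each group of already-encoded message hashes to WaterDrop's synchronous batch API and lets process_batch's rescue decide between delete-and-raise and retry. A sketch of the call shape (topic, payloads, and keys are illustrative):

    messages = [
      { topic: 'my-topic', payload: encoded_payload, key: 'record-1' },
      { topic: 'my-topic', payload: other_payload, key: 'record-2' }
    ]
    Karafka.producer.produce_many_sync(messages) # raises ProduceManyError on failure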
 
data/lib/deimos/utils/schema_class.rb CHANGED
@@ -55,13 +55,6 @@ module Deimos
         constants.join('::').safe_constantize
       end

-      # @param config [Hash] Producer or Consumer config
-      # @return [Boolean]
-      def use?(config)
-        use_schema_classes = config[:use_schema_classes]
-        use_schema_classes.present? ? use_schema_classes : Deimos.config.schema.use_schema_classes
-      end
-
     end
   end
 end
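
With the per-config use? helper removed, schema-class usage is controlled only by the global setting. A sketch using the nested block form that the v2 generator template emits:

    Deimos.configure do
      schema do
        use_schema_classes true
      end
    end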

data/lib/deimos/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module Deimos
-  VERSION = '1.24.2'
+  VERSION = '2.0.0-alpha1'
 end
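
RubyGems normalizes the '2.0.0-alpha1' string to 2.0.0.pre.alpha1 (the version shown in the title), so opting into the prerelease requires an explicit pin:

    # Gemfile
    gem 'deimos-ruby', '2.0.0.pre.alpha1'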
data/lib/deimos.rb CHANGED
@@ -1,17 +1,16 @@
 # frozen_string_literal: true

 require 'active_support'
+require 'deimos/ext/redraw'
+require 'karafka'

-require 'phobos'
 require 'deimos/version'
+require 'deimos/logging'
 require 'deimos/config/configuration'
 require 'deimos/producer'
 require 'deimos/active_record_producer'
 require 'deimos/active_record_consumer'
 require 'deimos/consumer'
-require 'deimos/batch_consumer'
-require 'deimos/instrumentation'
-require 'deimos/utils/lag_reporter'

 require 'deimos/backends/base'
 require 'deimos/backends/kafka'
@@ -23,27 +22,38 @@ require 'deimos/utils/schema_class'
 require 'deimos/schema_class/enum'
 require 'deimos/schema_class/record'

-require 'deimos/monkey_patches/phobos_cli'
+require 'deimos/ext/schema_route'
+require 'deimos/ext/consumer_route'
+require 'deimos/ext/producer_route'
+require 'deimos/ext/producer_middleware'
+require 'deimos/ext/routing_defaults'

 require 'deimos/railtie' if defined?(Rails)
-require 'deimos/utils/schema_controller_mixin' if defined?(ActionController)

 if defined?(ActiveRecord)
   require 'deimos/kafka_source'
   require 'deimos/kafka_topic_info'
-  require 'deimos/backends/db'
+  require 'deimos/backends/outbox'
   require 'sigurd'
-  require 'deimos/utils/db_producer'
+  require 'deimos/utils/outbox_producer'
   require 'deimos/utils/db_poller'
 end

-require 'deimos/utils/inline_consumer'
 require 'yaml'
 require 'erb'

 # Parent module.
 module Deimos
+  EVENT_TYPES = %w(
+    deimos.ar_consumer.consume_batch
+    deimos.encode_message
+    deimos.batch_consumption.invalid_records
+    deimos.batch_consumption.valid_records
+    deimos.outbox.produce
+  )
+
   class << self
+
     # @return [Class<Deimos::SchemaBackends::Base>]
     def schema_backend_class
       backend = Deimos.config.schema.backend.to_s
@@ -57,7 +67,7 @@ module Deimos
     # @param namespace [String]
     # @return [Deimos::SchemaBackends::Base]
     def schema_backend(schema:, namespace:)
-      if Utils::SchemaClass.use?(config.to_h)
+      if config.schema.use_schema_classes
         # Initialize an instance of the provided schema
         # in the event the schema class is an override, the inherited
         # schema and namespace will be applied
@@ -91,13 +101,26 @@ module Deimos
       self.schema_backend(schema: schema, namespace: namespace).decode(payload)
     end

+    # @param message [Hash] a Karafka message with keys :payload, :key and :topic
+    def decode_message(message)
+      topic = message[:topic]
+      if Deimos.config.producers.topic_prefix
+        topic = topic.sub(Deimos.config.producers.topic_prefix, '')
+      end
+      config = karafka_config_for(topic: topic)
+      message[:payload] = config.deserializers[:payload].decode_message_hash(message[:payload])
+      if message[:key] && config.deserializers[:key].respond_to?(:decode_message_hash)
+        message[:key] = config.deserializers[:key].decode_message_hash(message[:key])
+      end
+    end
+
     # Start the DB producers to send Kafka messages.
     # @param thread_count [Integer] the number of threads to start.
     # @return [void]
-    def start_db_backend!(thread_count: 1)
+    def start_outbox_backend!(thread_count: 1)
       Sigurd.exit_on_signal = true
-      if self.config.producers.backend != :db
-        raise('Publish backend is not set to :db, exiting')
+      if self.config.producers.backend != :outbox
+        raise('Publish backend is not set to :outbox, exiting')
       end

       if thread_count.nil? || thread_count.zero?
@@ -105,25 +128,55 @@ module Deimos
       end

       producers = (1..thread_count).map do
-        Deimos::Utils::DbProducer.
-          new(self.config.db_producer.logger || self.config.logger)
+        Deimos::Utils::OutboxProducer.
+          new(self.config.outbox.logger || Karafka.logger)
       end
       executor = Sigurd::Executor.new(producers,
                                       sleep_seconds: 5,
-                                      logger: self.config.logger)
+                                      logger: Karafka.logger)
       signal_handler = Sigurd::SignalHandler.new(executor)
       signal_handler.run!
     end
-  end
-end

-at_exit do
-  begin
-    Deimos::Backends::KafkaAsync.shutdown_producer
-    Deimos::Backends::Kafka.shutdown_producer
-  rescue StandardError => e
-    Deimos.config.logger.error(
-      "Error closing producer on shutdown: #{e.message} #{e.backtrace.join("\n")}"
-    )
+    def setup_karafka
+      Karafka.producer.middleware.append(Deimos::ProducerMiddleware)
+      EVENT_TYPES.each { |type| Karafka.monitor.notifications_bus.register_event(type) }
+
+      Karafka.producer.monitor.subscribe('error.occurred') do |event|
+        if event.payload.key?(:messages)
+          topic = event[:messages].first[:topic]
+          config = Deimos.karafka_config_for(topic: topic)
+          message = Deimos::Logging.messages_log_text(config&.payload_log, event[:messages])
+          Karafka.logger.error("Error producing messages: #{event[:error].message} #{message.to_json}")
+        end
+      end
+    end
+
+    # @return [Array<Karafka::Routing::Topic]
+    def karafka_configs
+      Karafka::App.routes.flat_map(&:topics).flat_map(&:to_a)
+    end
+
+    # @param topic [String]
+    # @return [Karafka::Routing::Topic,nil]
+    def karafka_config_for(topic: nil, producer: nil)
+      if topic
+        karafka_configs.find { |t| t.name == topic }
+      elsif producer
+        karafka_configs.find { |t| t.producer_class == producer }
+      end
+    end
+
+    # @param handler_class [Class]
+    # @return [String,nil]
+    def topic_for_consumer(handler_class)
+      Deimos.karafka_configs.each do |topic|
+        if topic.consumer == handler_class
+          return topic.name
+        end
+      end
+      nil
+    end
+
   end
 end
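
Together these additions replace the Phobos instrumentation and the at_exit shutdown hooks: an app calls setup_karafka once after its Karafka routes are drawn, then can observe Deimos events or query routing config at runtime. A sketch (the topic name is illustrative):

    Deimos.setup_karafka # appends ProducerMiddleware and registers EVENT_TYPES

    # Once registered, Deimos events are ordinary Karafka notifications:
    Karafka.monitor.subscribe('deimos.outbox.produce') do |event|
      Karafka.logger.info("Outbox batch produced to #{event[:topic]}")
    end

    Deimos.karafka_config_for(topic: 'my-topic') # => Karafka::Routing::Topic or nil
    Deimos.start_outbox_backend!(thread_count: 2) # renamed from start_db_backend!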

data/lib/generators/deimos/{db_backend_generator.rb → outbox_backend_generator.rb} CHANGED
@@ -6,14 +6,14 @@ require 'rails/generators/active_record/migration'
 module Deimos
   module Generators
     # Generate the database backend migration.
-    class DbBackendGenerator < Rails::Generators::Base
+    class OutboxBackendGenerator < Rails::Generators::Base
       include Rails::Generators::Migration
       if Rails.version < '4'
         extend(ActiveRecord::Generators::Migration)
       else
         include ActiveRecord::Generators::Migration
       end
-      source_root File.expand_path('db_backend/templates', __dir__)
+      source_root File.expand_path('outbox_backend/templates', __dir__)
       desc 'Add migrations for the database backend'

       # @return [String]
@@ -38,10 +38,10 @@ module Deimos
       def generate
         if Rails.version < '4'
           migration_template('rails3_migration',
-                             "#{db_migrate_path}/create_db_backend.rb")
+                             "#{db_migrate_path}/create_outbox_backend.rb")
         else
           migration_template('migration',
-                             "#{db_migrate_path}/create_db_backend.rb")
+                             "#{db_migrate_path}/create_outbox_backend.rb")
         end
       end
     end
data/lib/generators/deimos/schema_class_generator.rb CHANGED
@@ -3,7 +3,6 @@
 require 'rails/generators'
 require 'deimos'
 require 'deimos/schema_backends/avro_base'
-require 'deimos/config/configuration'

 # Generates new schema classes.
 module Deimos

data/lib/generators/deimos/v2/templates/karafka.rb.tt ADDED
@@ -0,0 +1,149 @@
+# frozen_string_literal: true
+
+require 'deimos'
+require 'karafka'
+
+Deimos.configure do
+<%- deimos_configs.each do |group, settings| -%>
+  <%= group -%> do
+  <%- settings.each do |k, v| -%>
+    <%= k.to_s %> <%= v.inspect %>
+  <%- end -%>
+  end
+<%- end -%>
+<%- deimos_config.db_poller_objects.each do |poller| -%>
+  db_poller do
+  <%- poller.non_default_settings! -%>
+  <%- poller.to_h.each do |k, v| -%>
+    <%= k.to_s %> <%= v.inspect %>
+  <%- end -%>
+<%- end -%>
+  end
+end
+
+class KarafkaApp < Karafka::App
+  setup do |config|
+  <%- setup_configs.each do |k, v| -%>
+    config.<%= k %> = <%= v.inspect %>
+  <%- end -%>
+    config.kafka = {
+  <%- default_kafka_configs.compact.each do |k, v| -%>
+      "<%= k.to_s %>": <%= v.inspect %>,
+  <%- end -%>
+    }
+    # Recreate consumers with each batch. This will allow Rails code reload to work in the
+    # development mode. Otherwise Karafka process would not be aware of code changes
+    config.consumer_persistence = !Rails.env.development?
+  end
+
+  # Comment out this part if you are not using instrumentation and/or you are not
+  # interested in logging events for certain environments. Since instrumentation
+  # notifications add extra boilerplate, if you want to achieve max performance,
+  # listen to only what you really need for given environment.
+  Karafka.monitor.subscribe(Karafka::Instrumentation::LoggerListener.new)
+  # Karafka.monitor.subscribe(Karafka::Instrumentation::ProctitleListener.new)
+
+  # This logger prints the producer development info using the Karafka logger.
+  # It is similar to the consumer logger listener but producer oriented.
+  Karafka.producer.monitor.subscribe(
+    WaterDrop::Instrumentation::LoggerListener.new(
+      # Log producer operations using the Karafka logger
+      Karafka.logger,
+      # If you set this to true, logs will contain each message details
+      # Please note, that this can be extensive
+      log_messages: false
+    )
+  )
+
+  # You can subscribe to all consumer related errors and record/track then that way
+  #
+  # Karafka.monitor.subscribe 'error.occurred' do |event|
+  #   type = event[:type]
+  #   error = event[:error]
+  #   details = (error.backtrace || []).join("\n")
+  #   ErrorTracker.send_error(error, type, details)
+  # end
+
+  # You can subscribe to all producer related errors and record/track then that way
+  # Please note, that producer and consumer have their own notifications pipeline so you need to
+  # setup error tracking independently for each of them
+  #
+  # Karafka.producer.monitor.subscribe('error.occurred') do |event|
+  #   type = event[:type]
+  #   error = event[:error]
+  #   details = (error.backtrace || []).join("\n")
+  #   ErrorTracker.send_error(error, type, details)
+  # end
+
+  routes.draw do
+    defaults do
+  <%- default_configs.each do |k, v| -%>
+      <%= k.to_s %> <%= v.inspect %>
+  <%- end -%>
+    end
+
+  <%- producer_configs.each do |producer| -%>
+    topic "<%= producer[:topic] %>" do
+    <%- producer.except(:topic).each do |k, v| -%>
+      <%- if k.to_sym == :key_config -%>
+      <%= k.to_s %>(<%= v.inspect %>)
+      <%- else -%>
+      <%= k.to_s %> <%= v.inspect %>
+      <%- end -%>
+    <%- end -%>
+    end
+  <%- end -%>
+
+  <%- consumer_configs.each do |group_id, topics| -%>
+    <%- if consumer_configs.length > 1 -%>
+    consumer_group :<%= group_id %> do<%- end -%>
+    <%- topics.each do |consumer| %>
+    topic "<%= consumer[:topic] -%>" do
+      <%- if consumer[:kafka].present? -%>
+      kafka(
+        <%- consumer[:kafka].each do |k, v| -%>
+        "<%= k.to_s %>": <%= v.inspect %>,
+        <%- end -%>
+      )
+      <%- end -%>
+      <%- consumer.except(:topic, :kafka).each do |k, v| -%>
+      <%- if k.to_sym == :key_config -%>
+      <%= k.to_s %>(<%= v.inspect %>)
+      <%- else -%>
+      <%= k.to_s %> <%= v.inspect %>
+      <%- end -%>
+      <%- end -%>
+    end
+    <%- end -%>
+    <%- if consumer_configs.length > 1 -%>
+    end<%- end %>
+  <%- end -%>
+
+    # Uncomment this if you use Karafka with ActiveJob
+    # You need to define the topic per each queue name you use
+    # active_job_topic :default
+    # topic :example do
+    #   Uncomment this if you want Karafka to manage your topics configuration
+    #   Managing topics configuration via routing will allow you to ensure config consistency
+    #   across multiple environments
+    #
+    #   config(partitions: 2, 'cleanup.policy': 'compact')
+    #   consumer ExampleConsumer
+    # end
+  end
+end
+
+Deimos.setup_karafka
+
+# Karafka now features a Web UI!
+# Visit the setup documentation to get started and enhance your experience.
+#
+# https://karafka.io/docs/Web-UI-Getting-Started
+
+# Karafka::Web.setup do |config|
+#   # You may want to set it per ENV. This value was randomly generated.
+#   config.ui.sessions.secret = '<%= SecureRandom.hex %>'
+# end
+
+# Karafka::Web.enable!
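
This template is rendered by the new v2 generator (data/lib/generators/deimos/v2_generator.rb), which translates an existing v1 Deimos configuration into the Deimos.configure block and KarafkaApp routing shown above; by Rails generator naming conventions it would presumably be invoked as rails generate deimos:v2.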