deimos-temp-fork 0.0.1

Files changed (146)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +83 -0
  3. data/.gitignore +41 -0
  4. data/.gitmodules +0 -0
  5. data/.rspec +1 -0
  6. data/.rubocop.yml +333 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +349 -0
  10. data/CODE_OF_CONDUCT.md +77 -0
  11. data/Dockerfile +23 -0
  12. data/Gemfile +6 -0
  13. data/Gemfile.lock +286 -0
  14. data/Guardfile +22 -0
  15. data/LICENSE.md +195 -0
  16. data/README.md +1099 -0
  17. data/Rakefile +13 -0
  18. data/bin/deimos +4 -0
  19. data/deimos-ruby.gemspec +44 -0
  20. data/docker-compose.yml +71 -0
  21. data/docs/ARCHITECTURE.md +140 -0
  22. data/docs/CONFIGURATION.md +236 -0
  23. data/docs/DATABASE_BACKEND.md +147 -0
  24. data/docs/INTEGRATION_TESTS.md +52 -0
  25. data/docs/PULL_REQUEST_TEMPLATE.md +35 -0
  26. data/docs/UPGRADING.md +128 -0
  27. data/lib/deimos-temp-fork.rb +95 -0
  28. data/lib/deimos/active_record_consume/batch_consumption.rb +164 -0
  29. data/lib/deimos/active_record_consume/batch_slicer.rb +27 -0
  30. data/lib/deimos/active_record_consume/message_consumption.rb +79 -0
  31. data/lib/deimos/active_record_consume/schema_model_converter.rb +52 -0
  32. data/lib/deimos/active_record_consumer.rb +67 -0
  33. data/lib/deimos/active_record_producer.rb +87 -0
  34. data/lib/deimos/backends/base.rb +32 -0
  35. data/lib/deimos/backends/db.rb +41 -0
  36. data/lib/deimos/backends/kafka.rb +33 -0
  37. data/lib/deimos/backends/kafka_async.rb +33 -0
  38. data/lib/deimos/backends/test.rb +20 -0
  39. data/lib/deimos/batch_consumer.rb +7 -0
  40. data/lib/deimos/config/configuration.rb +381 -0
  41. data/lib/deimos/config/phobos_config.rb +137 -0
  42. data/lib/deimos/consume/batch_consumption.rb +150 -0
  43. data/lib/deimos/consume/message_consumption.rb +94 -0
  44. data/lib/deimos/consumer.rb +104 -0
  45. data/lib/deimos/instrumentation.rb +76 -0
  46. data/lib/deimos/kafka_message.rb +60 -0
  47. data/lib/deimos/kafka_source.rb +128 -0
  48. data/lib/deimos/kafka_topic_info.rb +102 -0
  49. data/lib/deimos/message.rb +79 -0
  50. data/lib/deimos/metrics/datadog.rb +47 -0
  51. data/lib/deimos/metrics/mock.rb +39 -0
  52. data/lib/deimos/metrics/provider.rb +36 -0
  53. data/lib/deimos/monkey_patches/phobos_cli.rb +35 -0
  54. data/lib/deimos/monkey_patches/phobos_producer.rb +51 -0
  55. data/lib/deimos/poll_info.rb +9 -0
  56. data/lib/deimos/producer.rb +224 -0
  57. data/lib/deimos/railtie.rb +8 -0
  58. data/lib/deimos/schema_backends/avro_base.rb +140 -0
  59. data/lib/deimos/schema_backends/avro_local.rb +30 -0
  60. data/lib/deimos/schema_backends/avro_schema_coercer.rb +119 -0
  61. data/lib/deimos/schema_backends/avro_schema_registry.rb +34 -0
  62. data/lib/deimos/schema_backends/avro_validation.rb +21 -0
  63. data/lib/deimos/schema_backends/base.rb +150 -0
  64. data/lib/deimos/schema_backends/mock.rb +42 -0
  65. data/lib/deimos/shared_config.rb +63 -0
  66. data/lib/deimos/test_helpers.rb +360 -0
  67. data/lib/deimos/tracing/datadog.rb +35 -0
  68. data/lib/deimos/tracing/mock.rb +40 -0
  69. data/lib/deimos/tracing/provider.rb +29 -0
  70. data/lib/deimos/utils/db_poller.rb +150 -0
  71. data/lib/deimos/utils/db_producer.rb +243 -0
  72. data/lib/deimos/utils/deadlock_retry.rb +68 -0
  73. data/lib/deimos/utils/inline_consumer.rb +150 -0
  74. data/lib/deimos/utils/lag_reporter.rb +175 -0
  75. data/lib/deimos/utils/schema_controller_mixin.rb +115 -0
  76. data/lib/deimos/version.rb +5 -0
  77. data/lib/generators/deimos/active_record/templates/migration.rb.tt +28 -0
  78. data/lib/generators/deimos/active_record/templates/model.rb.tt +5 -0
  79. data/lib/generators/deimos/active_record_generator.rb +79 -0
  80. data/lib/generators/deimos/db_backend/templates/migration +25 -0
  81. data/lib/generators/deimos/db_backend/templates/rails3_migration +31 -0
  82. data/lib/generators/deimos/db_backend_generator.rb +48 -0
  83. data/lib/generators/deimos/db_poller/templates/migration +11 -0
  84. data/lib/generators/deimos/db_poller/templates/rails3_migration +16 -0
  85. data/lib/generators/deimos/db_poller_generator.rb +48 -0
  86. data/lib/tasks/deimos.rake +34 -0
  87. data/spec/active_record_batch_consumer_spec.rb +481 -0
  88. data/spec/active_record_consume/batch_slicer_spec.rb +42 -0
  89. data/spec/active_record_consume/schema_model_converter_spec.rb +105 -0
  90. data/spec/active_record_consumer_spec.rb +154 -0
  91. data/spec/active_record_producer_spec.rb +85 -0
  92. data/spec/backends/base_spec.rb +10 -0
  93. data/spec/backends/db_spec.rb +54 -0
  94. data/spec/backends/kafka_async_spec.rb +11 -0
  95. data/spec/backends/kafka_spec.rb +11 -0
  96. data/spec/batch_consumer_spec.rb +256 -0
  97. data/spec/config/configuration_spec.rb +248 -0
  98. data/spec/consumer_spec.rb +209 -0
  99. data/spec/deimos_spec.rb +169 -0
  100. data/spec/generators/active_record_generator_spec.rb +56 -0
  101. data/spec/handlers/my_batch_consumer.rb +10 -0
  102. data/spec/handlers/my_consumer.rb +10 -0
  103. data/spec/kafka_listener_spec.rb +55 -0
  104. data/spec/kafka_source_spec.rb +381 -0
  105. data/spec/kafka_topic_info_spec.rb +111 -0
  106. data/spec/message_spec.rb +19 -0
  107. data/spec/phobos.bad_db.yml +73 -0
  108. data/spec/phobos.yml +77 -0
  109. data/spec/producer_spec.rb +498 -0
  110. data/spec/rake_spec.rb +19 -0
  111. data/spec/schema_backends/avro_base_shared.rb +199 -0
  112. data/spec/schema_backends/avro_local_spec.rb +32 -0
  113. data/spec/schema_backends/avro_schema_registry_spec.rb +32 -0
  114. data/spec/schema_backends/avro_validation_spec.rb +24 -0
  115. data/spec/schema_backends/base_spec.rb +33 -0
  116. data/spec/schemas/com/my-namespace/Generated.avsc +71 -0
  117. data/spec/schemas/com/my-namespace/MyNestedSchema.avsc +62 -0
  118. data/spec/schemas/com/my-namespace/MySchema-key.avsc +13 -0
  119. data/spec/schemas/com/my-namespace/MySchema.avsc +18 -0
  120. data/spec/schemas/com/my-namespace/MySchemaCompound-key.avsc +18 -0
  121. data/spec/schemas/com/my-namespace/MySchemaWithBooleans.avsc +18 -0
  122. data/spec/schemas/com/my-namespace/MySchemaWithDateTimes.avsc +33 -0
  123. data/spec/schemas/com/my-namespace/MySchemaWithId.avsc +28 -0
  124. data/spec/schemas/com/my-namespace/MySchemaWithUniqueId.avsc +32 -0
  125. data/spec/schemas/com/my-namespace/Wibble.avsc +43 -0
  126. data/spec/schemas/com/my-namespace/Widget.avsc +27 -0
  127. data/spec/schemas/com/my-namespace/WidgetTheSecond.avsc +27 -0
  128. data/spec/schemas/com/my-namespace/request/CreateTopic.avsc +11 -0
  129. data/spec/schemas/com/my-namespace/request/Index.avsc +11 -0
  130. data/spec/schemas/com/my-namespace/request/UpdateRequest.avsc +11 -0
  131. data/spec/schemas/com/my-namespace/response/CreateTopic.avsc +11 -0
  132. data/spec/schemas/com/my-namespace/response/Index.avsc +11 -0
  133. data/spec/schemas/com/my-namespace/response/UpdateResponse.avsc +11 -0
  134. data/spec/spec_helper.rb +267 -0
  135. data/spec/utils/db_poller_spec.rb +320 -0
  136. data/spec/utils/db_producer_spec.rb +514 -0
  137. data/spec/utils/deadlock_retry_spec.rb +74 -0
  138. data/spec/utils/inline_consumer_spec.rb +31 -0
  139. data/spec/utils/lag_reporter_spec.rb +76 -0
  140. data/spec/utils/platform_schema_validation_spec.rb +0 -0
  141. data/spec/utils/schema_controller_mixin_spec.rb +84 -0
  142. data/support/deimos-solo.png +0 -0
  143. data/support/deimos-with-name-next.png +0 -0
  144. data/support/deimos-with-name.png +0 -0
  145. data/support/flipp-logo.png +0 -0
  146. metadata +551 -0
data/lib/deimos/backends/base.rb
@@ -0,0 +1,32 @@
+ # frozen_string_literal: true
+
+ module Deimos
+   module Backends
+     # Abstract class for all publish backends.
+     class Base
+       class << self
+         # @param producer_class [Class < Deimos::Producer]
+         # @param messages [Array<Deimos::Message>]
+         def publish(producer_class:, messages:)
+           Deimos.config.logger.info(
+             message: 'Publishing messages',
+             topic: producer_class.topic,
+             payloads: messages.map do |message|
+               {
+                 payload: message.payload,
+                 key: message.key
+               }
+             end
+           )
+           execute(producer_class: producer_class, messages: messages)
+         end
+
+         # @param producer_class [Class < Deimos::Producer]
+         # @param messages [Array<Deimos::Message>]
+         def execute(producer_class:, messages:)
+           raise NotImplementedError
+         end
+       end
+     end
+   end
+ end
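The publish/execute split above means a custom backend only needs to override execute; publish already handles logging and delegation. Below is a minimal sketch, not part of this diff, of such a subclass; the StdoutBackend name and its body are illustrative.

module Deimos
  module Backends
    # Hypothetical backend that just prints each message (illustrative only).
    class StdoutBackend < Base
      def self.execute(producer_class:, messages:)
        messages.each do |message|
          puts "#{producer_class.topic}: #{message.payload.inspect}"
        end
      end
    end
  end
end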
data/lib/deimos/backends/db.rb
@@ -0,0 +1,41 @@
+ # frozen_string_literal: true
+
+ require 'deimos/kafka_message'
+
+ module Deimos
+   module Backends
+     # Backend which saves messages to the database instead of immediately
+     # sending them.
+     class Db < Base
+       class << self
+         # :nodoc:
+         def execute(producer_class:, messages:)
+           records = messages.map do |m|
+             message = Deimos::KafkaMessage.new(
+               message: m.encoded_payload ? m.encoded_payload.to_s.b : nil,
+               topic: m.topic,
+               partition_key: partition_key_for(m)
+             )
+             message.key = m.encoded_key.to_s.b unless producer_class.config[:no_keys]
+             message
+           end
+           Deimos::KafkaMessage.import(records)
+           Deimos.config.metrics&.increment(
+             'db_producer.insert',
+             tags: %W(topic:#{producer_class.topic}),
+             by: records.size
+           )
+         end
+
+         # @param message [Deimos::Message]
+         # @return [String] the partition key to use for this message
+         def partition_key_for(message)
+           return message.partition_key if message.partition_key.present?
+           return message.key unless message.key.is_a?(Hash)
+
+           message.key.to_yaml
+         end
+       end
+     end
+   end
+ end
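Enabling this backend is a configuration change. A hedged sketch, using fig_tree's block syntax as defined in the configuration.rb hunk below; note that validate_db_backend there refuses the :db backend unless the activerecord-import gem is available and required_acks is :all.

Deimos.configure do
  producers.backend :db            # route publishes through Deimos::KafkaMessage
  producers.required_acks :all     # enforced by validate_db_backend
end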
data/lib/deimos/backends/kafka.rb
@@ -0,0 +1,33 @@
+ # frozen_string_literal: true
+
+ module Deimos
+   module Backends
+     # Default backend to produce to Kafka.
+     class Kafka < Base
+       include Phobos::Producer
+
+       # Shut down the producer if necessary.
+       def self.shutdown_producer
+         producer.sync_producer_shutdown if producer.respond_to?(:sync_producer_shutdown)
+         producer.kafka_client&.close
+       end
+
+       # :nodoc:
+       def self.execute(producer_class:, messages:)
+         Deimos.instrument(
+           'produce',
+           producer: producer_class,
+           topic: producer_class.topic,
+           payloads: messages.map(&:payload)
+         ) do
+           producer.publish_list(messages.map(&:encoded_hash))
+           Deimos.config.metrics&.increment(
+             'publish',
+             tags: %W(status:success topic:#{producer_class.topic}),
+             by: messages.size
+           )
+         end
+       end
+     end
+   end
+ end
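The persistent_connections producer setting in the configuration.rb hunk below notes that a kept-open sync producer must be shut down manually before the process exits. A hedged sketch of one way to do that, using the shutdown_producer method from this hunk:

# Close the sync producer and its Kafka client when the process exits.
at_exit { Deimos::Backends::Kafka.shutdown_producer }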
data/lib/deimos/backends/kafka_async.rb
@@ -0,0 +1,33 @@
+ # frozen_string_literal: true
+
+ module Deimos
+   module Backends
+     # Backend which produces to Kafka via an async producer.
+     class KafkaAsync < Base
+       include Phobos::Producer
+
+       # Shut down the producer cleanly.
+       def self.shutdown_producer
+         producer.async_producer_shutdown
+         producer.kafka_client&.close
+       end
+
+       # :nodoc:
+       def self.execute(producer_class:, messages:)
+         Deimos.instrument(
+           'produce',
+           producer: producer_class,
+           topic: producer_class.topic,
+           payloads: messages.map(&:payload)
+         ) do
+           producer.async_publish_list(messages.map(&:encoded_hash))
+           Deimos.config.metrics&.increment(
+             'publish',
+             tags: %W(status:success topic:#{producer_class.topic}),
+             by: messages.size
+           )
+         end
+       end
+     end
+   end
+ end
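The backend setting's comment in the configuration.rb hunk below suggests async in user-facing apps and sync in consumers or delayed workers. A hedged sketch of one way to switch per process; the environment variable name is illustrative:

Deimos.configure do
  # :kafka_async is the gem's default; a worker could export DEIMOS_PRODUCER_BACKEND=kafka.
  producers.backend ENV.fetch('DEIMOS_PRODUCER_BACKEND', 'kafka_async').to_sym
end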
data/lib/deimos/backends/test.rb
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module Deimos
+   module Backends
+     # Backend which saves messages to an in-memory hash.
+     class Test < Deimos::Backends::Base
+       class << self
+         # @return [Array<Hash>]
+         def sent_messages
+           @sent_messages ||= []
+         end
+       end
+
+       # @override
+       def self.execute(producer_class:, messages:)
+         self.sent_messages.concat(messages.map(&:to_h))
+       end
+     end
+   end
+ end
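A hedged RSpec sketch of asserting against this backend. MyProducer, its payload, and the :test backend symbol are illustrative here; the gem's own test_helpers.rb (file 66 in the list above, not shown in this diff) is the supported way to wire this up.

RSpec.describe MyProducer do
  before do
    # Route publishes to the in-memory backend and start from a clean slate.
    Deimos.configure { producers.backend :test }
    Deimos::Backends::Test.sent_messages.clear
  end

  it 'records published messages in memory' do
    described_class.publish(test_id: 'abc', some_int: 5)
    expect(Deimos::Backends::Test.sent_messages.size).to eq(1)
  end
end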
data/lib/deimos/batch_consumer.rb
@@ -0,0 +1,7 @@
+ # frozen_string_literal: true
+
+ module Deimos
+   # @deprecated Use Deimos::Consumer with `delivery: inline_batch` configured instead
+   class BatchConsumer < Consumer
+   end
+ end
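A hedged sketch of the replacement the deprecation note points to: a plain Deimos::Consumer registered with inline_batch delivery via the consumer setting_object from the configuration.rb hunk below. The class, topic, schema, and key_config values are all illustrative.

Deimos.configure do
  consumer do
    class_name 'MyBatchConsumer'
    topic 'my-topic'
    schema 'MySchema'
    namespace 'com.my-namespace'
    key_config field: 'test_id'
    delivery :inline_batch
  end
end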
data/lib/deimos/config/configuration.rb
@@ -0,0 +1,381 @@
+ # frozen_string_literal: true
+
+ require 'fig_tree'
+ require_relative 'phobos_config'
+ require_relative '../metrics/mock'
+ require_relative '../tracing/mock'
+ require 'active_support/core_ext/numeric'
+
+ # :nodoc:
+ module Deimos
+   include FigTree
+
+   # :nodoc:
+   class FigTree::ConfigStruct
+     include Deimos::PhobosConfig
+   end
+
+   # :nodoc:
+   after_configure do
+     Phobos.configure(self.config.phobos_config)
+     self.config.producer_objects.each do |producer|
+       configure_producer_or_consumer(producer)
+     end
+     self.config.consumer_objects.each do |consumer|
+       configure_producer_or_consumer(consumer)
+     end
+     validate_consumers
+     validate_db_backend if self.config.producers.backend == :db
+   end
+
+   # Ensure everything is set up correctly for the DB backend.
+   def self.validate_db_backend
+     begin
+       require 'activerecord-import'
+     rescue LoadError
+       raise 'Cannot set producers.backend to :db without activerecord-import! Please add it to your Gemfile.'
+     end
+     if Deimos.config.producers.required_acks != :all
+       raise 'Cannot set producers.backend to :db unless producers.required_acks is set to ":all"!'
+     end
+   end
+
+   # Validate that consumers are configured correctly, including their
+   # delivery mode.
+   def self.validate_consumers
+     Phobos.config.listeners.each do |listener|
+       handler_class = listener.handler.constantize
+       delivery = listener.delivery
+
+       next unless handler_class < Deimos::Consumer
+
+       # Validate that each consumer implements the correct method for its type
+       if delivery == 'inline_batch'
+         if handler_class.instance_method(:consume_batch).owner == Deimos::Consume::BatchConsumption
+           raise "BatchConsumer #{listener.handler} does not implement `consume_batch`"
+         end
+       elsif handler_class.instance_method(:consume).owner == Deimos::Consume::MessageConsumption
+         raise "Non-batch Consumer #{listener.handler} does not implement `consume`"
+       end
+     end
+   end
+
+   # @param kafka_config [FigTree::ConfigStruct]
+   def self.configure_producer_or_consumer(kafka_config)
+     klass = kafka_config.class_name.constantize
+     klass.class_eval do
+       topic(kafka_config.topic) if kafka_config.topic.present? && klass.respond_to?(:topic)
+       schema(kafka_config.schema) if kafka_config.schema.present?
+       namespace(kafka_config.namespace) if kafka_config.namespace.present?
+       key_config(**kafka_config.key_config) if kafka_config.key_config.present?
+     end
+   end
+
+   define_settings do
+
+     # @return [Logger]
+     setting :logger, Logger.new(STDOUT)
+
+     # @return [Logger]
+     setting :phobos_logger, default_proc: proc { Deimos.config.logger.clone }
+
+     setting :kafka do
+
+       # @return [Logger]
+       setting :logger, default_proc: proc { Deimos.config.logger.clone }
+
+       # URL of the seed broker.
+       # @return [Array<String>]
+       setting :seed_brokers, ['localhost:9092']
+
+       # Identifier for this application.
+       # @return [String]
+       setting :client_id, 'phobos'
+
+       # The socket timeout for connecting to the broker, in seconds.
+       # @return [Integer]
+       setting :connect_timeout, 15
+
+       # The socket timeout for reading and writing to the broker, in seconds.
+       # @return [Integer]
+       setting :socket_timeout, 15
+
+       setting :ssl do
+         # Whether SSL is enabled on the brokers.
+         # @return [Boolean]
+         setting :enabled
+
+         # a PEM encoded CA cert, a file path to the cert, or an Array of certs,
+         # to use with an SSL connection.
+         # @return [String|Array<String>]
+         setting :ca_cert
+
+         # a PEM encoded client cert to use with an SSL connection, or a file path
+         # to the cert.
+         # @return [String]
+         setting :client_cert
+
+         # a PEM encoded client cert key to use with an SSL connection.
+         # @return [String]
+         setting :client_cert_key
+
+         # Verify certificate hostname if supported (ruby >= 2.4.0)
+         setting :verify_hostname, true
+       end
+     end
+
+     setting :consumers do
+
+       # Number of seconds after which, if a client hasn't contacted the Kafka cluster,
+       # it will be kicked out of the group.
+       # @return [Integer]
+       setting :session_timeout, 300
+
+       # Interval between offset commits, in seconds.
+       # @return [Integer]
+       setting :offset_commit_interval, 10
+
+       # Number of messages that can be processed before their offsets are committed.
+       # If zero, offset commits are not triggered by message processing
+       # @return [Integer]
+       setting :offset_commit_threshold, 0
+
+       # Interval between heartbeats; must be less than the session window.
+       # @return [Integer]
+       setting :heartbeat_interval, 10
+
+       # Minimum and maximum number of milliseconds to back off after a consumer
+       # error.
+       setting :backoff, (1000..60_000)
+
+       # By default, consumer errors will be consumed and logged to
+       # the metrics provider.
+       # Set this to true to force the error to be raised.
+       # @return [Boolean]
+       setting :reraise_errors
+
+       # @return [Boolean]
+       setting :report_lag
+
+       # Block taking an exception, payload and metadata and returning
+       # true if this should be considered a fatal error and false otherwise.
+       # Not needed if reraise_errors is set to true.
+       # @return [Block]
+       setting(:fatal_error, proc { false })
+     end
+
+     setting :producers do
+       # Number of seconds a broker can wait for replicas to acknowledge
+       # a write before responding with a timeout.
+       # @return [Integer]
+       setting :ack_timeout, 5
+
+       # Number of replicas that must acknowledge a write, or `:all`
+       # if all in-sync replicas must acknowledge.
+       # @return [Integer|Symbol]
+       setting :required_acks, 1
+
+       # Number of retries that should be attempted before giving up sending
+       # messages to the cluster. Does not include the original attempt.
+       # @return [Integer]
+       setting :max_retries, 2
+
+       # Number of seconds to wait between retries.
+       # @return [Integer]
+       setting :retry_backoff, 1
+
+       # Number of messages allowed in the buffer before new writes will
+       # raise {BufferOverflow} exceptions.
+       # @return [Integer]
+       setting :max_buffer_size, 10_000
+
+       # Maximum size of the buffer in bytes. Attempting to produce messages
+       # when the buffer reaches this size will result in {BufferOverflow} being raised.
+       # @return [Integer]
+       setting :max_buffer_bytesize, 10_000_000
+
+       # Name of the compression codec to use, or nil if no compression should be performed.
+       # Valid codecs: `:snappy` and `:gzip`
+       # @return [Symbol]
+       setting :compression_codec
+
+       # Number of messages that needs to be in a message set before it should be compressed.
+       # Note that message sets are per-partition rather than per-topic or per-producer.
+       # @return [Integer]
+       setting :compression_threshold, 1
+
+       # Maximum number of messages allowed in the queue. Only used for async_producer.
+       # @return [Integer]
+       setting :max_queue_size, 10_000
+
+       # If greater than zero, the number of buffered messages that will automatically
+       # trigger a delivery. Only used for async_producer.
+       # @return [Integer]
+       setting :delivery_threshold, 0
+
+       # if greater than zero, the number of seconds between automatic message
+       # deliveries. Only used for async_producer.
+       # @return [Integer]
+       setting :delivery_interval, 0
+
+       # Set this to true to keep the producer connection between publish calls.
+       # This can speed up subsequent messages by around 30%, but it does mean
+       # that you need to manually call sync_producer_shutdown before exiting,
+       # similar to async_producer_shutdown.
+       # @return [Boolean]
+       setting :persistent_connections, false
+
+       # Default namespace for all producers. Can remain nil. Individual
+       # producers can override.
+       # @return [String]
+       setting :schema_namespace
+
+       # Add a prefix to all topic names. This can be useful if you're using
+       # the same Kafka broker for different environments that are producing
+       # the same topics.
+       # @return [String]
+       setting :topic_prefix
+
+       # Disable all actual message producing. Generally more useful to use
+       # the `disable_producers` method instead.
+       # @return [Boolean]
+       setting :disabled
+
+       # Currently can be set to :db, :kafka, or :kafka_async. If using Kafka
+       # directly, a good pattern is to set to async in your user-facing app, and
+       # sync in your consumers or delayed workers.
+       # @return [Symbol]
+       setting :backend, :kafka_async
+     end
+
+     setting :schema do
+
+       # Backend class to use when encoding/decoding messages.
+       setting :backend, :mock
+
+       # URL of the Confluent schema registry.
+       # @return [String]
+       setting :registry_url, 'http://localhost:8081'
+
+       # Local path to look for schemas in.
+       # @return [String]
+       setting :path
+     end
+
+     # The configured metrics provider.
+     # @return [Metrics::Provider]
+     setting :metrics, Metrics::Mock.new
+
+     # The configured tracing / APM provider.
+     # @return [Tracing::Provider]
+     setting :tracer, Tracing::Mock.new
+
+     setting :db_producer do
+
+       # @return [Logger]
+       setting :logger, default_proc: proc { Deimos.config.logger }
+
+       # @return [Symbol|Array<String>] A list of topics to log all messages, or
+       # :all to log all topics.
+       setting :log_topics, []
+
+       # @return [Symbol|Array<String>] A list of topics to compact messages for
+       # before sending, or :all to compact all keyed messages.
+       setting :compact_topics, []
+
+     end
+
+     setting_object :producer do
+       # Producer class.
+       # @return [String]
+       setting :class_name
+       # Topic to produce to.
+       # @return [String]
+       setting :topic
+       # Schema of the data in the topic.
+       # @return [String]
+       setting :schema
+       # Optional namespace to access the schema.
+       # @return [String]
+       setting :namespace
+       # Key configuration (see docs).
+       # @return [Hash]
+       setting :key_config
+     end
+
+     setting_object :consumer do
+       # Consumer class.
+       # @return [String]
+       setting :class_name
+       # Topic to read from.
+       # @return [String]
+       setting :topic
+       # Schema of the data in the topic.
+       # @return [String]
+       setting :schema
+       # Optional namespace to access the schema.
+       # @return [String]
+       setting :namespace
+       # Key configuration (see docs).
+       # @return [Hash]
+       setting :key_config
+       # Set to true to ignore the consumer in the Phobos config and not actually start up a
+       # listener.
+       # @return [Boolean]
+       setting :disabled, false
+
+       # These are the phobos "listener" configs. See CONFIGURATION.md for more
+       # info.
+       setting :group_id
+       setting :max_concurrency, 1
+       setting :start_from_beginning, true
+       setting :max_bytes_per_partition, 500.kilobytes
+       setting :min_bytes, 1
+       setting :max_wait_time, 5
+       setting :force_encoding
+       setting :delivery, :batch
+       setting :backoff
+       setting :session_timeout, 300
+       setting :offset_commit_interval, 10
+       setting :offset_commit_threshold, 0
+       setting :offset_retention_time
+       setting :heartbeat_interval, 10
+     end
+
+     setting_object :db_poller do
+       # Producer class to use for the poller.
+       setting :producer_class
+       # How often to run the poller, in seconds. If the poll takes longer than this
+       # time, it will run again immediately and the timeout
+       # will be pushed to the next e.g. 1 minute.
+       setting :run_every, 60
+       # Column to use to find updates. Must have an index on it.
+       setting :timestamp_column, :updated_at
+       # Amount of time, in seconds, to wait before catching updates, to allow transactions
+       # to complete but still pick up the right records.
+       setting :delay_time, 2
+       # If true, dump the full table rather than incremental changes. Should
+       # only be used for very small tables.
+       setting :full_table, false
+       # If false, start from the current time instead of the beginning of time
+       # if this is the first time running the poller.
+       setting :start_from_beginning, true
+     end
+
+     deprecate 'kafka_logger', 'kafka.logger'
+     deprecate 'reraise_consumer_errors', 'consumers.reraise_errors'
+     deprecate 'schema_registry_url', 'schema.registry_url'
+     deprecate 'seed_broker', 'kafka.seed_brokers'
+     deprecate 'schema_path', 'schema.path'
+     deprecate 'producer_schema_namespace', 'producers.schema_namespace'
+     deprecate 'producer_topic_prefix', 'producers.topic_prefix'
+     deprecate 'disable_producers', 'producers.disabled'
+     deprecate 'ssl_enabled', 'kafka.ssl.enabled'
+     deprecate 'ssl_ca_cert', 'kafka.ssl.ca_cert'
+     deprecate 'ssl_client_cert', 'kafka.ssl.client_cert'
+     deprecate 'ssl_client_cert_key', 'kafka.ssl.client_cert_key'
+     deprecate 'publish_backend', 'producers.backend'
+     deprecate 'report_lag', 'consumers.report_lag'
+
+   end
+ end
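Pulling the settings above together, a hedged end-to-end sketch of an application configuration. Every value is illustrative; only the setting names come from the define_settings block in this file.

Deimos.configure do
  logger Logger.new(STDOUT)

  kafka.seed_brokers ['broker1:9092', 'broker2:9092']
  kafka.client_id 'my-app'
  kafka.ssl.enabled true
  kafka.ssl.ca_cert File.read('ca.pem')

  consumers.reraise_errors true
  producers.backend :kafka_async
  producers.topic_prefix 'staging.'

  schema.backend :avro_schema_registry
  schema.registry_url 'http://schema-registry:8081'
  schema.path 'app/schemas'
end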