deimos-ruby 1.6.3 → 1.8.1.pre.beta1
- checksums.yaml +4 -4
- data/.circleci/config.yml +9 -0
- data/.rubocop.yml +22 -16
- data/.ruby-version +1 -1
- data/CHANGELOG.md +42 -0
- data/Gemfile.lock +125 -98
- data/README.md +164 -16
- data/Rakefile +1 -1
- data/deimos-ruby.gemspec +4 -3
- data/docs/ARCHITECTURE.md +144 -0
- data/docs/CONFIGURATION.md +27 -0
- data/lib/deimos.rb +8 -7
- data/lib/deimos/active_record_consume/batch_consumption.rb +159 -0
- data/lib/deimos/active_record_consume/batch_slicer.rb +27 -0
- data/lib/deimos/active_record_consume/message_consumption.rb +58 -0
- data/lib/deimos/active_record_consume/schema_model_converter.rb +52 -0
- data/lib/deimos/active_record_consumer.rb +33 -75
- data/lib/deimos/active_record_producer.rb +23 -0
- data/lib/deimos/batch_consumer.rb +2 -140
- data/lib/deimos/config/configuration.rb +28 -10
- data/lib/deimos/consume/batch_consumption.rb +150 -0
- data/lib/deimos/consume/message_consumption.rb +94 -0
- data/lib/deimos/consumer.rb +79 -70
- data/lib/deimos/kafka_message.rb +1 -1
- data/lib/deimos/kafka_topic_info.rb +22 -3
- data/lib/deimos/message.rb +6 -1
- data/lib/deimos/metrics/provider.rb +0 -2
- data/lib/deimos/poll_info.rb +9 -0
- data/lib/deimos/schema_backends/avro_base.rb +28 -1
- data/lib/deimos/schema_backends/base.rb +15 -2
- data/lib/deimos/tracing/provider.rb +0 -2
- data/lib/deimos/utils/db_poller.rb +149 -0
- data/lib/deimos/utils/db_producer.rb +59 -16
- data/lib/deimos/utils/deadlock_retry.rb +68 -0
- data/lib/deimos/utils/lag_reporter.rb +19 -26
- data/lib/deimos/version.rb +1 -1
- data/lib/generators/deimos/active_record/templates/migration.rb.tt +28 -0
- data/lib/generators/deimos/active_record/templates/model.rb.tt +5 -0
- data/lib/generators/deimos/active_record_generator.rb +79 -0
- data/lib/generators/deimos/db_backend/templates/migration +1 -0
- data/lib/generators/deimos/db_backend/templates/rails3_migration +1 -0
- data/lib/generators/deimos/db_poller/templates/migration +11 -0
- data/lib/generators/deimos/db_poller/templates/rails3_migration +16 -0
- data/lib/generators/deimos/db_poller_generator.rb +48 -0
- data/lib/tasks/deimos.rake +7 -0
- data/spec/active_record_batch_consumer_spec.rb +481 -0
- data/spec/active_record_consume/batch_slicer_spec.rb +42 -0
- data/spec/active_record_consume/schema_model_converter_spec.rb +105 -0
- data/spec/active_record_consumer_spec.rb +3 -11
- data/spec/active_record_producer_spec.rb +66 -88
- data/spec/batch_consumer_spec.rb +24 -7
- data/spec/config/configuration_spec.rb +4 -0
- data/spec/consumer_spec.rb +8 -8
- data/spec/deimos_spec.rb +57 -49
- data/spec/generators/active_record_generator_spec.rb +56 -0
- data/spec/handlers/my_batch_consumer.rb +6 -1
- data/spec/handlers/my_consumer.rb +6 -1
- data/spec/kafka_topic_info_spec.rb +39 -16
- data/spec/message_spec.rb +19 -0
- data/spec/producer_spec.rb +3 -3
- data/spec/rake_spec.rb +1 -1
- data/spec/schemas/com/my-namespace/Generated.avsc +71 -0
- data/spec/schemas/com/my-namespace/MySchemaCompound-key.avsc +18 -0
- data/spec/schemas/com/my-namespace/Wibble.avsc +43 -0
- data/spec/spec_helper.rb +62 -6
- data/spec/utils/db_poller_spec.rb +320 -0
- data/spec/utils/db_producer_spec.rb +84 -10
- data/spec/utils/deadlock_retry_spec.rb +74 -0
- data/spec/utils/lag_reporter_spec.rb +29 -22
- metadata +66 -30
- data/lib/deimos/base_consumer.rb +0 -104
- data/lib/deimos/utils/executor.rb +0 -124
- data/lib/deimos/utils/platform_schema_validation.rb +0 -0
- data/lib/deimos/utils/signal_handler.rb +0 -68
- data/spec/utils/executor_spec.rb +0 -53
- data/spec/utils/signal_handler_spec.rb +0 -16
data/lib/deimos/utils/db_producer.rb CHANGED
@@ -2,12 +2,15 @@
 
 module Deimos
   module Utils
-    # Class which continually polls the database and sends Kafka messages.
+    # Class which continually polls the kafka_messages table
+    # in the database and sends Kafka messages.
     class DbProducer
       include Phobos::Producer
       attr_accessor :id, :current_topic
 
       BATCH_SIZE = 1000
+      DELETE_BATCH_SIZE = 10
+      MAX_DELETE_ATTEMPTS = 3
 
       # @param logger [Logger]
       def initialize(logger=Logger.new(STDOUT))
@@ -47,6 +50,7 @@ module Deimos
         topics = retrieve_topics
         @logger.info("Found topics: #{topics}")
         topics.each(&method(:process_topic))
+        KafkaTopicInfo.ping_empty_topics(topics)
         sleep(0.5)
       end
 
@@ -86,13 +90,13 @@ module Deimos
         begin
           produce_messages(compacted_messages.map(&:phobos_message))
         rescue Kafka::BufferOverflow, Kafka::MessageSizeTooLarge, Kafka::RecordListTooLarge
-          Deimos::KafkaMessage.where(id: messages.map(&:id)).delete_all
+          delete_messages(messages)
           @logger.error('Message batch too large, deleting...')
           @logger.error(Deimos::KafkaMessage.decoded(messages))
           raise
         end
       end
-      Deimos::KafkaMessage.where(id: messages.map(&:id)).delete_all
+      delete_messages(messages)
       Deimos.config.metrics&.increment(
         'db_producer.process',
         tags: %W(topic:#{@current_topic}),
@@ -105,6 +109,27 @@ module Deimos
         true
       end
 
+      # @param messages [Array<Deimos::KafkaMessage>]
+      def delete_messages(messages)
+        attempts = 1
+        begin
+          messages.in_groups_of(DELETE_BATCH_SIZE, false).each do |batch|
+            Deimos::KafkaMessage.where(topic: batch.first.topic,
+                                       id: batch.map(&:id)).
+              delete_all
+          end
+        rescue StandardError => e
+          if (e.message =~ /Lock wait/i || e.message =~ /Lost connection/i) &&
+             attempts <= MAX_DELETE_ATTEMPTS
+            attempts += 1
+            ActiveRecord::Base.connection.verify!
+            sleep(1)
+            retry
+          end
+          raise
+        end
+      end
+
       # @return [Array<Deimos::KafkaMessage>]
       def retrieve_messages
         KafkaMessage.where(topic: @current_topic).order(:id).limit(BATCH_SIZE)
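For context on the batching above: ActiveSupport's `in_groups_of` with `false` as its second argument yields fixed-size slices without nil padding. A minimal sketch of the slicing (numbers are illustrative, not from the gem):

    require 'active_support/core_ext/array/grouping'

    messages = (1..25).to_a
    # With DELETE_BATCH_SIZE = 10, 25 pending messages become three
    # DELETE statements covering 10, 10, and 5 rows.
    messages.in_groups_of(10, false).map(&:size)
    # => [10, 10, 5]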
@@ -125,15 +150,33 @@ module Deimos
       metrics = Deimos.config.metrics
       return unless metrics
 
+      topics = KafkaTopicInfo.select(%w(topic last_processed_at))
       messages = Deimos::KafkaMessage.
         select('count(*) as num_messages, min(created_at) as earliest, topic').
-        group(:topic)
-      if messages.none?
-        metrics.gauge('pending_db_messages_max_wait', 0)
-      end
-      messages.each do |record|
-        time_diff = Time.zone.now - record.earliest
-        metrics.gauge('pending_db_messages_max_wait', time_diff,
+        group(:topic).
+        index_by(&:topic)
+      topics.each do |record|
+        message_record = messages[record.topic]
+        # We want to record the last time we saw any activity, meaning either
+        # the oldest message, or the last time we processed, whichever comes
+        # last.
+        if message_record
+          record_earliest = record.earliest
+          # SQLite gives a string here
+          if record_earliest.is_a?(String)
+            record_earliest = Time.zone.parse(record_earliest)
+          end
+
+          earliest = [record.last_processed_at, record_earliest].max
+          time_diff = Time.zone.now - earliest
+          metrics.gauge('pending_db_messages_max_wait', time_diff,
+                        tags: ["topic:#{record.topic}"])
+        else
+          # no messages waiting
+          metrics.gauge('pending_db_messages_max_wait', 0,
+                        tags: ["topic:#{record.topic}"])
+        end
+        metrics.gauge('pending_db_messages_count', message_record&.num_messages || 0,
                       tags: ["topic:#{record.topic}"])
       end
     end
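To make the max-wait logic concrete, a small sketch with illustrative timestamps (assuming an ActiveSupport `Time.zone` is configured): the gauge reports the time since the last sign of activity, which is the later of the oldest pending message and the topic's `last_processed_at`.

    last_processed_at = Time.zone.now - 30   # producer touched the topic 30s ago
    earliest_message  = Time.zone.now - 120  # oldest unsent message is 120s old

    # Activity was seen more recently than the oldest message, so the
    # reported wait is ~30s rather than ~120s.
    earliest = [last_processed_at, earliest_message].max
    Time.zone.now - earliest # => ~30.0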
@@ -169,11 +212,11 @@ module Deimos
         end
 
         @logger.error("Got error #{e.class.name} when publishing #{batch.size} in groups of #{batch_size}, retrying...")
-        if batch_size < 10
-          batch_size = 1
-        else
-          batch_size = (batch_size / 10).to_i
-        end
+        batch_size = if batch_size < 10
+                       1
+                     else
+                       (batch_size / 10).to_i
+                     end
         shutdown_producer
         retry
       end
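The rewritten assignment keeps the old shrinking behavior: on repeated publish errors the batch size drops by a factor of ten until it bottoms out at a single message. A quick sketch of the progression from the default `BATCH_SIZE`:

    batch_size = 1000
    4.times do
      batch_size = if batch_size < 10
                     1
                   else
                     (batch_size / 10).to_i
                   end
      puts batch_size
    end
    # prints 100, 10, 1, 1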
@@ -182,7 +225,7 @@ module Deimos
       # @param batch [Array<Deimos::KafkaMessage>]
       # @return [Array<Deimos::KafkaMessage>]
       def compact_messages(batch)
-        return batch unless batch.first&.key
+        return batch if batch.first&.key.blank?
 
         topic = batch.first.topic
         return batch if config.compact_topics != :all &&
data/lib/deimos/utils/deadlock_retry.rb ADDED
@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+module Deimos
+  module Utils
+    # Utility class to retry a given block if a deadlock is encountered.
+    # Supports Postgres and MySQL deadlocks and lock wait timeouts.
+    class DeadlockRetry
+      class << self
+        # Maximum number of times to retry the block after encountering a deadlock
+        RETRY_COUNT = 2
+
+        # Need to match on error messages to support older Rails versions
+        DEADLOCK_MESSAGES = [
+          # MySQL
+          'Deadlock found when trying to get lock',
+          'Lock wait timeout exceeded',
+
+          # Postgres
+          'deadlock detected'
+        ].freeze
+
+        # Retry the given block when encountering a deadlock. For any other
+        # exceptions, they are reraised. This is used to handle cases where
+        # the database may be busy but the transaction would succeed if
+        # retried later. Note that your block should be idempotent and it will
+        # be wrapped in a transaction.
+        # Sleeps for a random number of seconds to prevent multiple transactions
+        # from retrying at the same time.
+        # @param tags [Array] Tags to attach when logging and reporting metrics.
+        # @yield Yields to the block that may deadlock.
+        def wrap(tags=[])
+          count = RETRY_COUNT
+
+          begin
+            ActiveRecord::Base.transaction do
+              yield
+            end
+          rescue ActiveRecord::StatementInvalid => e
+            # Reraise if not a known deadlock
+            raise if DEADLOCK_MESSAGES.none? { |m| e.message.include?(m) }
+
+            # Reraise if all retries exhausted
+            raise if count <= 0
+
+            Deimos.config.logger.warn(
+              message: 'Deadlock encountered when trying to execute query. '\
+                       "Retrying. #{count} attempt(s) remaining",
+              tags: tags
+            )
+
+            Deimos.config.metrics&.increment(
+              'deadlock',
+              tags: tags
+            )
+
+            count -= 1
+
+            # Sleep for a random amount so that if there are multiple
+            # transactions deadlocking, they don't all retry at the same time
+            sleep(Random.rand(5.0) + 0.5)
+
+            retry
+          end
+        end
+      end
+    end
+  end
+end
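A minimal usage sketch for the new class (the model and tag are hypothetical; the block runs inside a transaction and is retried up to `RETRY_COUNT` times on the deadlock messages listed above):

    Deimos::Utils::DeadlockRetry.wrap(%w(topic:my-topic)) do
      # Hypothetical idempotent write; retried on MySQL/Postgres deadlocks.
      Widget.where(id: ids).update_all(processed: true)
    end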
data/lib/deimos/utils/lag_reporter.rb CHANGED
@@ -29,30 +29,21 @@ module Deimos
           self.topics[topic.to_s].report_lag(partition)
         end
 
-        # @param topic [String]
-        # @param partition [Integer]
-        # @param lag [Integer]
-        def assign_lag(topic, partition, lag)
-          self.topics[topic.to_s] ||= Topic.new(topic, self)
-          self.topics[topic.to_s].assign_lag(partition, lag)
-        end
-
-        # Figure out the current lag by asking Kafka based on the current offset.
         # @param topic [String]
         # @param partition [Integer]
         # @param offset [Integer]
-        def compute_lag(topic, partition, offset)
+        def assign_current_offset(topic, partition, offset)
           self.topics[topic.to_s] ||= Topic.new(topic, self)
-          self.topics[topic.to_s].compute_lag(partition, offset)
+          self.topics[topic.to_s].assign_current_offset(partition, offset)
         end
       end
 
-      # Topic which has a hash of partition => last known lag
+      # Topic which has a hash of partition => last known current offsets
       class Topic
         # @return [String]
         attr_accessor :topic_name
         # @return [Hash<Integer, Integer>]
-        attr_accessor :partition_offset_lags
+        attr_accessor :partition_current_offsets
         # @return [ConsumerGroup]
        attr_accessor :consumer_group
 
@@ -61,35 +52,33 @@ module Deimos
         def initialize(topic_name, group)
           self.topic_name = topic_name
           self.consumer_group = group
-          self.partition_offset_lags = {}
+          self.partition_current_offsets = {}
         end
 
         # @param partition [Integer]
-        # @param lag [Integer]
-        def assign_lag(partition, lag)
-          self.partition_offset_lags[partition.to_i] = lag
+        def assign_current_offset(partition, offset)
+          self.partition_current_offsets[partition.to_i] = offset
         end
 
         # @param partition [Integer]
-        # @param offset [Integer]
         def compute_lag(partition, offset)
-          return if self.partition_offset_lags[partition.to_i]
-
           begin
             client = Phobos.create_kafka_client
             last_offset = client.last_offset_for(self.topic_name, partition)
-            assign_lag(partition, [last_offset - offset, 0].max)
+            lag = last_offset - offset
           rescue StandardError # don't do anything, just wait
             Deimos.config.logger.
               debug("Error computing lag for #{self.topic_name}, will retry")
           end
+          lag || 0
         end
 
         # @param partition [Integer]
         def report_lag(partition)
-          lag = self.partition_offset_lags[partition.to_i]
-          return unless lag
+          current_offset = self.partition_current_offsets[partition.to_i]
+          return unless current_offset
 
+          lag = compute_lag(partition, current_offset)
           group = self.consumer_group.id
           Deimos.config.logger.
             debug("Sending lag: #{group}/#{partition}: #{lag}")
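The reworked `compute_lag` boils down to simple offset arithmetic, queried from the broker at report time instead of cached per event. With illustrative numbers:

    last_offset_on_broker = 1_050 # from client.last_offset_for(topic, partition)
    current_offset        = 1_000 # last offset this consumer processed
    lag = last_offset_on_broker - current_offset # => 50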
@@ -109,16 +98,20 @@ module Deimos
         @groups = {}
       end
 
+      # offset_lag = event.payload.fetch(:offset_lag)
+      # group_id = event.payload.fetch(:group_id)
+      # topic = event.payload.fetch(:topic)
+      # partition = event.payload.fetch(:partition)
       # @param payload [Hash]
       def message_processed(payload)
-        lag = payload[:offset_lag]
+        offset = payload[:offset] || payload[:last_offset]
         topic = payload[:topic]
         group = payload[:group_id]
         partition = payload[:partition]
 
         synchronize do
           @groups[group.to_s] ||= ConsumerGroup.new(group)
-          @groups[group.to_s].assign_lag(topic, partition, lag)
+          @groups[group.to_s].assign_current_offset(topic, partition, offset)
         end
       end
 
@@ -131,7 +124,7 @@ module Deimos
 
         synchronize do
           @groups[group.to_s] ||= ConsumerGroup.new(group)
-          @groups[group.to_s].compute_lag(topic, partition, offset)
+          @groups[group.to_s].assign_current_offset(topic, partition, offset)
         end
       end
 
data/lib/deimos/version.rb CHANGED
data/lib/generators/deimos/active_record/templates/migration.rb.tt ADDED
@@ -0,0 +1,28 @@
+class <%= migration_class_name %> < ActiveRecord::Migration<%= migration_version %>
+  def up
+    if table_exists?(:<%= table_name %>)
+      warn "<%= table_name %> already exists, exiting"
+      return
+    end
+    create_table :<%= table_name %> do |t|
+      <%- fields.each do |key| -%>
+      <%- next if %w(id message_id timestamp).include?(key.name) -%>
+      <%- sql_type = schema_base.sql_type(key)
+          if %w(record array map).include?(sql_type)
+            conn = ActiveRecord::Base.connection
+            sql_type = conn.respond_to?(:supports_json?) && conn.supports_json? ? :json : :string
+          end
+      -%>
+      t.<%= sql_type %> :<%= key.name %>
+      <%- end -%>
+    end
+
+    # TODO add indexes as necessary
+  end
+
+  def down
+    return unless table_exists?(:<%= table_name %>)
+    drop_table :<%= table_name %>
+  end
+
+end
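As a hedged illustration of the template output: for a hypothetical `widgets` table whose schema's only non-skipped field is a string `name` (assuming `schema_base.sql_type` maps it to `string`), the generated migration would look roughly like:

    class CreateWidgets < ActiveRecord::Migration[6.0]
      def up
        if table_exists?(:widgets)
          warn "widgets already exists, exiting"
          return
        end
        create_table :widgets do |t|
          t.string :name
        end

        # TODO add indexes as necessary
      end

      def down
        return unless table_exists?(:widgets)
        drop_table :widgets
      end

    end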
data/lib/generators/deimos/active_record_generator.rb ADDED
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+
+require 'rails/generators'
+require 'rails/generators/active_record/migration'
+require 'rails/version'
+
+# Generates a new consumer.
+module Deimos
+  module Generators
+    # Generator for ActiveRecord model and migration.
+    class ActiveRecordGenerator < Rails::Generators::Base
+      include Rails::Generators::Migration
+      if Rails.version < '4'
+        extend(ActiveRecord::Generators::Migration)
+      else
+        include ActiveRecord::Generators::Migration
+      end
+      source_root File.expand_path('active_record/templates', __dir__)
+
+      argument :table_name, desc: 'The table to create.', required: true
+      argument :full_schema, desc: 'The fully qualified schema name.', required: true
+
+      no_commands do
+
+        # @return [String]
+        def db_migrate_path
+          if defined?(Rails.application) && Rails.application
+            paths = Rails.application.config.paths['db/migrate']
+            paths.respond_to?(:to_ary) ? paths.to_ary.first : paths.to_a.first
+          else
+            'db/migrate'
+          end
+        end
+
+        # @return [String]
+        def migration_version
+          "[#{ActiveRecord::Migration.current_version}]"
+        rescue StandardError
+          ''
+        end
+
+        # @return [String]
+        def table_class
+          self.table_name.classify
+        end
+
+        # @return [String]
+        def schema
+          last_dot = self.full_schema.rindex('.')
+          self.full_schema[last_dot + 1..-1]
+        end
+
+        # @return [String]
+        def namespace
+          last_dot = self.full_schema.rindex('.')
+          self.full_schema[0...last_dot]
+        end
+
+        # @return [Deimos::SchemaBackends::Base]
+        def schema_base
+          @schema_base ||= Deimos.schema_backend_class.new(schema: schema, namespace: namespace)
+        end
+
+        # @return [Array<SchemaField>]
+        def fields
+          schema_base.schema_fields
+        end
+
+      end
+
+      desc 'Generate migration for a table based on an existing schema.'
+      # :nodoc:
+      def generate
+        migration_template('migration.rb', "db/migrate/create_#{table_name.underscore}.rb")
+        template('model.rb', "app/models/#{table_name.underscore}.rb")
+      end
+    end
+  end
+end
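Based on the class and argument definitions above, invocation would presumably look like this (table and schema names are hypothetical):

    rails generate deimos:active_record widgets com.my-namespace.Widget

Per the `generate` method, this writes a timestamped `db/migrate/..._create_widgets.rb` from the migration template and an `app/models/widgets.rb` from the model template.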
data/lib/generators/deimos/db_backend/templates/migration CHANGED
@@ -16,6 +16,7 @@ class <%= migration_class_name %> < ActiveRecord::Migration<%= migration_version %>
       t.datetime :locked_at
       t.boolean :error, null: false, default: false
       t.integer :retries, null: false, default: 0
+      t.datetime :last_processed_at
     end
     add_index :kafka_topic_info, :topic, unique: true
     add_index :kafka_topic_info, [:locked_by, :error]
data/lib/generators/deimos/db_backend/templates/rails3_migration CHANGED
@@ -16,6 +16,7 @@ class <%= migration_class_name %> < ActiveRecord::Migration<%= migration_version %>
       t.datetime :locked_at
       t.boolean :error, null: false, default: false
      t.integer :retries, null: false, default: 0
+      t.datetime :last_processed_at
     end
     add_index :kafka_topic_info, :topic, unique: true
     add_index :kafka_topic_info, [:locked_by, :error]