deimos-ruby 1.0.0.pre.beta22

Files changed (100)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +74 -0
  3. data/.gitignore +41 -0
  4. data/.gitmodules +0 -0
  5. data/.rspec +1 -0
  6. data/.rubocop.yml +321 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +32 -0
  10. data/CODE_OF_CONDUCT.md +77 -0
  11. data/Dockerfile +23 -0
  12. data/Gemfile +6 -0
  13. data/Gemfile.lock +165 -0
  14. data/Guardfile +22 -0
  15. data/LICENSE.md +195 -0
  16. data/README.md +752 -0
  17. data/Rakefile +13 -0
  18. data/bin/deimos +4 -0
  19. data/deimos-kafka.gemspec +42 -0
  20. data/docker-compose.yml +71 -0
  21. data/docs/DATABASE_BACKEND.md +147 -0
  22. data/docs/PULL_REQUEST_TEMPLATE.md +34 -0
  23. data/lib/deimos/active_record_consumer.rb +81 -0
  24. data/lib/deimos/active_record_producer.rb +64 -0
  25. data/lib/deimos/avro_data_coder.rb +89 -0
  26. data/lib/deimos/avro_data_decoder.rb +36 -0
  27. data/lib/deimos/avro_data_encoder.rb +51 -0
  28. data/lib/deimos/backends/db.rb +27 -0
  29. data/lib/deimos/backends/kafka.rb +27 -0
  30. data/lib/deimos/backends/kafka_async.rb +27 -0
  31. data/lib/deimos/configuration.rb +90 -0
  32. data/lib/deimos/consumer.rb +164 -0
  33. data/lib/deimos/instrumentation.rb +71 -0
  34. data/lib/deimos/kafka_message.rb +27 -0
  35. data/lib/deimos/kafka_source.rb +126 -0
  36. data/lib/deimos/kafka_topic_info.rb +86 -0
  37. data/lib/deimos/message.rb +74 -0
  38. data/lib/deimos/metrics/datadog.rb +47 -0
  39. data/lib/deimos/metrics/mock.rb +39 -0
  40. data/lib/deimos/metrics/provider.rb +38 -0
  41. data/lib/deimos/monkey_patches/phobos_cli.rb +35 -0
  42. data/lib/deimos/monkey_patches/phobos_producer.rb +51 -0
  43. data/lib/deimos/monkey_patches/ruby_kafka_heartbeat.rb +85 -0
  44. data/lib/deimos/monkey_patches/schema_store.rb +19 -0
  45. data/lib/deimos/producer.rb +218 -0
  46. data/lib/deimos/publish_backend.rb +30 -0
  47. data/lib/deimos/railtie.rb +8 -0
  48. data/lib/deimos/schema_coercer.rb +108 -0
  49. data/lib/deimos/shared_config.rb +59 -0
  50. data/lib/deimos/test_helpers.rb +356 -0
  51. data/lib/deimos/tracing/datadog.rb +35 -0
  52. data/lib/deimos/tracing/mock.rb +40 -0
  53. data/lib/deimos/tracing/provider.rb +31 -0
  54. data/lib/deimos/utils/db_producer.rb +122 -0
  55. data/lib/deimos/utils/executor.rb +117 -0
  56. data/lib/deimos/utils/inline_consumer.rb +144 -0
  57. data/lib/deimos/utils/lag_reporter.rb +182 -0
  58. data/lib/deimos/utils/platform_schema_validation.rb +0 -0
  59. data/lib/deimos/utils/signal_handler.rb +68 -0
  60. data/lib/deimos/version.rb +5 -0
  61. data/lib/deimos.rb +133 -0
  62. data/lib/generators/deimos/db_backend/templates/migration +24 -0
  63. data/lib/generators/deimos/db_backend/templates/rails3_migration +30 -0
  64. data/lib/generators/deimos/db_backend_generator.rb +48 -0
  65. data/lib/tasks/deimos.rake +27 -0
  66. data/spec/active_record_consumer_spec.rb +81 -0
  67. data/spec/active_record_producer_spec.rb +107 -0
  68. data/spec/avro_data_decoder_spec.rb +18 -0
  69. data/spec/avro_data_encoder_spec.rb +37 -0
  70. data/spec/backends/db_spec.rb +35 -0
  71. data/spec/backends/kafka_async_spec.rb +11 -0
  72. data/spec/backends/kafka_spec.rb +11 -0
  73. data/spec/consumer_spec.rb +169 -0
  74. data/spec/deimos_spec.rb +120 -0
  75. data/spec/kafka_source_spec.rb +168 -0
  76. data/spec/kafka_topic_info_spec.rb +88 -0
  77. data/spec/phobos.bad_db.yml +73 -0
  78. data/spec/phobos.yml +73 -0
  79. data/spec/producer_spec.rb +397 -0
  80. data/spec/publish_backend_spec.rb +10 -0
  81. data/spec/schemas/com/my-namespace/MySchema-key.avsc +13 -0
  82. data/spec/schemas/com/my-namespace/MySchema.avsc +18 -0
  83. data/spec/schemas/com/my-namespace/MySchemaWithBooleans.avsc +18 -0
  84. data/spec/schemas/com/my-namespace/MySchemaWithDateTimes.avsc +33 -0
  85. data/spec/schemas/com/my-namespace/MySchemaWithId.avsc +28 -0
  86. data/spec/schemas/com/my-namespace/MySchemaWithUniqueId.avsc +32 -0
  87. data/spec/schemas/com/my-namespace/Widget.avsc +27 -0
  88. data/spec/schemas/com/my-namespace/WidgetTheSecond.avsc +27 -0
  89. data/spec/spec_helper.rb +207 -0
  90. data/spec/updateable_schema_store_spec.rb +36 -0
  91. data/spec/utils/db_producer_spec.rb +259 -0
  92. data/spec/utils/executor_spec.rb +42 -0
  93. data/spec/utils/lag_reporter_spec.rb +69 -0
  94. data/spec/utils/platform_schema_validation_spec.rb +0 -0
  95. data/spec/utils/signal_handler_spec.rb +16 -0
  96. data/support/deimos-solo.png +0 -0
  97. data/support/deimos-with-name-next.png +0 -0
  98. data/support/deimos-with-name.png +0 -0
  99. data/support/flipp-logo.png +0 -0
  100. metadata +452 -0
data/spec/deimos_spec.rb ADDED
@@ -0,0 +1,120 @@
+ # frozen_string_literal: true
+
+ describe Deimos do
+
+   it 'should have a version number' do
+     expect(Deimos::VERSION).not_to be_nil
+   end
+
+   specify 'configure' do
+     phobos_configuration = { 'logger' =>
+       { 'file' => 'log/phobos.log',
+         'stdout_json' => false,
+         'level' => 'debug',
+         'ruby_kafka' =>
+           { 'level' => 'debug' } },
+       'kafka' =>
+         { 'client_id' => 'phobos',
+           'connect_timeout' => 15,
+           'socket_timeout' => 15,
+           'seed_brokers' => 'my_seed_broker.com',
+           'ssl_ca_cert' => 'my_ssl_ca_cert',
+           'ssl_client_cert' => 'my_ssl_client_cert',
+           'ssl_client_cert_key' => 'my_ssl_client_cert_key' },
+       'producer' =>
+         { 'ack_timeout' => 5,
+           'required_acks' => :all,
+           'max_retries' => 2,
+           'retry_backoff' => 1,
+           'max_buffer_size' => 10_000,
+           'max_buffer_bytesize' => 10_000_000,
+           'compression_codec' => nil,
+           'compression_threshold' => 1,
+           'max_queue_size' => 10_000,
+           'delivery_threshold' => 0,
+           'delivery_interval' => 0 },
+       'consumer' =>
+         { 'session_timeout' => 300,
+           'offset_commit_interval' => 10,
+           'offset_commit_threshold' => 0,
+           'heartbeat_interval' => 10 },
+       'backoff' =>
+         { 'min_ms' => 1000,
+           'max_ms' => 60_000 },
+       'listeners' => [
+         { 'handler' => 'ConsumerTest::MyConsumer',
+           'topic' => 'my_consume_topic',
+           'group_id' => 'my_group_id',
+           'max_bytes_per_partition' => 524_288 }
+       ],
+       'custom_logger' => nil,
+       'custom_kafka_logger' => nil }
+
+     expect(Phobos).to receive(:configure).with(phobos_configuration)
+     allow(described_class).to receive(:ssl_var_contents) { |key| key }
+     described_class.configure do |config|
+       config.phobos_config_file = File.join(File.dirname(__FILE__), 'phobos.yml')
+       config.seed_broker = 'my_seed_broker.com'
+       config.ssl_enabled = true
+       config.ssl_ca_cert = 'my_ssl_ca_cert'
+       config.ssl_client_cert = 'my_ssl_client_cert'
+       config.ssl_client_cert_key = 'my_ssl_client_cert_key'
+     end
+   end
+
+   it 'should error if required_acks is not all' do
+     expect {
+       described_class.configure do |config|
+         config.publish_backend = :db
+         config.phobos_config_file = File.join(File.dirname(__FILE__), 'phobos.bad_db.yml')
+       end
+     }.to raise_error('Cannot set publish_backend to :db unless required_acks is set to ":all" in phobos.yml!')
+   end
+
+   describe '#start_db_backend!' do
+     before(:each) do
+       allow(described_class).to receive(:run_db_backend)
+     end
+
+     it 'should start if backend is db and num_producer_threads is > 0' do
+       signal_handler = instance_double(Deimos::Utils::SignalHandler)
+       allow(signal_handler).to receive(:run!)
+       expect(Deimos::Utils::SignalHandler).to receive(:new) do |executor|
+         expect(executor.runners.size).to eq(2)
+         signal_handler
+       end
+       described_class.configure do |config|
+         config.publish_backend = :db
+       end
+       described_class.start_db_backend!(thread_count: 2)
+     end
+
+     it 'should not start if backend is not db' do
+       expect(Deimos::Utils::SignalHandler).not_to receive(:new)
+       described_class.configure do |config|
+         config.publish_backend = :kafka
+       end
+       expect { described_class.start_db_backend!(thread_count: 2) }.
+         to raise_error('Publish backend is not set to :db, exiting')
+     end
+
+     it 'should not start if num_producer_threads is nil' do
+       expect(Deimos::Utils::SignalHandler).not_to receive(:new)
+       described_class.configure do |config|
+         config.publish_backend = :db
+       end
+       expect { described_class.start_db_backend!(thread_count: nil) }.
+         to raise_error('Thread count is not given or set to zero, exiting')
+     end
+
+     it 'should not start if num_producer_threads is 0' do
+       expect(Deimos::Utils::SignalHandler).not_to receive(:new)
+       described_class.configure do |config|
+         config.publish_backend = :db
+       end
+       expect { described_class.start_db_backend!(thread_count: 0) }.
+         to raise_error('Thread count is not given or set to zero, exiting')
+     end
+   end
+ end
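
The configure spec above maps Deimos settings onto a full Phobos configuration hash. For orientation, here is a minimal sketch of the same calls as an application would make them; the file path and broker address are placeholders, and since the spec stubs ssl_var_contents to the identity function, treat the ssl_* values as illustrative rather than literal certificate contents:

# Hypothetical app initializer, mirroring the settings exercised in the spec.
require 'deimos'

Deimos.configure do |config|
  config.phobos_config_file = 'config/phobos.yml'   # placeholder path
  config.publish_backend = :kafka                   # or :db to buffer through the database
  config.seed_broker = 'kafka-broker.example.com'   # placeholder broker
  config.ssl_enabled = true
  config.ssl_ca_cert = 'my_ssl_ca_cert'             # resolved via ssl_var_contents in the spec
  config.ssl_client_cert = 'my_ssl_client_cert'
  config.ssl_client_cert_key = 'my_ssl_client_cert_key'
end

# With the :db backend configured, a separate process drains the buffered
# messages; per the specs, thread_count must be a positive integer or
# start_db_backend! raises.
Deimos.start_db_backend!(thread_count: 2)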
data/spec/kafka_source_spec.rb ADDED
@@ -0,0 +1,168 @@
+ # frozen_string_literal: true
+
+ require 'activerecord-import'
+
+ # Wrap in a module so our classes don't leak out afterwards
+ module KafkaSourceSpec
+   RSpec.describe Deimos::KafkaSource do
+     before(:all) do
+       ActiveRecord::Base.connection.create_table(:widgets, force: true) do |t|
+         t.integer(:widget_id)
+         t.string(:description)
+         t.string(:model_id, default: '')
+         t.string(:name)
+         t.timestamps
+       end
+       ActiveRecord::Base.connection.add_index(:widgets, :widget_id)
+
+       # Dummy producer which mimics the behavior of a real producer
+       class WidgetProducer < Deimos::ActiveRecordProducer
+         topic 'my-topic'
+         namespace 'com.my-namespace'
+         schema 'Widget'
+         key_config field: :id
+       end
+
+       # Dummy producer which mimics the behavior of a real producer
+       class WidgetProducerTheSecond < Deimos::ActiveRecordProducer
+         topic 'my-topic-the-second'
+         namespace 'com.my-namespace'
+         schema 'WidgetTheSecond'
+         key_config field: :id
+       end
+
+       # Dummy class we can include the mixin in. Has a backing table created
+       # earlier.
+       class Widget < ActiveRecord::Base
+         include Deimos::KafkaSource
+
+         # :nodoc:
+         def self.kafka_producers
+           [WidgetProducer, WidgetProducerTheSecond]
+         end
+       end
+       Widget.reset_column_information
+     end
+
+     after(:all) do
+       ActiveRecord::Base.connection.drop_table(:widgets)
+     end
+
+     before(:each) do
+       Widget.delete_all
+     end
+
+     it 'should send events on creation, update, and deletion' do
+       widget = Widget.create!(widget_id: 1, name: 'widget')
+       expect('my-topic').to have_sent({
+         widget_id: 1,
+         name: 'widget',
+         id: widget.id,
+         created_at: anything,
+         updated_at: anything
+       }, 1)
+       expect('my-topic-the-second').to have_sent({
+         widget_id: 1,
+         model_id: '',
+         id: widget.id,
+         created_at: anything,
+         updated_at: anything
+       }, 1)
+       widget.update_attribute(:name, 'widget 2')
+       expect('my-topic').to have_sent({
+         widget_id: 1,
+         name: 'widget 2',
+         id: widget.id,
+         created_at: anything,
+         updated_at: anything
+       }, 1)
+       expect('my-topic-the-second').to have_sent({
+         widget_id: 1,
+         model_id: '',
+         id: widget.id,
+         created_at: anything,
+         updated_at: anything
+       }, 1)
+       widget.destroy
+       expect('my-topic').to have_sent(nil, 1)
+       expect('my-topic-the-second').to have_sent(nil, 1)
+     end
+
+     it 'should send events on import' do
+       widgets = (1..3).map do |i|
+         Widget.new(widget_id: i, name: "Widget #{i}")
+       end
+       Widget.import(widgets)
+       widgets = Widget.all
+       expect('my-topic').to have_sent({
+         widget_id: 1,
+         name: 'Widget 1',
+         id: widgets[0].id,
+         created_at: anything,
+         updated_at: anything
+       }, widgets[0].id)
+       expect('my-topic').to have_sent({
+         widget_id: 2,
+         name: 'Widget 2',
+         id: widgets[1].id,
+         created_at: anything,
+         updated_at: anything
+       }, widgets[1].id)
+       expect('my-topic').to have_sent({
+         widget_id: 3,
+         name: 'Widget 3',
+         id: widgets[2].id,
+         created_at: anything,
+         updated_at: anything
+       }, widgets[2].id)
+     end
+
+     it 'should send events even if the save fails' do
+       widget = Widget.create!(widget_id: 1, name: 'widget')
+       expect('my-topic').to have_sent({
+         widget_id: 1,
+         name: widget.name,
+         id: widget.id,
+         created_at: anything,
+         updated_at: anything
+       }, widget.id)
+       clear_kafka_messages!
+       Widget.transaction do
+         widget.update_attribute(:name, 'widget 3')
+         raise ActiveRecord::Rollback
+       end
+       expect('my-topic').to have_sent(anything)
+     end
+
+     it 'should not send events if an unrelated field changes' do
+       widget = Widget.create!(widget_id: 1, name: 'widget')
+       clear_kafka_messages!
+       widget.update_attribute(:description, 'some description')
+       expect('my-topic').not_to have_sent(anything)
+     end
+
+     context 'with DB backend' do
+       before(:each) do
+         Deimos.configure do |config|
+           config.publish_backend = :db
+         end
+         setup_db(DB_OPTIONS.last) # sqlite
+         allow(Deimos::Producer).to receive(:produce_batch).and_call_original
+       end
+
+       it 'should save to the DB' do
+         Widget.create!(widget_id: 1, name: 'widget')
+         expect(Deimos::KafkaMessage.count).to eq(2) # 2 producers
+       end
+
+       it 'should not save with a rollback' do
+         Widget.transaction do
+           Widget.create!(widget_id: 1, name: 'widget')
+           raise ActiveRecord::Rollback
+         end
+         expect(Deimos::KafkaMessage.count).to eq(0)
+       end
+     end
+   end
+ end
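
Stripped of its test fixtures, the pattern under test is how an application wires a model into Kafka. A minimal sketch, assuming a hypothetical products table and Product Avro schema (all names here are invented; the DSL calls are the same ones the spec uses):

# Hypothetical production setup mirroring the spec's WidgetProducer/Widget pair.
class ProductProducer < Deimos::ActiveRecordProducer
  topic 'products'            # hypothetical topic
  namespace 'com.my-namespace'
  schema 'Product'            # hypothetical Avro schema
  key_config field: :id
end

class Product < ActiveRecord::Base
  include Deimos::KafkaSource

  # Every producer listed here fires on create, update, and destroy,
  # and on bulk insert via activerecord-import.
  def self.kafka_producers
    [ProductProducer]
  end
end

Product.create!(name: 'gizmo') # publishes to 'products' automatically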
data/spec/kafka_topic_info_spec.rb ADDED
@@ -0,0 +1,88 @@
+ # frozen_string_literal: true
+
+ each_db_config(Deimos::KafkaTopicInfo) do
+
+   it 'should lock the topic' do
+     expect(described_class.lock('my-topic', 'abc')).to be_truthy
+     expect(described_class.lock('my-topic', 'def')).to be_falsey
+     expect(described_class.lock('my-topic2', 'def')).to be_truthy
+     expect(described_class.count).to eq(2)
+     expect(described_class.first.locked_by).to eq('abc')
+     expect(described_class.last.locked_by).to eq('def')
+   end
+
+   it "should lock the topic if it's old" do
+     described_class.create!(topic: 'my-topic', locked_by: 'abc', error: true,
+                             locked_at: 2.minutes.ago)
+     expect(described_class.lock('my-topic', 'abc')).to be_truthy
+     expect(described_class.count).to eq(1)
+     expect(described_class.first.locked_by).to eq('abc')
+   end
+
+   it "should lock the topic if it's not currently locked" do
+     described_class.create!(topic: 'my-topic', locked_by: nil,
+                             locked_at: nil)
+     expect(described_class.lock('my-topic', 'abc')).to be_truthy
+     expect(described_class.count).to eq(1)
+     expect(described_class.first.locked_by).to eq('abc')
+   end
+
+   it "should not lock the topic if it's errored" do
+     described_class.create!(topic: 'my-topic', locked_by: nil,
+                             locked_at: nil, error: true)
+     expect(described_class.lock('my-topic', 'abc')).to be_falsey
+     expect(described_class.count).to eq(1)
+     expect(described_class.first.locked_by).to eq(nil)
+   end
+
+   specify '#clear_lock' do
+     described_class.create!(topic: 'my-topic', locked_by: 'abc',
+                             locked_at: 10.seconds.ago, error: true, retries: 1)
+     described_class.create!(topic: 'my-topic2', locked_by: 'def',
+                             locked_at: 10.seconds.ago, error: true, retries: 1)
+     described_class.clear_lock('my-topic', 'abc')
+     expect(described_class.count).to eq(2)
+     record = described_class.first
+     expect(record.locked_by).to eq(nil)
+     expect(record.locked_at).to eq(nil)
+     expect(record.error).to eq(false)
+     expect(record.retries).to eq(0)
+     record = described_class.last
+     expect(record.locked_by).not_to eq(nil)
+     expect(record.locked_at).not_to eq(nil)
+     expect(record.error).not_to eq(false)
+     expect(record.retries).not_to eq(0)
+   end
+
+   specify '#register_error' do
+     freeze_time do
+       described_class.create!(topic: 'my-topic', locked_by: 'abc',
+                               locked_at: 10.seconds.ago)
+       described_class.create!(topic: 'my-topic2', locked_by: 'def',
+                               locked_at: 10.seconds.ago, error: true, retries: 1)
+       described_class.register_error('my-topic', 'abc')
+       record = described_class.first
+       expect(record.locked_by).to be_nil
+       expect(record.locked_at).to eq(Time.zone.now)
+       expect(record.error).to be_truthy
+       expect(record.retries).to eq(1)
+
+       described_class.register_error('my-topic2', 'def')
+       record = described_class.last
+       expect(record.error).to be_truthy
+       expect(record.retries).to eq(2)
+       expect(record.locked_at).to eq(Time.zone.now)
+     end
+   end
+
+   specify '#heartbeat' do
+     freeze_time do
+       described_class.create!(topic: 'my-topic', locked_by: 'abc',
+                               locked_at: 10.seconds.ago)
+       described_class.heartbeat('my-topic', 'abc')
+       expect(described_class.last.locked_at).to eq(Time.zone.now)
+     end
+   end
+ end
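
Taken together, these specs describe the locking protocol a DB-backend producer thread follows per topic. A sketch of that lifecycle, using only the class methods exercised above (the lock id format is hypothetical; Deimos derives its own identifier internally):

require 'socket'

topic = 'my-topic'
lock_id = "#{Socket.gethostname}-#{Thread.current.object_id}" # hypothetical id

if Deimos::KafkaTopicInfo.lock(topic, lock_id) # falsey if held elsewhere or errored
  begin
    # ... publish this topic's pending Deimos::KafkaMessage rows ...
    Deimos::KafkaTopicInfo.heartbeat(topic, lock_id)  # refresh locked_at mid-batch
    Deimos::KafkaTopicInfo.clear_lock(topic, lock_id) # success: reset error/retries
  rescue StandardError
    # Marks the topic errored and bumps retries; per the specs, an errored
    # topic is skipped until its lock record is old enough to be reclaimed.
    Deimos::KafkaTopicInfo.register_error(topic, lock_id)
  end
end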
data/spec/phobos.bad_db.yml ADDED
@@ -0,0 +1,73 @@
+ logger:
+   # Optional log file, set to false or remove to disable it
+   file: log/phobos.log
+   # Optional output format for stdout, default is false (human readable).
+   # Set to true to enable json output.
+   stdout_json: false
+   level: debug
+   # Comment the block to disable ruby-kafka logs
+   ruby_kafka:
+     level: debug
+
+ kafka:
+   # identifier for this application
+   client_id: phobos
+   # timeout setting for connecting to brokers
+   connect_timeout: 15
+   # timeout setting for socket connections
+   socket_timeout: 15
+
+ producer:
+   # number of seconds a broker can wait for replicas to acknowledge
+   # a write before responding with a timeout
+   ack_timeout: 5
+   # number of replicas that must acknowledge a write, or `:all`
+   # if all in-sync replicas must acknowledge
+   required_acks: 1
+   # number of retries that should be attempted before giving up sending
+   # messages to the cluster. Does not include the original attempt
+   max_retries: 2
+   # number of seconds to wait between retries
+   retry_backoff: 1
+   # number of messages allowed in the buffer before new writes will
+   # raise {BufferOverflow} exceptions
+   max_buffer_size: 10000
+   # maximum size of the buffer in bytes. Attempting to produce messages
+   # when the buffer reaches this size will result in {BufferOverflow} being raised
+   max_buffer_bytesize: 10000000
+   # name of the compression codec to use, or nil if no compression should be performed.
+   # Valid codecs: `:snappy` and `:gzip`
+   compression_codec:
+   # number of messages that need to be in a message set before it should be compressed.
+   # Note that message sets are per-partition rather than per-topic or per-producer
+   compression_threshold: 1
+   # maximum number of messages allowed in the queue. Only used for async_producer
+   max_queue_size: 10000
+   # if greater than zero, the number of buffered messages that will automatically
+   # trigger a delivery. Only used for async_producer
+   delivery_threshold: 0
+   # if greater than zero, the number of seconds between automatic message
+   # deliveries. Only used for async_producer
+   delivery_interval: 0
+
+ consumer:
+   # number of seconds after which, if a client hasn't contacted the Kafka cluster,
+   # it will be kicked out of the group
+   session_timeout: 300
+   # interval between offset commits, in seconds
+   offset_commit_interval: 10
+   # number of messages that can be processed before their offsets are committed.
+   # If zero, offset commits are not triggered by message processing
+   offset_commit_threshold: 0
+   # interval between heartbeats; must be less than the session window
+   heartbeat_interval: 10
+
+ backoff:
+   min_ms: 1000
+   max_ms: 60000
+
+ listeners:
+   - handler: ConsumerTest::MyConsumer
+     topic: my_consume_topic
+     group_id: my_group_id
+     max_bytes_per_partition: 524288 # 512 KB
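
This fixture differs from phobos.yml below only in required_acks: 1, which is exactly what the configuration spec above relies on: with the DB backend, Deimos insists that all in-sync replicas acknowledge writes. A sketch of the failure it provokes, with the error text taken verbatim from the spec:

# Loading the fixture above with the DB backend enabled raises at configure time.
Deimos.configure do |config|
  config.publish_backend = :db
  config.phobos_config_file = 'spec/phobos.bad_db.yml'
end
# => RuntimeError: Cannot set publish_backend to :db unless required_acks
#    is set to ":all" in phobos.yml!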
data/spec/phobos.yml ADDED
@@ -0,0 +1,73 @@
+ logger:
+   # Optional log file, set to false or remove to disable it
+   file: log/phobos.log
+   # Optional output format for stdout, default is false (human readable).
+   # Set to true to enable json output.
+   stdout_json: false
+   level: debug
+   # Comment the block to disable ruby-kafka logs
+   ruby_kafka:
+     level: debug
+
+ kafka:
+   # identifier for this application
+   client_id: phobos
+   # timeout setting for connecting to brokers
+   connect_timeout: 15
+   # timeout setting for socket connections
+   socket_timeout: 15
+
+ producer:
+   # number of seconds a broker can wait for replicas to acknowledge
+   # a write before responding with a timeout
+   ack_timeout: 5
+   # number of replicas that must acknowledge a write, or `:all`
+   # if all in-sync replicas must acknowledge
+   required_acks: :all
+   # number of retries that should be attempted before giving up sending
+   # messages to the cluster. Does not include the original attempt
+   max_retries: 2
+   # number of seconds to wait between retries
+   retry_backoff: 1
+   # number of messages allowed in the buffer before new writes will
+   # raise {BufferOverflow} exceptions
+   max_buffer_size: 10000
+   # maximum size of the buffer in bytes. Attempting to produce messages
+   # when the buffer reaches this size will result in {BufferOverflow} being raised
+   max_buffer_bytesize: 10000000
+   # name of the compression codec to use, or nil if no compression should be performed.
+   # Valid codecs: `:snappy` and `:gzip`
+   compression_codec:
+   # number of messages that need to be in a message set before it should be compressed.
+   # Note that message sets are per-partition rather than per-topic or per-producer
+   compression_threshold: 1
+   # maximum number of messages allowed in the queue. Only used for async_producer
+   max_queue_size: 10000
+   # if greater than zero, the number of buffered messages that will automatically
+   # trigger a delivery. Only used for async_producer
+   delivery_threshold: 0
+   # if greater than zero, the number of seconds between automatic message
+   # deliveries. Only used for async_producer
+   delivery_interval: 0
+
+ consumer:
+   # number of seconds after which, if a client hasn't contacted the Kafka cluster,
+   # it will be kicked out of the group
+   session_timeout: 300
+   # interval between offset commits, in seconds
+   offset_commit_interval: 10
+   # number of messages that can be processed before their offsets are committed.
+   # If zero, offset commits are not triggered by message processing
+   offset_commit_threshold: 0
+   # interval between heartbeats; must be less than the session window
+   heartbeat_interval: 10
+
+ backoff:
+   min_ms: 1000
+   max_ms: 60000
+
+ listeners:
+   - handler: ConsumerTest::MyConsumer
+     topic: my_consume_topic
+     group_id: my_group_id
+     max_bytes_per_partition: 524288 # 512 KB
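
Both fixtures point their single listener at ConsumerTest::MyConsumer. In Deimos, such a handler is a Deimos::Consumer subclass; a minimal sketch, assuming the MySchema fixture from data/spec/schemas (the key field name below is hypothetical, as the fixture's key schema is not shown here):

module ConsumerTest
  class MyConsumer < Deimos::Consumer
    schema 'MySchema'            # Avro fixture from data/spec/schemas
    namespace 'com.my-namespace'
    key_config field: :test_id   # hypothetical key field

    # payload arrives Avro-decoded as a hash; metadata carries key,
    # topic, and partition information.
    def consume(payload, metadata)
      puts "Received #{payload.inspect} (key: #{metadata[:key]})"
    end
  end
end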