delivery_boy 1.3.1 → 2.0.0.alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/CODEOWNERS +4 -0
- data/.github/workflows/ci.yml +12 -10
- data/.rspec +1 -1
- data/.standard.yml +1 -0
- data/CHANGELOG +50 -1
- data/Gemfile +6 -0
- data/README.md +93 -45
- data/Rakefile +6 -1
- data/SECURITY.md +5 -0
- data/delivery_boy.gemspec +11 -12
- data/lib/delivery_boy/config.rb +74 -7
- data/lib/delivery_boy/config_error.rb +0 -1
- data/lib/delivery_boy/datadog.rb +192 -0
- data/lib/delivery_boy/fake.rb +3 -4
- data/lib/delivery_boy/instance.rb +301 -68
- data/lib/delivery_boy/instrumenter.rb +26 -0
- data/lib/delivery_boy/railtie.rb +10 -5
- data/lib/delivery_boy/version.rb +1 -1
- data/lib/delivery_boy.rb +23 -13
- metadata +15 -10
data/lib/delivery_boy/datadog.rb
ADDED
@@ -0,0 +1,192 @@
+# frozen_string_literal: true
+
+begin
+  require "datadog/statsd"
+rescue LoadError
+  warn "In order to report Kafka client metrics to Datadog you need to install the `dogstatsd-ruby` gem."
+  raise
+end
+
+require "active_support/subscriber"
+
+module DeliveryBoy
+  # Reports operational metrics to a Datadog agent using the Statsd protocol.
+  #
+  #   require "delivery_boy/datadog"
+  #
+  #   # Default is "ruby_kafka" (kept for backward compatibility).
+  #   DeliveryBoy::Datadog.namespace = "custom-namespace"
+  #
+  #   # Default is "127.0.0.1".
+  #   DeliveryBoy::Datadog.host = "statsd.something.com"
+  #
+  #   # Default is 8125.
+  #   DeliveryBoy::Datadog.port = 1234
+  #
+  module Datadog
+    STATSD_NAMESPACE = "ruby_kafka"
+
+    class << self
+      attr_reader :host, :port, :socket_path
+
+      def configure
+        yield self
+      end
+
+      def statsd
+        @statsd ||= if socket_path
+          ::Datadog::Statsd.new(socket_path: socket_path, namespace: namespace, tags: tags)
+        else
+          ::Datadog::Statsd.new(host, port, namespace: namespace, tags: tags)
+        end
+      end
+
+      def statsd=(statsd)
+        clear
+        @statsd = statsd
+      end
+
+      def host=(host)
+        @host = host
+        clear
+      end
+
+      def port=(port)
+        @port = port
+        clear
+      end
+
+      def socket_path=(socket_path)
+        @socket_path = socket_path
+        clear
+      end
+
+      def namespace
+        @namespace ||= STATSD_NAMESPACE
+      end
+
+      def namespace=(namespace)
+        @namespace = namespace
+        clear
+      end
+
+      def tags
+        @tags ||= []
+      end
+
+      def tags=(tags)
+        @tags = tags
+        clear
+      end
+
+      def close
+        @statsd&.close
+      end
+
+      private
+
+      def clear
+        close
+        @statsd = nil
+      end
+    end
+
+    class StatsdSubscriber < ActiveSupport::Subscriber
+      private
+
+      %w[increment histogram count timing gauge].each do |type|
+        define_method(type) do |*args, **kwargs|
+          emit(type, *args, **kwargs)
+        end
+      end
+
+      def emit(type, *args, tags: {})
+        tags = tags.map { |k, v| "#{k}:#{v}" }.to_a
+        DeliveryBoy::Datadog.statsd.send(type, *args, tags: tags)
+      end
+    end
+
+    class ProducerSubscriber < StatsdSubscriber
+      def produce_message(event)
+        client = event.payload.fetch(:client_id)
+        topic = event.payload.fetch(:topic)
+        message_size = event.payload.fetch(:message_size)
+        buffer_size = event.payload.fetch(:buffer_size)
+
+        tags = {client: client, topic: topic}
+
+        if event.payload.key?(:exception)
+          increment("producer.produce.errors", tags: tags)
+        else
+          increment("producer.produce.messages", tags: tags)
+          histogram("producer.produce.message_size", message_size, tags: tags)
+          count("producer.produce.message_size.sum", message_size, tags: tags)
+          histogram("producer.buffer.size", buffer_size, tags: tags)
+        end
+      end
+
+      def deliver_messages(event)
+        client = event.payload.fetch(:client_id)
+        message_count = event.payload.fetch(:delivered_message_count)
+
+        tags = {client: client}
+
+        increment("producer.deliver.errors", tags: tags) if event.payload.key?(:exception)
+        timing("producer.deliver.latency", event.duration, tags: tags)
+        count("producer.deliver.messages", message_count, tags: tags)
+      end
+
+      def deliver(event)
+        client = event.payload.fetch(:client_id)
+        topic = event.payload.fetch(:topic)
+        message_size = event.payload.fetch(:message_size)
+
+        tags = {client: client, topic: topic}
+
+        if event.payload.key?(:exception)
+          increment("producer.deliver.errors", tags: tags)
+        else
+          increment("producer.produce.messages", tags: tags)
+          histogram("producer.produce.message_size", message_size, tags: tags)
+          count("producer.produce.message_size.sum", message_size, tags: tags)
+          timing("producer.deliver.latency", event.duration, tags: tags)
+          count("producer.deliver.messages", 1, tags: tags)
+        end
+      end
+
+      def deliver_async(event)
+        client = event.payload.fetch(:client_id)
+        topic = event.payload.fetch(:topic)
+        message_size = event.payload.fetch(:message_size)
+        queue_size = event.payload.fetch(:queue_size, 0)
+
+        tags = {client: client, topic: topic}
+
+        if event.payload.key?(:exception)
+          increment("async_producer.produce.errors", tags: tags)
+        else
+          increment("producer.produce.messages", tags: tags)
+          histogram("producer.produce.message_size", message_size, tags: tags)
+          count("producer.produce.message_size.sum", message_size, tags: tags)
+          histogram("async_producer.queue.size", queue_size, tags: tags)
+        end
+      end
+
+      def ack_message(event)
+        tags = {
+          client: event.payload.fetch(:client_id),
+          topic: event.payload.fetch(:topic)
+        }
+
+        increment("producer.ack.messages", tags: tags)
+      end
+
+      def delivery_error(event)
+        tags = {client: event.payload.fetch(:client_id)}
+        increment("producer.ack.errors", tags: tags)
+      end
+
+      attach_to "delivery_boy"
+    end
+  end
+end
data/lib/delivery_boy/fake.rb
CHANGED
@@ -1,5 +1,4 @@
 module DeliveryBoy
-
   # A fake implementation that is useful for testing.
   class Fake
     FakeMessage = Struct.new(:value, :topic, :key, :headers, :offset, :partition, :partition_key, :create_time) do
@@ -9,8 +8,8 @@ module DeliveryBoy
     end

     def initialize
-      @messages = Hash.new {|h, k| h[k] = [] }
-      @buffer = Hash.new {|h, k| h[k] = [] }
+      @messages = Hash.new { |h, k| h[k] = [] }
+      @buffer = Hash.new { |h, k| h[k] = [] }
       @delivery_lock = Mutex.new
     end

@@ -25,7 +24,7 @@ module DeliveryBoy
       nil
     end

-
+    alias_method :deliver_async!, :deliver

     def produce(value, topic:, key: nil, headers: {}, partition: nil, partition_key: nil, create_time: Time.now)
      @delivery_lock.synchronize do
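A small, hedged illustration of what the new alias means when a test drives the fake directly; the fake's read-side API is not shown in this diff, so only the calls visible above are used.

    fake = DeliveryBoy::Fake.new

    # Both calls record the message synchronously; deliver_async! is now just an
    # alias of deliver on the fake, so there is no background queue to flush.
    fake.deliver("hello", topic: "greetings")
    fake.deliver_async!("hi there", topic: "greetings", key: "user-1")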
data/lib/delivery_boy/instance.rb
CHANGED
@@ -1,49 +1,106 @@
-
+# frozen_string_literal: true

+module DeliveryBoy
   # This class implements the actual logic of DeliveryBoy. The DeliveryBoy module
   # has a module-level singleton instance.
   class Instance
-    def initialize(config, logger)
+    def initialize(config, logger, instrumenter: NullInstrumenter.new)
       @config = config
       @logger = logger
-      @
+      @instrumenter = instrumenter
     end

     def deliver(value, topic:, **options)
-
-
-
-
-
+      options_clone = options.clone
+      if options[:create_time]
+        options_clone[:timestamp] = Time.at(options[:create_time])
+        options_clone.delete(:create_time)
+      end
+
+      message_size = value.to_s.bytesize
+
+      instrumentation_payload = {
+        client_id: config.client_id,
+        topic: topic,
+        message_size: message_size
+      }

-
+      @instrumenter.instrument("deliver", instrumentation_payload) do
+        sync_producer
+          .produce(payload: value, topic: topic, **options_clone)
+          .wait
+      end
     end

     def deliver_async!(value, topic:, **options)
-
+      options_clone = options.clone
+      if options[:create_time]
+        options_clone[:timestamp] = Time.at(options[:create_time])
+        options_clone.delete(:create_time)
+      end
+
+      message_size = value.to_s.bytesize
+
+      instrumentation_payload = {
+        client_id: config.client_id,
+        topic: topic,
+        message_size: message_size,
+        queue_size: async_producer_queue_size
+      }
+
+      @instrumenter.instrument("deliver_async", instrumentation_payload) do
+        async_producer
+          .produce(payload: value, topic: topic, **options_clone)
+      end
     end

     def shutdown
-      sync_producer.
-      async_producer.
-
-      Thread.current[:delivery_boy_sync_producer] = nil
+      sync_producer.close if sync_producer?
+      async_producer.close if async_producer?
     end

     def produce(value, topic:, **options)
-
+      options_clone = options.clone
+      if options[:create_time]
+        options_clone[:timestamp] = Time.at(options[:create_time])
+        options_clone.delete(:create_time)
+      end
+
+      message_size = value.to_s.bytesize
+
+      instrumentation_payload = {
+        client_id: config.client_id,
+        topic: topic,
+        message_size: message_size,
+        buffer_size: handles.size
+      }
+
+      @instrumenter.instrument("produce_message", instrumentation_payload) do
+        handle = sync_producer.produce(payload: value, topic: topic, **options_clone)
+        handles.push(handle)
+      end
     end

     def deliver_messages
-
+      message_count = handles.size
+
+      instrumentation_payload = {
+        client_id: config.client_id,
+        delivered_message_count: message_count
+      }
+
+      @instrumenter.instrument("deliver_messages", instrumentation_payload) do
+        sync_producer.flush
+        handles.clear
+      end
     end

     def clear_buffer
-
+      handles.clear
     end

     def buffer_size
-
+      handles.size
     end

     private
@@ -53,7 +110,7 @@ module DeliveryBoy
     def sync_producer
       # We want synchronous producers to be per-thread in order to avoid problems with
       # concurrent deliveries.
-      Thread.current[:delivery_boy_sync_producer] ||= kafka.producer
+      Thread.current[:delivery_boy_sync_producer] ||= kafka.producer
     end

     def sync_producer?
@@ -63,64 +120,240 @@ module DeliveryBoy
     def async_producer
       # The async producer doesn't have to be per-thread, since all deliveries are
       # performed by a single background thread.
-      @async_producer ||=
-
-
-
-
-
+      @async_producer ||= begin
+        producer = Rdkafka::Config.new({
+          "bootstrap.servers": config.brokers.join(","),
+          "queue.buffering.backpressure.threshold": config.delivery_threshold,
+          "queue.buffering.max.ms": config.delivery_interval_ms
+        }.merge(producer_options)).producer
+
+        producer.delivery_callback = delivery_callback
+        producer
+      end
     end

     def async_producer?
       !@async_producer.nil?
     end

+    def async_producer_queue_size
+      return 0 unless async_producer?
+      # rdkafka doesn't expose queue size directly, return 0 as approximation
+      0
+    end
+
+    def delivery_callback
+      instrumenter = @instrumenter
+      client_id = config.client_id
+
+      proc do |delivery_report|
+        if delivery_report.error
+          instrumenter.instrument("delivery_error", {
+            client_id: client_id,
+            error: delivery_report.error
+          })
+        else
+          instrumenter.instrument("ack_message", {
+            client_id: client_id,
+            topic: delivery_report.topic_name,
+            partition: delivery_report.partition,
+            offset: delivery_report.offset
+          })
+        end
+      end
+    end
+
     def kafka
-      @kafka ||=
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      def
+      @kafka ||= Rdkafka::Config.new({
+        "bootstrap.servers": config.brokers.join(",")
+      }.merge(producer_options))
+    end
+
+    def sasl_options
+      return {} unless config.sasl_mechanism && !config.sasl_mechanism.empty?
+
+      config.validate_aws_msk_iam! if config.sasl_enabled?
+
+      options = {}
+
+      mechanism = config.sasl_mechanism.upcase
+
+      case mechanism
+      when "GSSAPI"
+        options.merge!(gssapi_options)
+      when "PLAIN"
+        options.merge!(plain_options)
+      when "SCRAM-SHA-256", "SCRAM-SHA-512"
+        options["sasl.mechanism"] = mechanism
+        options.merge!(scram_options)
+      when "OAUTHBEARER"
+        options.merge!(oauthbearer_options)
+      else
+        logger.warn "Unknown SASL mechanism: #{config.sasl_mechanism}"
+      end
+
+      options.compact
+    end
+
+    def gssapi_options
       {
-
-
-
-        retry_backoff: config.retry_backoff,
-        max_buffer_size: config.max_buffer_size,
-        max_buffer_bytesize: config.max_buffer_bytesize,
-        compression_codec: (config.compression_codec.to_sym if config.compression_codec),
-        compression_threshold: config.compression_threshold,
-        idempotent: config.idempotent,
-        transactional: config.transactional,
-        transactional_timeout: config.transactional_timeout,
+        "sasl.mechanism" => "GSSAPI",
+        "sasl.kerberos.principal" => config.sasl_gssapi_principal,
+        "sasl.kerberos.keytab" => config.sasl_gssapi_keytab
       }
     end
+
+    def plain_options
+      username = config.sasl_username || config.sasl_plain_username
+      password = config.sasl_password || config.sasl_plain_password
+
+      if username.nil? || username.to_s.empty? || password.nil? || password.to_s.empty?
+        raise ConfigError, "PLAIN authentication requires sasl_username and sasl_password to be set"
+      end
+
+      # Note: sasl_plain_authzid doesn't have a librdkafka equivalent
+      # Log warning if set, but don't fail
+      if config.sasl_plain_authzid && !config.sasl_plain_authzid.empty?
+        logger.warn "sasl_plain_authzid is not supported by librdkafka and will be ignored"
+      end
+
+      {
+        "sasl.mechanism" => "PLAIN",
+        "sasl.username" => username,
+        "sasl.password" => password
+      }
+    end
+
+    def scram_options
+      username = config.sasl_username || config.sasl_scram_username
+      password = config.sasl_password || config.sasl_scram_password
+
+      if username.nil? || username.to_s.empty? || password.nil? || password.to_s.empty?
+        raise ConfigError, "SCRAM authentication requires sasl_username and sasl_password to be set"
+      end
+
+      {
+        "sasl.username" => username,
+        "sasl.password" => password
+      }
+    end
+
+    def oauthbearer_options
+      # Check for legacy token provider (not supported)
+      if config.sasl_oauth_token_provider
+        raise ConfigError, <<~ERROR
+          sasl_oauth_token_provider is no longer supported with librdkafka.
+
+          Migration options:
+          1. Use OIDC configuration (recommended for OIDC providers like Auth0, Okta):
+             config.sasl_oauthbearer_method = "oidc"
+             config.sasl_oauthbearer_client_id = "your-client-id"
+             config.sasl_oauthbearer_client_secret = "your-client-secret"
+             config.sasl_oauthbearer_token_endpoint_url = "https://auth.example.com/oauth/token"
+
+          2. Use SCRAM-SHA-256/512 as an alternative authentication method.
+
+          See: https://github.com/zendesk/delivery_boy/blob/master/MIGRATION.md#oauthbearer
+        ERROR
+      end
+
+      if config.sasl_oauthbearer_method&.downcase == "oidc"
+        if config.sasl_oauthbearer_client_id.nil? || config.sasl_oauthbearer_client_id.empty?
+          raise ConfigError, "OAUTHBEARER OIDC requires sasl_oauthbearer_client_id to be set"
+        end
+        if config.sasl_oauthbearer_client_secret.nil? || config.sasl_oauthbearer_client_secret.empty?
+          raise ConfigError, "OAUTHBEARER OIDC requires sasl_oauthbearer_client_secret to be set"
+        end
+        if config.sasl_oauthbearer_token_endpoint_url.nil? || config.sasl_oauthbearer_token_endpoint_url.empty?
+          raise ConfigError, "OAUTHBEARER OIDC requires sasl_oauthbearer_token_endpoint_url to be set"
+        end
+      else
+        raise ConfigError, <<~ERROR
+          OAUTHBEARER requires OIDC configuration.

+          Set the following options:
+          config.sasl_oauthbearer_method = "oidc"
+          config.sasl_oauthbearer_client_id = "your-client-id"
+          config.sasl_oauthbearer_client_secret = "your-client-secret"
+          config.sasl_oauthbearer_token_endpoint_url = "https://auth.example.com/oauth/token"
+        ERROR
+      end
+
+      options = {
+        "sasl.mechanism" => "OAUTHBEARER",
+        "sasl.oauthbearer.method" => "oidc",
+        "sasl.oauthbearer.client.id" => config.sasl_oauthbearer_client_id,
+        "sasl.oauthbearer.client.secret" => config.sasl_oauthbearer_client_secret,
+        "sasl.oauthbearer.token.endpoint.url" => config.sasl_oauthbearer_token_endpoint_url
+      }
+
+      options["sasl.oauthbearer.scope"] = config.sasl_oauthbearer_scope if config.sasl_oauthbearer_scope
+      options["sasl.oauthbearer.extensions"] = config.sasl_oauthbearer_extensions if config.sasl_oauthbearer_extensions
+
+      options
+    end
+
+    def security_protocol
+      has_ssl = config.ssl_ca_cert || config.ssl_ca_cert_file_path
+      has_sasl = config.sasl_enabled? || config.sasl_gssapi_principal
+
+      if config.sasl_over_ssl == false && has_ssl
+        raise ConfigError, <<~ERROR
+          sasl_over_ssl=false with SSL certificates configured is not supported by librdkafka.
+
+          librdkafka's security.protocol cannot express "SSL for verification but SASL over plaintext".
+
+          Options:
+          1. Remove SSL certificate configuration to use SASL_PLAINTEXT
+          2. Remove sasl_over_ssl=false to use SASL_SSL (recommended)
+
+          Note: sasl_over_ssl is deprecated and will be removed in a future version.
+        ERROR
+      end
+
+      if has_sasl && has_ssl
+        "SASL_SSL"
+      elsif has_sasl
+        "SASL_PLAINTEXT"
+      elsif has_ssl
+        "SSL"
+      else
+        "PLAINTEXT"
+      end
+    end
+
+    def producer_options
+      if config.transactional? && config.transactional_id.nil?
+        raise "transactional_id must be set"
+      end
+
+      {
+        "client.id": config.client_id,
+        "socket.connection.setup.timeout.ms": config.connection_timeout_ms,
+        "socket.timeout.ms": config.socket_timeout_ms,
+        "request.required.acks": config.required_acks,
+        "request.timeout.ms": config.ack_timeout_ms,
+        "message.send.max.retries": config.max_retries,
+        "retry.backoff.ms": config.retry_backoff_ms,
+        "queue.buffering.max.messages": config.max_buffer_size,
+        "queue.buffering.max.kbytes": config.max_buffer_kbytesize,
+        "compression.codec": config.compression_codec, # values none, gzip, snappy, lz4, zstd
+        "enable.idempotence": config.idempotent,
+        "transactional.id": config.transactional_id,
+        "transaction.timeout.ms": config.transactional_timeout_ms,
+        "security.protocol": security_protocol,
+        "ssl.ca.pem": config.ssl_ca_cert,
+        "ssl.ca.location": config.ssl_ca_cert_file_path,
+        "ssl.certificate.pem": config.ssl_client_cert,
+        "ssl.key.pem": config.ssl_client_cert_key,
+        "ssl.key.password": config.ssl_client_cert_key_password
+        # ssl_ca_certs_from_system: config.ssl_ca_certs_from_system, # TODO: there is no corresponding librdkafka option. check what this does
+        # ssl_verify_hostname: config.ssl_verify_hostname, # check
+      }.merge(sasl_options)
+    end
+
+    def handles
+      Thread.current[:delivery_boy_handles] ||= []
+    end
   end
 end
data/lib/delivery_boy/instrumenter.rb
ADDED
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+module DeliveryBoy
+  class Instrumenter
+    NAMESPACE = "delivery_boy"
+
+    def initialize(default_payload: {})
+      require "active_support/notifications"
+      @default_payload = default_payload
+    end
+
+    def instrument(event_name, payload = {}, &block)
+      ActiveSupport::Notifications.instrument(
+        "#{event_name}.#{NAMESPACE}",
+        @default_payload.merge(payload),
+        &block
+      )
+    end
+  end
+
+  class NullInstrumenter
+    def instrument(*, &block)
+      block&.call
+    end
+  end
+end
data/lib/delivery_boy/railtie.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module DeliveryBoy
   class Railtie < Rails::Railtie
     initializer "delivery_boy.load_config" do
@@ -12,12 +14,15 @@ module DeliveryBoy
       end

       if config.datadog_enabled
-        require "
+        require "delivery_boy/datadog"
+
+        DeliveryBoy::Datadog.host = config.datadog_host if config.datadog_host.present?
+        DeliveryBoy::Datadog.port = config.datadog_port if config.datadog_port.present?
+        DeliveryBoy::Datadog.namespace = config.datadog_namespace if config.datadog_namespace.present?
+        DeliveryBoy::Datadog.tags = config.datadog_tags if config.datadog_tags.present?

-
-
-        Kafka::Datadog.namespace = config.datadog_namespace if config.datadog_namespace.present?
-        Kafka::Datadog.tags = config.datadog_tags if config.datadog_tags.present?
+        # Enable instrumentation
+        DeliveryBoy.instrumenter = DeliveryBoy::Instrumenter.new(default_payload: {})
       end
     end
   end
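This diff does not show where the datadog_* settings themselves are defined (that lives in the delivery_boy/config.rb changes summarized above), so the following is only a hedged sketch of how an application might turn the reporter on, assuming these are ordinary DeliveryBoy config options:

    # Hypothetical initializer; option names mirror what the railtie reads
    # (datadog_enabled, datadog_host, datadog_port, datadog_namespace, datadog_tags).
    DeliveryBoy.configure do |config|
      config.datadog_enabled = true
      config.datadog_host = "127.0.0.1"
      config.datadog_port = 8125
      config.datadog_namespace = "delivery_boy"
      config.datadog_tags = ["service:orders"]
    end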