ruby-kafka 0.1.1 → 0.1.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: a0a1febaafd77046f2840beb708d3ad0d6d4520b
-  data.tar.gz: 5a5824f5799db3d6f7f00193baa3c3a9b4aa6a93
+  metadata.gz: e80cbf966470ab5038d6aab4f88fde89bd5e7f66
+  data.tar.gz: fcb92660786331e9b0f70d9a3e33aa8919590eff
 SHA512:
-  metadata.gz: 8617964bb38c05859f8fed11c77aa6b906ddf2d4bdd86a4e0ab003e4ba797454f886bf2e9611270de30790a4e24cb0bab94d70274fd020fed148fa81d2868d0a
-  data.tar.gz: acc55fc5c0afcf0c9f46e33b04dcd0ec47a1de7fc6f63819d1ac2cdb4b5c46b347a1e570494aec08fc09993ebfa4ae6fbb260514772a1aa49159c6c8ed1a3e46
+  metadata.gz: a62034fba5bf5f64f27d19676ac3429bfc929984f744db234d2fe25db10ae32ae93da6ff1d2635a0285e1d72010ec565951b2fcf00c7e5bc0aba56ba158b03d5
+  data.tar.gz: 236d24d5b6a8fac6fbad9cedc01b0639085ca746bc8bcf12f110aacd2f120fcf0c16f81a58e01c10d92fcc8b2c1c235ce228bc1b731412d4e94ff2fc1dbf43e8
data/README.md CHANGED
@@ -62,6 +62,8 @@ producer.produce("hello4", topic: "test-messages", partition_key: "yo")
 producer.send_messages
 ```
 
+Read the docs for [Kafka::Producer](http://www.rubydoc.info/gems/ruby-kafka/Kafka/Producer) for more details.
+
 ## Development
 
 After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
@@ -84,6 +86,16 @@ The API should now have stabilized and the library should be battle tested enoug
 
 The Consumer API defined by Kafka 0.9 will be implemented.
 
+## Why a new library?
+
+There are a few existing Kafka clients in Ruby:
+
+* [Poseidon](https://github.com/bpot/poseidon) seems to work for Kafka 0.8, but the project is unmaintained and has known issues.
+* [Hermann](https://github.com/reiseburo/hermann) wraps the C library [librdkafka](https://github.com/edenhill/librdkafka) and seems to be very efficient, but its API and mode of operation are too intrusive for our needs.
+* [jruby-kafka](https://github.com/joekiller/jruby-kafka) is a great option if you're running on JRuby.
+
+We needed a robust client that could be used from our existing Ruby apps, one that allowed our Ops team to monitor operations and provided flexible error handling. No such client existed, hence this project.
+
 ## Contributing
 
 Bug reports and pull requests are welcome on GitHub at https://github.com/zendesk/ruby-kafka.
data/lib/kafka/broker.rb CHANGED
@@ -24,11 +24,10 @@ module Kafka
     end
 
     def fetch_metadata(**options)
-      api_key = Protocol::TOPIC_METADATA_API_KEY
       request = Protocol::TopicMetadataRequest.new(**options)
       response_class = Protocol::MetadataResponse
 
-      response = @connection.request(api_key, request, response_class)
+      response = @connection.send_request(request, response_class)
 
       response.topics.each do |topic|
         Protocol.handle_error(topic.topic_error_code)
@@ -47,11 +46,10 @@ module Kafka
     end
 
     def produce(**options)
-      api_key = Protocol::PRODUCE_API_KEY
       request = Protocol::ProduceRequest.new(**options)
       response_class = request.requires_acks? ? Protocol::ProduceResponse : nil
 
-      @connection.request(api_key, request, response_class)
+      @connection.send_request(request, response_class)
     end
   end
 end
data/lib/kafka/broker_pool.rb CHANGED
@@ -15,10 +15,12 @@ module Kafka
     # @param seed_brokers [Array<String>]
     # @param client_id [String]
     # @param logger [Logger]
+    # @param connect_timeout [Integer, nil] see {Connection#initialize}.
     # @param socket_timeout [Integer, nil] see {Connection#initialize}.
-    def initialize(seed_brokers:, client_id:, logger:, socket_timeout: nil)
+    def initialize(seed_brokers:, client_id:, logger:, connect_timeout: nil, socket_timeout: nil)
       @client_id = client_id
       @logger = logger
+      @connect_timeout = connect_timeout
       @socket_timeout = socket_timeout
       @brokers = {}
       @seed_brokers = seed_brokers
@@ -33,13 +35,9 @@ module Kafka
     #
     # @param topic [String]
     # @param partition [Integer]
-    # @return [Integer] the broker id.
-    def get_leader_id(topic, partition)
-      cluster_info.find_leader_id(topic, partition)
-    end
-
-    def get_broker(broker_id)
-      @brokers[broker_id] ||= connect_to_broker(broker_id)
+    # @return [Broker] the broker that's currently leader.
+    def get_leader(topic, partition)
+      get_broker(get_leader_id(topic, partition))
     end
 
     def partitions_for(topic)
@@ -59,6 +57,14 @@ module Kafka
 
     private
 
+    def get_leader_id(topic, partition)
+      cluster_info.find_leader_id(topic, partition)
+    end
+
+    def get_broker(broker_id)
+      @brokers[broker_id] ||= connect_to_broker(broker_id)
+    end
+
     def cluster_info
       @cluster_info ||= fetch_cluster_info
     end
@@ -109,6 +115,7 @@ module Kafka
         port: broker_info.port,
         node_id: broker_info.node_id,
         client_id: @client_id,
+        connect_timeout: @connect_timeout,
         socket_timeout: @socket_timeout,
         logger: @logger,
       )
data/lib/kafka/client.rb CHANGED
@@ -14,17 +14,21 @@ module Kafka
     #
     # @param logger [Logger]
     #
+    # @param connect_timeout [Integer, nil] the timeout setting for connecting
+    #   to brokers. See {BrokerPool#initialize}.
+    #
     # @param socket_timeout [Integer, nil] the timeout setting for socket
     #   connections. See {BrokerPool#initialize}.
     #
     # @return [Client]
-    def initialize(seed_brokers:, client_id: DEFAULT_CLIENT_ID, logger:, socket_timeout: nil)
+    def initialize(seed_brokers:, client_id: DEFAULT_CLIENT_ID, logger:, connect_timeout: nil, socket_timeout: nil)
       @logger = logger
 
       @broker_pool = BrokerPool.new(
         seed_brokers: seed_brokers,
         client_id: client_id,
         logger: logger,
+        connect_timeout: connect_timeout,
         socket_timeout: socket_timeout,
       )
     end
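
For context, the new `connect_timeout` option threads from the client through `BrokerPool` down to each `Connection`. A minimal sketch of how a caller might set both timeouts (the broker address is a placeholder; `Kafka.new` is the entry point shown in the producer docs below):

```ruby
require "kafka"
require "logger"

# Placeholder broker address; both timeouts are in seconds and fall back to
# the defaults defined on Connection when omitted.
kafka = Kafka.new(
  seed_brokers: ["kafka1.example.com:9092"],
  client_id: "my-app",
  logger: Logger.new($stderr),
  connect_timeout: 10,
  socket_timeout: 10,
)
```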
data/lib/kafka/connection.rb CHANGED
@@ -1,5 +1,6 @@
 require "stringio"
 require "kafka/socket_with_timeout"
+require "kafka/instrumentation"
 require "kafka/protocol/request_message"
 require "kafka/protocol/encoder"
 require "kafka/protocol/decoder"
@@ -11,8 +12,20 @@ module Kafka
   # Usually you'll need a separate connection to each broker in a cluster, since most
   # requests must be directed specifically to the broker that is currently leader for
   # the set of topic partitions you want to produce to or consume from.
+  #
+  # ## Instrumentation
+  #
+  # Connections emit a `request.kafka` notification on each request. The following
+  # keys will be found in the payload:
+  #
+  # * `:api` — the name of the API being invoked.
+  # * `:request_size` — the number of bytes in the request.
+  # * `:response_size` — the number of bytes in the response.
+  #
+  # The notification also includes the duration of the request.
+  #
   class Connection
-    SOCKET_TIMEOUT = 5
+    SOCKET_TIMEOUT = 10
     CONNECT_TIMEOUT = 10
 
     # Opens a connection to a Kafka broker.
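
The payload keys above map directly to what a subscriber receives. A sketch of a subscriber, assuming ActiveSupport is loaded (see `lib/kafka/instrumentation.rb` below; without it the notifications are no-ops):

```ruby
require "active_support/notifications"

# Log the API name, duration, and sizes for each Kafka request.
ActiveSupport::Notifications.subscribe("request.kafka") do |name, start, finish, id, payload|
  duration_ms = (finish - start) * 1000
  puts format("%s: %.2f ms, %d bytes sent, %d bytes received",
    payload[:api], duration_ms, payload[:request_size], payload[:response_size])
end
```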
@@ -26,7 +39,7 @@ module Kafka
     # @param connect_timeout [Integer] the socket timeout for connecting to the broker.
     #   Default is 10 seconds.
     # @param socket_timeout [Integer] the socket timeout for reading and writing to the
-    #   broker. Default is 5 seconds.
+    #   broker. Default is 10 seconds.
     #
     # @return [Connection] a new connection.
     def initialize(host:, port:, client_id:, logger:, connect_timeout: nil, socket_timeout: nil)
@@ -35,33 +48,13 @@ module Kafka
 
       @connect_timeout = connect_timeout || CONNECT_TIMEOUT
       @socket_timeout = socket_timeout || SOCKET_TIMEOUT
-
-      @logger.info "Opening connection to #{@host}:#{@port} with client id #{@client_id}..."
-
-      connect
     end
 
     def to_s
       "#{@host}:#{@port}"
     end
 
-    def connect
-      @socket = SocketWithTimeout.new(@host, @port, connect_timeout: @connect_timeout, timeout: @socket_timeout)
-
-      @encoder = Kafka::Protocol::Encoder.new(@socket)
-      @decoder = Kafka::Protocol::Decoder.new(@socket)
-
-      # Correlation id is initialized to zero and bumped for each request.
-      @correlation_id = 0
-    rescue Errno::ETIMEDOUT => e
-      @logger.error "Timed out while trying to connect to #{self}: #{e}"
-      raise ConnectionError, e
-    rescue SocketError, Errno::ECONNREFUSED => e
-      @logger.error "Failed to connect to #{self}: #{e}"
-      raise ConnectionError, e
-    end
-
-    def connected?
+    def open?
       !@socket.nil?
     end
 
@@ -75,32 +68,24 @@ module Kafka
 
     # Sends a request over the connection.
     #
-    # @param api_key [Integer] the integer code for the API that is invoked.
     # @param request [#encode] the request that should be encoded and written.
     # @param response_class [#decode] an object that can decode the response.
     #
     # @return [Object] the response that was decoded by `response_class`.
-    def request(api_key, request, response_class)
-      connect unless connected?
-
-      write_request(api_key, request)
-
-      unless response_class.nil?
-        loop do
-          correlation_id, response = read_response(response_class)
-
-          # There may have been a previous request that timed out before the client
-          # was able to read the response. In that case, the response will still be
-          # sitting in the socket waiting to be read. If the response we just read
-          # was to a previous request, we can safely skip it.
-          if correlation_id < @correlation_id
-            @logger.error "Received out-of-order response id #{correlation_id}, was expecting #{@correlation_id}"
-          elsif correlation_id > @correlation_id
-            raise Kafka::Error, "Correlation id mismatch: expected #{@correlation_id} but got #{correlation_id}"
-          else
-            break response
-          end
-        end
+    def send_request(request, response_class)
+      Instrumentation.instrument("request.kafka") do |notification|
+        open unless open?
+
+        @correlation_id += 1
+
+        # Look up the API name.
+        notification[:api] = Protocol.api_name(request.api_key)
+
+        # We may not read a response, in which case the size is zero.
+        notification[:response_size] = 0
+
+        write_request(request, notification)
+        wait_for_response(response_class, notification) unless response_class.nil?
       end
     rescue Errno::EPIPE, Errno::ECONNRESET, Errno::ETIMEDOUT, EOFError => e
       @logger.error "Connection error: #{e}"
@@ -112,24 +97,41 @@ module Kafka
 
     private
 
+    def open
+      @logger.info "Opening connection to #{@host}:#{@port} with client id #{@client_id}..."
+
+      @socket = SocketWithTimeout.new(@host, @port, connect_timeout: @connect_timeout, timeout: @socket_timeout)
+
+      @encoder = Kafka::Protocol::Encoder.new(@socket)
+      @decoder = Kafka::Protocol::Decoder.new(@socket)
+
+      # Correlation id is initialized to zero and bumped for each request.
+      @correlation_id = 0
+    rescue Errno::ETIMEDOUT => e
+      @logger.error "Timed out while trying to connect to #{self}: #{e}"
+      raise ConnectionError, e
+    rescue SocketError, Errno::ECONNREFUSED => e
+      @logger.error "Failed to connect to #{self}: #{e}"
+      raise ConnectionError, e
+    end
+
     # Writes a request over the connection.
     #
-    # @param api_key [Integer] the integer code for the API that is invoked.
     # @param request [#encode] the request that should be encoded and written.
     #
     # @return [nil]
-    def write_request(api_key, request)
-      @correlation_id += 1
+    def write_request(request, notification)
       @logger.debug "Sending request #{@correlation_id} to #{to_s}"
 
       message = Kafka::Protocol::RequestMessage.new(
-        api_key: api_key,
+        api_key: request.api_key,
         correlation_id: @correlation_id,
         client_id: @client_id,
         request: request,
       )
 
       data = Kafka::Protocol::Encoder.encode_with(message)
+      notification[:request_size] = data.bytesize
 
       @encoder.write_bytes(data)
 
@@ -145,12 +147,13 @@ module Kafka
     # a given Decoder.
     #
     # @return [nil]
-    def read_response(response_class)
+    def read_response(response_class, notification)
       @logger.debug "Waiting for response #{@correlation_id} from #{to_s}"
 
-      bytes = @decoder.bytes
+      data = @decoder.bytes
+      notification[:response_size] = data.bytesize
 
-      buffer = StringIO.new(bytes)
+      buffer = StringIO.new(data)
       response_decoder = Kafka::Protocol::Decoder.new(buffer)
 
       correlation_id = response_decoder.int32
@@ -163,5 +166,23 @@ module Kafka
       @logger.error "Timed out while waiting for response #{@correlation_id}"
       raise
     end
+
+    def wait_for_response(response_class, notification)
+      loop do
+        correlation_id, response = read_response(response_class, notification)
+
+        # There may have been a previous request that timed out before the client
+        # was able to read the response. In that case, the response will still be
+        # sitting in the socket waiting to be read. If the response we just read
+        # was to a previous request, we can safely skip it.
+        if correlation_id < @correlation_id
+          @logger.error "Received out-of-order response id #{correlation_id}, was expecting #{@correlation_id}"
+        elsif correlation_id > @correlation_id
+          raise Kafka::Error, "Correlation id mismatch: expected #{@correlation_id} but got #{correlation_id}"
+        else
+          return response
+        end
+      end
+    end
   end
 end
data/lib/kafka/instrumentation.rb ADDED
@@ -0,0 +1,13 @@
+module Kafka
+  class NullInstrumentation
+    def self.instrument(name, payload = {})
+      yield payload if block_given?
+    end
+  end
+
+  if defined?(ActiveSupport::Notifications)
+    Instrumentation = ActiveSupport::Notifications
+  else
+    Instrumentation = NullInstrumentation
+  end
+end
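
Note that the `defined?` check runs when this file is loaded, so ActiveSupport must be required before ruby-kafka for real notifications to be emitted; otherwise the no-op fallback is silently used:

```ruby
# Load order matters: require ActiveSupport first so that
# Kafka::Instrumentation resolves to ActiveSupport::Notifications
# rather than the no-op NullInstrumentation.
require "active_support/notifications"
require "kafka"
```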
data/lib/kafka/producer.rb CHANGED
@@ -7,6 +7,20 @@ module Kafka
 
   # Allows sending messages to a Kafka cluster.
   #
+  # Typically you won't instantiate this class yourself, but rather have {Kafka::Client}
+  # do it for you, e.g.
+  #
+  #     # Will instantiate Kafka::Client
+  #     kafka = Kafka.new(...)
+  #
+  #     # Will instantiate Kafka::Producer
+  #     producer = kafka.get_producer
+  #
+  # This is done in order to share a logger as well as a pool of broker connections across
+  # different producers. This also means that you don't need to pass the `broker_pool` and
+  # `logger` options to `#get_producer`. See {#initialize} for the list of other options
+  # you can pass in.
+  #
   # ## Buffering
   #
   # The producer buffers pending messages until {#send_messages} is called. Note that there is
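
Putting the documented flow together, a minimal end-to-end sketch (the broker address and topic are placeholders; `produce` and `send_messages` are as shown in the README diff above):

```ruby
require "kafka"
require "logger"

kafka = Kafka.new(
  seed_brokers: ["kafka1.example.com:9092"], # placeholder address
  client_id: "my-app",
  logger: Logger.new($stderr),
)

# The client shares its logger and broker pool with the producer.
producer = kafka.get_producer

# Messages are buffered locally...
producer.produce("hello", topic: "test-messages", partition_key: "greeting")

# ...and only written to the cluster here.
producer.send_messages
```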
@@ -73,8 +87,10 @@ module Kafka
   # Initializes a new Producer.
   #
   # @param broker_pool [BrokerPool] the broker pool representing the cluster.
+  #   Typically passed in for you.
   #
-  # @param logger [Logger]
+  # @param logger [Logger] the logger that should be used. Typically passed
+  #   in for you.
   #
   # @param ack_timeout [Integer] The number of seconds a broker can wait for
   #   replicas to acknowledge a write before responding with a timeout.
@@ -91,7 +107,7 @@ module Kafka
   # @param max_buffer_size [Integer] the number of messages allowed in the buffer
   #   before new writes will raise BufferOverflow exceptions.
   #
-  def initialize(broker_pool:, logger:, ack_timeout: 10, required_acks: 1, max_retries: 2, retry_backoff: 1, max_buffer_size: 1000)
+  def initialize(broker_pool:, logger:, ack_timeout: 5, required_acks: 1, max_retries: 2, retry_backoff: 1, max_buffer_size: 1000)
     @broker_pool = broker_pool
     @logger = logger
     @required_acks = required_acks
data/lib/kafka/protocol.rb CHANGED
@@ -1,7 +1,9 @@
 module Kafka
   module Protocol
-    PRODUCE_API_KEY = 0
-    TOPIC_METADATA_API_KEY = 3
+    APIS = {
+      0 => :produce,
+      3 => :topic_metadata,
+    }
 
     ERRORS = {
       -1 => UnknownError,
@@ -23,7 +25,6 @@ module Kafka
       21 => InvalidRequiredAcks,
     }
 
-
     def self.handle_error(error_code)
       if error_code == 0
         # No errors, yay!
@@ -33,6 +34,10 @@ module Kafka
         raise UnknownError, "Unknown error with code #{error_code}"
       end
     end
+
+    def self.api_name(api_key)
+      APIS.fetch(api_key, :unknown)
+    end
   end
 end
 
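For reference, `api_name` is what the connection uses to put a symbolic name into the instrumentation payload; unknown keys fall back to `:unknown`:

```ruby
Kafka::Protocol.api_name(0)   #=> :produce
Kafka::Protocol.api_name(3)   #=> :topic_metadata
Kafka::Protocol.api_name(42)  #=> :unknown
```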
data/lib/kafka/protocol/produce_request.rb CHANGED
@@ -33,6 +33,10 @@ module Kafka
         @messages_for_topics = messages_for_topics
       end
 
+      def api_key
+        0
+      end
+
       # Whether this request requires any acknowledgements at all. If no acknowledgements
       # are required, the server will not send back a response at all.
       #
data/lib/kafka/protocol/topic_metadata_request.rb CHANGED
@@ -5,6 +5,10 @@ module Kafka
         @topics = topics
       end
 
+      def api_key
+        3
+      end
+
       def encode(encoder)
         encoder.write_array(@topics) {|topic| encoder.write_string(topic) }
       end
data/lib/kafka/transmission.rb CHANGED
@@ -12,18 +12,16 @@ module Kafka
       messages_for_broker = {}
 
       @buffer.each do |topic, partition, messages|
-        broker_id = @broker_pool.get_leader_id(topic, partition)
+        broker = @broker_pool.get_leader(topic, partition)
 
-        @logger.debug "Current leader for #{topic}/#{partition} is node #{broker_id}"
+        @logger.debug "Current leader for #{topic}/#{partition} is node #{broker}"
 
-        messages_for_broker[broker_id] ||= MessageBuffer.new
-        messages_for_broker[broker_id].concat(messages, topic: topic, partition: partition)
+        messages_for_broker[broker] ||= MessageBuffer.new
+        messages_for_broker[broker].concat(messages, topic: topic, partition: partition)
       end
 
-      messages_for_broker.each do |broker_id, message_set|
+      messages_for_broker.each do |broker, message_set|
         begin
-          broker = @broker_pool.get_broker(broker_id)
-
           response = broker.produce(
             messages_for_topics: message_set.to_h,
             required_acks: @required_acks,
@@ -32,7 +30,7 @@ module Kafka
 
           handle_response(response) if response
         rescue ConnectionError => e
-          @logger.error "Could not connect to broker #{broker_id}: #{e}"
+          @logger.error "Could not connect to broker #{broker}: #{e}"
 
           # Mark the broker pool as stale in order to force a cluster metadata refresh.
           @broker_pool.mark_as_stale!
data/lib/kafka/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Kafka
-  VERSION = "0.1.1"
+  VERSION = "0.1.2"
 end
data/ruby-kafka.gemspec CHANGED
@@ -32,4 +32,5 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "dotenv"
   spec.add_development_dependency "docker-api"
   spec.add_development_dependency "rspec-benchmark"
+  spec.add_development_dependency "activesupport", ">= 4.2.0", "< 5.1"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-kafka
 version: !ruby/object:Gem::Version
-  version: 0.1.1
+  version: 0.1.2
 platform: ruby
 authors:
 - Daniel Schierbeck
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2016-02-04 00:00:00.000000000 Z
+date: 2016-02-05 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -108,6 +108,26 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: activesupport
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 4.2.0
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '5.1'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 4.2.0
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '5.1'
 description: |-
   A client library for the Kafka distributed commit log.
 
@@ -129,12 +149,12 @@ files:
 - bin/setup
 - circle.yml
 - examples/simple-producer.rb
-- kafka.gemspec
 - lib/kafka.rb
 - lib/kafka/broker.rb
 - lib/kafka/broker_pool.rb
 - lib/kafka/client.rb
 - lib/kafka/connection.rb
+- lib/kafka/instrumentation.rb
 - lib/kafka/message_buffer.rb
 - lib/kafka/partitioner.rb
 - lib/kafka/producer.rb
@@ -151,6 +171,7 @@ files:
 - lib/kafka/transmission.rb
 - lib/kafka/version.rb
 - lib/ruby-kafka.rb
+- ruby-kafka.gemspec
 homepage: https://github.com/zendesk/ruby-kafka
 licenses:
 - Apache License Version 2.0