ruby-kafka 0.3.13.beta1 → 0.3.13.beta2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: e6c48d1b7996a28caddd510f0daf032fdfca84fe
-  data.tar.gz: 302d2211024a2fdb904947c97cb0c258dc8583eb
+  metadata.gz: 3a1680eaa22189f9b23bcbca4789e3fd1ae59f16
+  data.tar.gz: 4bac9d11016756832446f01977bc64eea2ed6c16
 SHA512:
-  metadata.gz: d5524a95270673acb7e7e0ba2d6907eb774c715fb467925a052a633c1ab0e76de0fe5a16806ef3a7b95bbc29d54bd832a2b0a43d496753f4b5f1a162e4f91dde
-  data.tar.gz: 34f6417d7d0fdb987f1c6fdf69595a9de413ad010ca391f1d5d5fa3632fe781fe9514ae3568d20c2ed120887e288bb42848ebb22ab42125215d50ca16c0523da
+  metadata.gz: b3fa4970596e7f62155769a63103cfac3410ab234baf4a539620408f2704bd4e29b2e0977cf507cb3972c0788306aabf339334a90218f130a64dbdac64378eae
+  data.tar.gz: 7fd63b898166156713276a8d792e29af19a1764c9c27e97651ee267a7f11e10e07fcdc8807dea79d664e2c4e6248e9250b2e3cb85b1c0d41b127e73c1e6d65e2
lib/kafka/async_producer.rb CHANGED
@@ -153,11 +153,11 @@ module Kafka
     end
 
     def buffer_overflow(topic)
-      @instrumenter.instrument("buffer_overflow.producer", {
+      @instrumenter.instrument("buffer_overflow.async_producer", {
        topic: topic,
       })
 
-      @logger.error "Buffer overflow: failed to enqueue message for #{topic}"
+      @logger.error "Cannot produce message to #{topic}, max queue size (#{@max_queue_size}) reached"
 
      raise BufferOverflow
     end
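The overflow event now lives under the async producer's own namespace rather than the regular producer's, so subscribers listening for the old "buffer_overflow.producer" name need to be updated. A minimal sketch of a subscriber for the renamed event, assuming ruby-kafka's usual convention of suffixing ActiveSupport::Notifications events with ".kafka" (the handler body here is illustrative):

require "active_support/notifications"

ActiveSupport::Notifications.subscribe("buffer_overflow.async_producer.kafka") do |*, payload|
  # The payload carries the topic whose message was rejected.
  warn "async producer rejected a message for #{payload.fetch(:topic)}"
end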
lib/kafka/connection.rb CHANGED
@@ -118,16 +118,11 @@ module Kafka
     rescue Errno::ETIMEDOUT => e
       @logger.error "Timed out while trying to connect to #{self}: #{e}"
       raise ConnectionError, e
-    rescue SocketError, Errno::ECONNREFUSED => e
+    rescue SocketError, Errno::ECONNREFUSED, Errno::EHOSTUNREACH => e
       @logger.error "Failed to connect to #{self}: #{e}"
       raise ConnectionError, e
     end
 
-    def reopen
-      close
-      open
-    end
-
     # Writes a request over the connection.
     #
     # @param request [#encode] the request that should be encoded and written.
@@ -145,29 +140,14 @@ module Kafka
       )
 
       data = Kafka::Protocol::Encoder.encode_with(message)
-      retried = false
       notification[:request_size] = data.bytesize
 
-      begin
-        @encoder.write_bytes(data)
-      rescue Errno::ETIMEDOUT
-        @logger.error "Timed out while writing request #{@correlation_id}"
-        raise
-      rescue Errno::EPIPE, Errno::ECONNRESET, EOFError
-        # Kafka brokers automatically close client connections after a period of
-        # inactivity. If this has happened, it's safe to re-open the connection
-        # and retry the request.
-        if retried
-          raise
-        else
-          @logger.warn "Connection has been closed by the server, retrying..."
-          retried = true
-          reopen
-          retry
-        end
-      end
+      @encoder.write_bytes(data)
 
       nil
+    rescue Errno::ETIMEDOUT
+      @logger.error "Timed out while writing request #{@correlation_id}"
+      raise
     end
 
     # Reads a response from the connection.
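With the reopen helper and the silent reconnect gone, a write against a socket the broker has already closed now propagates to the caller instead of being retried inside write_request. A hedged sketch of handling this at the call site; the single-retry policy is illustrative, not the library's, and which exception class actually reaches you (Kafka::ConnectionError versus a raw Errno or EOFError) depends on the layer you call into:

attempts = 0

begin
  connection.send_request(request)
rescue Kafka::ConnectionError, SystemCallError, EOFError
  # The broker may simply have closed an idle socket; one retry is
  # usually safe for idempotent requests.
  attempts += 1
  raise if attempts > 1
  retry
end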
lib/kafka/consumer.rb CHANGED
@@ -54,6 +54,9 @@ module Kafka
 
       # Whether or not the consumer is currently consuming messages.
       @running = false
+
+      # The maximum number of bytes to fetch from a single partition, by topic.
+      @max_bytes = {}
     end
 
     # Subscribes the consumer to a topic.
@@ -71,12 +74,15 @@ module Kafka
     # only applies when first consuming a topic partition – once the consumer
     # has checkpointed its progress, it will always resume from the last
     # checkpoint.
+    # @param max_bytes_per_partition [Integer] the maximum amount of data fetched
+    #   from a single partition at a time.
     # @return [nil]
-    def subscribe(topic, default_offset: nil, start_from_beginning: true)
+    def subscribe(topic, default_offset: nil, start_from_beginning: true, max_bytes_per_partition: 1048576)
       default_offset ||= start_from_beginning ? :earliest : :latest
 
       @group.subscribe(topic)
       @offset_manager.set_default_offset(topic, default_offset)
+      @max_bytes[topic] = max_bytes_per_partition
 
       nil
     end
@@ -248,10 +254,11 @@ module Kafka
       assigned_partitions.each do |topic, partitions|
         partitions.each do |partition|
           offset = @offset_manager.next_offset_for(topic, partition)
+          max_bytes = @max_bytes.fetch(topic)
 
           @logger.debug "Fetching batch from #{topic}/#{partition} starting at offset #{offset}"
 
-          operation.fetch_from_partition(topic, partition, offset: offset)
+          operation.fetch_from_partition(topic, partition, offset: offset, max_bytes: max_bytes)
         end
       end
 
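The new max_bytes_per_partition option is recorded per topic at subscribe time and plumbed through to each partition fetch. Usage is straightforward; the group id and topic name below are illustrative:

consumer = kafka.consumer(group_id: "greeters")

# Default is 1048576 (1 MB) per partition; lower it for topics with
# large messages to keep individual fetch responses small.
consumer.subscribe("greetings", max_bytes_per_partition: 512 * 1024)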
lib/kafka/datadog.rb CHANGED
@@ -133,6 +133,7 @@ module Kafka
       def produce_message(event)
         client = event.payload.fetch(:client_id)
         topic = event.payload.fetch(:topic)
+        message_size = event.payload.fetch(:message_size)
         buffer_size = event.payload.fetch(:buffer_size)
         max_buffer_size = event.payload.fetch(:max_buffer_size)
         buffer_fill_ratio = buffer_size.to_f / max_buffer_size.to_f
@@ -144,6 +145,8 @@ module Kafka
         # This gets us the write rate.
         increment("producer.produce.messages", tags: tags.merge(topic: topic))
 
+        histogram("producer.produce.message_size", message_size, tags: tags.merge(topic: topic))
+
         # This gets us the avg/max buffer size per producer.
         histogram("producer.buffer.size", buffer_size, tags: tags)
 
@@ -220,10 +223,19 @@ module Kafka
         }
 
         # This gets us the avg/max queue size per producer.
-        histogram("producer.queue.size", queue_size, tags: tags)
+        histogram("async_producer.queue.size", queue_size, tags: tags)
 
         # This gets us the avg/max queue fill ratio per producer.
-        histogram("producer.queue.fill_ratio", queue_fill_ratio, tags: tags)
+        histogram("async_producer.queue.fill_ratio", queue_fill_ratio, tags: tags)
+      end
+
+      def buffer_overflow(event)
+        tags = {
+          client: event.payload.fetch(:client_id),
+          topic: event.payload.fetch(:topic),
+        }
+
+        increment("async_producer.produce.errors", tags: tags)
       end
 
       attach_to "async_producer.kafka"
lib/kafka/producer.rb CHANGED
@@ -195,11 +195,13 @@ module Kafka
       )
 
       if buffer_size >= @max_buffer_size
-        buffer_overflow topic, "Max buffer size (#{@max_buffer_size} messages) exceeded"
+        buffer_overflow topic,
+          "Cannot produce to #{topic}, max buffer size (#{@max_buffer_size} messages) reached"
       end
 
       if buffer_bytesize + message.bytesize >= @max_buffer_bytesize
-        buffer_overflow topic, "Max buffer bytesize (#{@max_buffer_bytesize} bytes) exceeded"
+        buffer_overflow topic,
+          "Cannot produce to #{topic}, max buffer bytesize (#{@max_buffer_bytesize} bytes) reached"
       end
 
       @target_topics.add(topic)
@@ -210,6 +212,7 @@ module Kafka
         key: key,
         topic: topic,
         create_time: create_time,
+        message_size: message.bytesize,
         buffer_size: buffer_size,
         max_buffer_size: @max_buffer_size,
       })
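The fuller messages above are what callers see when Kafka::BufferOverflow is raised from produce. A common pattern, sketched here with an illustrative flush-and-retry policy and topic name, is to drain the buffer and try once more:

begin
  producer.produce("hello", topic: "greetings")
rescue Kafka::BufferOverflow
  # e.g. "Cannot produce to greetings, max buffer size (1000 messages) reached"
  producer.deliver_messages  # drain the buffer, then try the write again
  producer.produce("hello", topic: "greetings")
end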
lib/kafka/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Kafka
-  VERSION = "0.3.13.beta1"
+  VERSION = "0.3.13.beta2"
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby-kafka
 version: !ruby/object:Gem::Version
-  version: 0.3.13.beta1
+  version: 0.3.13.beta2
 platform: ruby
 authors:
 - Daniel Schierbeck