ruby-kafka 0.7.9 → 1.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 9facc22cb8739a683d4be6ee8389f0e2c6e7a6530e17e855ee6333aabdf428f0
- data.tar.gz: d0e979d9e2e0ccf776d7d062e807367ff6380fa4f2863ecf6c414f3a7a3dc1fa
+ metadata.gz: 0f8391cc7b1989cb5f669796bc4ad647b77d882e6506fae42bab18acb8a6bcc6
+ data.tar.gz: 012baaff5d2cc9eb17e3a7b7342f49f7c905a5f91d26078fa0ecf2f0fa81a2ad
  SHA512:
- metadata.gz: b339415a8ba234fe78d0fce1c36933f7d39e79561dfd296a1cdb580877f3e5ab7fe5c8119709458b5207f9dc9375638d7a1e93c5c24adc155841dca07b783e5b
- data.tar.gz: b54e211a11096e98c6460958de7097c4293fdee3dac18e117f75e9ce2c1eeb6b749a8a8f2b5ba8e559ee75c4fa33a8a299c68e37bab9a1f7e16194d8dd8ef92f
+ metadata.gz: 7f4e9302ca0ab41a6fded75f95ed866d959a2a027b90051ed7f3d7fba573aa63e57be7692d004dbeb3bacd99fe44b24188f81b0f3bed50f68e1da5189262271f
+ data.tar.gz: 7266bdd50e66a7ab9b3c71025468af0a4bea778fd312c41d4c699e2609420c11c28accb3246407ce98330bf521c109691bd1d4943dd532a44ee6732f1a410922
data/.circleci/config.yml CHANGED
@@ -145,21 +145,149 @@ jobs:
  environment:
  LOG_LEVEL: DEBUG
  - image: wurstmeister/zookeeper
- - image: wurstmeister/kafka:2.12-2.1.0
+ - image: wurstmeister/kafka:2.12-2.1.1
  environment:
  KAFKA_ADVERTISED_HOST_NAME: localhost
  KAFKA_ADVERTISED_PORT: 9092
  KAFKA_PORT: 9092
  KAFKA_ZOOKEEPER_CONNECT: localhost:2181
  KAFKA_DELETE_TOPIC_ENABLE: true
- - image: wurstmeister/kafka:2.12-2.1.0
+ - image: wurstmeister/kafka:2.12-2.1.1
  environment:
  KAFKA_ADVERTISED_HOST_NAME: localhost
  KAFKA_ADVERTISED_PORT: 9093
  KAFKA_PORT: 9093
  KAFKA_ZOOKEEPER_CONNECT: localhost:2181
  KAFKA_DELETE_TOPIC_ENABLE: true
- - image: wurstmeister/kafka:2.12-2.1.0
+ - image: wurstmeister/kafka:2.12-2.1.1
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9094
+ KAFKA_PORT: 9094
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ steps:
+ - checkout
+ - run: bundle install --path vendor/bundle
+ - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.2:
+ docker:
+ - image: circleci/ruby:2.5.1-node
+ environment:
+ LOG_LEVEL: DEBUG
+ - image: wurstmeister/zookeeper
+ - image: wurstmeister/kafka:2.12-2.2.1
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9092
+ KAFKA_PORT: 9092
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.2.1
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9093
+ KAFKA_PORT: 9093
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.2.1
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9094
+ KAFKA_PORT: 9094
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ steps:
+ - checkout
+ - run: bundle install --path vendor/bundle
+ - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.3:
+ docker:
+ - image: circleci/ruby:2.5.1-node
+ environment:
+ LOG_LEVEL: DEBUG
+ - image: wurstmeister/zookeeper
+ - image: wurstmeister/kafka:2.12-2.3.1
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9092
+ KAFKA_PORT: 9092
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.3.1
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9093
+ KAFKA_PORT: 9093
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.3.1
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9094
+ KAFKA_PORT: 9094
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ steps:
+ - checkout
+ - run: bundle install --path vendor/bundle
+ - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.4:
+ docker:
+ - image: circleci/ruby:2.5.1-node
+ environment:
+ LOG_LEVEL: DEBUG
+ - image: wurstmeister/zookeeper
+ - image: wurstmeister/kafka:2.12-2.4.0
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9092
+ KAFKA_PORT: 9092
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.4.0
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9093
+ KAFKA_PORT: 9093
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.4.0
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9094
+ KAFKA_PORT: 9094
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ steps:
+ - checkout
+ - run: bundle install --path vendor/bundle
+ - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.5:
+ docker:
+ - image: circleci/ruby:2.5.1-node
+ environment:
+ LOG_LEVEL: DEBUG
+ - image: wurstmeister/zookeeper
+ - image: wurstmeister/kafka:2.12-2.5.0
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9092
+ KAFKA_PORT: 9092
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.5.0
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9093
+ KAFKA_PORT: 9093
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.5.0
  environment:
  KAFKA_ADVERTISED_HOST_NAME: localhost
  KAFKA_ADVERTISED_PORT: 9094
@@ -181,3 +309,7 @@ workflows:
  - kafka-1.1
  - kafka-2.0
  - kafka-2.1
+ - kafka-2.2
+ - kafka-2.3
+ - kafka-2.4
+ - kafka-2.5
data/.github/workflows/stale.yml ADDED
@@ -0,0 +1,19 @@
+ name: Mark stale issues and pull requests
+
+ on:
+ schedule:
+ - cron: "0 0 * * *"
+
+ jobs:
+ stale:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/stale@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'Issue has been marked as stale due to a lack of activity.'
+ stale-pr-message: 'Pull request has been marked as stale due to a lack of activity.'
+ stale-issue-label: 'no-issue-activity'
+ stale-pr-label: 'no-pr-activity'
data/.ruby-version CHANGED
@@ -1 +1 @@
- 2.5.1
+ 2.7.1
data/CHANGELOG.md CHANGED
@@ -2,6 +2,32 @@
 
  Changes and additions to the library will be listed here.
 
+ ## Unreleased
+
+ ## 1.2.0
+
+ - Add producer and consumer interceptors (#837).
+ - Add support for configuring the client partitioner (#848).
+
+ ## 1.1.0
+
+ - Extra sanity checking when marking offsets as processed (#824).
+ - Make `verify_hostname` settable for SSL contexts (#828).
+ - Instrument `create_time` from last message in batch (#811).
+ - Add client function for fetching topic replica count (#822).
+ - Allow consumers to refresh the topic lists (#818).
+ - Disconnect after leaving a group (#817).
+ - Use `max_wait_time` as the sleep instead of hardcoded 2 seconds (#825).
+
+ ## 1.0.0
+
+ - Add client methods to manage configs (#759)
+ - Support Kafka 2.3 and 2.4.
+
+ ## 0.7.10
+
+ - Fix logger again (#762)
+
  ## 0.7.9
 
  - Fix SSL authentication for ruby < 2.4.0 (#742)
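The interceptor API headlining 1.2.0 is plain duck typing, as the `@param` docs further down in this diff describe: a producer interceptor is any object responding to `call(Kafka::PendingMessage)`, a consumer interceptor responds to `call(Kafka::FetchedBatch)`. A minimal sketch; the `LoggingInterceptor` class, broker address, and topic name are illustrative, not part of the gem:

```ruby
require "kafka"

# A producer interceptor receives each pending message before it is buffered;
# whatever it returns is passed along the chain.
class LoggingInterceptor
  def call(message)
    puts "producing to #{message.topic}"
    message
  end
end

kafka = Kafka.new(["kafka1:9092"], client_id: "example")
producer = kafka.producer(interceptors: [LoggingInterceptor.new])
producer.produce("hello", topic: "greetings")
producer.deliver_messages
```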
data/README.md CHANGED
@@ -108,6 +108,26 @@ Or install it yourself as:
  <td>Limited support</td>
  <td>Limited support</td>
  </tr>
+ <tr>
+ <th>Kafka 2.2</th>
+ <td>Limited support</td>
+ <td>Limited support</td>
+ </tr>
+ <tr>
+ <th>Kafka 2.3</th>
+ <td>Limited support</td>
+ <td>Limited support</td>
+ </tr>
+ <tr>
+ <th>Kafka 2.4</th>
+ <td>Limited support</td>
+ <td>Limited support</td>
+ </tr>
+ <tr>
+ <th>Kafka 2.5</th>
+ <td>Limited support</td>
+ <td>Limited support</td>
+ </tr>
  </table>
 
  This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -119,6 +139,10 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
  - **Kafka 1.0:** Everything that works with Kafka 0.11 should still work, but so far no features specific to Kafka 1.0 have been added.
  - **Kafka 2.0:** Everything that works with Kafka 1.0 should still work, but so far no features specific to Kafka 2.0 have been added.
  - **Kafka 2.1:** Everything that works with Kafka 2.0 should still work, but so far no features specific to Kafka 2.1 have been added.
+ - **Kafka 2.2:** Everything that works with Kafka 2.1 should still work, but so far no features specific to Kafka 2.2 have been added.
+ - **Kafka 2.3:** Everything that works with Kafka 2.2 should still work, but so far no features specific to Kafka 2.3 have been added.
+ - **Kafka 2.4:** Everything that works with Kafka 2.3 should still work, but so far no features specific to Kafka 2.4 have been added.
+ - **Kafka 2.5:** Everything that works with Kafka 2.4 should still work, but so far no features specific to Kafka 2.5 have been added.
 
  This library requires Ruby 2.1 or higher.
 
@@ -325,6 +349,26 @@ partition = PartitioningScheme.assign(partitions, event)
  producer.produce(event, topic: "events", partition: partition)
  ```
 
+ Another option is to configure a custom client partitioner that implements `call(partition_count, message)` and uses the same schema as the other client. For example:
+
+ ```ruby
+ class CustomPartitioner
+ def call(partition_count, message)
+ ...
+ end
+ end
+
+ partitioner = CustomPartitioner.new
+ Kafka.new(partitioner: partitioner, ...)
+ ```
+
+ Or, simply create a Proc handling the partitioning logic instead of having to add a new class. For example:
+
+ ```ruby
+ partitioner = -> (partition_count, message) { ... }
+ Kafka.new(partitioner: partitioner, ...)
+ ```
+
  #### Buffering and Error Handling
 
  The producer is designed for resilience in the face of temporary network errors, Kafka broker failovers, and other issues that prevent the client from writing messages to the destination topics. It does this by employing local, in-memory buffers. Only when messages are acknowledged by a Kafka broker will they be removed from the buffer.
@@ -928,6 +972,8 @@ This configures the store to look up CA certificates from the system default cer
 
  In order to authenticate the client to the cluster, you need to pass in a certificate and key created for the client and trusted by the brokers.
 
+ **NOTE**: You can disable hostname validation by passing `verify_hostname: false`.
+
  ```ruby
  kafka = Kafka.new(
  ["kafka1:9092"],
data/lib/kafka/async_producer.rb CHANGED
@@ -103,6 +103,9 @@ module Kafka
  # @raise [BufferOverflow] if the message queue is full.
  # @return [nil]
  def produce(value, topic:, **options)
+ # We want to fail fast if `topic` isn't a String
+ topic = topic.to_str
+
  ensure_threads_running!
 
  if @queue.size >= @max_queue_size
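The new `topic.to_str` line means a non-String topic now raises immediately in the caller's thread instead of failing later in the background delivery thread. Illustrative behavior, assuming a configured `kafka` client:

```ruby
producer = kafka.async_producer
producer.produce("hi", topic: "greetings") # fine: String implements #to_str
producer.produce("hi", topic: :greetings)  # NoMethodError: Symbol has no #to_str
```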
@@ -243,10 +246,10 @@ module Kafka
 
  private
 
- def produce(*args)
+ def produce(value, **kwargs)
  retries = 0
  begin
- @producer.produce(*args)
+ @producer.produce(value, **kwargs)
  rescue BufferOverflow => e
  deliver_messages
  if @max_retries == -1
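This signature change tracks Ruby 2.7's keyword-argument separation (and the `.ruby-version` bump above): delegating through a bare `*args` folds the keyword hash into a positional argument, which warns on 2.7 and breaks on 3.0. A standalone sketch of the difference; the method names are illustrative:

```ruby
def target(value, topic:)
  puts "#{value} -> #{topic}"
end

def old_style(*args)
  target(*args) # keywords arrive as a trailing Hash: 2.7 warns, 3.0 raises
end

def new_style(value, **kwargs)
  target(value, **kwargs) # keywords stay keywords on every Ruby version
end

new_style("hi", topic: "greetings")
```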
data/lib/kafka/client.rb CHANGED
@@ -62,16 +62,25 @@ module Kafka
  #
  # @param sasl_over_ssl [Boolean] whether to enforce SSL with SASL
  #
+ # @param ssl_ca_certs_from_system [Boolean] whether to use the CA certs from the
+ # system's default certificate store.
+ #
+ # @param partitioner [Partitioner, nil] the partitioner that should be used by the client.
+ #
  # @param sasl_oauth_token_provider [Object, nil] OAuthBearer Token Provider instance that
  # implements method token. See {Sasl::OAuth#initialize}
  #
+ # @param ssl_verify_hostname [Boolean, true] whether to verify that the host serving
+ # the SSL certificate and the signing chain of the certificate have the correct domains
+ # based on the CA certificate
+ #
  # @return [Client]
  def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
  ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
  ssl_client_cert_key_password: nil, ssl_client_cert_chain: nil, sasl_gssapi_principal: nil,
  sasl_gssapi_keytab: nil, sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
  sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
- sasl_over_ssl: true, ssl_ca_certs_from_system: false, sasl_oauth_token_provider: nil, ssl_verify_hostname: true)
+ sasl_over_ssl: true, ssl_ca_certs_from_system: false, partitioner: nil, sasl_oauth_token_provider: nil, ssl_verify_hostname: true)
  @logger = TaggedLogger.new(logger)
  @instrumenter = Instrumenter.new(client_id: client_id)
  @seed_brokers = normalize_seed_brokers(seed_brokers)
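Taken together, the new constructor options let a partitioner be set once on the client rather than computed per call. A sketch; the CRC32 scheme below is an illustrative choice that mimics key-based partitioning, not the gem's documented default:

```ruby
require "zlib"
require "kafka"

kafka = Kafka.new(
  ["kafka1:9092"],
  ssl_ca_certs_from_system: true, # trust the OS certificate store
  partitioner: ->(partition_count, message) {
    # Route by partition_key when present, otherwise by the message key.
    Zlib.crc32(message.partition_key || message.key.to_s) % partition_count
  }
)
```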
@@ -115,6 +124,7 @@ module Kafka
  )
 
  @cluster = initialize_cluster
+ @partitioner = partitioner || Partitioner.new
  end
 
  # Delivers a single message to the Kafka cluster.
@@ -138,6 +148,9 @@ module Kafka
  def deliver_message(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, retries: 1)
  create_time = Time.now
 
+ # We want to fail fast if `topic` isn't a String
+ topic = topic.to_str
+
  message = PendingMessage.new(
  value: value,
  key: key,
@@ -150,7 +163,7 @@ module Kafka
 
  if partition.nil?
  partition_count = @cluster.partitions_for(topic).count
- partition = Partitioner.partition_for_key(partition_count, message)
+ partition = @partitioner.call(partition_count, message)
  end
 
  buffer = MessageBuffer.new
@@ -241,6 +254,9 @@ module Kafka
  # be in a message set before it should be compressed. Note that message sets
  # are per-partition rather than per-topic or per-producer.
  #
+ # @param interceptors [Array<Object>] a list of producer interceptors that implement
+ # `call(Kafka::PendingMessage)`.
+ #
  # @return [Kafka::Producer] the Kafka producer.
  def producer(
  compression_codec: nil,
@@ -254,7 +270,8 @@ module Kafka
  idempotent: false,
  transactional: false,
  transactional_id: nil,
- transactional_timeout: 60
+ transactional_timeout: 60,
+ interceptors: []
  )
  cluster = initialize_cluster
  compressor = Compressor.new(
@@ -284,6 +301,8 @@ module Kafka
  retry_backoff: retry_backoff,
  max_buffer_size: max_buffer_size,
  max_buffer_bytesize: max_buffer_bytesize,
+ partitioner: @partitioner,
+ interceptors: interceptors
  )
  end
 
@@ -333,15 +352,23 @@ module Kafka
  # @param fetcher_max_queue_size [Integer] max number of items in the fetch queue that
  # are stored for further processing. Note, that each item in the queue represents a
  # response from a single broker.
+ # @param refresh_topic_interval [Integer] interval of refreshing the topic list.
+ # If it is 0, the topic list won't be refreshed (default)
+ # If it is n (n > 0), the topic list will be refreshed every n seconds
+ # @param interceptors [Array<Object>] a list of consumer interceptors that implement
+ # `call(Kafka::FetchedBatch)`.
  # @return [Consumer]
  def consumer(
  group_id:,
  session_timeout: 30,
+ rebalance_timeout: 60,
  offset_commit_interval: 10,
  offset_commit_threshold: 0,
  heartbeat_interval: 10,
  offset_retention_time: nil,
- fetcher_max_queue_size: 100
+ fetcher_max_queue_size: 100,
+ refresh_topic_interval: 0,
+ interceptors: []
  )
  cluster = initialize_cluster
 
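A sketch wiring the new consumer options together, assuming a configured `kafka` client; the group name and the `BatchMetrics` class are illustrative, and each interceptor must respond to `call(Kafka::FetchedBatch)` per the docs above:

```ruby
class BatchMetrics
  def call(batch)
    puts "#{batch.topic}/#{batch.partition}: #{batch.messages.size} messages"
    batch
  end
end

consumer = kafka.consumer(
  group_id: "my-group",
  rebalance_timeout: 90,      # allow slower workers to rejoin during rebalances
  refresh_topic_interval: 30, # re-fetch the topic list every 30 seconds
  interceptors: [BatchMetrics.new]
)
```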
@@ -357,6 +384,7 @@ module Kafka
  logger: @logger,
  group_id: group_id,
  session_timeout: session_timeout,
+ rebalance_timeout: rebalance_timeout,
  retention_time: retention_time,
  instrumenter: instrumenter,
  )
@@ -394,6 +422,8 @@ module Kafka
  fetcher: fetcher,
  session_timeout: session_timeout,
  heartbeat: heartbeat,
+ refresh_topic_interval: refresh_topic_interval,
+ interceptors: interceptors
  )
  end
 
@@ -530,6 +560,24 @@ module Kafka
  end
  end
 
+ # Describe broker configs
+ #
+ # @param broker_id [int] the id of the broker
+ # @param configs [Array] array of config keys.
+ # @return [Array<Kafka::Protocol::DescribeConfigsResponse::ConfigEntry>]
+ def describe_configs(broker_id, configs = [])
+ @cluster.describe_configs(broker_id, configs)
+ end
+
+ # Alter broker configs
+ #
+ # @param broker_id [int] the id of the broker
+ # @param configs [Array] array of config strings.
+ # @return [nil]
+ def alter_configs(broker_id, configs = [])
+ @cluster.alter_configs(broker_id, configs)
+ end
+
  # Creates a topic in the cluster.
  #
  # @example Creating a topic with log compaction
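A usage sketch for the new config helper; the broker id, the config key, and the assumption that `ConfigEntry` exposes `name`/`value` readers are mine, not guaranteed by this diff:

```ruby
entries = kafka.describe_configs(1, ["log.retention.hours"])
entries.each { |entry| puts "#{entry.name} = #{entry.value}" }
```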
@@ -615,6 +663,14 @@ module Kafka
  @cluster.describe_group(group_id)
  end
 
+ # Fetch all committed offsets for a consumer group
+ #
+ # @param group_id [String] the id of the consumer group
+ # @return [Hash<String, Hash<Integer, Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo>>]
+ def fetch_group_offsets(group_id)
+ @cluster.fetch_group_offsets(group_id)
+ end
+
  # Create partitions for a topic.
  #
  # @param name [String] the name of the topic.
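A sketch of walking the nested return value (topic, then partition, then offset info); the `offset` reader on `PartitionOffsetInfo` is an assumption based on the protocol class name:

```ruby
kafka.fetch_group_offsets("my-group").each do |topic, partitions|
  partitions.each do |partition, info|
    puts "#{topic}/#{partition} committed at #{info.offset}"
  end
end
```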
@@ -663,6 +719,14 @@ module Kafka
  @cluster.partitions_for(topic).count
  end
 
+ # Counts the number of replicas for a topic's partition
+ #
+ # @param topic [String]
+ # @return [Integer] the number of replica nodes for the topic's partition
+ def replica_count_for(topic)
+ @cluster.partitions_for(topic).first.replicas.count
+ end
+
  # Retrieve the offset of the last message in a partition. If there are no
  # messages in the partition -1 is returned.
  #
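Since `replica_count_for` reads the replica list of the topic's first partition, it effectively reports the topic's replication factor, assuming uniform replication across partitions. Illustrative use, with a hypothetical topic name:

```ruby
kafka.partitions_for("events")    #=> e.g. 6 (number of partitions)
kafka.replica_count_for("events") #=> e.g. 3 (replicas per partition)
```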