rdkafka 0.7.0 → 0.8.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 38ccc62319eff24c7def2b8cebced2a8551b28a1651e77348c868ce189a7d418
-   data.tar.gz: e23e2cad27a95e2bf51c2428e4d2246deae903f8e0d7800b086b35c5b262fc65
+   metadata.gz: 8bf97d10412b4f3c0801f657796fd05c94ceede3caafe6a45719c840b534752a
+   data.tar.gz: 8cc93bffb8119cf9c97aa60cf1f6d634e66a99686e0785150ae51283a6514e8c
  SHA512:
-   metadata.gz: 5faa270613e25d64f2b5d955a637b97b8315e7e365843b0e5a10d34c6340a795ebcf8ed2cb9dba2ae3a7d6839e464271d979760d351a6a8777224c31cdb262b6
-   data.tar.gz: d209464e6fe089945638063caa4ec6ab80d7c0d171678c43e320314cdc2ff0f980806b92941d0529b8ec93d598e5a637673fe3d97df4755503d2b484cc8f670b
+   metadata.gz: e8e085b3dd9d80d77003d2c7f0fbda69371c233b121eac2594556d7d62da99a4bed4aace93e9489783c789e64bafd732faaad3de2ffb5d3a5af8568c5b43676b
+   data.tar.gz: 5a267a9814e87e876544e04e6657829c298ff01b03cb5ff41a0205795ea2fb4f6a2147ee1a0949e52f7a069f5dc73d5ed2072867194032e8ea913ac5d6871e23
data/.travis.yml CHANGED
@@ -14,9 +14,24 @@ rvm:
  - 2.4
  - 2.5
  - 2.6
+ - 2.7.1
+ - jruby-9.2.9.0
+ - jruby-9.2.10.0
+ - jruby-9.2.11.1
 
  before_install:
  - gem update --system
+ - |
+   r_eng="$(ruby -e 'STDOUT.write RUBY_ENGINE')";
+   if [ "$r_eng" == "jruby" ]; then
+     sudo apt-get update && \
+     sudo apt-get install -y git && \
+     sudo apt-get install -y libpthread-stubs0-dev && \
+     sudo apt-get install -y build-essential && \
+     sudo apt-get install -y zlib1g-dev && \
+     sudo apt-get install -y libssl-dev && \
+     sudo apt-get install -y libsasl2-dev
+   fi
 
  before_script:
  - docker-compose up -d
@@ -26,7 +41,7 @@ before_script:
  - ./cc-test-reporter before-build
 
  script:
- - bundle exec rspec
+ - bundle exec rspec --format documentation
 
  after_script:
  - docker-compose stop
data/CHANGELOG.md CHANGED
@@ -1,6 +1,14 @@
- # 0.7.0
+ # 0.8.0
+ * Upgrade librdkafka to 1.4.0
+ * Integrate librdkafka metadata API and add partition_key (by Adithya-copart)
+ * Ruby 2.7 compatibility fix (by Geoff Thé)
+ * Add error to delivery report (by Alex Stanovsky)
+ * Don't override CPPFLAGS and LDFLAGS if already set on Mac (by Hiroshi Hatake)
+ * Allow use of Rake 13.x and up (by Tomasz Pajor)
+
+ # 0.7.0
  * Bump librdkafka to 1.2.0 (by rob-as)
- * Allow customizing the wait time for delivery report availability (by mensfeld )
+ * Allow customizing the wait time for delivery report availability (by mensfeld)
 
  # 0.6.0
  * Bump librdkafka to 1.1.0 (by Chris Gaffney)
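
Taken together, the 0.8.0 entries surface two user-visible additions: a `partition_key` option on produce and an error field on the delivery report. A minimal sketch of both (broker address and topic name are placeholders):

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

# partition_key picks the partition via librdkafka's consistent-random
# partitioner instead of hardcoding a partition id.
handle = producer.produce(
  topic:         "events",
  payload:       "hello",
  partition_key: "user-42"
)

report = handle.wait(max_wait_timeout: 5)
report.error # => nil on success; 0.8.0 now carries the error on the report
```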
data/docker-compose.yml CHANGED
@@ -1,18 +1,22 @@
+
  version: '2'
  services:
    zookeeper:
-     image: wurstmeister/zookeeper
-     ports:
-       - "2181:2181"
+     image: confluentinc/cp-zookeeper:latest
+     environment:
+       ZOOKEEPER_CLIENT_PORT: 2181
+       ZOOKEEPER_TICK_TIME: 2000
+
    kafka:
-     image: wurstmeister/kafka:1.0.1
+     image: confluentinc/cp-kafka:latest
+     depends_on:
+       - zookeeper
      ports:
-       - "9092:9092"
+       - 9092:9092
      environment:
-       KAFKA_ADVERTISED_HOST_NAME: localhost
-       KAFKA_ADVERTISED_PORT: 9092
+       KAFKA_BROKER_ID: 1
        KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-       KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-       KAFKA_CREATE_TOPICS: "consume_test_topic:3:1,empty_test_topic:3:1,load_test_topic:3:1,produce_test_topic:3:1,rake_test_topic:3:1,watermarks_test_topic:3:1"
-     volumes:
-       - /var/run/docker.sock:/var/run/docker.sock
+       KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
+       KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+       KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
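
The two advertised listeners split in-network and host traffic: containers on the compose network reach the broker at `kafka:29092`, while code running on the host uses `localhost:9092`. A hedged sketch of the host-side connection (the group id is illustrative):

```ruby
# From the host, connect via the PLAINTEXT_HOST listener advertised
# as localhost:9092; inside the compose network you would use kafka:29092.
config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",
  "group.id"          => "rdkafka-test" # illustrative group id
)
consumer = config.consumer
```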
data/ext/README.md CHANGED
@@ -5,19 +5,7 @@ this gem is installed.
 
  To update the `librdkafka` version follow the following steps:
 
- * Download the new version `tar.gz` from https://github.com/edenhill/librdkafka/
- * Generate a `sha256` with (if using MacOS) `shasum -a 256 <file>`
- * Change the `sha256` in `lib/rdkafka/version.rb`
+ * Go to https://github.com/edenhill/librdkafka/releases to get the new
+   version number and asset checksum for `tar.gz`.
  * Change the version in `lib/rdkafka/version.rb`
-
- ## Disclaimer
-
- Currently the `librdkafka` project does not provide
- checksums of releases. The checksum provided here is generated on a best
- effort basis. If the CDN would be compromised at the time of download the
- checksum could be incorrect.
-
- Do your own verification if you rely on this behaviour.
-
- Once https://github.com/appsignal/rdkafka-ruby/issues/44 is implemented
- we will change this process.
+ * Change the `sha256` in `lib/rdkafka/version.rb`
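
For orientation, the constants these steps refer to live together in `lib/rdkafka/version.rb`; for 0.8.0 they look roughly like this (the checksum is elided here, not a real value):

```ruby
module Rdkafka
  VERSION = "0.8.0"
  LIBRDKAFKA_VERSION = "1.4.0"
  LIBRDKAFKA_SOURCE_SHA256 = "<sha256 of the librdkafka release tar.gz>"
end
```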
data/ext/Rakefile CHANGED
@@ -23,9 +23,10 @@ task :default => :clean do
  recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
 
  # Use default homebrew openssl if we're on mac and the directory exists
- if recipe.host.include?("darwin") && Dir.exists?("/usr/local/opt/openssl")
-   ENV["CPPFLAGS"] = "-I/usr/local/opt/openssl/include"
-   ENV["LDFLAGS"] = "-L/usr/local/opt/openssl/lib"
+ # and each of flags is not empty
+ if recipe.host&.include?("darwin") && Dir.exist?("/usr/local/opt/openssl")
+   ENV["CPPFLAGS"] = "-I/usr/local/opt/openssl/include" unless ENV["CPPFLAGS"]
+   ENV["LDFLAGS"] = "-L/usr/local/opt/openssl/lib" unless ENV["LDFLAGS"]
  end
 
  recipe.files << {
@@ -55,3 +56,22 @@ task :clean do
  FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
  FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
 end
+
+namespace :build do
+  desc "Build librdkafka at the given git sha or tag"
+  task :git, [:ref] do |task, args|
+    ref = args[:ref]
+    version = "git-#{ref}"
+
+    recipe = MiniPortile.new("librdkafka", version)
+    recipe.files << "https://github.com/edenhill/librdkafka/archive/#{ref}.tar.gz"
+    recipe.configure_options = ["--host=#{recipe.host}"]
+    recipe.cook
+
+    ext = recipe.host.include?("darwin") ? "dylib" : "so"
+    lib = File.expand_path("ports/#{recipe.host}/librdkafka/#{version}/lib/librdkafka.#{ext}", __dir__)
+
+    # Copy will copy the content, following any symlinks
+    FileUtils.cp(lib, __dir__)
+  end
+end
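
The new task downloads, builds, and copies the shared library next to the Rakefile; it would be invoked from `ext/` with an upstream tag or commit sha, e.g.:

```sh
# Build librdkafka from a specific upstream ref (quoting keeps
# the brackets safe in zsh).
cd ext
rake "build:git[v1.4.0]"
```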
data/lib/rdkafka/bindings.rb CHANGED
@@ -8,7 +8,7 @@ module Rdkafka
    extend FFI::Library
 
    def self.lib_extension
-     if Gem::Platform.local.os.include?("darwin")
+     if RbConfig::CONFIG['host_os'] =~ /darwin/
        'dylib'
      else
        'so'
@@ -22,6 +22,11 @@ module Rdkafka
    RD_KAFKA_RESP_ERR__NOENT = -156
    RD_KAFKA_RESP_ERR_NO_ERROR = 0
 
+   RD_KAFKA_OFFSET_END = -1
+   RD_KAFKA_OFFSET_BEGINNING = -2
+   RD_KAFKA_OFFSET_STORED = -1000
+   RD_KAFKA_OFFSET_INVALID = -1001
+
    class SizePtr < FFI::Struct
      layout :value, :size_t
    end
@@ -35,6 +40,8 @@ module Rdkafka
 
    attach_function :rd_kafka_memberid, [:pointer], :string
    attach_function :rd_kafka_clusterid, [:pointer], :string
+   attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
+   attach_function :rd_kafka_metadata_destroy, [:pointer], :void
 
    # Message struct
 
@@ -227,6 +234,17 @@ module Rdkafka
    callback :delivery_cb, [:pointer, :pointer, :pointer], :void
    attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
 
+   # Partitioner
+   attach_function :rd_kafka_msg_partitioner_consistent_random, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+
+   def self.partitioner(str, partition_count)
+     # Return RD_KAFKA_PARTITION_UA (unassigned partition) when partition count is nil/zero.
+     return -1 unless partition_count&.nonzero?
+
+     str_ptr = FFI::MemoryPointer.from_string(str)
+     rd_kafka_msg_partitioner_consistent_random(nil, str_ptr, str.size, partition_count, nil, nil)
+   end
+
    DeliveryCallback = FFI::Function.new(
      :void, [:pointer, :pointer, :pointer]
    ) do |client_ptr, message_ptr, opaque_ptr|
@@ -240,7 +258,7 @@ module Rdkafka
      delivery_handle[:offset] = message[:offset]
      # Call delivery callback on opaque
      if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-       opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset]))
+       opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]))
      end
    end
  end
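
The `partitioner` helper added above maps a partition key onto a partition id, which is what `produce(partition_key: ...)` relies on. A small sketch of its observable behaviour (the partition count of 3 is illustrative):

```ruby
# Same key + same partition count => same partition, deterministically.
Rdkafka::Bindings.partitioner("user-42", 3)   # => an Integer in 0...3
Rdkafka::Bindings.partitioner("user-42", nil) # => -1 (RD_KAFKA_PARTITION_UA)
```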
data/lib/rdkafka/config.rb CHANGED
@@ -212,10 +212,8 @@ module Rdkafka
        Rdkafka::Bindings.rd_kafka_queue_get_main(handle)
      )
 
-     FFI::AutoPointer.new(
-       handle,
-       Rdkafka::Bindings.method(:rd_kafka_destroy)
-     )
+     # Return handle which should be closed using rd_kafka_destroy after usage.
+     handle
    end
  end
 
data/lib/rdkafka/consumer/headers.rb CHANGED
@@ -19,7 +19,7 @@ module Rdkafka
        raise Rdkafka::RdkafkaError.new(err, "Error reading message headers")
      end
 
-     headers_ptr = headers_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
+     headers_ptr = headers_ptrptr.read_pointer
 
      name_ptrptr = FFI::MemoryPointer.new(:pointer)
      value_ptrptr = FFI::MemoryPointer.new(:pointer)
@@ -42,12 +42,14 @@ module Rdkafka
        raise Rdkafka::RdkafkaError.new(err, "Error reading a message header at index #{idx}")
      end
 
-     name = name_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
-     name = name.read_string_to_null
+     name_ptr = name_ptrptr.read_pointer
+     name = name_ptr.respond_to?(:read_string_to_null) ? name_ptr.read_string_to_null : name_ptr.read_string
 
      size = size_ptr[:value]
-     value = value_ptrptr.read(:pointer).tap { |it| it.autorelease = false }
-     value = value.read_string(size)
+
+     value_ptr = value_ptrptr.read_pointer
+
+     value = value_ptr.read_string(size)
 
      headers[name.to_sym] = value
 
data/lib/rdkafka/consumer/topic_partition_list.rb CHANGED
@@ -106,7 +106,7 @@ module Rdkafka
          data[elem[:topic]] = nil
        else
          partitions = data[elem[:topic]] || []
-         offset = if elem[:offset] == -1001
+         offset = if elem[:offset] == Rdkafka::Bindings::RD_KAFKA_OFFSET_INVALID
            nil
          else
            elem[:offset]
@@ -125,10 +125,10 @@ module Rdkafka
    #
    # The pointer will be cleaned by `rd_kafka_topic_partition_list_destroy` when GC releases it.
    #
-   # @return [FFI::AutoPointer]
+   # @return [FFI::Pointer]
    # @private
    def to_native_tpl
-     tpl = TopicPartitionList.new_native_tpl(count)
+     tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
 
      @data.each do |topic, partitions|
        if partitions
@@ -138,6 +138,7 @@ module Rdkafka
            topic,
            p.partition
          )
+
          if p.offset
            Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
              tpl,
@@ -158,17 +159,6 @@ module Rdkafka
 
      tpl
    end
-
-   # Creates a new native tpl and wraps it into FFI::AutoPointer which in turn calls
-   # `rd_kafka_topic_partition_list_destroy` when a pointer will be cleaned by GC
-   #
-   # @param count [Integer] an initial capacity of partitions list
-   # @return [FFI::AutoPointer]
-   # @private
-   def self.new_native_tpl(count)
-     tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
-     FFI::AutoPointer.new(tpl, Rdkafka::Bindings.method(:rd_kafka_topic_partition_list_destroy))
-   end
    end
  end
 end
data/lib/rdkafka/consumer.rb CHANGED
@@ -5,6 +5,9 @@ module Rdkafka
    #
    # To create a consumer set up a {Config} and call {Config#consumer consumer} on that. It is
    # mandatory to set `:"group.id"` in the configuration.
+   #
+   # Consumer implements `Enumerable`, so you can use `each` to consume messages, or for example
+   # `each_slice` to consume batches of messages.
    class Consumer
      include Enumerable
 
@@ -17,8 +20,12 @@ module Rdkafka
    # Close this consumer
    # @return [nil]
    def close
+     return unless @native_kafka
+
      @closing = true
      Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
+     Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
+     @native_kafka = nil
    end
 
    # Subscribe to one or more topics letting Kafka handle partition assignments.
@@ -30,20 +37,19 @@ module Rdkafka
    # @return [nil]
    def subscribe(*topics)
      # Create topic partition list with topics and no partition set
-     tpl = TopicPartitionList.new_native_tpl(topics.length)
+     tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
 
      topics.each do |topic|
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
-         tpl,
-         topic,
-         -1
-       )
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, -1)
      end
+
      # Subscribe to topic partition list and check this was successful
      response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
      end
+   ensure
+     Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
    end
 
    # Unsubscribe from all subscribed topics.
@@ -69,12 +75,18 @@ module Rdkafka
      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
 
-     if response != 0
-       list = TopicPartitionList.from_native_tpl(tpl)
-       raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
+     begin
+       response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+
+       if response != 0
+         list = TopicPartitionList.from_native_tpl(tpl)
+         raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
 
@@ -89,10 +101,16 @@ module Rdkafka
      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
 
@@ -102,17 +120,19 @@ module Rdkafka
    #
    # @return [TopicPartitionList]
    def subscription
-     tpl = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, tpl)
+     ptr = FFI::MemoryPointer.new(:pointer)
+     response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
-     tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+
+     native = ptr.read_pointer
 
      begin
-       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(native)
      ensure
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(native)
      end
    end
 
@@ -125,10 +145,16 @@ module Rdkafka
      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
 
@@ -138,19 +164,23 @@ module Rdkafka
    #
    # @return [TopicPartitionList]
    def assignment
-     tpl = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, tpl)
+     ptr = FFI::MemoryPointer.new(:pointer)
+     response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
 
-     tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+     tpl = ptr.read_pointer
 
-     begin
-       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
-     ensure
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+     if !tpl.null?
+       begin
+         Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+       end
      end
+   ensure
+     ptr.free
    end
 
    # Return the current committed offset per partition for this consumer group.
@@ -168,12 +198,18 @@ module Rdkafka
      elsif !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response)
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+       TopicPartitionList.from_native_tpl(tpl)
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
-     TopicPartitionList.from_native_tpl(tpl)
    end
 
    # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
@@ -195,13 +231,16 @@ module Rdkafka
        partition,
        low,
        high,
-       timeout_ms
+       timeout_ms,
      )
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
      end
 
-     return low.read_int64, high.read_int64
+     return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
+   ensure
+     low.free
+     high.free
    end
 
    # Calculate the consumer lag per partition for the provided topic partition list.
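
For reference, a sketch of the call whose result handling changed here (topic and partition are placeholders):

```ruby
# Asks the broker for the oldest and newest offsets of partition 0.
low, high = consumer.query_watermark_offsets("events", 0)
puts "partition 0 spans offsets #{low}..#{high}"
```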
@@ -217,6 +256,7 @@ module Rdkafka
    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
    def lag(topic_partition_list, watermark_timeout_ms=100)
      out = {}
+
      topic_partition_list.to_h.each do |topic, partitions|
        # Query high watermarks for this topic's partitions
        # and compare to the offset in the list.
@@ -313,7 +353,14 @@ module Rdkafka
      end
    end
 
-   # Commit the current offsets of this consumer
+   # Manually commit the current offsets of this consumer.
+   #
+   # To use this set `enable.auto.commit` to `false` to disable automatic triggering
+   # of commits.
+   #
+   # If `enable.auto.offset.store` is set to `true` the offset of the last consumed
+   # message for every partition is used. If set to `false` you can use {store_offset} to
+   # indicate when a message has been fully processed.
    #
    # @param list [TopicPartitionList,nil] The topic with partitions to commit
    # @param async [Boolean] Whether to commit async or wait for the commit to finish
@@ -325,14 +372,16 @@ module Rdkafka
      if !list.nil? && !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end
-     tpl = if list
-       list.to_native_tpl
-     else
-       nil
-     end
-     response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response)
+
+     tpl = list ? list.to_native_tpl : nil
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
      end
    end
 
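
A sketch of the manual-commit flow the expanded docs describe (`enable.auto.commit` is a standard librdkafka setting; the topic and processing step are placeholders):

```ruby
consumer = Rdkafka::Config.new(
  "bootstrap.servers"  => "localhost:9092",
  "group.id"           => "example-group",
  "enable.auto.commit" => false
).consumer
consumer.subscribe("events")

consumer.each do |message|
  handle_message(message) # hypothetical processing step
  consumer.commit         # synchronous commit of the current offsets
end
```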
@@ -344,6 +393,8 @@ module Rdkafka
    #
    # @return [Message, nil] A message or nil if there was no new message within the timeout
    def poll(timeout_ms)
+     return unless @native_kafka
+
      message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
      if message_ptr.null?
        nil
data/lib/rdkafka/error.rb CHANGED
@@ -39,6 +39,11 @@ module Rdkafka
    def is_partition_eof?
      code == :partition_eof
    end
+
+   # Error comparison
+   def ==(another_error)
+     another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
+   end
  end
 
  # Error with topic partition list returned by the underlying rdkafka library.
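
The new `==` makes errors with the same class and message compare equal, which is mainly convenient in specs; a hedged sketch:

```ruby
# -185 is librdkafka's "timed out" response code; any code works here.
Rdkafka::RdkafkaError.new(-185) == Rdkafka::RdkafkaError.new(-185) # => true
```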
data/lib/rdkafka/metadata.rb ADDED
@@ -0,0 +1,91 @@
+module Rdkafka
+  class Metadata
+    attr_reader :brokers, :topics
+
+    def initialize(native_client, topic_name = nil)
+      native_topic = if topic_name
+        Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
+      end
+
+      ptr = FFI::MemoryPointer.new(:pointer)
+
+      # Retrieve metadata flag is 0/1 for single/multiple topics.
+      topic_flag = topic_name ? 1 : 0
+
+      # Retrieve the Metadata
+      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
+
+      # Error Handling
+      Rdkafka::Error.new(result) unless result.zero?
+
+      metadata_from_native(ptr.read_pointer)
+    ensure
+      Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
+      Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
+    end
+
+    private
+
+    def metadata_from_native(ptr)
+      metadata = Metadata.new(ptr)
+      @brokers = Array.new(metadata[:brokers_count]) do |i|
+        BrokerMetadata.new(metadata[:brokers_metadata] + (i * BrokerMetadata.size)).to_h
+      end
+
+      @topics = Array.new(metadata[:topics_count]) do |i|
+        topic = TopicMetadata.new(metadata[:topics_metadata] + (i * TopicMetadata.size))
+        Rdkafka::Error.new(topic[:rd_kafka_resp_err]) unless topic[:rd_kafka_resp_err].zero?
+
+        partitions = Array.new(topic[:partition_count]) do |j|
+          partition = PartitionMetadata.new(topic[:partitions_metadata] + (j * PartitionMetadata.size))
+          Rdkafka::Error.new(partition[:rd_kafka_resp_err]) unless partition[:rd_kafka_resp_err].zero?
+          partition.to_h
+        end
+        topic.to_h.merge!(partitions: partitions)
+      end
+    end
+
+    class CustomFFIStruct < FFI::Struct
+      def to_h
+        members.each_with_object({}) do |mem, hsh|
+          val = self.[](mem)
+          next if val.is_a?(FFI::Pointer) || mem == :rd_kafka_resp_err
+
+          hsh[mem] = self.[](mem)
+        end
+      end
+    end
+
+    class Metadata < CustomFFIStruct
+      layout :brokers_count, :int,
+             :brokers_metadata, :pointer,
+             :topics_count, :int,
+             :topics_metadata, :pointer,
+             :broker_id, :int32,
+             :broker_name, :string
+    end
+
+    class BrokerMetadata < CustomFFIStruct
+      layout :broker_id, :int32,
+             :broker_name, :string,
+             :broker_port, :int
+    end
+
+    class TopicMetadata < CustomFFIStruct
+      layout :topic_name, :string,
+             :partition_count, :int,
+             :partitions_metadata, :pointer,
+             :rd_kafka_resp_err, :int
+    end
+
+    class PartitionMetadata < CustomFFIStruct
+      layout :partition_id, :int32,
+             :rd_kafka_resp_err, :int,
+             :leader, :int32,
+             :replica_count, :int,
+             :replicas, :pointer,
+             :in_sync_replica_brokers, :int,
+             :isrs, :pointer
+    end
+  end
+end
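
Reading the new file top-down: `initialize` fetches native metadata with a 250 ms timeout, `metadata_from_native` walks the broker and topic arrays, and the FFI structs at the bottom mirror librdkafka's metadata layout. A hedged sketch of how it is consumed (the native client handle stands for an already-created librdkafka client pointer held internally by a producer or consumer):

```ruby
# Fetch cluster metadata for a single topic through a native handle.
metadata = Rdkafka::Metadata.new(native_kafka, "events")

metadata.brokers # => e.g. [{broker_id: 1, broker_name: "localhost", broker_port: 9092}]
metadata.topics  # => e.g. [{topic_name: "events", partition_count: 3, partitions: [...]}]
```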