rdkafka 0.5.0 → 0.6.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: c01eae203c0d0a12be82edacae9fb3ecde7c73dc96e6a95d456434a2d0a2e5d9
4
- data.tar.gz: 50bced86691e02ca0f1af3df2140751f530b667aa5ddf6f6822110991e3f4b43
3
+ metadata.gz: 0b248aee9a260de3d8bb56efba9255f21d3bfcb6c269a1ab15beac7ae2521b6c
4
+ data.tar.gz: 2d1199d5717a832674f659ac1d5e69d2a659845e9c8e5509428270a4a8390ae1
5
5
  SHA512:
6
- metadata.gz: af1930db8fea0a9ebec2ab63c404be88a805349badbc575e351e90d694d989a962dcfd4db32a5e344fddc83e8f97ca7600188ccc6d8e46ccd9a73afa99affbed
7
- data.tar.gz: c8a629fe6ea2a9e1a0d6c38e6d2eb46f1ea8d6791e7d2bdd2751c83ab5a6ed6fc90d7783eaeef3e2f324f01d50e3bb3d308c12048001519dea42c3810c399240
6
+ metadata.gz: 62afc350fa40f5e2117e7791927fe5ad7fcb3cb7ba13cdbba2f450dc6537d41e37668a5997fccbb260abe3fbb09e7a73c0cb5d5059b94837d5097f373c27eb75
7
+ data.tar.gz: 1723633bcce6569992a2368df38c2d59b217dd94af9e8afab9932ca085e9ae0fbc3661ee8fbc370d31f3985204b38b3bbe15bbdfa11ca2be61c69b4c938a40f3
@@ -1,3 +1,7 @@
1
+ # 0.6.0
2
+ * Bump librdkafka to 1.1.0 (by Chris Gaffney)
3
+ * Implement seek (by breunigs)
4
+
1
5
  # 0.5.0
2
6
  * Bump librdkafka to 1.0.0 (by breunigs)
3
7
  * Add cluster and member information (by dmexe)
@@ -164,6 +164,7 @@ module Rdkafka
164
164
  attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
165
165
  attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
166
166
  attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
167
+ attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int
167
168
 
168
169
  # Headers
169
170
  attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -282,6 +282,37 @@ module Rdkafka
282
282
  end
283
283
  end
284
284
 
285
# Seek to a particular message. The next poll on the topic/partition will return the
# message at the given offset.
#
# @param message [Rdkafka::Consumer::Message] The message to which to seek
#
# @raise [RdkafkaError] When seeking fails
#
# @return [nil]
def seek(message)
  # rd_kafka_seek is one of the few librdkafka calls that does not accept
  # a plain string topic, so create a native topic handle for it.
  # (The original comment named rd_kafka_offset_store here — a copy-paste
  # from store_offset; the call below is rd_kafka_seek.)
  native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
    @native_kafka,
    message.topic,
    nil
  )
  response = Rdkafka::Bindings.rd_kafka_seek(
    native_topic,
    message.partition,
    message.offset,
    0 # timeout_ms; 0 performs the seek without blocking
  )
  if response != 0
    raise Rdkafka::RdkafkaError.new(response)
  end
ensure
  # Always release the native topic handle, even when the seek raised.
  if native_topic && !native_topic.null?
    Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
  end
end
315
+
285
316
  # Commit the current offsets of this consumer
286
317
  #
287
318
  # @param list [TopicPartitionList,nil] The topic with partitions to commit
@@ -1,5 +1,5 @@
# Version constants for the rdkafka gem and the librdkafka release it bundles.
module Rdkafka
  # Gem version.
  VERSION = "0.6.0"
  # librdkafka release this gem downloads and builds against.
  LIBRDKAFKA_VERSION = "1.1.0"
  # SHA256 checksum used to verify the downloaded librdkafka source tarball.
  LIBRDKAFKA_SOURCE_SHA256 = "123b47404c16bcde194b4bd1221c21fdce832ad12912bd8074f88f64b2b86f2b"
end
@@ -1,4 +1,5 @@
1
1
  require "spec_helper"
2
+ require "ostruct"
2
3
 
3
4
  describe Rdkafka::Consumer do
4
5
  let(:config) { rdkafka_config }
@@ -75,7 +76,7 @@ describe Rdkafka::Consumer do
75
76
  tpl.add_topic("consume_test_topic", (0..2))
76
77
  consumer.pause(tpl)
77
78
 
78
- # 6. unsure that messages are not available
79
+ # 6. ensure that messages are not available
79
80
  records = consumer.poll(timeout)
80
81
  expect(records).to be_nil
81
82
 
@@ -124,6 +125,92 @@ describe Rdkafka::Consumer do
124
125
  end
125
126
  end
126
127
 
128
describe "#seek" do
  it "should raise an error when seeking fails" do
    fake_msg = OpenStruct.new(topic: "consume_test_topic", partition: 0, offset: 0)

    # Force the native seek call to report an error code.
    expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
    expect {
      consumer.seek(fake_msg)
    }.to raise_error Rdkafka::RdkafkaError
  end

  context "subscription" do
    let(:timeout) { 1000 }

    before do
      consumer.subscribe("consume_test_topic")

      # 1. partitions are assigned
      wait_for_assignment(consumer)
      expect(consumer.assignment).not_to be_empty

      # 2. eat unrelated messages
      while(consumer.poll(timeout)) do; end
    end
    after { consumer.unsubscribe }

    # Produce a single message to partition 0 and wait for delivery.
    def send_one_message(val)
      producer.produce(
        topic: "consume_test_topic",
        payload: "payload #{val}",
        key: "key 1",
        partition: 0
      ).wait
    end

    it "works when a partition is paused" do
      # 3. get reference message
      send_one_message(:a)
      message1 = consumer.poll(timeout)
      expect(message1&.payload).to eq "payload a"

      # 4. pause the subscription
      tpl = Rdkafka::Consumer::TopicPartitionList.new
      tpl.add_topic("consume_test_topic", 1)
      consumer.pause(tpl)

      # 5. seek to previous message
      consumer.seek(message1)

      # 6. resume the subscription
      tpl = Rdkafka::Consumer::TopicPartitionList.new
      tpl.add_topic("consume_test_topic", 1)
      consumer.resume(tpl)

      # 7. ensure same message is read again
      message2 = consumer.poll(timeout)
      consumer.commit
      expect(message1.offset).to eq message2.offset
      expect(message1.payload).to eq message2.payload
    end

    it "allows skipping messages" do
      # 3. send messages
      send_one_message(:a)
      send_one_message(:b)
      send_one_message(:c)

      # 4. get reference message
      message = consumer.poll(timeout)
      expect(message&.payload).to eq "payload a"

      # 5. seek over one message
      fake_msg = message.dup
      fake_msg.instance_variable_set(:@offset, fake_msg.offset + 2)
      consumer.seek(fake_msg)

      # 6. ensure that only one message is available
      records = consumer.poll(timeout)
      expect(records&.payload).to eq "payload c"
      records = consumer.poll(timeout)
      expect(records).to be_nil

      consumer.commit
    end
  end
end
213
+
127
214
  describe "#assign and #assignment" do
128
215
  it "should return an empty assignment if nothing is assigned" do
129
216
  expect(consumer.assignment).to be_empty
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: rdkafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.5.0
4
+ version: 0.6.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Thijs Cadier
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2019-04-11 00:00:00.000000000 Z
11
+ date: 2019-07-23 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: ffi