rdkafka 0.8.1 → 0.11.1

This diff compares the contents of two publicly released versions of the package as they appear in their public registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: d09f2751b883400c550fee28fca85ffdd3e9f8090a9ba8e7bb22eadcba5c05c6
-  data.tar.gz: 107a5b723b40ec43b10f02b1993f42ff4b30a4ba16efc21611efb860de798a3a
+  metadata.gz: 9b7042f241be5aad91c403f61aabb8e8ba87afa46674db8ba58c487fe01f88c9
+  data.tar.gz: 5cb00dc3dc8b4069a8e62cda9271e290d1371434332767270426488dedcff641
 SHA512:
-  metadata.gz: 373bd71c9e1f3c1fadd4deb9e67bec7aa5821e0a7835c851f1bb64fb1ad142692145e01efa62ac357d333eebb76cef7d676ddfa53ef3a9cd08720060b4bc0937
-  data.tar.gz: ec6d8932a2987e419f6fd731ff038cb88d9d898eab75d7eff82a6e606fcb71c249dd20f6d200bda44629f4bb81550c06ef4e8083ac1b4736c57932ab2486fdbc
+  metadata.gz: 2b287a0d81aca7909702969778fb64585b6808860971d50e1c5540eb0c43cf73617f9e701605b816e8f52a81e39d9909512e1d113f6a550555e6ca503dc5ff58
+  data.tar.gz: 7924ab06f52646168b52d0478b605484e4aa1a3674ace4f97af5b56588aea6dec8e614d3258b0670f92f18a905509ae6982d2c8999fa719f8ea7451e515ecb3d
data/.rspec ADDED
@@ -0,0 +1 @@
+--format documentation
data/.semaphore/semaphore.yml CHANGED
@@ -3,7 +3,7 @@ name: Rdkafka Ruby
 
 agent:
   machine:
-    type: e1-standard-2
+    type: e1-standard-4
     os_image: ubuntu1804
 
 blocks:
@@ -13,11 +13,11 @@ blocks:
       - name: bundle exec rspec
         matrix:
           - env_var: RUBY_VERSION
-            values: [ "2.5.8", "2.6.6", "2.7.2", "jruby-9.2.13.0" ]
+            values: [ "2.6.8", "2.7.4", "3.0.2", "jruby-9.3.1.0"]
         commands:
           - sem-version ruby $RUBY_VERSION
           - checkout
-          - bundle install --path vendor/bundl
+          - bundle install --path vendor/bundle
           - cd ext && bundle exec rake && cd ..
           - docker-compose up -d --no-recreate
           - bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -1,3 +1,23 @@
+# 0.11.1
+* Use mini_portile2 2.6, otherwise you can't run nokogiri and rdkafka at the same time.
+
+# 0.11.0
+* Upgrade librdkafka to 1.8.2
+* Bump supported minimum Ruby version to 2.6
+* Better homebrew path detection
+
+# 0.10.0
+* Upgrade librdkafka to 1.5.0
+* Add error callback config
+
+# 0.9.0
+* Fixes for Ruby 3.0
+* Allow any callable object for callbacks (gremerritt)
+* Reduce memory allocations in Rdkafka::Producer#produce (jturkel)
+* Use queue as log callback to avoid unsafe calls from trap context (breunigs)
+* Allow passing in topic configuration on create_topic (dezka)
+* Add each_batch method to consumer (mgrosso)
+
 # 0.8.1
 * Fix topic_flag behaviour and add tests for Metadata (geoff2k)
 * Add topic admin interface (geoff2k)
@@ -33,7 +53,7 @@
 * Use default Homebrew openssl location if present
 * Consumer lag handles empty topics
 * End iteration in consumer when it is closed
-* Add suport for storing message offsets
+* Add support for storing message offsets
 * Add missing runtime dependency to rake
 
 # 0.4.1
data/Guardfile ADDED
@@ -0,0 +1,19 @@
+# frozen_string_literal: true
+
+logger level: :error
+
+guard :rspec, cmd: "bundle exec rspec --format #{ENV.fetch("FORMAT", "documentation")}" do
+  require "guard/rspec/dsl"
+  dsl = Guard::RSpec::Dsl.new(self)
+
+  # Ruby files
+  ruby = dsl.ruby
+  dsl.watch_spec_files_for(ruby.lib_files)
+  watch(%r{^lib/(.+)\.rb}) { |m| "spec/#{m[1]}_spec.rb" }
+
+  # RSpec files
+  rspec = dsl.rspec
+  watch(rspec.spec_helper) { rspec.spec_dir }
+  watch(rspec.spec_support) { rspec.spec_dir }
+  watch(rspec.spec_files)
+end
data/README.md CHANGED
@@ -7,7 +7,9 @@
 The `rdkafka` gem is a modern Kafka client library for Ruby based on
 [librdkafka](https://github.com/edenhill/librdkafka/).
 It wraps the production-ready C client using the [ffi](https://github.com/ffi/ffi)
-gem and targets Kafka 1.0+ and Ruby 2.4+.
+gem and targets Kafka 1.0+ and Ruby versions that are under security or
+active maintenance. We remove Ruby version from our CI builds if they
+become EOL.
 
 `rdkafka` was written because we needed a reliable Ruby client for
 Kafka that supports modern Kafka at [AppSignal](https://appsignal.com).
data/bin/console ADDED
@@ -0,0 +1,11 @@
+#!/usr/bin/env ruby
+
+# frozen_string_literal: true
+
+ENV["IRBRC"] = File.join(File.dirname(__FILE__), ".irbrc")
+
+require "bundler/setup"
+require "rdkafka"
+
+require "irb"
+IRB.start(__FILE__)
data/docker-compose.yml CHANGED
@@ -4,13 +4,13 @@ version: '2'
 
 services:
   zookeeper:
-    image: confluentinc/cp-zookeeper:latest
+    image: confluentinc/cp-zookeeper:5.2.6
     environment:
       ZOOKEEPER_CLIENT_PORT: 2181
       ZOOKEEPER_TICK_TIME: 2000
 
   kafka:
-    image: confluentinc/cp-kafka:latest
+    image: confluentinc/cp-kafka:5.2.5-10
     depends_on:
       - zookeeper
     ports:
@@ -18,7 +18,7 @@ services:
     environment:
       KAFKA_BROKER_ID: 1
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092,PLAINTEXT_HOST://localhost:9092
       KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
       KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
data/ext/README.md CHANGED
@@ -1,6 +1,6 @@
 # Ext
 
-This gem dependes on the `librdkafka` C library. It is downloaded when
+This gem depends on the `librdkafka` C library. It is downloaded when
 this gem is installed.
 
 To update the `librdkafka` version follow the following steps:
@@ -9,3 +9,10 @@ To update the `librdkafka` version follow the following steps:
   version number and asset checksum for `tar.gz`.
 * Change the version in `lib/rdkafka/version.rb`
 * Change the `sha256` in `lib/rdkafka/version.rb`
+* Run `bundle exec rake` in the `ext` directory to download and build
+  the new version
+* Run `docker-compose pull` in the main gem directory to ensure the docker
+  images used by the tests and run `docker-compose up`
+* Finally, run `bundle exec rspec` in the main gem directory to execute
+  the test suite to detect any regressions that may have been introduced
+  by the update
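
For the `sha256` step, the checksum of the downloaded release tarball can be computed locally with Ruby's standard library; a minimal sketch (the tarball filename is a placeholder for whatever release was downloaded):

```ruby
require "digest"

# Compute the SHA256 of the downloaded librdkafka release tarball and
# compare it against LIBRDKAFKA_SOURCE_SHA256 in lib/rdkafka/version.rb.
puts Digest::SHA256.file("librdkafka-1.8.2.tar.gz").hexdigest
```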
data/ext/Rakefile CHANGED
@@ -1,32 +1,17 @@
 require File.expand_path('../../lib/rdkafka/version', __FILE__)
 require "mini_portile2"
 require "fileutils"
+require "open-uri"
 
 task :default => :clean do
-  # MiniPortile#download_file_http is a monkey patch that removes the download
-  # progress indicator. This indicator relies on the 'Content Length' response
-  # headers, which is not set by GitHub
-  class MiniPortile
-    def download_file_http(url, full_path, _count)
-      filename = File.basename(full_path)
-      with_tempfile(filename, full_path) do |temp_file|
-        params = { 'Accept-Encoding' => 'identity' }
-        OpenURI.open_uri(url, 'rb', params) do |io|
-          temp_file.write(io.read)
-        end
-        output
-      end
-    end
-  end
-
   # Download and compile librdkafka
   recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
 
   # Use default homebrew openssl if we're on mac and the directory exists
   # and each of flags is not empty
-  if recipe.host&.include?("darwin") && Dir.exist?("/usr/local/opt/openssl")
-    ENV["CPPFLAGS"] = "-I/usr/local/opt/openssl/include" unless ENV["CPPFLAGS"]
-    ENV["LDFLAGS"] = "-L/usr/local/opt/openssl/lib" unless ENV["LDFLAGS"]
+  if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}")
+    ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV["CPPFLAGS"]
+    ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV["LDFLAGS"]
   end
 
   recipe.files << {
data/lib/rdkafka/admin.rb CHANGED
@@ -34,9 +34,10 @@ module Rdkafka
     #
     # @raise [ConfigError] When the partition count or replication factor are out of valid range
     # @raise [RdkafkaError] When the topic name is invalid or the topic already exists
+    # @raise [RdkafkaError] When the topic configuration is invalid
     #
     # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
-    def create_topic(topic_name, partition_count, replication_factor)
+    def create_topic(topic_name, partition_count, replication_factor, topic_config={})
 
       # Create a rd_kafka_NewTopic_t representing the new topic
       error_buffer = FFI::MemoryPointer.from_string(" " * 256)
@@ -51,6 +52,16 @@ module Rdkafka
         raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
       end
 
+      unless topic_config.nil?
+        topic_config.each do |key, value|
+          Rdkafka::Bindings.rd_kafka_NewTopic_set_config(
+            new_topic_ptr,
+            key.to_s,
+            value.to_s
+          )
+        end
+      end
+
      # Note that rd_kafka_CreateTopics can create more than one topic at a time
      pointer_array = [new_topic_ptr]
      topics_array_ptr = FFI::MemoryPointer.new(:pointer)
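
As the new optional `topic_config` argument shows, per-topic settings can now be passed to `create_topic` and are applied through `rd_kafka_NewTopic_set_config`. A minimal usage sketch, where the broker address and topic name are placeholders:

```ruby
require "rdkafka"

admin = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").admin

# The fourth argument is the new optional topic configuration hash.
handle = admin.create_topic("events", 3, 1, "cleanup.policy" => "compact")
handle.wait(max_wait_timeout: 15.0) # raises RdkafkaError if the config is invalid

admin.close
```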
data/lib/rdkafka/bindings.rb CHANGED
@@ -108,6 +108,8 @@ module Rdkafka
     attach_function :rd_kafka_conf_set_opaque, [:pointer, :pointer], :void
     callback :stats_cb, [:pointer, :string, :int, :pointer], :int
     attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
+    callback :error_cb, [:pointer, :int, :string, :pointer], :void
+    attach_function :rd_kafka_conf_set_error_cb, [:pointer, :error_cb], :void
 
     # Log queue
     attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
@@ -130,7 +132,7 @@
       else
         Logger::UNKNOWN
       end
-      Rdkafka::Config.logger.add(severity) { "rdkafka: #{line}" }
+      Rdkafka::Config.log_queue << [severity, "rdkafka: #{line}"]
     end
 
     StatsCallback = FFI::Function.new(
@@ -146,6 +148,15 @@
       0
     end
 
+    ErrorCallback = FFI::Function.new(
+      :void, [:pointer, :int, :string, :pointer]
+    ) do |_client_prr, err_code, reason, _opaque|
+      if Rdkafka::Config.error_callback
+        error = Rdkafka::RdkafkaError.new(err_code, broker_message: reason)
+        Rdkafka::Config.error_callback.call(error)
+      end
+    end
+
     # Handle
 
     enum :kafka_type, [
@@ -252,6 +263,7 @@
 
     attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
     attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
+    attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32
     attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
     attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
     attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
data/lib/rdkafka/config.rb CHANGED
@@ -10,7 +10,18 @@ module Rdkafka
     # @private
     @@statistics_callback = nil
     # @private
+    @@error_callback = nil
+    # @private
     @@opaques = {}
+    # @private
+    @@log_queue = Queue.new
+
+    Thread.start do
+      loop do
+        severity, msg = @@log_queue.pop
+        @@logger.add(severity, msg)
+      end
+    end
 
     # Returns the current logger, by default this is a logger to stdout.
     #
@@ -19,6 +30,16 @@
       @@logger
     end
 
+
+    # Returns a queue whose contents will be passed to the configured logger. Each entry
+    # should follow the format [Logger::Severity, String]. The benefit over calling the
+    # logger directly is that this is safe to use from trap contexts.
+    #
+    # @return [Queue]
+    def self.log_queue
+      @@log_queue
+    end
+
     # Set the logger that will be used for all logging output by this library.
     #
     # @param logger [Logger] The logger to be used
@@ -33,11 +54,11 @@
     # You can configure if and how often this happens using `statistics.interval.ms`.
     # The callback is called with a hash that's documented here: https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
     #
-    # @param callback [Proc] The callback
+    # @param callback [Proc, #call] The callback
     #
     # @return [nil]
     def self.statistics_callback=(callback)
-      raise TypeError.new("Callback has to be a proc or lambda") unless callback.is_a? Proc
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
       @@statistics_callback = callback
     end
 
@@ -48,6 +69,25 @@
       @@statistics_callback
     end
 
+    # Set a callback that will be called every time the underlying client emits an error.
+    # If this callback is not set, global errors such as brokers becoming unavailable will only be sent to the logger, as defined by librdkafka.
+    # The callback is called with an instance of RdKafka::Error.
+    #
+    # @param callback [Proc, #call] The callback
+    #
+    # @return [nil]
+    def self.error_callback=(callback)
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
+      @@error_callback = callback
+    end
+
+    # Returns the current error callback, by default this is nil.
+    #
+    # @return [Proc, nil]
+    def self.error_callback
+      @@error_callback
+    end
+
     # @private
     def self.opaques
       @@opaques
@@ -203,6 +243,9 @@
 
       # Set stats callback
       Rdkafka::Bindings.rd_kafka_conf_set_stats_cb(config, Rdkafka::Bindings::StatsCallback)
+
+      # Set error callback
+      Rdkafka::Bindings.rd_kafka_conf_set_error_cb(config, Rdkafka::Bindings::ErrorCallback)
     end
   end
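
Because the setters above only require the callback to respond to `call`, a custom object now works as well as a proc. A short sketch of wiring up the new error callback; the counting and logging behaviour here is an illustrative assumption, not part of the library:

```ruby
require "rdkafka"
require "logger"

# Any callable object is accepted, not just a Proc.
class ErrorCounter
  attr_reader :count

  def initialize
    @count = 0
  end

  # Receives an Rdkafka::RdkafkaError for global errors such as
  # brokers becoming unavailable.
  def call(error)
    @count += 1
    # Pushing to log_queue instead of calling the logger directly is
    # safe even from trap contexts.
    Rdkafka::Config.log_queue << [Logger::ERROR, "rdkafka: #{error}"]
  end
end

Rdkafka::Config.error_callback = ErrorCounter.new

# Non-callables are still rejected up front:
# Rdkafka::Config.error_callback = "not callable"  # => TypeError
```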
data/lib/rdkafka/consumer.rb CHANGED
@@ -471,5 +471,95 @@ module Rdkafka
     def closed_consumer_check(method)
       raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
     end
+
+    # Poll for new messages and yield them in batches that may contain
+    # messages from more than one partition.
+    #
+    # Rather than yield each message immediately as soon as it is received,
+    # each_batch will attempt to wait for as long as `timeout_ms` in order
+    # to create a batch of up to but no more than `max_items` in size.
+    #
+    # Said differently, if more than `max_items` are available within
+    # `timeout_ms`, then `each_batch` will yield early with `max_items` in the
+    # array, but if `timeout_ms` passes by with fewer messages arriving, it
+    # will yield an array of fewer messages, quite possibly zero.
+    #
+    # In order to prevent wrongly auto committing many messages at once across
+    # possibly many partitions, callers must explicitly indicate which messages
+    # have been successfully processed as some consumed messages may not have
+    # been yielded yet. To do this, the caller should set
+    # `enable.auto.offset.store` to false and pass processed messages to
+    # {store_offset}. It is also possible, though more complex, to set
+    # 'enable.auto.commit' to false and then pass a manually assembled
+    # TopicPartitionList to {commit}.
+    #
+    # As with `each`, iteration will end when the consumer is closed.
+    #
+    # Exception behavior is more complicated than with `each`, in that if
+    # :yield_on_error is true, and an exception is raised during the
+    # poll, and messages have already been received, they will be yielded to
+    # the caller before the exception is allowed to propagate.
+    #
+    # If you are setting either auto.commit or auto.offset.store to false in
+    # the consumer configuration, then you should let yield_on_error keep its
+    # default value of false because you are guaranteed to see these messages
+    # again. However, if both auto.commit and auto.offset.store are set to
+    # true, you should set yield_on_error to true so you can process messages
+    # that you may or may not see again.
+    #
+    # @param max_items [Integer] Maximum size of the yielded array of messages
+    #
+    # @param bytes_threshold [Integer] Threshold number of total message bytes in the yielded array of messages
+    #
+    # @param timeout_ms [Integer] max time to wait for up to max_items
+    #
+    # @raise [RdkafkaError] When polling fails
+    #
+    # @yield [messages, pending_exception]
+    # @yieldparam messages [Array] An array of received Message
+    # @yieldparam pending_exception [Exception] normally nil, or an exception
+    #   which will be propagated after processing of the partial batch is complete.
+    #
+    # @return [nil]
+    def each_batch(max_items: 100, bytes_threshold: Float::INFINITY, timeout_ms: 250, yield_on_error: false, &block)
+      closed_consumer_check(__method__)
+      slice = []
+      bytes = 0
+      end_time = monotonic_now + timeout_ms / 1000.0
+      loop do
+        break if @closing
+        max_wait = end_time - monotonic_now
+        max_wait_ms = if max_wait <= 0
+                        0 # should not block, but may retrieve a message
+                      else
+                        (max_wait * 1000).floor
+                      end
+        message = nil
+        begin
+          message = poll max_wait_ms
+        rescue Rdkafka::RdkafkaError => error
+          raise unless yield_on_error
+          raise if slice.empty?
+          yield slice.dup, error
+          raise
+        end
+        if message
+          slice << message
+          bytes += message.payload.bytesize
+        end
+        if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
+          yield slice.dup, nil
+          slice.clear
+          bytes = 0
+          end_time = monotonic_now + timeout_ms / 1000.0
+        end
+      end
+    end
+
+    private
+    def monotonic_now
+      # needed because Time.now can go backwards
+      Process.clock_gettime(Process::CLOCK_MONOTONIC)
+    end
   end
 end
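
Following the offset-storage advice in the comment above, a consumer loop built on `each_batch` might look like this sketch; the broker address, group id, topic name, and the `process` helper are placeholders:

```ruby
require "rdkafka"

consumer = Rdkafka::Config.new(
  "bootstrap.servers"        => "localhost:9092",
  "group.id"                 => "batch-example",
  # Store offsets explicitly so only processed messages get committed.
  "enable.auto.offset.store" => false
).consumer

consumer.subscribe("events")

# Yields up to 100 messages, or whatever arrived within 250 ms.
consumer.each_batch(max_items: 100, timeout_ms: 250) do |messages, _pending_exception|
  messages.each do |message|
    process(message)               # placeholder for application logic
    consumer.store_offset(message) # mark as processed for auto-commit
  end
end
```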
data/lib/rdkafka/producer/delivery_report.rb CHANGED
@@ -11,7 +11,7 @@ module Rdkafka
     attr_reader :offset
 
     # Error in case happen during produce.
-    # @return [string]
+    # @return [String]
     attr_reader :error
 
     private
data/lib/rdkafka/producer.rb CHANGED
@@ -1,3 +1,5 @@
+require "securerandom"
+
 module Rdkafka
   # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
   class Producer
@@ -9,11 +11,12 @@ module Rdkafka
 
     # @private
     def initialize(native_kafka)
+      @id = SecureRandom.uuid
       @closing = false
       @native_kafka = native_kafka
 
       # Makes sure, that the producer gets closed before it gets GCed by Ruby
-      ObjectSpace.define_finalizer(self, proc { close })
+      ObjectSpace.define_finalizer(@id, proc { close })
 
       # Start thread to poll client for delivery callbacks
       @polling_thread = Thread.new do
@@ -31,16 +34,18 @@
     # Set a callback that will be called every time a message is successfully produced.
     # The callback is called with a {DeliveryReport}
     #
-    # @param callback [Proc] The callback
+    # @param callback [Proc, #call] The callback
     #
     # @return [nil]
     def delivery_callback=(callback)
-      raise TypeError.new("Callback has to be a proc or lambda") unless callback.is_a? Proc
+      raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
       @delivery_callback = callback
     end
 
     # Close this producer and wait for the internal poll queue to empty.
     def close
+      ObjectSpace.undefine_finalizer(@id)
+
       return unless @native_kafka
 
       # Indicate to polling thread that we're closing
@@ -70,8 +75,9 @@
     #
     # @param topic [String] The topic to produce to
     # @param payload [String,nil] The message's payload
-    # @param key [String] The message's key
+    # @param key [String, nil] The message's key
     # @param partition [Integer,nil] Optional partition to produce to
+    # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
     # @param headers [Hash<String,String>] Optional message headers
     #
@@ -140,16 +146,14 @@
         headers.each do |key0, value0|
           key = key0.to_s
           value = value0.to_s
-          args += [
-            :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER,
-            :string, key,
-            :pointer, value,
-            :size_t, value.bytes.size
-          ]
+          args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+          args << :string << key
+          args << :pointer << value
+          args << :size_t << value.bytes.size
        end
      end
 
-      args += [:int, Rdkafka::Bindings::RD_KAFKA_VTYPE_END]
+      args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END
 
      # Produce the message
      response = Rdkafka::Bindings.rd_kafka_producev(
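
The delivery callback gets the same relaxation: anything responding to `call` is accepted. A produce-and-wait sketch, where the broker address, topic, and message contents are placeholders:

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

# A lambda now works, as would a method object or a custom class instance.
producer.delivery_callback = ->(report) {
  puts "delivered to partition #{report.partition} at offset #{report.offset}"
}

# produce returns a handle; wait blocks until the broker acknowledges.
producer.produce(topic: "events", payload: "hello", key: "greeting").wait

producer.close
```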
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,5 @@
 module Rdkafka
-  VERSION = "0.8.1"
-  LIBRDKAFKA_VERSION = "1.4.0"
-  LIBRDKAFKA_SOURCE_SHA256 = "ae27ea3f3d0d32d29004e7f709efbba2666c5383a107cc45b3a1949486b2eb84"
+  VERSION = "0.11.1"
+  LIBRDKAFKA_VERSION = "1.8.2"
+  LIBRDKAFKA_SOURCE_SHA256 = "6a747d293a7a4613bd2897e28e8791476fbe1ae7361f2530a876e0fd483482a6"
 end
data/rdkafka.gemspec CHANGED
@@ -14,15 +14,17 @@ Gem::Specification.new do |gem|
   gem.name = 'rdkafka'
   gem.require_paths = ['lib']
   gem.version = Rdkafka::VERSION
-  gem.required_ruby_version = '>= 2.4'
+  gem.required_ruby_version = '>= 2.6'
   gem.extensions = %w(ext/Rakefile)
 
-  gem.add_dependency 'ffi', '~> 1.9'
-  gem.add_dependency 'mini_portile2', '~> 2.1'
-  gem.add_dependency 'rake', '>= 12.3'
+  gem.add_dependency 'ffi', '~> 1.15'
+  gem.add_dependency 'mini_portile2', '~> 2.6'
+  gem.add_dependency 'rake', '> 12'
 
-  gem.add_development_dependency 'pry', '~> 0.10'
+  gem.add_development_dependency 'pry'
   gem.add_development_dependency 'rspec', '~> 3.5'
-  gem.add_development_dependency 'rake', '~> 12.0'
-  gem.add_development_dependency 'simplecov', '~> 0.15'
+  gem.add_development_dependency 'rake'
+  gem.add_development_dependency 'simplecov'
+  gem.add_development_dependency 'guard'
+  gem.add_development_dependency 'guard-rspec'
 end
data/spec/rdkafka/abstract_handle_spec.rb CHANGED
@@ -111,4 +111,3 @@ describe Rdkafka::AbstractHandle do
     end
   end
 end
-
data/spec/rdkafka/admin_spec.rb CHANGED
@@ -14,6 +14,8 @@ describe Rdkafka::Admin do
   let(:topic_name) { "test-topic-#{Random.new.rand(0..1_000_000)}" }
   let(:topic_partition_count) { 3 }
   let(:topic_replication_factor) { 1 }
+  let(:topic_config) { {"cleanup.policy" => "compact", "min.cleanable.dirty.ratio" => 0.8} }
+  let(:invalid_topic_config) { {"cleeeeenup.policee" => "campact"} }
 
   describe "#create_topic" do
     describe "called with invalid input" do
@@ -50,7 +52,7 @@
       end
 
       describe "with an invalid partition count" do
-        let(:topic_partition_count) { -1 }
+        let(:topic_partition_count) { -999 }
 
         it "raises an exception" do
           expect {
@@ -68,6 +70,15 @@
           }.to raise_error Rdkafka::Config::ConfigError, /replication_factor out of expected range/
         end
       end
+
+      describe "with an invalid topic configuration" do
+        it "doesn't create the topic" do
+          create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor, invalid_topic_config)
+          expect {
+            create_topic_handle.wait(max_wait_timeout: 15.0)
+          }.to raise_error Rdkafka::RdkafkaError, /Broker: Configuration is invalid \(invalid_config\)/
+        end
+      end
     end
 
     context "edge case" do
@@ -97,7 +108,7 @@
       end
 
       it "creates a topic" do
-        create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor)
+        create_topic_handle = admin.create_topic(topic_name, topic_partition_count, topic_replication_factor, topic_config)
         create_topic_report = create_topic_handle.wait(max_wait_timeout: 15.0)
         expect(create_topic_report.error_string).to be_nil
         expect(create_topic_report.result_name).to eq(topic_name)