karafka-rdkafka 0.13.7 → 0.13.9

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
Files changed (45)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.gitignore +4 -0
  4. data/.rspec +1 -0
  5. data/.ruby-gemset +1 -0
  6. data/.ruby-version +1 -0
  7. data/CHANGELOG.md +40 -31
  8. data/{LICENSE → MIT-LICENSE} +2 -1
  9. data/README.md +11 -11
  10. data/ext/README.md +1 -1
  11. data/ext/Rakefile +1 -1
  12. data/lib/rdkafka/abstract_handle.rb +37 -24
  13. data/lib/rdkafka/admin.rb +6 -7
  14. data/lib/rdkafka/bindings.rb +1 -4
  15. data/lib/rdkafka/config.rb +30 -15
  16. data/lib/rdkafka/consumer/headers.rb +2 -4
  17. data/lib/rdkafka/consumer.rb +83 -53
  18. data/lib/rdkafka/helpers/time.rb +14 -0
  19. data/lib/rdkafka/producer.rb +8 -15
  20. data/lib/rdkafka/version.rb +1 -1
  21. data/lib/rdkafka.rb +10 -1
  22. data/spec/rdkafka/abstract_handle_spec.rb +0 -2
  23. data/spec/rdkafka/admin/create_topic_handle_spec.rb +0 -2
  24. data/spec/rdkafka/admin/create_topic_report_spec.rb +0 -2
  25. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +0 -2
  26. data/spec/rdkafka/admin/delete_topic_report_spec.rb +0 -2
  27. data/spec/rdkafka/admin_spec.rb +0 -1
  28. data/spec/rdkafka/bindings_spec.rb +0 -1
  29. data/spec/rdkafka/callbacks_spec.rb +0 -2
  30. data/spec/rdkafka/config_spec.rb +8 -2
  31. data/spec/rdkafka/consumer/headers_spec.rb +0 -2
  32. data/spec/rdkafka/consumer/message_spec.rb +0 -2
  33. data/spec/rdkafka/consumer/partition_spec.rb +0 -2
  34. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +0 -2
  35. data/spec/rdkafka/consumer_spec.rb +122 -38
  36. data/spec/rdkafka/error_spec.rb +0 -2
  37. data/spec/rdkafka/metadata_spec.rb +0 -1
  38. data/spec/rdkafka/native_kafka_spec.rb +0 -2
  39. data/spec/rdkafka/producer/delivery_handle_spec.rb +0 -2
  40. data/spec/rdkafka/producer/delivery_report_spec.rb +0 -2
  41. data/spec/rdkafka/producer_spec.rb +0 -1
  42. data/spec/spec_helper.rb +1 -1
  43. data.tar.gz.sig +0 -0
  44. metadata +7 -4
  45. metadata.gz.sig +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 4fe6b47d334265ef793c32b215dca1d97adfa42a51215b9492f1d36b58a84403
- data.tar.gz: 4fc125147d796f981314640779adf5234376b0d0357c0321683002820fcb3301
+ metadata.gz: c954a06b4461885d7648e8081d0426ebb895a82f1a27607f2224fc9e60843574
+ data.tar.gz: a25bf01b430920c7fd2cf22b0dd23ea1de9dd15d1b45f614025e97a3b725f8f3
  SHA512:
- metadata.gz: f7fc5d7dc99653f117b4bef3baa0d6e772537f4a7df6375a6a33692d4a9728f81308e11c3cd873fbb5686fb83cf2021a5866eb252e10661b3fe512639e3e3349
- data.tar.gz: 9d23217c18093187f1fecc5afbbdbe05d2a6be3cb99adb0dc20ed7458da19c7035cc51eda664e09c3acaa5ac3dd3539b0c58c91cd6d6faed3c1d21edc9662dde
+ metadata.gz: afe480fedbbe5dc6f055709aaf3601acb17aaaa890f1c6e7423b90263eb8fa893de0e8b9bb571e6abf1c5fc0b8df2a6be2e5de4af1de1b68414204275981246e
+ data.tar.gz: 7bb81ff6dcbc95b018ede8bd986eda701f1f5f3bb3985684d436a38a2173f8753b01e1642780eb9ad52a65ad02e2b3b7d656c4259758c8571ef2858822384875
checksums.yaml.gz.sig CHANGED
Binary file
data/.gitignore CHANGED
@@ -1,3 +1,6 @@
+ # Ignore bundler config.
+ /.bundle
+
  Gemfile.lock
  ext/ports
  ext/tmp
@@ -6,3 +9,4 @@ ext/librdkafka.*
  .yardoc
  doc
  coverage
+ vendor
data/.rspec CHANGED
@@ -1 +1,2 @@
+ --require spec_helper
  --format documentation
data/.ruby-gemset ADDED
@@ -0,0 +1 @@
+ rdkafka-ruby
data/.ruby-version ADDED
@@ -0,0 +1 @@
+ 3.2.2
data/CHANGELOG.md CHANGED
@@ -1,30 +1,39 @@
- # 0.13.7 (Unreleased)
+ # Rdkafka Changelog
+
+ ## 0.13.9 (2023-11-07)
+ - [Enhancement] Expose alternative way of managing consumer events via a separate queue.
+ - [Enhancement] Allow for setting `statistics_callback` as nil to reset predefined settings configured by a different gem.
+
+ ## 0.13.8 (2023-10-31)
+ - [Enhancement] Get consumer position (thijsc & mensfeld)
+
+ ## 0.13.7 (2023-10-31)
  - [Change] Drop support for Ruby 2.6 due to incompatibilities in usage of `ObjectSpace::WeakMap`
  - [Fix] Fix dangling Opaque references.
 
- # 0.13.6 (2023-10-17)
+ ## 0.13.6 (2023-10-17)
  * **[Feature]** Support transactions API in the producer
  * [Enhancement] Add `raise_response_error` flag to the `Rdkafka::AbstractHandle`.
  * [Enhancement] Provide `#purge` to remove any outstanding requests from the producer.
  * [Enhancement] Fix `#flush` does not handle the timeouts errors by making it return true if all flushed or false if failed. We do **not** raise an exception here to keep it backwards compatible.
 
- # 0.13.5
+ ## 0.13.5
  * Fix DeliveryReport `create_result#error` being nil despite an error being associated with it
 
- # 0.13.4
+ ## 0.13.4
  * Always call initial poll on librdkafka to make sure oauth bearer cb is handled pre-operations.
 
- # 0.13.3
+ ## 0.13.3
  * Bump librdkafka to 2.2.0
 
- # 0.13.2
+ ## 0.13.2
  * Ensure operations counter decrement is fully thread-safe
  * Bump librdkafka to 2.1.1
 
- # 0.13.1
+ ## 0.13.1
  * Add offsets_for_times method on consumer (timflapper)
 
- # 0.13.0
+ ## 0.13.0 (2023-07-24)
  * Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
  * Support both string and symbol header keys (ColinDKelley)
  * Handle tombstone messages properly (kgalieva)
@@ -45,32 +54,32 @@
  * Retry metadta fetches on certain errors with a backoff (mensfeld)
  * Do not lock access to underlying native kafka client and rely on Karafka granular locking (mensfeld)
 
- # 0.12.3
+ ## 0.12.3
  - Include backtrace in non-raised binded errors.
  - Include topic name in the delivery reports
 
- # 0.12.2
+ ## 0.12.2
  * Increase the metadata default timeout from 250ms to 2 seconds. This should allow for working with remote clusters.
 
- # 0.12.1
+ ## 0.12.1
  * Bumps librdkafka to 2.0.2 (lmaia)
  * Add support for adding more partitions via Admin API
 
- # 0.12.0
+ ## 0.12.0 (2022-06-17)
  * Bumps librdkafka to 1.9.0
  * Fix crash on empty partition key (mensfeld)
  * Pass the delivery handle to the callback (gvisokinskas)
 
- # 0.11.0
+ ## 0.11.0 (2021-11-17)
  * Upgrade librdkafka to 1.8.2
  * Bump supported minimum Ruby version to 2.6
  * Better homebrew path detection
 
- # 0.10.0
+ ## 0.10.0 (2021-09-07)
  * Upgrade librdkafka to 1.5.0
  * Add error callback config
 
- # 0.9.0
+ ## 0.9.0 (2021-06-23)
  * Fixes for Ruby 3.0
  * Allow any callable object for callbacks (gremerritt)
  * Reduce memory allocations in Rdkafka::Producer#produce (jturkel)
@@ -78,13 +87,13 @@
  * Allow passing in topic configuration on create_topic (dezka)
  * Add each_batch method to consumer (mgrosso)
 
- # 0.8.1
+ ## 0.8.1 (2020-12-07)
  * Fix topic_flag behaviour and add tests for Metadata (geoff2k)
  * Add topic admin interface (geoff2k)
  * Raise an exception if @native_kafka is nil (geoff2k)
  * Option to use zstd compression (jasonmartens)
 
- # 0.8.0
+ ## 0.8.0 (2020-06-02)
  * Upgrade librdkafka to 1.4.0
  * Integrate librdkafka metadata API and add partition_key (by Adithya-copart)
  * Ruby 2.7 compatibility fix (by Geoff Thé)A
@@ -92,22 +101,22 @@
  * Don't override CPPFLAGS and LDFLAGS if already set on Mac (by Hiroshi Hatake)
  * Allow use of Rake 13.x and up (by Tomasz Pajor)
 
- # 0.7.0
+ ## 0.7.0 (2019-09-21)
  * Bump librdkafka to 1.2.0 (by rob-as)
  * Allow customizing the wait time for delivery report availability (by mensfeld)
 
- # 0.6.0
+ ## 0.6.0 (2019-07-23)
  * Bump librdkafka to 1.1.0 (by Chris Gaffney)
  * Implement seek (by breunigs)
 
- # 0.5.0
+ ## 0.5.0 (2019-04-11)
  * Bump librdkafka to 1.0.0 (by breunigs)
  * Add cluster and member information (by dmexe)
  * Support message headers for consumer & producer (by dmexe)
  * Add consumer rebalance listener (by dmexe)
  * Implement pause/resume partitions (by dmexe)
 
- # 0.4.2
+ ## 0.4.2 (2019-01-12)
  * Delivery callback for producer
  * Document list param of commit method
  * Use default Homebrew openssl location if present
@@ -116,10 +125,10 @@
  * Add support for storing message offsets
  * Add missing runtime dependency to rake
 
- # 0.4.1
+ ## 0.4.1 (2018-10-19)
  * Bump librdkafka to 0.11.6
 
- # 0.4.0
+ ## 0.4.0 (2018-09-24)
  * Improvements in librdkafka archive download
  * Add global statistics callback
  * Use Time for timestamps, potentially breaking change if you
@@ -131,34 +140,34 @@
  * Support committing a topic partition list
  * Add consumer assignment method
 
- # 0.3.5
+ ## 0.3.5 (2018-01-17)
  * Fix crash when not waiting for delivery handles
  * Run specs on Ruby 2.5
 
- # 0.3.4
+ ## 0.3.4 (2017-12-05)
  * Bump librdkafka to 0.11.3
 
- # 0.3.3
+ ## 0.3.3 (2017-10-27)
  * Fix bug that prevent display of `RdkafkaError` message
 
- # 0.3.2
+ ## 0.3.2 (2017-10-25)
  * `add_topic` now supports using a partition count
  * Add way to make errors clearer with an extra message
  * Show topics in subscribe error message
  * Show partition and topic in query watermark offsets error message
 
- # 0.3.1
+ ## 0.3.1 (2017-10-23)
  * Bump librdkafka to 0.11.1
  * Officially support ranges in `add_topic` for topic partition list.
  * Add consumer lag calculator
 
- # 0.3.0
+ ## 0.3.0 (2017-10-17)
  * Move both add topic methods to one `add_topic` in `TopicPartitionList`
  * Add committed offsets to consumer
  * Add query watermark offset to consumer
 
- # 0.2.0
+ ## 0.2.0 (2017-10-13)
  * Some refactoring and add inline documentation
 
- # 0.1.x
+ ## 0.1.x (2017-09-10)
  * Initial working version including producing and consuming
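The 0.13.6 entries above name several producer additions (transactions API, `#purge`, boolean `#flush`). A minimal sketch of how they might fit together; the transaction method names (`init_transactions`, `begin_transaction`, `commit_transaction`) follow the changelog wording and are assumptions, not shown in this diff:

```ruby
require "rdkafka"

# Assumed transactional producer setup; "transactional.id" is the standard
# librdkafka config key required for transactions.
producer = Rdkafka::Config.new(
  :"bootstrap.servers" => "localhost:9092",
  :"transactional.id"  => "example-tx-producer"
).producer

producer.init_transactions
producer.begin_transaction
producer.produce(topic: "events", payload: "tx payload")
producer.commit_transaction

# Per 0.13.6: #flush returns true if everything flushed, false on timeout,
# instead of raising, to stay backwards compatible.
flushed = producer.flush(2_000)
producer.purge unless flushed # drop any outstanding requests
producer.close
```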
data/{LICENSE → MIT-LICENSE} RENAMED
@@ -1,6 +1,7 @@
  The MIT License (MIT)
 
- Copyright (c) 2017 Thijs Cadier
+ Copyright (c) 2017-2023 Thijs Cadier
+ 2023, Maciej Mensfeld
 
  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
data/README.md CHANGED
@@ -10,16 +10,16 @@
  ---
 
  The `rdkafka` gem is a modern Kafka client library for Ruby based on
- [librdkafka](https://github.com/edenhill/librdkafka/).
+ [librdkafka](https://github.com/confluentinc/librdkafka/).
  It wraps the production-ready C client using the [ffi](https://github.com/ffi/ffi)
- gem and targets Kafka 1.0+ and Ruby versions that are under security or
- active maintenance. We remove Ruby version from our CI builds if they
+ gem and targets Kafka 1.0+ and Ruby versions under security or
+ active maintenance. We remove a Ruby version from our CI builds when they
  become EOL.
 
  `rdkafka` was written because of the need for a reliable Ruby client for Kafka that supports modern Kafka at [AppSignal](https://appsignal.com). AppSignal runs it in production on very high-traffic systems.
 
  The most important pieces of a Kafka client are implemented. We're
- working towards feature completeness, you can track that here:
+ working towards feature completeness. You can track that here:
  https://github.com/appsignal/rdkafka-ruby/milestone/1
 
  ## Table of content
@@ -38,7 +38,7 @@ https://github.com/appsignal/rdkafka-ruby/milestone/1
  ## Installation
 
  This gem downloads and compiles librdkafka when it is installed. If you
- have any problems installing the gem please open an issue.
+ If you have any problems installing the gem, please open an issue.
 
  ## Usage
 
@@ -64,9 +64,9 @@ end
 
  ### Producing messages
 
- Produce a number of messages, put the delivery handles in an array and
+ Produce a number of messages, put the delivery handles in an array, and
  wait for them before exiting. This way the messages will be batched and
- sent to Kafka in an efficient way.
+ efficiently sent to Kafka.
 
  ```ruby
  config = {:"bootstrap.servers" => "localhost:9092"}
@@ -91,7 +91,7 @@ released until it `#close` is explicitly called, so be sure to call
 
  ## Higher level libraries
 
- Currently, there are two actively developed frameworks based on rdkafka-ruby, that provide higher level API that can be used to work with Kafka messages and one library for publishing messages.
+ Currently, there are two actively developed frameworks based on rdkafka-ruby, that provide higher-level API that can be used to work with Kafka messages and one library for publishing messages.
 
  ### Message processing frameworks
 
@@ -104,7 +104,7 @@ Currently, there are two actively developed frameworks based on rdkafka-ruby, th
 
  ## Development
 
- A Docker Compose file is included to run Kafka and Zookeeper. To run
+ A Docker Compose file is included to run Kafka. To run
  that:
 
  ```
@@ -122,7 +122,7 @@ DEBUG_PRODUCER=true bundle exec rspec
  DEBUG_CONSUMER=true bundle exec rspec
  ```
 
- After running the tests you can bring the cluster down to start with a
+ After running the tests, you can bring the cluster down to start with a
  clean slate:
 
  ```
@@ -131,7 +131,7 @@ docker-compose down
 
  ## Example
 
- To see everything working run these in separate tabs:
+ To see everything working, run these in separate tabs:
 
  ```
  bundle exec rake consume_messages
data/ext/README.md CHANGED
@@ -5,7 +5,7 @@ this gem is installed.
 
  To update the `librdkafka` version follow the following steps:
 
- * Go to https://github.com/edenhill/librdkafka/releases to get the new
+ * Go to https://github.com/confluentinc/librdkafka/releases to get the new
  version number and asset checksum for `tar.gz`.
  * Change the version in `lib/rdkafka/version.rb`
  * Change the `sha256` in `lib/rdkafka/version.rb`
data/ext/Rakefile CHANGED
@@ -17,7 +17,7 @@ task :default => :clean do
  end
 
  recipe.files << {
- :url => "https://codeload.github.com/edenhill/librdkafka/tar.gz/v#{Rdkafka::LIBRDKAFKA_VERSION}",
+ :url => "https://codeload.github.com/confluentinc/librdkafka/tar.gz/v#{Rdkafka::LIBRDKAFKA_VERSION}",
  :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
  }
  recipe.configure_options = ["--host=#{recipe.host}"]
data/lib/rdkafka/abstract_handle.rb CHANGED
@@ -1,28 +1,37 @@
  # frozen_string_literal: true
 
- require "ffi"
-
  module Rdkafka
+ # This class serves as an abstract base class to represent handles within the Rdkafka module.
+ # As a subclass of `FFI::Struct`, this class provides a blueprint for other specific handle
+ # classes to inherit from, ensuring they adhere to a particular structure and behavior.
+ #
+ # Subclasses must define their own layout, and the layout must start with:
+ #
+ # layout :pending, :bool,
+ # :response, :int
  class AbstractHandle < FFI::Struct
- # Subclasses must define their own layout, and the layout must start with:
- #
- # layout :pending, :bool,
- # :response, :int
+ include Helpers::Time
 
+ # Registry for registering all the handles.
  REGISTRY = {}
 
- CURRENT_TIME = -> { Process.clock_gettime(Process::CLOCK_MONOTONIC) }.freeze
-
- private_constant :CURRENT_TIME
+ class << self
+ # Adds handle to the register
+ #
+ # @param handle [AbstractHandle] any handle we want to register
+ def register(handle)
+ address = handle.to_ptr.address
+ REGISTRY[address] = handle
+ end
 
- def self.register(handle)
- address = handle.to_ptr.address
- REGISTRY[address] = handle
+ # Removes handle from the register based on the handle address
+ #
+ # @param address [Integer] address of the registered handle we want to remove
+ def remove(address)
+ REGISTRY.delete(address)
+ end
  end
 
- def self.remove(address)
- REGISTRY.delete(address)
- end
 
  # Whether the handle is still pending.
  #
@@ -32,27 +41,31 @@ module Rdkafka
  end
 
  # Wait for the operation to complete or raise an error if this takes longer than the timeout.
- # If there is a timeout this does not mean the operation failed, rdkafka might still be working on the operation.
- # In this case it is possible to call wait again.
+ # If there is a timeout this does not mean the operation failed, rdkafka might still be working
+ # on the operation. In this case it is possible to call wait again.
  #
- # @param max_wait_timeout [Numeric, nil] Amount of time to wait before timing out. If this is nil it does not time out.
- # @param wait_timeout [Numeric] Amount of time we should wait before we recheck if the operation has completed
+ # @param max_wait_timeout [Numeric, nil] Amount of time to wait before timing out.
+ # If this is nil it does not time out.
+ # @param wait_timeout [Numeric] Amount of time we should wait before we recheck if the
+ # operation has completed
  # @param raise_response_error [Boolean] should we raise error when waiting finishes
  #
+ # @return [Object] Operation-specific result
+ #
  # @raise [RdkafkaError] When the operation failed
  # @raise [WaitTimeoutError] When the timeout has been reached and the handle is still pending
- #
- # @return [Object] Operation-specific result
  def wait(max_wait_timeout: 60, wait_timeout: 0.1, raise_response_error: true)
  timeout = if max_wait_timeout
- CURRENT_TIME.call + max_wait_timeout
+ monotonic_now + max_wait_timeout
  else
  nil
  end
  loop do
  if pending?
- if timeout && timeout <= CURRENT_TIME.call
- raise WaitTimeoutError.new("Waiting for #{operation_name} timed out after #{max_wait_timeout} seconds")
+ if timeout && timeout <= monotonic_now
+ raise WaitTimeoutError.new(
+ "Waiting for #{operation_name} timed out after #{max_wait_timeout} seconds"
+ )
  end
  sleep wait_timeout
  elsif self[:response] != 0 && raise_response_error
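Since `#wait` is the surface most callers touch, a short sketch of the semantics documented above. Only the `wait` keywords are taken from this diff; the `producer` and its delivery handle are assumed from the README snippet earlier:

```ruby
# Any AbstractHandle subclass behaves this way, e.g. a delivery handle.
handle = producer.produce(topic: "events", payload: "hello")

begin
  # Recheck every 50ms, give up after 10s. A timeout does not mean the
  # operation failed; rdkafka may still finish it, so wait can be retried.
  result = handle.wait(max_wait_timeout: 10, wait_timeout: 0.05)
rescue Rdkafka::AbstractHandle::WaitTimeoutError
  retry
end

# With the 0.13.6 flag, a failed response does not raise and the
# operation-specific result is returned as-is.
handle.wait(raise_response_error: false)
```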
data/lib/rdkafka/admin.rb CHANGED
@@ -1,7 +1,5 @@
  # frozen_string_literal: true
 
- require "objspace"
-
  module Rdkafka
  class Admin
  # @private
@@ -30,11 +28,12 @@ module Rdkafka
 
  # Create a topic with the given partition count and replication factor
  #
+ # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of
+ # creating the topic
+ #
  # @raise [ConfigError] When the partition count or replication factor are out of valid range
  # @raise [RdkafkaError] When the topic name is invalid or the topic already exists
  # @raise [RdkafkaError] When the topic configuration is invalid
- #
- # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
  def create_topic(topic_name, partition_count, replication_factor, topic_config={})
  closed_admin_check(__method__)
 
@@ -107,11 +106,11 @@ module Rdkafka
  create_topic_handle
  end
 
- # Delete the named topic
+ # Deletes the named topic
  #
+ # @return [DeleteTopicHandle] Delete topic handle that can be used to wait for the result of
+ # deleting the topic
  # @raise [RdkafkaError] When the topic name is invalid or the topic does not exist
- #
- # @return [DeleteTopicHandle] Delete topic handle that can be used to wait for the result of deleting the topic
  def delete_topic(topic_name)
  closed_admin_check(__method__)
 
data/lib/rdkafka/bindings.rb CHANGED
@@ -1,9 +1,5 @@
  # frozen_string_literal: true
 
- require "ffi"
- require "json"
- require "logger"
-
  module Rdkafka
  # @private
  #
@@ -208,6 +204,7 @@ module Rdkafka
  attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
  attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
  attach_function :rd_kafka_offsets_for_times, [:pointer, :pointer, :int], :int, blocking: true
+ attach_function :rd_kafka_position, [:pointer, :pointer], :int, blocking: true
 
  # Headers
  attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
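The new `rd_kafka_position` binding backs the 0.13.8 "Get consumer position" changelog entry. The Ruby-level wrapper sketched below (`consumer.position` returning a topic partition list) is an assumption based on that entry and is not part of this diff:

```ruby
consumer = Rdkafka::Config.new(
  :"bootstrap.servers" => "localhost:9092",
  :"group.id"          => "example-group"
).consumer

consumer.subscribe("events")
consumer.poll(250) # trigger assignment and fetch

# Position = offset of the next message to be fetched, per assigned partition.
# Method name assumed; returns a TopicPartitionList in this sketch.
tpl = consumer.position
tpl.to_h.each do |topic, partitions|
  partitions.each { |p| puts "#{topic}/#{p.partition} => #{p.offset}" }
end
```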
data/lib/rdkafka/config.rb CHANGED
@@ -1,11 +1,9 @@
  # frozen_string_literal: true
 
- require "logger"
-
  module Rdkafka
  # Configuration for a Kafka consumer or producer. You can create an instance and use
  # the consumer and producer methods to create a client. Documentation of the available
- # configuration options is available on https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md.
+ # configuration options is available on https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md.
  class Config
  # @private
  @@logger = Logger.new(STDOUT)
@@ -53,13 +51,13 @@ module Rdkafka
 
  # Set a callback that will be called every time the underlying client emits statistics.
  # You can configure if and how often this happens using `statistics.interval.ms`.
- # The callback is called with a hash that's documented here: https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
+ # The callback is called with a hash that's documented here: https://github.com/confluentinc/librdkafka/blob/master/STATISTICS.md
  #
  # @param callback [Proc, #call] The callback
  #
  # @return [nil]
  def self.statistics_callback=(callback)
- raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
+ raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call) || callback == nil
  @@statistics_callback = callback
  end
 
@@ -114,6 +112,7 @@ module Rdkafka
  def initialize(config_hash = {})
  @config_hash = DEFAULT_CONFIG.merge(config_hash)
  @consumer_rebalance_listener = nil
+ @consumer_poll_set = true
  end
 
  # Set a config option.
@@ -142,12 +141,28 @@ module Rdkafka
  @consumer_rebalance_listener = listener
  end
 
- # Create a consumer with this configuration.
+ # Should we use a single queue for the underlying consumer and events.
  #
- # @raise [ConfigError] When the configuration contains invalid options
- # @raise [ClientCreationError] When the native client cannot be created
+ # This is an advanced API that allows for more granular control of the polling process.
+ # When this value is set to `false` (`true` by defualt), there will be two queues that need to
+ # be polled:
+ # - main librdkafka queue for events
+ # - consumer queue with messages and rebalances
+ #
+ # It is recommended to use the defaults and only set it to `false` in advance multi-threaded
+ # and complex cases where granular events handling control is needed.
+ #
+ # @param poll_set [Boolean]
+ def consumer_poll_set=(poll_set)
+ @consumer_poll_set = poll_set
+ end
+
+ # Creates a consumer with this configuration.
  #
  # @return [Consumer] The created consumer
+ #
+ # @raise [ConfigError] When the configuration contains invalid options
+ # @raise [ClientCreationError] When the native client cannot be created
  def consumer
  opaque = Opaque.new
  config = native_config(opaque)
@@ -160,8 +175,8 @@ module Rdkafka
  # Create native client
  kafka = native_kafka(config, :rd_kafka_consumer)
 
- # Redirect the main queue to the consumer
- Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
+ # Redirect the main queue to the consumer queue
+ Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka) if @consumer_poll_set
 
  # Return consumer with Kafka client
  Rdkafka::Consumer.new(
@@ -175,10 +190,10 @@ module Rdkafka
 
  # Create a producer with this configuration.
  #
+ # @return [Producer] The created producer
+ #
  # @raise [ConfigError] When the configuration contains invalid options
  # @raise [ClientCreationError] When the native client cannot be created
- #
- # @return [Producer] The created producer
  def producer
  # Create opaque
  opaque = Opaque.new
@@ -200,12 +215,12 @@ module Rdkafka
  end
  end
 
- # Create an admin instance with this configuration.
+ # Creates an admin instance with this configuration.
+ #
+ # @return [Admin] The created admin instance
  #
  # @raise [ConfigError] When the configuration contains invalid options
  # @raise [ClientCreationError] When the native client cannot be created
- #
- # @return [Admin] The created admin instance
  def admin
  opaque = Opaque.new
  config = native_config(opaque)
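To illustrate the two behavioral changes in this file, a hedged sketch; the setters come straight from the diff, while the events-queue polling method on the consumer is assumed from the 0.13.9 changelog entry and is not shown here:

```ruby
config = Rdkafka::Config.new(
  :"bootstrap.servers" => "localhost:9092",
  :"group.id"          => "example-group"
)

# Advanced mode: skip rd_kafka_poll_set_consumer, keeping the main
# librdkafka event queue separate from the consumer queue.
config.consumer_poll_set = false
consumer = config.consumer

# With the poll set disabled, both queues must be served
# (the events-queue method name below is an assumption):
# consumer.events_poll(100) # main queue: statistics, errors, OAuth callbacks
consumer.poll(100)          # consumer queue: messages and rebalances

# Also new: nil is now accepted to clear a callback preconfigured elsewhere.
Rdkafka::Config.statistics_callback = nil
```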
data/lib/rdkafka/consumer/headers.rb CHANGED
@@ -18,13 +18,11 @@ module Rdkafka
 
  # Reads a librdkafka native message's headers and returns them as a Ruby Hash
  #
- # @param [librdkakfa message] native_message
+ # @private
  #
+ # @param [librdkakfa message] native_message
  # @return [Hash<String, String>] headers Hash for the native_message
- #
  # @raise [Rdkafka::RdkafkaError] when fail to read headers
- #
- # @private
  def self.from_native(native_message)
  headers_ptrptr = FFI::MemoryPointer.new(:pointer)
  err = Rdkafka::Bindings.rd_kafka_message_headers(native_message, headers_ptrptr)
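`Headers.from_native` is `@private`; application code reaches the parsed hash through the message object instead. A minimal sketch, assuming the standard `message.headers` accessor on consumed messages:

```ruby
# Headers parsed by Headers.from_native surface on each consumed message.
consumer.each do |message|
  # Hash<String, String>, e.g. {"request-id" => "abc123"}
  message.headers.each do |key, value|
    puts "#{key}: #{value}"
  end
end
```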