karafka-rdkafka 0.20.0.rc3-x86_64-linux-gnu

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. checksums.yaml +7 -0
  2. data/.github/CODEOWNERS +3 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci_linux_x86_64_gnu.yml +248 -0
  5. data/.github/workflows/ci_macos_arm64.yml +301 -0
  6. data/.github/workflows/push_linux_x86_64_gnu.yml +60 -0
  7. data/.github/workflows/push_ruby.yml +37 -0
  8. data/.github/workflows/verify-action-pins.yml +16 -0
  9. data/.gitignore +15 -0
  10. data/.rspec +2 -0
  11. data/.ruby-gemset +1 -0
  12. data/.ruby-version +1 -0
  13. data/.yardopts +2 -0
  14. data/CHANGELOG.md +323 -0
  15. data/Gemfile +5 -0
  16. data/MIT-LICENSE +22 -0
  17. data/README.md +177 -0
  18. data/Rakefile +96 -0
  19. data/docker-compose.yml +25 -0
  20. data/ext/README.md +19 -0
  21. data/ext/Rakefile +131 -0
  22. data/ext/build_common.sh +361 -0
  23. data/ext/build_linux_x86_64_gnu.sh +306 -0
  24. data/ext/build_macos_arm64.sh +550 -0
  25. data/ext/librdkafka.so +0 -0
  26. data/karafka-rdkafka.gemspec +61 -0
  27. data/lib/rdkafka/abstract_handle.rb +116 -0
  28. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  29. data/lib/rdkafka/admin/config_binding_result.rb +30 -0
  30. data/lib/rdkafka/admin/config_resource_binding_result.rb +18 -0
  31. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  32. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  33. data/lib/rdkafka/admin/create_partitions_handle.rb +30 -0
  34. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  35. data/lib/rdkafka/admin/create_topic_handle.rb +32 -0
  36. data/lib/rdkafka/admin/create_topic_report.rb +24 -0
  37. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  38. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  39. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  40. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  41. data/lib/rdkafka/admin/delete_topic_handle.rb +32 -0
  42. data/lib/rdkafka/admin/delete_topic_report.rb +24 -0
  43. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  44. data/lib/rdkafka/admin/describe_acl_report.rb +24 -0
  45. data/lib/rdkafka/admin/describe_configs_handle.rb +33 -0
  46. data/lib/rdkafka/admin/describe_configs_report.rb +48 -0
  47. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +33 -0
  48. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +48 -0
  49. data/lib/rdkafka/admin.rb +832 -0
  50. data/lib/rdkafka/bindings.rb +582 -0
  51. data/lib/rdkafka/callbacks.rb +415 -0
  52. data/lib/rdkafka/config.rb +398 -0
  53. data/lib/rdkafka/consumer/headers.rb +79 -0
  54. data/lib/rdkafka/consumer/message.rb +86 -0
  55. data/lib/rdkafka/consumer/partition.rb +57 -0
  56. data/lib/rdkafka/consumer/topic_partition_list.rb +190 -0
  57. data/lib/rdkafka/consumer.rb +663 -0
  58. data/lib/rdkafka/error.rb +201 -0
  59. data/lib/rdkafka/helpers/oauth.rb +58 -0
  60. data/lib/rdkafka/helpers/time.rb +14 -0
  61. data/lib/rdkafka/metadata.rb +115 -0
  62. data/lib/rdkafka/native_kafka.rb +139 -0
  63. data/lib/rdkafka/producer/delivery_handle.rb +48 -0
  64. data/lib/rdkafka/producer/delivery_report.rb +45 -0
  65. data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
  66. data/lib/rdkafka/producer.rb +492 -0
  67. data/lib/rdkafka/version.rb +7 -0
  68. data/lib/rdkafka.rb +54 -0
  69. data/renovate.json +92 -0
  70. data/spec/rdkafka/abstract_handle_spec.rb +117 -0
  71. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  72. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  73. data/spec/rdkafka/admin/create_topic_handle_spec.rb +54 -0
  74. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  75. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  76. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  77. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +54 -0
  78. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  79. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  80. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  81. data/spec/rdkafka/admin_spec.rb +769 -0
  82. data/spec/rdkafka/bindings_spec.rb +222 -0
  83. data/spec/rdkafka/callbacks_spec.rb +20 -0
  84. data/spec/rdkafka/config_spec.rb +258 -0
  85. data/spec/rdkafka/consumer/headers_spec.rb +73 -0
  86. data/spec/rdkafka/consumer/message_spec.rb +139 -0
  87. data/spec/rdkafka/consumer/partition_spec.rb +57 -0
  88. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +248 -0
  89. data/spec/rdkafka/consumer_spec.rb +1299 -0
  90. data/spec/rdkafka/error_spec.rb +95 -0
  91. data/spec/rdkafka/metadata_spec.rb +79 -0
  92. data/spec/rdkafka/native_kafka_spec.rb +130 -0
  93. data/spec/rdkafka/producer/delivery_handle_spec.rb +60 -0
  94. data/spec/rdkafka/producer/delivery_report_spec.rb +25 -0
  95. data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
  96. data/spec/rdkafka/producer/partitions_count_spec.rb +359 -0
  97. data/spec/rdkafka/producer_spec.rb +1234 -0
  98. data/spec/spec_helper.rb +181 -0
  99. metadata +244 -0
data/lib/rdkafka/error.rb
@@ -0,0 +1,201 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   # Base error class.
+   class BaseError < RuntimeError; end
+
+   # Error returned by the underlying rdkafka library.
+   class RdkafkaError < BaseError
+     # Empty hash reused as the default for details to avoid extra allocations
+     EMPTY_HASH = {}.freeze
+
+     # The underlying raw error response
+     # @return [Integer]
+     attr_reader :rdkafka_response
+
+     # Prefix to be used for the human-readable representation
+     # @return [String]
+     attr_reader :message_prefix
+
+     # Error message sent by the broker
+     # @return [String]
+     attr_reader :broker_message
+
+     # Optional details hash specific to a given error, or an empty hash if none or not supported
+     # @return [Hash]
+     attr_reader :details
+
+     class << self
+       def build_from_c(response_ptr, message_prefix = nil, broker_message: nil)
+         code = Rdkafka::Bindings.rd_kafka_error_code(response_ptr)
+
+         return false if code.zero?
+
+         message = broker_message || Rdkafka::Bindings.rd_kafka_err2str(code)
+         fatal = !Rdkafka::Bindings.rd_kafka_error_is_fatal(response_ptr).zero?
+         retryable = !Rdkafka::Bindings.rd_kafka_error_is_retriable(response_ptr).zero?
+         abortable = !Rdkafka::Bindings.rd_kafka_error_txn_requires_abort(response_ptr).zero?
+
+         Rdkafka::Bindings.rd_kafka_error_destroy(response_ptr)
+
+         new(
+           code,
+           message_prefix,
+           broker_message: message,
+           fatal: fatal,
+           retryable: retryable,
+           abortable: abortable
+         )
+       end
+
+       def build(response_ptr_or_code, message_prefix = nil, broker_message: nil)
+         case response_ptr_or_code
+         when Integer
+           return false if response_ptr_or_code.zero?
+
+           new(response_ptr_or_code, message_prefix, broker_message: broker_message)
+         when Bindings::Message
+           return false if response_ptr_or_code[:err].zero?
+
+           unless response_ptr_or_code[:payload].null?
+             message_prefix ||= response_ptr_or_code[:payload].read_string(response_ptr_or_code[:len])
+           end
+
+           details = if response_ptr_or_code[:rkt].null?
+             EMPTY_HASH
+           else
+             {
+               partition: response_ptr_or_code[:partition],
+               offset: response_ptr_or_code[:offset],
+               topic: Bindings.rd_kafka_topic_name(response_ptr_or_code[:rkt])
+             }.freeze
+           end
+           new(
+             response_ptr_or_code[:err],
+             message_prefix,
+             broker_message: broker_message,
+             details: details
+           )
+         else
+           build_from_c(response_ptr_or_code, message_prefix)
+         end
+       end
+
+       def validate!(response_ptr_or_code, message_prefix = nil, broker_message: nil)
+         error = build(response_ptr_or_code, message_prefix, broker_message: broker_message)
+         error ? raise(error) : false
+       end
+     end
+
+     # @private
+     def initialize(
+       response,
+       message_prefix=nil,
+       broker_message: nil,
+       fatal: false,
+       retryable: false,
+       abortable: false,
+       details: EMPTY_HASH
+     )
+       raise TypeError.new("Response has to be an integer") unless response.is_a? Integer
+       @rdkafka_response = response
+       @message_prefix = message_prefix
+       @broker_message = broker_message
+       @fatal = fatal
+       @retryable = retryable
+       @abortable = abortable
+       @details = details
+     end
+
+     # This error's code, for example `:partition_eof`, `:msg_size_too_large`.
+     # @return [Symbol]
+     def code
+       code = Rdkafka::Bindings.rd_kafka_err2name(@rdkafka_response).downcase
+       if code[0] == "_"
+         code[1..-1].to_sym
+       else
+         code.to_sym
+       end
+     end
+
+     # Human-readable representation of this error.
+     # @return [String]
+     def to_s
+       message_prefix_part = if message_prefix
+         "#{message_prefix} - "
+       else
+         ''
+       end
+
+       err_str = Rdkafka::Bindings.rd_kafka_err2str(@rdkafka_response)
+       base = "#{message_prefix_part}#{err_str} (#{code})"
+
+       return base if broker_message.nil?
+       return base if broker_message.empty?
+
+       "#{base}\n#{broker_message}"
+     end
+
+     # Whether this error indicates the partition is EOF.
+     # @return [Boolean]
+     def is_partition_eof?
+       code == :partition_eof
+     end
+
+     # Error comparison
+     def ==(another_error)
+       another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
+     end
+
+     def fatal?
+       @fatal
+     end
+
+     def retryable?
+       @retryable
+     end
+
+     def abortable?
+       @abortable
+     end
+   end
+
+   # Error with topic partition list returned by the underlying rdkafka library.
+   class RdkafkaTopicPartitionListError < RdkafkaError
+     # @return [TopicPartitionList]
+     attr_reader :topic_partition_list
+
+     # @private
+     def initialize(response, topic_partition_list, message_prefix=nil)
+       super(response, message_prefix)
+       @topic_partition_list = topic_partition_list
+     end
+   end
+
+   # Error class for public consumer method calls on a closed consumer.
+   class ClosedConsumerError < BaseError
+     def initialize(method)
+       super("Illegal call to #{method.to_s} on a closed consumer")
+     end
+   end
+
+   # Error class for public producer method calls on a closed producer.
+   class ClosedProducerError < BaseError
+     def initialize(method)
+       super("Illegal call to #{method.to_s} on a closed producer")
+     end
+   end
+
+   # Error class for public admin method calls on a closed admin.
+   class ClosedAdminError < BaseError
+     def initialize(method)
+       super("Illegal call to #{method.to_s} on a closed admin")
+     end
+   end
+
+   class ClosedInnerError < BaseError
+     def initialize
+       super("Illegal call to a closed inner librdkafka instance")
+     end
+   end
+ end
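
For orientation, a short usage sketch (not part of the package) of the error API above. The response code -185 is librdkafka's `RD_KAFKA_RESP_ERR__TIMED_OUT`; the exact strings shown are illustrative:

    # Integer responses: zero means "no error", so build returns false
    Rdkafka::RdkafkaError.build(0)  # => false

    error = Rdkafka::RdkafkaError.build(-185, "Metadata fetch")
    error.code     # => :timed_out (err2name yields "_TIMED_OUT"; the leading "_" is stripped)
    error.to_s     # => "Metadata fetch - Local: Timed out (timed_out)"
    error.details  # => {} (EMPTY_HASH, since no Bindings::Message was involved)

    # validate! raises instead of returning the error object
    Rdkafka::RdkafkaError.validate!(0)    # => false
    Rdkafka::RdkafkaError.validate!(-185) # raises Rdkafka::RdkafkaError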
data/lib/rdkafka/helpers/oauth.rb
@@ -0,0 +1,58 @@
+ module Rdkafka
+   module Helpers
+
+     module OAuth
+
+       # Set the OAuthBearer token
+       #
+       # @param token [String] the mandatory token value to set, often (but not necessarily) a JWS compact serialization as per https://tools.ietf.org/html/rfc7515#section-3.1.
+       # @param lifetime_ms [Integer] when the token expires, in terms of the number of milliseconds since the epoch. See https://currentmillis.com/.
+       # @param principal_name [String] the mandatory Kafka principal name associated with the token.
+       # @param extensions [Hash] optional SASL extensions key-value pairs to be communicated to the broker as additional key-value pairs during the initial client response as per https://tools.ietf.org/html/rfc7628#section-3.1.
+       # @return [Integer] 0 on success
+       def oauthbearer_set_token(token:, lifetime_ms:, principal_name:, extensions: nil)
+         error_buffer = FFI::MemoryPointer.from_string(" " * 256)
+
+         response = @native_kafka.with_inner do |inner|
+           Rdkafka::Bindings.rd_kafka_oauthbearer_set_token(
+             inner, token, lifetime_ms, principal_name,
+             flatten_extensions(extensions), extension_size(extensions), error_buffer, 256
+           )
+         end
+
+         return response if response.zero?
+
+         oauthbearer_set_token_failure("Failed to set token: #{error_buffer.read_string}")
+
+         response
+       end
+
+       # Marks a failed OAuth token acquisition in librdkafka
+       #
+       # @param reason [String] human-readable error reason for failing to acquire the token
+       def oauthbearer_set_token_failure(reason)
+         @native_kafka.with_inner do |inner|
+           Rdkafka::Bindings.rd_kafka_oauthbearer_set_token_failure(
+             inner,
+             reason
+           )
+         end
+       end
+
+       private
+
+       # Flatten the extensions hash into a string according to the spec, https://datatracker.ietf.org/doc/html/rfc7628#section-3.1
+       def flatten_extensions(extensions)
+         return nil unless extensions
+         "\x01#{extensions.map { |e| e.join("=") }.join("\x01")}"
+       end
+
+       # extension_size is the number of keys + values, which should be a non-negative even number
+       # https://github.com/confluentinc/librdkafka/blob/master/src/rdkafka_sasl_oauthbearer.c#L327-L347
+       def extension_size(extensions)
+         return 0 unless extensions
+         extensions.size * 2
+       end
+     end
+   end
+ end
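
To make the private helpers concrete, here is what they produce for a hypothetical extensions hash (the keys are made up for the example):

    extensions = { "logicalCluster" => "lkc-123", "identityPoolId" => "pool-1" }

    flatten_extensions(extensions)
    # => "\x01logicalCluster=lkc-123\x01identityPoolId=pool-1"
    #    CTRL-A separated key=value pairs, per RFC 7628 section 3.1

    extension_size(extensions)
    # => 4 (two keys plus two values, always an even number)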
data/lib/rdkafka/helpers/time.rb
@@ -0,0 +1,14 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   # Namespace for some small utilities used in multiple components
+   module Helpers
+     # Time related methods used across Karafka
+     module Time
+       # @return [Float] current monotonic time in seconds with microsecond precision
+       def monotonic_now
+         ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+       end
+     end
+   end
+ end
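
A minimal usage sketch; `Stopwatch` is a hypothetical includer, shown only to illustrate why durations are measured with a monotonic clock:

    class Stopwatch
      include Rdkafka::Helpers::Time
    end

    watch = Stopwatch.new
    started = watch.monotonic_now
    sleep(0.25)
    watch.monotonic_now - started # => ~0.25, immune to NTP adjustments and manual clock changes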
data/lib/rdkafka/metadata.rb
@@ -0,0 +1,115 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   class Metadata
+     attr_reader :brokers, :topics
+
+     # Errors upon which we retry the metadata fetch
+     RETRIED_ERRORS = %i[
+       timed_out
+       leader_not_available
+     ].freeze
+
+     private_constant :RETRIED_ERRORS
+
+     def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
+       attempt ||= 0
+       attempt += 1
+
+       native_topic = if topic_name
+         Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
+       end
+
+       ptr = FFI::MemoryPointer.new(:pointer)
+
+       # If topic_flag is 1, we request info about *all* topics in the cluster. If topic_flag is 0,
+       # we only request info about locally known topics (or a single topic if one is passed in).
+       topic_flag = topic_name.nil? ? 1 : 0
+
+       # Retrieve the Metadata
+       result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, timeout_ms)
+
+       Rdkafka::RdkafkaError.validate!(result)
+
+       metadata_from_native(ptr.read_pointer)
+     rescue ::Rdkafka::RdkafkaError => e
+       raise unless RETRIED_ERRORS.include?(e.code)
+       raise if attempt > 10
+
+       backoff_factor = 2**attempt
+       timeout = backoff_factor * 0.1
+
+       sleep(timeout)
+
+       retry
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
+       Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
+     end
+
+     private
+
+     def metadata_from_native(ptr)
+       metadata = Metadata.new(ptr)
+       @brokers = Array.new(metadata[:brokers_count]) do |i|
+         BrokerMetadata.new(metadata[:brokers_metadata] + (i * BrokerMetadata.size)).to_h
+       end
+
+       @topics = Array.new(metadata[:topics_count]) do |i|
+         topic = TopicMetadata.new(metadata[:topics_metadata] + (i * TopicMetadata.size))
+
+         RdkafkaError.validate!(topic[:rd_kafka_resp_err])
+
+         partitions = Array.new(topic[:partition_count]) do |j|
+           partition = PartitionMetadata.new(topic[:partitions_metadata] + (j * PartitionMetadata.size))
+           RdkafkaError.validate!(partition[:rd_kafka_resp_err])
+           partition.to_h
+         end
+         topic.to_h.merge!(partitions: partitions)
+       end
+     end
+
+     class CustomFFIStruct < FFI::Struct
+       def to_h
+         members.each_with_object({}) do |mem, hsh|
+           val = self.[](mem)
+           next if val.is_a?(FFI::Pointer) || mem == :rd_kafka_resp_err
+
+           hsh[mem] = self.[](mem)
+         end
+       end
+     end
+
+     class Metadata < CustomFFIStruct
+       layout :brokers_count, :int,
+              :brokers_metadata, :pointer,
+              :topics_count, :int,
+              :topics_metadata, :pointer,
+              :broker_id, :int32,
+              :broker_name, :string
+     end
+
+     class BrokerMetadata < CustomFFIStruct
+       layout :broker_id, :int32,
+              :broker_name, :string,
+              :broker_port, :int
+     end
+
+     class TopicMetadata < CustomFFIStruct
+       layout :topic_name, :string,
+              :partition_count, :int,
+              :partitions_metadata, :pointer,
+              :rd_kafka_resp_err, :int
+     end
+
+     class PartitionMetadata < CustomFFIStruct
+       layout :partition_id, :int32,
+              :rd_kafka_resp_err, :int,
+              :leader, :int32,
+              :replica_count, :int,
+              :replicas, :pointer,
+              :in_sync_replica_brokers, :int,
+              :isrs, :pointer
+     end
+   end
+ end
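
The rescue clause above sleeps `(2**attempt) * 0.1` seconds between retries, so the schedule for the ten permitted attempts works out to:

    (1..10).map { |attempt| (2**attempt) * 0.1 }
    # => [0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 51.2, 102.4]
    # roughly 204 seconds of cumulative back-off before the error is re-raised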
data/lib/rdkafka/native_kafka.rb
@@ -0,0 +1,139 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   # @private
+   # A wrapper around a native kafka that polls and cleanly exits
+   class NativeKafka
+     def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: 100)
+       @inner = inner
+       @opaque = opaque
+       # Lock around external access
+       @access_mutex = Mutex.new
+       # Lock around internal polling
+       @poll_mutex = Mutex.new
+       # Lock around decrementing the operations in progress counter
+       # We have two mutexes - one for increment (`@access_mutex`) and one for decrement
+       # (`@decrement_mutex`) - because they serve different purposes:
+       #
+       # - `@access_mutex` allows us to lock the execution and make sure that any operation within
+       #   the `#synchronize` is the only one running and that there are no other running
+       #   operations.
+       # - `@decrement_mutex` ensures that our decrement operation is thread-safe for any Ruby
+       #   implementation.
+       #
+       # We do not use the same mutex, because it could create a deadlock when an already
+       # incremented operation cannot decrement because `@access_mutex` is now owned by a different
+       # thread in a synchronized mode and the synchronized mode is waiting on the decrement.
+       @decrement_mutex = Mutex.new
+       # Counter for operations in progress using inner
+       @operations_in_progress = 0
+
+       @run_polling_thread = run_polling_thread
+
+       @timeout_ms = timeout_ms
+
+       start if auto_start
+
+       @closing = false
+     end
+
+     def start
+       synchronize do
+         return if @started
+
+         @started = true
+
+         # Trigger initial poll to make sure the oauthbearer callback and other initial callbacks are handled
+         Rdkafka::Bindings.rd_kafka_poll(@inner, 0)
+
+         if @run_polling_thread
+           # Start thread to poll client for delivery callbacks,
+           # not used in consumer.
+           @polling_thread = Thread.new do
+             loop do
+               @poll_mutex.synchronize do
+                 Rdkafka::Bindings.rd_kafka_poll(@inner, @timeout_ms)
+               end
+
+               # Exit thread if closing and the poll queue is empty
+               if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(@inner) == 0
+                 break
+               end
+             end
+           end
+
+           @polling_thread.name = "rdkafka.native_kafka##{Rdkafka::Bindings.rd_kafka_name(@inner).gsub('rdkafka', '')}"
+           @polling_thread.abort_on_exception = true
+           @polling_thread[:closing] = false
+         end
+       end
+     end
+
+     def with_inner
+       if @access_mutex.owned?
+         @operations_in_progress += 1
+       else
+         @access_mutex.synchronize { @operations_in_progress += 1 }
+       end
+
+       @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
+     ensure
+       @decrement_mutex.synchronize { @operations_in_progress -= 1 }
+     end
+
+     def synchronize(&block)
+       @access_mutex.synchronize do
+         # Wait for any commands using the inner to finish.
+         # This can take a while on blocking operations like polling, but it is essential not to
+         # proceed with certain types of operations, like resource destruction, as doing so can
+         # cause the process to hang or crash
+         sleep(0.01) until @operations_in_progress.zero?
+
+         with_inner(&block)
+       end
+     end
+
+     def finalizer
+       ->(_) { close }
+     end
+
+     def closed?
+       @closing || @inner.nil?
+     end
+
+     def close(object_id=nil)
+       return if closed?
+
+       synchronize do
+         # Indicate to the outside world that we are closing
+         @closing = true
+
+         if @polling_thread
+           # Indicate to the polling thread that we're closing
+           @polling_thread[:closing] = true
+
+           # Wait for the polling thread to finish up;
+           # in practice this can be aborted if this
+           # code runs from a finalizer.
+           @polling_thread.join
+         end
+
+         # Destroy the client after locking both mutexes
+         @poll_mutex.lock
+
+         # This check prevents a race condition where we would enter close in two threads,
+         # and after the primary one that held the lock finished and unlocked, ours would
+         # continue to run and try to destroy inner twice
+         return unless @inner
+
+         yield if block_given?
+
+         Rdkafka::Bindings.rd_kafka_destroy(@inner)
+         @inner = nil
+         @opaque = nil
+         @poll_mutex.unlock
+         @poll_mutex = nil
+       end
+     end
+   end
+ end
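
A hedged sketch of the access pattern this class encodes; `inner` stands for a native client handle obtained elsewhere (e.g. via the bindings):

    native = Rdkafka::NativeKafka.new(inner, run_polling_thread: true, opaque: nil)

    # Short-lived concurrent operations: with_inner bumps the in-progress
    # counter around the yield, so many of these may run at once
    native.with_inner { |handle| Rdkafka::Bindings.rd_kafka_outq_len(handle) }

    # Exclusive operations: synchronize waits for in-flight with_inner calls
    # to drain before yielding, e.g. prior to destroying the client
    native.synchronize { |handle| Rdkafka::Bindings.rd_kafka_name(handle) }

    native.close # joins the polling thread, then destroys the native client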
data/lib/rdkafka/producer/delivery_handle.rb
@@ -0,0 +1,48 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   class Producer
+     # Handle to wait for a delivery report which is returned when
+     # producing a message.
+     class DeliveryHandle < Rdkafka::AbstractHandle
+       layout :pending, :bool,
+              :response, :int,
+              :partition, :int,
+              :offset, :int64,
+              :topic_name, :pointer
+
+       # @return [Object, nil] label set during message production or nil by default
+       attr_accessor :label
+
+       # @return [String] topic where we are trying to send the message
+       # We use this instead of reading from `topic_name` pointer to save on memory allocations
+       attr_accessor :topic
+
+       # @return [String] the name of the operation (e.g. "delivery")
+       def operation_name
+         "delivery"
+       end
+
+       # @return [DeliveryReport] a report on the delivery of the message
+       def create_result
+         if self[:response] == 0
+           DeliveryReport.new(
+             self[:partition],
+             self[:offset],
+             topic,
+             nil,
+             label
+           )
+         else
+           DeliveryReport.new(
+             self[:partition],
+             self[:offset],
+             topic,
+             Rdkafka::RdkafkaError.build(self[:response]),
+             label
+           )
+         end
+       end
+     end
+   end
+ end
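
Illustrative only, assuming a producer built via `Rdkafka::Config#producer`; `#wait` comes from `Rdkafka::AbstractHandle` (also in this release), and the `label:` argument mirrors the accessor above:

    handle = producer.produce(topic: "events", payload: "hello", label: :greeting)

    report = handle.wait(max_wait_timeout: 5)
    report.partition # => e.g. 0
    report.offset    # => e.g. 42
    report.label     # => :greeting, carried over from the handle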
data/lib/rdkafka/producer/delivery_report.rb
@@ -0,0 +1,45 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   class Producer
+     # Delivery report for a produced message.
+     class DeliveryReport
+       # The partition this message was produced to.
+       # @return [Integer]
+       attr_reader :partition
+
+       # The offset of the produced message.
+       # @return [Integer]
+       attr_reader :offset
+
+       # The name of the topic this message was produced to, or nil in case delivery failed and
+       # we were not able to get the topic reference
+       # @return [String, nil]
+       attr_reader :topic_name
+
+       # Error in case one happened during produce.
+       # @return [RdkafkaError, nil]
+       attr_reader :error
+
+       # @return [Object, nil] label set during message production or nil by default
+       attr_reader :label
+
+       # We alias `#topic_name` as `#topic` to make this consistent with `Consumer::Message`,
+       # where the topic name is available via the `#topic` method. That way we have a consistent
+       # name that is present in both places
+       #
+       # We do not remove the original `#topic_name` for backwards compatibility
+       alias topic topic_name
+
+       private
+
+       def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
+         @partition = partition
+         @offset = offset
+         @topic_name = topic_name
+         @error = error
+         @label = label
+       end
+     end
+   end
+ end
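
A sketch of where these readers typically get used: the producer's delivery callback receives a `DeliveryReport` (producer construction elided):

    producer.delivery_callback = lambda do |report|
      if report.error
        # on failure, #error carries the Rdkafka::RdkafkaError and #topic may be nil
        warn("delivery failed: #{report.error}")
      else
        puts("delivered to #{report.topic} [#{report.partition}] @ #{report.offset}")
      end
    end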