rdkafka 0.22.0.beta1-x86_64-linux-gnu

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102) hide show
  1. checksums.yaml +7 -0
  2. data/.github/CODEOWNERS +3 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci_linux_x86_64_gnu.yml +249 -0
  5. data/.github/workflows/ci_linux_x86_64_musl.yml +205 -0
  6. data/.github/workflows/ci_macos_arm64.yml +306 -0
  7. data/.github/workflows/push_linux_x86_64_gnu.yml +64 -0
  8. data/.github/workflows/push_linux_x86_64_musl.yml +77 -0
  9. data/.github/workflows/push_macos_arm64.yml +54 -0
  10. data/.github/workflows/push_ruby.yml +37 -0
  11. data/.github/workflows/verify-action-pins.yml +16 -0
  12. data/.gitignore +14 -0
  13. data/.rspec +2 -0
  14. data/.ruby-gemset +1 -0
  15. data/.ruby-version +1 -0
  16. data/.yardopts +2 -0
  17. data/CHANGELOG.md +247 -0
  18. data/Gemfile +5 -0
  19. data/MIT-LICENSE +22 -0
  20. data/README.md +178 -0
  21. data/Rakefile +96 -0
  22. data/docker-compose.yml +25 -0
  23. data/ext/README.md +19 -0
  24. data/ext/Rakefile +131 -0
  25. data/ext/build_common.sh +361 -0
  26. data/ext/build_linux_x86_64_gnu.sh +306 -0
  27. data/ext/build_linux_x86_64_musl.sh +763 -0
  28. data/ext/build_macos_arm64.sh +550 -0
  29. data/ext/librdkafka.so +0 -0
  30. data/lib/rdkafka/abstract_handle.rb +116 -0
  31. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  32. data/lib/rdkafka/admin/config_binding_result.rb +30 -0
  33. data/lib/rdkafka/admin/config_resource_binding_result.rb +18 -0
  34. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  35. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  36. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  37. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  38. data/lib/rdkafka/admin/create_topic_handle.rb +29 -0
  39. data/lib/rdkafka/admin/create_topic_report.rb +24 -0
  40. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  41. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  42. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  43. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  44. data/lib/rdkafka/admin/delete_topic_handle.rb +29 -0
  45. data/lib/rdkafka/admin/delete_topic_report.rb +24 -0
  46. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  47. data/lib/rdkafka/admin/describe_acl_report.rb +24 -0
  48. data/lib/rdkafka/admin/describe_configs_handle.rb +33 -0
  49. data/lib/rdkafka/admin/describe_configs_report.rb +54 -0
  50. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +33 -0
  51. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +54 -0
  52. data/lib/rdkafka/admin.rb +833 -0
  53. data/lib/rdkafka/bindings.rb +566 -0
  54. data/lib/rdkafka/callbacks.rb +415 -0
  55. data/lib/rdkafka/config.rb +398 -0
  56. data/lib/rdkafka/consumer/headers.rb +79 -0
  57. data/lib/rdkafka/consumer/message.rb +86 -0
  58. data/lib/rdkafka/consumer/partition.rb +51 -0
  59. data/lib/rdkafka/consumer/topic_partition_list.rb +169 -0
  60. data/lib/rdkafka/consumer.rb +653 -0
  61. data/lib/rdkafka/error.rb +101 -0
  62. data/lib/rdkafka/helpers/oauth.rb +58 -0
  63. data/lib/rdkafka/helpers/time.rb +14 -0
  64. data/lib/rdkafka/metadata.rb +115 -0
  65. data/lib/rdkafka/native_kafka.rb +139 -0
  66. data/lib/rdkafka/producer/delivery_handle.rb +40 -0
  67. data/lib/rdkafka/producer/delivery_report.rb +46 -0
  68. data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
  69. data/lib/rdkafka/producer.rb +430 -0
  70. data/lib/rdkafka/version.rb +7 -0
  71. data/lib/rdkafka.rb +54 -0
  72. data/rdkafka.gemspec +65 -0
  73. data/renovate.json +92 -0
  74. data/spec/rdkafka/abstract_handle_spec.rb +117 -0
  75. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  76. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  77. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  78. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  79. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  80. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  81. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  82. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  83. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  84. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  85. data/spec/rdkafka/admin_spec.rb +770 -0
  86. data/spec/rdkafka/bindings_spec.rb +223 -0
  87. data/spec/rdkafka/callbacks_spec.rb +20 -0
  88. data/spec/rdkafka/config_spec.rb +258 -0
  89. data/spec/rdkafka/consumer/headers_spec.rb +73 -0
  90. data/spec/rdkafka/consumer/message_spec.rb +139 -0
  91. data/spec/rdkafka/consumer/partition_spec.rb +57 -0
  92. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +248 -0
  93. data/spec/rdkafka/consumer_spec.rb +1274 -0
  94. data/spec/rdkafka/error_spec.rb +89 -0
  95. data/spec/rdkafka/metadata_spec.rb +79 -0
  96. data/spec/rdkafka/native_kafka_spec.rb +130 -0
  97. data/spec/rdkafka/producer/delivery_handle_spec.rb +45 -0
  98. data/spec/rdkafka/producer/delivery_report_spec.rb +25 -0
  99. data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
  100. data/spec/rdkafka/producer_spec.rb +1052 -0
  101. data/spec/spec_helper.rb +195 -0
  102. metadata +276 -0
@@ -0,0 +1,101 @@
1
# frozen_string_literal: true

module Rdkafka
  # Base error class.
  class BaseError < RuntimeError; end

  # Error returned by the underlying rdkafka library.
  class RdkafkaError < BaseError
    # The underlying raw error response
    # @return [Integer]
    attr_reader :rdkafka_response

    # Prefix to be used for human readable representation
    # @return [String]
    attr_reader :message_prefix

    # Error message sent by the broker
    # @return [String]
    attr_reader :broker_message

    # @private
    # @param response [Integer] raw librdkafka error code
    # @param message_prefix [String, nil] optional prefix used by {#to_s}
    # @param broker_message [String, nil] optional error message sent by the broker
    # @raise [TypeError] when the response is not an Integer
    def initialize(response, message_prefix = nil, broker_message: nil)
      # Guard early: err2name/err2str below require an integer error code
      raise TypeError, "Response has to be an integer" unless response.is_a?(Integer)

      @rdkafka_response = response
      @message_prefix = message_prefix
      @broker_message = broker_message
    end

    # This error's code, for example `:partition_eof`, `:msg_size_too_large`.
    # @return [Symbol]
    def code
      code = Rdkafka::Bindings.rd_kafka_err2name(@rdkafka_response).downcase
      # Internal librdkafka errors are prefixed with an underscore, which we strip
      code.start_with?("_") ? code[1..].to_sym : code.to_sym
    end

    # Human readable representation of this error.
    # @return [String]
    def to_s
      message_prefix_part = message_prefix ? "#{message_prefix} - " : ''
      "#{message_prefix_part}#{Rdkafka::Bindings.rd_kafka_err2str(@rdkafka_response)} (#{code})"
    end

    # Whether this error indicates the partition is EOF.
    # @return [Boolean]
    def is_partition_eof?
      code == :partition_eof
    end

    # Error comparison - errors are considered equal when they render the same message.
    #
    # @param another_error [Object] object to compare against
    # @return [Boolean]
    def ==(another_error)
      another_error.is_a?(self.class) && (to_s == another_error.to_s)
    end
  end

  # Error with topic partition list returned by the underlying rdkafka library.
  class RdkafkaTopicPartitionListError < RdkafkaError
    # @return [TopicPartitionList]
    attr_reader :topic_partition_list

    # @private
    # @param response [Integer] raw librdkafka error code
    # @param topic_partition_list [TopicPartitionList] list this error relates to
    # @param message_prefix [String, nil] optional prefix used by {#to_s}
    def initialize(response, topic_partition_list, message_prefix = nil)
      super(response, message_prefix)
      @topic_partition_list = topic_partition_list
    end
  end

  # Error class for public consumer method calls on a closed consumer.
  class ClosedConsumerError < BaseError
    def initialize(method)
      super("Illegal call to #{method} on a closed consumer")
    end
  end

  # Error class for public producer method calls on a closed producer.
  class ClosedProducerError < BaseError
    def initialize(method)
      super("Illegal call to #{method} on a closed producer")
    end
  end

  # Error class for public admin method calls on a closed admin.
  class ClosedAdminError < BaseError
    def initialize(method)
      super("Illegal call to #{method} on a closed admin")
    end
  end

  # Error class for calls made on an already closed inner librdkafka instance.
  class ClosedInnerError < BaseError
    def initialize
      super("Illegal call to a closed inner librdkafka instance")
    end
  end
end
@@ -0,0 +1,58 @@
1
# frozen_string_literal: true

module Rdkafka
  module Helpers
    # Mixin with OAuthBearer token management methods shared by the client types
    # that support SASL/OAUTHBEARER authentication.
    module OAuth
      # Size (in bytes) of the buffer librdkafka fills with a human readable
      # error string when setting the token fails
      ERROR_BUFFER_SIZE = 256

      private_constant :ERROR_BUFFER_SIZE

      # Set the OAuthBearer token
      #
      # @param token [String] the mandatory token value to set, often (but not necessarily) a JWS compact serialization as per https://tools.ietf.org/html/rfc7515#section-3.1.
      # @param lifetime_ms [Integer] when the token expires, in terms of the number of milliseconds since the epoch. See https://currentmillis.com/.
      # @param principal_name [String] the mandatory Kafka principal name associated with the token.
      # @param extensions [Hash] optional SASL extensions key-value pairs to be communicated to the broker as additional key-value pairs during the initial client response as per https://tools.ietf.org/html/rfc7628#section-3.1.
      # @return [Integer] 0 on success
      def oauthbearer_set_token(token:, lifetime_ms:, principal_name:, extensions: nil)
        error_buffer = FFI::MemoryPointer.from_string(" " * ERROR_BUFFER_SIZE)

        response = @native_kafka.with_inner do |inner|
          Rdkafka::Bindings.rd_kafka_oauthbearer_set_token(
            inner, token, lifetime_ms, principal_name,
            flatten_extensions(extensions), extension_size(extensions),
            error_buffer, ERROR_BUFFER_SIZE
          )
        end

        return response if response.zero?

        # On failure we propagate the librdkafka-provided reason so consumers of
        # the oauthbearer error callback get a meaningful message
        oauthbearer_set_token_failure("Failed to set token: #{error_buffer.read_string}")

        response
      end

      # Marks failed oauth token acquire in librdkafka
      #
      # @param reason [String] human readable error reason for failing to acquire token
      def oauthbearer_set_token_failure(reason)
        @native_kafka.with_inner do |inner|
          Rdkafka::Bindings.rd_kafka_oauthbearer_set_token_failure(
            inner,
            reason
          )
        end
      end

      private

      # Flatten the extensions hash into a string according to the spec, https://datatracker.ietf.org/doc/html/rfc7628#section-3.1
      def flatten_extensions(extensions)
        return nil unless extensions
        "\x01#{extensions.map { |e| e.join("=") }.join("\x01")}"
      end

      # extension_size is the number of keys + values which should be a non-negative even number
      # https://github.com/confluentinc/librdkafka/blob/master/src/rdkafka_sasl_oauthbearer.c#L327-L347
      def extension_size(extensions)
        return 0 unless extensions
        extensions.size * 2
      end
    end
  end
end
@@ -0,0 +1,14 @@
1
# frozen_string_literal: true

module Rdkafka
  # Namespace for some small utilities used in multiple components
  module Helpers
    # Time related methods used across Karafka
    module Time
      # Reads the monotonic clock, which is immune to system clock adjustments
      # and therefore suitable for measuring elapsed time.
      #
      # @return [Float] current monotonic time in seconds with microsecond precision
      def monotonic_now
        # :float_second is the default unit; stated explicitly for clarity
        ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :float_second)
      end
    end
  end
end
@@ -0,0 +1,115 @@
1
# frozen_string_literal: true

module Rdkafka
  # Fetches and exposes cluster metadata (brokers and topics) from librdkafka.
  class Metadata
    # @return [Array<Hash>] brokers / topics discovered during the fetch
    attr_reader :brokers, :topics

    # Errors upon which we retry the metadata fetch
    RETRIED_ERRORS = %i[
      timed_out
      leader_not_available
    ].freeze

    private_constant :RETRIED_ERRORS

    # Fetches metadata synchronously and populates {#brokers} and {#topics}.
    #
    # @param native_client [FFI::Pointer] native rd_kafka_t handle
    # @param topic_name [String, nil] when given, only this topic is queried;
    #   when nil, all topics in the cluster are queried
    # @param timeout_ms [Integer] how long to wait for the metadata request
    # @raise [Rdkafka::RdkafkaError] when the fetch fails after all retries
    def initialize(native_client, topic_name = nil, timeout_ms = 2_000)
      # `||=` keeps the counter across `retry` re-executions of this body,
      # since locals survive a retry; this gives us a bounded attempt count
      attempt ||= 0
      attempt += 1

      native_topic = if topic_name
        Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
      end

      # Out-parameter that librdkafka fills with a pointer to the metadata struct
      ptr = FFI::MemoryPointer.new(:pointer)

      # If topic_flag is 1, we request info about *all* topics in the cluster. If topic_flag is 0,
      # we only request info about locally known topics (or a single topic if one is passed in).
      topic_flag = topic_name.nil? ? 1 : 0

      # Retrieve the Metadata
      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, timeout_ms)

      # Error Handling
      raise Rdkafka::RdkafkaError.new(result) unless result.zero?

      metadata_from_native(ptr.read_pointer)
    rescue ::Rdkafka::RdkafkaError => e
      # Only transient errors are retried; anything else propagates immediately
      raise unless RETRIED_ERRORS.include?(e.code)
      raise if attempt > 10

      # Exponential backoff: 0.2s, 0.4s, 0.8s, ... per attempt
      backoff_factor = 2**attempt
      timeout = backoff_factor * 0.1

      sleep(timeout)

      retry
    ensure
      Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
      # NOTE(review): this runs even when the fetch failed; in that case `ptr`
      # still holds its zero-initialized (NULL) pointer — presumably
      # rd_kafka_metadata_destroy tolerates NULL, confirm against librdkafka
      Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
    end

    private

    # Walks the native metadata structs and builds plain Ruby hashes for
    # brokers and topics (with nested partition hashes).
    #
    # @param ptr [FFI::Pointer] pointer to the native rd_kafka_metadata_t
    # @raise [Rdkafka::RdkafkaError] when a topic or partition carries an error
    def metadata_from_native(ptr)
      metadata = Metadata.new(ptr)
      @brokers = Array.new(metadata[:brokers_count]) do |i|
        # Pointer arithmetic: the native structs are laid out as a C array
        BrokerMetadata.new(metadata[:brokers_metadata] + (i * BrokerMetadata.size)).to_h
      end

      @topics = Array.new(metadata[:topics_count]) do |i|
        topic = TopicMetadata.new(metadata[:topics_metadata] + (i * TopicMetadata.size))
        raise Rdkafka::RdkafkaError.new(topic[:rd_kafka_resp_err]) unless topic[:rd_kafka_resp_err].zero?

        partitions = Array.new(topic[:partition_count]) do |j|
          partition = PartitionMetadata.new(topic[:partitions_metadata] + (j * PartitionMetadata.size))
          raise Rdkafka::RdkafkaError.new(partition[:rd_kafka_resp_err]) unless partition[:rd_kafka_resp_err].zero?
          partition.to_h
        end
        topic.to_h.merge!(partitions: partitions)
      end
    end

    # FFI::Struct with a #to_h that skips raw pointers and the error field,
    # so the produced hashes contain only plain values.
    class CustomFFIStruct < FFI::Struct
      def to_h
        members.each_with_object({}) do |mem, hsh|
          val = self.[](mem)
          next if val.is_a?(FFI::Pointer) || mem == :rd_kafka_resp_err

          hsh[mem] = self.[](mem)
        end
      end
    end

    # Mirrors librdkafka's rd_kafka_metadata_t layout
    class Metadata < CustomFFIStruct
      layout :brokers_count, :int,
             :brokers_metadata, :pointer,
             :topics_count, :int,
             :topics_metadata, :pointer,
             :broker_id, :int32,
             :broker_name, :string
    end

    # Mirrors librdkafka's rd_kafka_metadata_broker_t layout
    class BrokerMetadata < CustomFFIStruct
      layout :broker_id, :int32,
             :broker_name, :string,
             :broker_port, :int
    end

    # Mirrors librdkafka's rd_kafka_metadata_topic_t layout
    class TopicMetadata < CustomFFIStruct
      layout :topic_name, :string,
             :partition_count, :int,
             :partitions_metadata, :pointer,
             :rd_kafka_resp_err, :int
    end

    # Mirrors librdkafka's rd_kafka_metadata_partition_t layout
    class PartitionMetadata < CustomFFIStruct
      layout :partition_id, :int32,
             :rd_kafka_resp_err, :int,
             :leader, :int32,
             :replica_count, :int,
             :replicas, :pointer,
             :in_sync_replica_brokers, :int,
             :isrs, :pointer
    end
  end
end
@@ -0,0 +1,139 @@
1
# frozen_string_literal: true

module Rdkafka
  # @private
  # A wrapper around a native kafka that polls and cleanly exits
  class NativeKafka
    # @param inner [FFI::Pointer] native rd_kafka_t handle to wrap
    # @param run_polling_thread [Boolean] when true, a background thread keeps
    #   polling the client (used for producers/admin; consumers poll themselves)
    # @param opaque [Object] reference held so it is not garbage collected while
    #   the native client may still call back into it
    # @param auto_start [Boolean] when true (default), {#start} is invoked here
    # @param timeout_ms [Integer] poll timeout used by the polling thread
    def initialize(inner, run_polling_thread:, opaque:, auto_start: true, timeout_ms: 100)
      @inner = inner
      @opaque = opaque
      # Lock around external access
      @access_mutex = Mutex.new
      # Lock around internal polling
      @poll_mutex = Mutex.new
      # Lock around decrementing the operations in progress counter
      # We have two mutexes - one for increment (`@access_mutex`) and one for decrement mutex
      # because they serve different purposes:
      #
      # - `@access_mutex` allows us to lock the execution and make sure that any operation within
      #   the `#synchronize` is the only one running and that there are no other running
      #   operations.
      # - `@decrement_mutex` ensures, that our decrement operation is thread-safe for any Ruby
      #   implementation.
      #
      # We do not use the same mutex, because it could create a deadlock when an already
      # incremented operation cannot decrement because `@access_lock` is now owned by a different
      # thread in a synchronized mode and the synchronized mode is waiting on the decrement.
      @decrement_mutex = Mutex.new
      # counter for operations in progress using inner
      @operations_in_progress = 0

      @run_polling_thread = run_polling_thread

      @timeout_ms = timeout_ms

      start if auto_start

      # NOTE(review): assigned after `start`; during start #closed? reads this
      # as nil (falsey) which behaves the same as false — appears intentional
      @closing = false
    end

    # Starts the wrapper: runs an initial poll and, when configured, spawns the
    # background polling thread. Idempotent — subsequent calls are no-ops.
    def start
      synchronize do
        return if @started

        @started = true

        # Trigger initial poll to make sure oauthbearer cb and other initial cb are handled
        Rdkafka::Bindings.rd_kafka_poll(@inner, 0)

        if @run_polling_thread
          # Start thread to poll client for delivery callbacks,
          # not used in consumer.
          @polling_thread = Thread.new do
            loop do
              @poll_mutex.synchronize do
                Rdkafka::Bindings.rd_kafka_poll(@inner, @timeout_ms)
              end

              # Exit thread if closing and the poll queue is empty
              if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(@inner) == 0
                break
              end
            end
          end

          @polling_thread.name = "rdkafka.native_kafka##{Rdkafka::Bindings.rd_kafka_name(@inner).gsub('rdkafka', '')}"
          @polling_thread.abort_on_exception = true
          @polling_thread[:closing] = false
        end
      end
    end

    # Runs the given block with the native handle while tracking it as an
    # operation in progress, so {#synchronize} can wait for it to finish.
    #
    # @yieldparam inner [FFI::Pointer] the native rd_kafka_t handle
    # @raise [ClosedInnerError] when the native handle was already destroyed
    def with_inner
      # When called from within #synchronize we already own the access mutex,
      # so locking it again would deadlock — just bump the counter directly
      if @access_mutex.owned?
        @operations_in_progress += 1
      else
        @access_mutex.synchronize { @operations_in_progress += 1 }
      end

      @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
    ensure
      @decrement_mutex.synchronize { @operations_in_progress -= 1 }
    end

    # Runs the block as the sole operation on the native handle, after waiting
    # for all in-flight {#with_inner} operations to drain.
    def synchronize(&block)
      @access_mutex.synchronize do
        # Wait for any commands using the inner to finish
        # This can take a while on blocking operations like polling but is essential not to proceed
        # with certain types of operations like resources destruction as it can cause the process
        # to hang or crash
        sleep(0.01) until @operations_in_progress.zero?

        with_inner(&block)
      end
    end

    # @return [Proc] finalizer suitable for ObjectSpace.define_finalizer that
    #   closes this wrapper when the owning object is garbage collected
    def finalizer
      ->(_) { close }
    end

    # @return [Boolean] true when closing has begun or the handle is destroyed
    def closed?
      @closing || @inner.nil?
    end

    # Stops polling, destroys the native client and releases resources.
    #
    # @param object_id [Object] ignored; present so this can be used directly
    #   as a finalizer callback
    def close(object_id=nil)
      return if closed?

      synchronize do
        # Indicate to the outside world that we are closing
        @closing = true

        if @polling_thread
          # Indicate to polling thread that we're closing
          @polling_thread[:closing] = true

          # Wait for the polling thread to finish up,
          # this can be aborted in practice if this
          # code runs from a finalizer.
          @polling_thread.join
        end

        # Destroy the client after locking both mutexes
        @poll_mutex.lock

        # This check prevents a race condition, where we would enter the close in two threads
        # and after unlocking the primary one that hold the lock but finished, ours would be unlocked
        # and would continue to run, trying to destroy inner twice
        # NOTE(review): on this early-return path @poll_mutex stays locked; the
        # winning thread nils it out anyway, but confirm no one re-locks it
        return unless @inner

        yield if block_given?

        Rdkafka::Bindings.rd_kafka_destroy(@inner)
        @inner = nil
        @opaque = nil
        @poll_mutex.unlock
        @poll_mutex = nil
      end
    end
  end
end
@@ -0,0 +1,40 @@
1
# frozen_string_literal: true

module Rdkafka
  class Producer
    # Handle to wait for a delivery report which is returned when
    # producing a message.
    class DeliveryHandle < Rdkafka::AbstractHandle
      layout :pending, :bool,
             :response, :int,
             :partition, :int,
             :offset, :int64,
             :topic_name, :pointer

      # @return [Object, nil] label set during message production or nil by default
      attr_accessor :label

      # @return [String] topic where we are trying to send the message
      #   Kept here instead of reading from the `topic_name` pointer to save
      #   on memory allocations
      attr_accessor :topic

      # @return [String] the name of the operation (e.g. "delivery")
      def operation_name
        "delivery"
      end

      # Builds the report describing how the delivery went.
      #
      # @return [DeliveryReport] a report on the delivery of the message
      def create_result
        # A zero response means success, anything else maps to an error object.
        # For some errors we will not get a topic name reference, in which case
        # we should not return one either.
        error = self[:response] == 0 ? nil : RdkafkaError.new(self[:response])

        DeliveryReport.new(
          self[:partition],
          self[:offset],
          topic,
          error,
          label
        )
      end
    end
  end
end
@@ -0,0 +1,46 @@
1
# frozen_string_literal: true

module Rdkafka
  class Producer
    # Delivery report for a successfully produced message.
    class DeliveryReport
      # The partition this message was produced to.
      # @return [Integer]
      attr_reader :partition

      # The offset of the produced message.
      # @return [Integer]
      attr_reader :offset

      # The name of the topic this message was produced to or nil in case of reports with errors
      # where topic was not reached.
      #
      # @return [String, nil]
      attr_reader :topic_name

      # Error that occurred during produce, or nil on success.
      # @return [RdkafkaError, nil]
      attr_reader :error

      # @return [Object, nil] label set during message production or nil by default
      attr_reader :label

      # We alias the `#topic_name` under `#topic` to make this consistent with `Consumer::Message`
      # where the topic name is under `#topic` method. That way we have a consistent name that
      # is present in both places
      #
      # We do not remove the original `#topic_name` because of backwards compatibility
      alias topic topic_name

      private

      # @param partition [Integer] partition the message was produced to
      # @param offset [Integer] offset of the produced message
      # @param topic_name [String, nil] topic name or nil when not reached
      # @param error [RdkafkaError, nil] produce error or nil on success
      # @param label [Object, nil] label set during message production
      def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
        @partition = partition
        @offset = offset
        @topic_name = topic_name
        @error = error
        @label = label
      end
    end
  end
end