rdkafka 0.16.0.beta1 → 0.16.0.rc1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 3135d4f2663517517330d165948e9761ffc0ecd20942f911a6ee9541c437ee7e
-  data.tar.gz: 9a489c2400c4054e9cec0d6c8d24f75bce407d370f8c393b7b31dcd0dbf7c361
+  metadata.gz: 3132a3d7af1fc531a6259fe3a027e555bfb59183737c3fcffdb5f7e9dc746320
+  data.tar.gz: 3f1c36a78196660ec0c1b522ff084f63c4c27fa53383864579107a937136cf72
 SHA512:
-  metadata.gz: be8eba2aec012af189d893ebf6ddcd4e4dec117aecd7f36afa76bc6ee4c2a3fa93f4ac4160efa16e8c91efd7011ee41cda00a5e2146e18044a166035850cd490
-  data.tar.gz: d1761a6ab9c7d9ee539d79679dc730cc392fc2d7369d3f78b9c8d8f1c800855ab15ae7c472259b43e95139aa30d4777026c0529671fd85f98f3e5fceeaf53e62
+  metadata.gz: 54d9c9b2cb2f788f8b5134241ff312c7afba6b7d260b4bfe2782fc492023d21b93498fe3af5024e0cc2482741bc70969a7219c85fa8cf35c0c65bdd3a76a73ee
+  data.tar.gz: 9af50d5fc373faf18c5bc1a71d080812bd230a637a40900e3025f97fcfa70df8b9de945b06332c813e9d03316a8eaac6217005f1d733c54be2b8e4b3045c3e9f
checksums.yaml.gz.sig CHANGED
Binary file
@@ -26,7 +26,6 @@ jobs:
       - '3.2'
       - '3.1'
       - '3.0'
-      - '2.7'
       include:
         - ruby: '3.3'
           coverage: 'true'
data/CHANGELOG.md CHANGED
@@ -1,7 +1,12 @@
 # Rdkafka Changelog

 ## 0.16.0 (Unreleased)
+- **[Breaking]** Retire support for Ruby 2.7.
+- **[Feature]** Support incremental config describe + alter API.
 - **[Feature]** Oauthbearer token refresh callback (bruce-szalwinski-he)
+- **[Feature]** Provide ability to use topic config on a producer for custom behaviors per dispatch.
+- [Enhancement] Use a topic config reference cache for message production to prevent topic object allocation with each message.
+- [Enhancement] Provide `Rdkafka::Admin#describe_errors` to get error descriptions (mensfeld)
 - [Enhancement] Replace the time-poll based wait engine with an event based one to improve response times on blocking operations and wait (nijikon + mensfeld)
 - [Enhancement] Allow for usage of the second regex engine of librdkafka by setting `RDKAFKA_DISABLE_REGEX_EXT` during build (mensfeld)
 - [Enhancement] Name polling Thread as `rdkafka.native_kafka#<name>` (nijikon)
data/lib/rdkafka/admin/config_binding_result.rb ADDED
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  class Admin
+    # A single config binding result that represents its values extracted from C
+    class ConfigBindingResult
+      attr_reader :name, :value, :read_only, :default, :sensitive, :synonym, :synonyms
+
+      # @param config_ptr [FFI::Pointer] config pointer
+      def initialize(config_ptr)
+        @name = Bindings.rd_kafka_ConfigEntry_name(config_ptr)
+        @value = Bindings.rd_kafka_ConfigEntry_value(config_ptr)
+        @read_only = Bindings.rd_kafka_ConfigEntry_is_read_only(config_ptr)
+        @default = Bindings.rd_kafka_ConfigEntry_is_default(config_ptr)
+        @sensitive = Bindings.rd_kafka_ConfigEntry_is_sensitive(config_ptr)
+        @synonym = Bindings.rd_kafka_ConfigEntry_is_synonym(config_ptr)
+        @synonyms = []
+
+        # The code below builds up the config synonyms using the same config binding
+        pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+        synonym_ptr = Bindings.rd_kafka_ConfigEntry_synonyms(config_ptr, pointer_to_size_t)
+        synonyms_ptr = synonym_ptr.read_array_of_pointer(pointer_to_size_t.read_int)
+
+        (1..pointer_to_size_t.read_int).each do |ar|
+          @synonyms << self.class.new(synonyms_ptr[ar - 1])
+        end
+      end
+    end
+  end
+end
data/lib/rdkafka/admin/config_resource_binding_result.rb ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  class Admin
+    # A simple binding that represents the requested config resource
+    class ConfigResourceBindingResult
+      attr_reader :name, :type, :configs, :configs_count
+
+      def initialize(config_resource_ptr)
+        ffi_binding = Bindings::ConfigResource.new(config_resource_ptr)
+
+        @name = ffi_binding[:name]
+        @type = ffi_binding[:type]
+        @configs = []
+      end
+    end
+  end
+end
data/lib/rdkafka/admin/describe_acl_report.rb CHANGED
@@ -10,6 +10,7 @@ module Rdkafka
 
       def initialize(acls:, acls_count:)
         @acls=[]
+
         if acls != FFI::Pointer::NULL
           acl_binding_result_pointers = acls.read_array_of_pointer(acls_count)
           (1..acls_count).map do |acl_index|
data/lib/rdkafka/admin/describe_configs_handle.rb ADDED
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  class Admin
+    class DescribeConfigsHandle < AbstractHandle
+      layout :pending, :bool,
+             :response, :int,
+             :response_string, :pointer,
+             :config_entries, :pointer,
+             :entry_count, :int
+
+      # @return [String] the name of the operation.
+      def operation_name
+        "describe configs"
+      end
+
+      # @return [DescribeConfigsReport] instance with an array of described resources and their configs.
+      def create_result
+        DescribeConfigsReport.new(
+          config_entries: self[:config_entries],
+          entry_count: self[:entry_count]
+        )
+      end
+
+      def raise_error
+        raise RdkafkaError.new(
+          self[:response],
+          broker_message: self[:response_string].read_string
+        )
+      end
+    end
+  end
+end
data/lib/rdkafka/admin/describe_configs_report.rb ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  class Admin
+    class DescribeConfigsReport
+      attr_reader :resources
+
+      def initialize(config_entries:, entry_count:)
+        @resources = []
+
+        return if config_entries == FFI::Pointer::NULL
+
+        config_entries
+          .read_array_of_pointer(entry_count)
+          .each { |config_resource_result_ptr| validate!(config_resource_result_ptr) }
+          .each do |config_resource_result_ptr|
+            config_resource_result = ConfigResourceBindingResult.new(config_resource_result_ptr)
+
+            pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+            configs_ptr = Bindings.rd_kafka_ConfigResource_configs(
+              config_resource_result_ptr,
+              pointer_to_size_t
+            )
+
+            configs_ptr
+              .read_array_of_pointer(pointer_to_size_t.read_int)
+              .map { |config_ptr| ConfigBindingResult.new(config_ptr) }
+              .each { |config_binding| config_resource_result.configs << config_binding }
+
+            @resources << config_resource_result
+          end
+      ensure
+        return if config_entries == FFI::Pointer::NULL
+
+        Bindings.rd_kafka_ConfigResource_destroy_array(config_entries, entry_count)
+      end
+
+      private
+
+      def validate!(config_resource_result_ptr)
+        code = Bindings.rd_kafka_ConfigResource_error(config_resource_result_ptr)
+
+        return if code.zero?
+
+        raise(
+          RdkafkaError.new(
+            code,
+            Bindings.rd_kafka_ConfigResource_error_string(config_resource_result_ptr)
+          )
+        )
+      end
+    end
+  end
+end
data/lib/rdkafka/admin/incremental_alter_configs_handle.rb ADDED
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  class Admin
+    class IncrementalAlterConfigsHandle < AbstractHandle
+      layout :pending, :bool,
+             :response, :int,
+             :response_string, :pointer,
+             :config_entries, :pointer,
+             :entry_count, :int
+
+      # @return [String] the name of the operation.
+      def operation_name
+        "incremental alter configs"
+      end
+
+      # @return [IncrementalAlterConfigsReport] instance with an array of altered resources.
+      def create_result
+        IncrementalAlterConfigsReport.new(
+          config_entries: self[:config_entries],
+          entry_count: self[:entry_count]
+        )
+      end
+
+      def raise_error
+        raise RdkafkaError.new(
+          self[:response],
+          broker_message: self[:response_string].read_string
+        )
+      end
+    end
+  end
+end
data/lib/rdkafka/admin/incremental_alter_configs_report.rb ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  class Admin
+    class IncrementalAlterConfigsReport
+      attr_reader :resources
+
+      def initialize(config_entries:, entry_count:)
+        @resources = []
+
+        return if config_entries == FFI::Pointer::NULL
+
+        config_entries
+          .read_array_of_pointer(entry_count)
+          .each { |config_resource_result_ptr| validate!(config_resource_result_ptr) }
+          .each do |config_resource_result_ptr|
+            config_resource_result = ConfigResourceBindingResult.new(config_resource_result_ptr)
+
+            pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+            configs_ptr = Bindings.rd_kafka_ConfigResource_configs(
+              config_resource_result_ptr,
+              pointer_to_size_t
+            )
+
+            configs_ptr
+              .read_array_of_pointer(pointer_to_size_t.read_int)
+              .map { |config_ptr| ConfigBindingResult.new(config_ptr) }
+              .each { |config_binding| config_resource_result.configs << config_binding }
+
+            @resources << config_resource_result
+          end
+      ensure
+        return if config_entries == FFI::Pointer::NULL
+
+        Bindings.rd_kafka_ConfigResource_destroy_array(config_entries, entry_count)
+      end
+
+      private
+
+      def validate!(config_resource_result_ptr)
+        code = Bindings.rd_kafka_ConfigResource_error(config_resource_result_ptr)
+
+        return if code.zero?
+
+        raise(
+          RdkafkaError.new(
+            code,
+            Bindings.rd_kafka_ConfigResource_error_string(config_resource_result_ptr)
+          )
+        )
+      end
+    end
+  end
+end
data/lib/rdkafka/admin.rb CHANGED
@@ -4,6 +4,50 @@ module Rdkafka
   class Admin
     include Helpers::OAuth

+    class << self
+      # Allows us to retrieve librdkafka errors with descriptions
+      # Useful for debugging and building UIs, etc.
+      #
+      # @return [Hash<Integer, Hash>] hash with errors mapped by code
+      def describe_errors
+        # Memory pointers for the array of structures and the count
+        p_error_descs = FFI::MemoryPointer.new(:pointer)
+        p_count = FFI::MemoryPointer.new(:size_t)
+
+        # Call the attached function
+        Bindings.rd_kafka_get_err_descs(p_error_descs, p_count)
+
+        # Retrieve the number of items in the array
+        count = p_count.read_uint
+
+        # Get the pointer to the array of error descriptions
+        array_of_errors = FFI::Pointer.new(Bindings::NativeErrorDesc, p_error_descs.read_pointer)
+
+        errors = {}
+
+        count.times do |i|
+          # Get the pointer to each struct
+          error_ptr = array_of_errors[i]
+
+          # Create a new instance of NativeErrorDesc for each item
+          error_desc = Bindings::NativeErrorDesc.new(error_ptr)
+
+          # Read values from the struct
+          code = error_desc[:code]
+
+          name = ''
+          desc = ''
+
+          name = error_desc[:name].read_string unless error_desc[:name].null?
+          desc = error_desc[:desc].read_string unless error_desc[:desc].null?
+
+          errors[code] = { code: code, name: name, description: desc }
+        end
+
+        errors
+      end
+    end
+
     # @private
     def initialize(native_kafka)
       @native_kafka = native_kafka
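
Since `describe_errors` is defined on the singleton class and only reads librdkafka's static error table, it can be invoked without an admin instance or a live cluster. A minimal usage sketch (the sample output values are taken from the specs further below in this diff):

```ruby
require "rdkafka"

# Class-level call: no broker connection is required, because the data
# comes straight from librdkafka's built-in error descriptions.
errors = Rdkafka::Admin.describe_errors

# Each entry is keyed by the integer error code and carries the code,
# the symbolic name and a human-readable description.
errors[-184] # => { code: -184, name: "_QUEUE_FULL", description: "Local: Queue full" }
```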
@@ -620,6 +664,166 @@ module Rdkafka
       describe_acl_handle
     end

+
+    # Describe configs
+    #
+    # @param resources [Array<Hash>] Array where elements are hashes with two keys:
+    #   - `:resource_type` - numerical resource type based on the Kafka API
+    #   - `:resource_name` - string with the resource name
+    # @return [DescribeConfigsHandle] Describe configs handle that can be used to wait for the
+    #   result of fetching resources with their appropriate configs
+    #
+    # @raise [RdkafkaError]
+    #
+    # @note Several resources can be requested in one go, but only one broker at a time
+    def describe_configs(resources)
+      closed_admin_check(__method__)
+
+      handle = DescribeConfigsHandle.new
+      handle[:pending] = true
+      handle[:response] = -1
+
+      queue_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+      end
+
+      if queue_ptr.null?
+        raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
+      end
+
+      admin_options_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_AdminOptions_new(
+          inner,
+          Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS
+        )
+      end
+
+      DescribeConfigsHandle.register(handle)
+      Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, handle.to_ptr)
+
+      pointer_array = resources.map do |resource_details|
+        Rdkafka::Bindings.rd_kafka_ConfigResource_new(
+          resource_details.fetch(:resource_type),
+          FFI::MemoryPointer.from_string(
+            resource_details.fetch(:resource_name)
+          )
+        )
+      end
+
+      configs_array_ptr = FFI::MemoryPointer.new(:pointer, pointer_array.size)
+      configs_array_ptr.write_array_of_pointer(pointer_array)
+
+      begin
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_DescribeConfigs(
+            inner,
+            configs_array_ptr,
+            pointer_array.size,
+            admin_options_ptr,
+            queue_ptr
+          )
+        end
+      rescue Exception
+        DescribeConfigsHandle.remove(handle.to_ptr.address)
+
+        raise
+      ensure
+        Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
+          configs_array_ptr,
+          pointer_array.size
+        ) if configs_array_ptr
+      end
+
+      handle
+    end
+
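
The returned handle follows the same register/wait flow as the other admin operations. A short usage sketch, assuming a reachable broker at localhost:9092 and a pre-existing topic named 'events' (both hypothetical); `resource_type: 2` denotes a topic, matching the specs further below:

```ruby
admin = Rdkafka::Config.new("bootstrap.servers": "localhost:9092").admin

# Describe a single topic resource and block until the background
# event callback populates the handle with the results.
results = admin.describe_configs(
  [{ resource_type: 2, resource_name: "events" }]
).wait.resources

results.first.configs.each do |config|
  puts "#{config.name} = #{config.value}"
end

admin.close
```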
+    # Alters in an incremental way all the configs provided for the given resources
+    #
+    # @param resources_with_configs [Array<Hash>] resources with the configs key that contains
+    #   the name, the value and the proper op_type to perform on this value.
+    #
+    # @return [IncrementalAlterConfigsHandle] Incremental alter configs handle that can be used
+    #   to wait for the result of altering resources with their appropriate configs
+    #
+    # @raise [RdkafkaError]
+    #
+    # @note Several resources can be requested in one go, but only one broker at a time
+    # @note The results won't contain the altered values but only the altered resources
+    def incremental_alter_configs(resources_with_configs)
+      closed_admin_check(__method__)
+
+      handle = IncrementalAlterConfigsHandle.new
+      handle[:pending] = true
+      handle[:response] = -1
+
+      queue_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+      end
+
+      if queue_ptr.null?
+        raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
+      end
+
+      admin_options_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_AdminOptions_new(
+          inner,
+          Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS
+        )
+      end
+
+      IncrementalAlterConfigsHandle.register(handle)
+      Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, handle.to_ptr)
+
+      # Build the config resources with their incremental alterations here
+      pointer_array = resources_with_configs.map do |resource_details|
+        # First build the appropriate resource representation
+        resource_ptr = Rdkafka::Bindings.rd_kafka_ConfigResource_new(
+          resource_details.fetch(:resource_type),
+          FFI::MemoryPointer.from_string(
+            resource_details.fetch(:resource_name)
+          )
+        )
+
+        resource_details.fetch(:configs).each do |config|
+          Bindings.rd_kafka_ConfigResource_add_incremental_config(
+            resource_ptr,
+            config.fetch(:name),
+            config.fetch(:op_type),
+            config.fetch(:value)
+          )
+        end
+
+        resource_ptr
+      end
+
+      configs_array_ptr = FFI::MemoryPointer.new(:pointer, pointer_array.size)
+      configs_array_ptr.write_array_of_pointer(pointer_array)
+
+      begin
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_IncrementalAlterConfigs(
+            inner,
+            configs_array_ptr,
+            pointer_array.size,
+            admin_options_ptr,
+            queue_ptr
+          )
+        end
+      rescue Exception
+        IncrementalAlterConfigsHandle.remove(handle.to_ptr.address)
+
+        raise
+      ensure
+        Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
+          configs_array_ptr,
+          pointer_array.size
+        ) if configs_array_ptr
+      end
+
+      handle
+    end
+
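
A sketch of the alteration flow under the same assumptions (hypothetical 'events' topic); `op_type` maps onto the `RD_KAFKA_ALTER_CONFIG_OP_TYPE_*` constants added to the bindings further below (0 = set, 1 = delete, 2 = append, 3 = subtract):

```ruby
admin = Rdkafka::Config.new("bootstrap.servers": "localhost:9092").admin

# Set delete.retention.ms on one topic; per the note above, the report
# lists the altered resources but not the altered values themselves.
admin.incremental_alter_configs(
  [
    {
      resource_type: 2,
      resource_name: "events",
      configs: [
        {
          name: "delete.retention.ms",
          value: "86400001",
          op_type: Rdkafka::Bindings::RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET
        }
      ]
    }
  ]
).wait

admin.close
```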
     private

     def closed_admin_check(method)
data/lib/rdkafka/bindings.rb CHANGED
@@ -89,10 +89,58 @@ module Rdkafka
     attach_function :rd_kafka_topic_partition_list_destroy, [:pointer], :void
     attach_function :rd_kafka_topic_partition_list_copy, [:pointer], :pointer

+    # Configs management
+    #
+    # Structs for management of configurations
+    # Each configuration is attached to a resource and one resource can have many configuration
+    # details. Each resource will also have separate error results if obtaining the configuration
+    # was not possible for any reason
+    class ConfigResource < FFI::Struct
+      layout :type, :int,
+             :name, :string
+    end
+
+    attach_function :rd_kafka_DescribeConfigs, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
+    attach_function :rd_kafka_ConfigResource_new, [:int32, :pointer], :pointer
+    attach_function :rd_kafka_ConfigResource_destroy_array, [:pointer, :int32], :void
+    attach_function :rd_kafka_event_DescribeConfigs_result, [:pointer], :pointer
+    attach_function :rd_kafka_DescribeConfigs_result_resources, [:pointer, :pointer], :pointer
+    attach_function :rd_kafka_ConfigResource_configs, [:pointer, :pointer], :pointer
+    attach_function :rd_kafka_ConfigEntry_name, [:pointer], :string
+    attach_function :rd_kafka_ConfigEntry_value, [:pointer], :string
+    attach_function :rd_kafka_ConfigEntry_is_read_only, [:pointer], :int
+    attach_function :rd_kafka_ConfigEntry_is_default, [:pointer], :int
+    attach_function :rd_kafka_ConfigEntry_is_sensitive, [:pointer], :int
+    attach_function :rd_kafka_ConfigEntry_is_synonym, [:pointer], :int
+    attach_function :rd_kafka_ConfigEntry_synonyms, [:pointer, :pointer], :pointer
+    attach_function :rd_kafka_ConfigResource_error, [:pointer], :int
+    attach_function :rd_kafka_ConfigResource_error_string, [:pointer], :string
+    attach_function :rd_kafka_IncrementalAlterConfigs, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
+    attach_function :rd_kafka_IncrementalAlterConfigs_result_resources, [:pointer, :pointer], :pointer
+    attach_function :rd_kafka_ConfigResource_add_incremental_config, [:pointer, :string, :int32, :string], :pointer
+    attach_function :rd_kafka_event_IncrementalAlterConfigs_result, [:pointer], :pointer
+
+    RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS = 5
+    RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT = 104
+
+    RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS = 16
+    RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT = 131072
+
+    RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0
+    RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1
+    RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2
+    RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3
+
     # Errors
+    class NativeErrorDesc < FFI::Struct
+      layout :code, :int,
+             :name, :pointer,
+             :desc, :pointer
+    end

     attach_function :rd_kafka_err2name, [:int], :string
     attach_function :rd_kafka_err2str, [:int], :string
+    attach_function :rd_kafka_get_err_descs, [:pointer, :pointer], :void

     # Configuration

@@ -119,6 +167,9 @@ module Rdkafka
     # Log queue
     attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
     attach_function :rd_kafka_queue_get_main, [:pointer], :pointer
+    # Per topic configs
+    attach_function :rd_kafka_topic_conf_new, [], :pointer
+    attach_function :rd_kafka_topic_conf_set, [:pointer, :string, :string, :pointer, :int], :kafka_config_response

     LogCallback = FFI::Function.new(
       :void, [:pointer, :int, :string, :string]
data/lib/rdkafka/callbacks.rb CHANGED
@@ -113,6 +113,42 @@ module Rdkafka
       end
     end

+    class DescribeConfigsResult
+      attr_reader :result_error, :error_string, :results, :results_count
+
+      def initialize(event_ptr)
+        @results = []
+        @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
+        @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
+
+        if @result_error == 0
+          configs_describe_result = Rdkafka::Bindings.rd_kafka_event_DescribeConfigs_result(event_ptr)
+          # Get the number of described resources
+          pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+          @results = Rdkafka::Bindings.rd_kafka_DescribeConfigs_result_resources(configs_describe_result, pointer_to_size_t)
+          @results_count = pointer_to_size_t.read_int
+        end
+      end
+    end
+
+    class IncrementalAlterConfigsResult
+      attr_reader :result_error, :error_string, :results, :results_count
+
+      def initialize(event_ptr)
+        @results = []
+        @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
+        @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
+
+        if @result_error == 0
+          incremental_alter_result = Rdkafka::Bindings.rd_kafka_event_IncrementalAlterConfigs_result(event_ptr)
+          # Get the number of altered resources
+          pointer_to_size_t = FFI::MemoryPointer.new(:int32)
+          @results = Rdkafka::Bindings.rd_kafka_IncrementalAlterConfigs_result_resources(incremental_alter_result, pointer_to_size_t)
+          @results_count = pointer_to_size_t.read_int
+        end
+      end
+    end
+
     # FFI Function used for Create Topic and Delete Topic callbacks
     BackgroundEventCallbackFunction = FFI::Function.new(
       :void, [:pointer, :pointer, :pointer]
@@ -123,20 +159,24 @@ module Rdkafka
     # @private
     class BackgroundEventCallback
       def self.call(_, event_ptr, _)
-        event_type = Rdkafka::Bindings.rd_kafka_event_type(event_ptr)
-        if event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_CREATETOPICS_RESULT
+        case Rdkafka::Bindings.rd_kafka_event_type(event_ptr)
+        when Rdkafka::Bindings::RD_KAFKA_EVENT_CREATETOPICS_RESULT
           process_create_topic(event_ptr)
-        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETETOPICS_RESULT
+        when Rdkafka::Bindings::RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
+          process_describe_configs(event_ptr)
+        when Rdkafka::Bindings::RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT
+          process_incremental_alter_configs(event_ptr)
+        when Rdkafka::Bindings::RD_KAFKA_EVENT_DELETETOPICS_RESULT
           process_delete_topic(event_ptr)
-        elsif event_type == Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS_RESULT
+        when Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS_RESULT
           process_create_partitions(event_ptr)
-        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_CREATEACLS_RESULT
+        when Rdkafka::Bindings::RD_KAFKA_EVENT_CREATEACLS_RESULT
           process_create_acl(event_ptr)
-        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEACLS_RESULT
+        when Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEACLS_RESULT
           process_delete_acl(event_ptr)
-        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+        when Rdkafka::Bindings::RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
           process_describe_acl(event_ptr)
-        elsif event_type == Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEGROUPS_RESULT
+        when Rdkafka::Bindings::RD_KAFKA_EVENT_DELETEGROUPS_RESULT
           process_delete_groups(event_ptr)
         end
       end
@@ -161,6 +201,42 @@ module Rdkafka
       end
     end

+    def self.process_describe_configs(event_ptr)
+      describe_configs = DescribeConfigsResult.new(event_ptr)
+      describe_configs_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+      if describe_configs_handle = Rdkafka::Admin::DescribeConfigsHandle.remove(describe_configs_handle_ptr.address)
+        describe_configs_handle[:response] = describe_configs.result_error
+        describe_configs_handle[:response_string] = describe_configs.error_string
+        describe_configs_handle[:pending] = false
+
+        if describe_configs.result_error == 0
+          describe_configs_handle[:config_entries] = describe_configs.results
+          describe_configs_handle[:entry_count] = describe_configs.results_count
+        end
+
+        describe_configs_handle.unlock
+      end
+    end
+
+    def self.process_incremental_alter_configs(event_ptr)
+      incremental_alter = IncrementalAlterConfigsResult.new(event_ptr)
+      incremental_alter_handle_ptr = Rdkafka::Bindings.rd_kafka_event_opaque(event_ptr)
+
+      if incremental_alter_handle = Rdkafka::Admin::IncrementalAlterConfigsHandle.remove(incremental_alter_handle_ptr.address)
+        incremental_alter_handle[:response] = incremental_alter.result_error
+        incremental_alter_handle[:response_string] = incremental_alter.error_string
+        incremental_alter_handle[:pending] = false
+
+        if incremental_alter.result_error == 0
+          incremental_alter_handle[:config_entries] = incremental_alter.results
+          incremental_alter_handle[:entry_count] = incremental_alter.results_count
+        end
+
+        incremental_alter_handle.unlock
+      end
+    end
+
     def self.process_delete_groups(event_ptr)
       delete_groups_result = Rdkafka::Bindings.rd_kafka_event_DeleteGroups_result(event_ptr)

@@ -263,7 +339,7 @@ module Rdkafka
       describe_acl_handle[:response_string] = describe_acl.error_string

       if describe_acl.result_error == 0
-          describe_acl_handle[:acls] = describe_acl.matching_acls
+        describe_acl_handle[:acls] = describe_acl.matching_acls
         describe_acl_handle[:acls_count] = describe_acl.matching_acls_count
       end

data/lib/rdkafka/producer.rb CHANGED
@@ -9,7 +9,15 @@ module Rdkafka
     # Cache partitions count for 30 seconds
     PARTITIONS_COUNT_TTL = 30

-    private_constant :PARTITIONS_COUNT_TTL
+    # Empty hash used as a default
+    EMPTY_HASH = {}.freeze
+
+    private_constant :PARTITIONS_COUNT_TTL, :EMPTY_HASH
+
+    # Raised when there was a critical issue when invoking rd_kafka_topic_new
+    # This is a temporary solution until https://github.com/karafka/rdkafka-ruby/issues/451 is
+    # resolved and this is normalized in all the places
+    class TopicHandleCreationError < RuntimeError; end

     # @private
     # Returns the current delivery callback, by default this is nil.
@@ -28,6 +36,8 @@ module Rdkafka
     # @param partitioner_name [String, nil] name of the partitioner we want to use or nil to use
     #   the "consistent_random" default
     def initialize(native_kafka, partitioner_name)
+      @topics_refs_map = {}
+      @topics_configs = {}
       @native_kafka = native_kafka
       @partitioner_name = partitioner_name || "consistent_random"

@@ -54,6 +64,52 @@ module Rdkafka
       end
     end

+    # Sets an alternative set of configuration details that can be applied on a per-topic basis
+    # @note It is not allowed to re-set the same topic config twice because of the underlying
+    #   librdkafka caching
+    # @param topic [String] The topic name
+    # @param config [Hash] config we want to use on a per-topic basis
+    # @param config_hash [Integer] hash of the config. We expect it here instead of computing it,
+    #   because it is already computed during the retrieval attempt in the `#produce` flow.
+    def set_topic_config(topic, config, config_hash)
+      # Ensure lock on the topic reference just in case
+      @native_kafka.with_inner do |inner|
+        @topics_refs_map[topic] ||= {}
+        @topics_configs[topic] ||= {}
+
+        return if @topics_configs[topic].key?(config_hash)
+
+        # If config is empty, we create an empty reference that will be used with defaults
+        rd_topic_config = if config.empty?
+          nil
+        else
+          Rdkafka::Bindings.rd_kafka_topic_conf_new.tap do |topic_config|
+            config.each do |key, value|
+              error_buffer = FFI::MemoryPointer.new(:char, 256)
+              result = Rdkafka::Bindings.rd_kafka_topic_conf_set(
+                topic_config,
+                key.to_s,
+                value.to_s,
+                error_buffer,
+                256
+              )
+
+              unless result == :config_ok
+                raise Config::ConfigError.new(error_buffer.read_string)
+              end
+            end
+          end
+        end
+
+        topic_handle = Bindings.rd_kafka_topic_new(inner, topic, rd_topic_config)
+
+        raise TopicHandleCreationError.new("Error creating topic handle for topic #{topic}") if topic_handle.null?
+
+        @topics_configs[topic][config_hash] = config
+        @topics_refs_map[topic][config_hash] = topic_handle
+      end
+    end
+
     # Starts the native Kafka polling thread and kicks off the init polling
     # @note Not needed to run unless explicit start was disabled
     def start
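
`#produce` invokes `set_topic_config` lazily on the first dispatch with a given config, but it can also be called up front. A minimal sketch with a hypothetical 'events' topic; the third argument mirrors the `topic_config.hash` value that `#produce` computes:

```ruby
topic_config = { "acks" => "all" }

# Pre-register the per-topic handle so the first produce call does not
# pay the rd_kafka_topic_new creation cost.
producer.set_topic_config("events", topic_config, topic_config.hash)
```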
@@ -83,7 +139,18 @@ module Rdkafka
     def close
       return if closed?
       ObjectSpace.undefine_finalizer(self)
-      @native_kafka.close
+
+      @native_kafka.close do
+        # We need to remove the topic reference objects before we destroy the producer,
+        # otherwise they would leak out
+        @topics_refs_map.each_value do |refs|
+          refs.each_value do |ref|
+            Rdkafka::Bindings.rd_kafka_topic_destroy(ref)
+          end
+        end
+      end
+
+      @topics_refs_map.clear
     end

     # Whether this producer has closed
@@ -182,11 +249,22 @@ module Rdkafka
     # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
     # @param headers [Hash<String,String>] Optional message headers
     # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
+    # @param topic_config [Hash] topic config for a given message dispatch. Allows sending messages to topics with different configurations
     #
     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
     #
     # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
-    def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil, label: nil)
+    def produce(
+      topic:,
+      payload: nil,
+      key: nil,
+      partition: nil,
+      partition_key: nil,
+      timestamp: nil,
+      headers: nil,
+      label: nil,
+      topic_config: EMPTY_HASH
+    )
       closed_producer_check(__method__)

       # Start by checking and converting the input
@@ -205,8 +283,20 @@ module Rdkafka
         key.bytesize
       end

+      topic_config_hash = topic_config.hash
+
+      # Check if we have the rdkafka topic reference object ready. It saves us on object
+      # allocation and allows us to use a custom config on demand.
+      set_topic_config(topic, topic_config, topic_config_hash) unless @topics_refs_map.dig(topic, topic_config_hash)
+      topic_ref = @topics_refs_map.dig(topic, topic_config_hash)
+
       if partition_key
         partition_count = partition_count(topic)
+
+        # Check if there is a per-topic override for the partitioner and fall back to the
+        # default one only when no per-topic partitioner is present.
+        partitioner_name = @topics_configs.dig(topic, topic_config_hash, :partitioner) || @partitioner_name
+
         # If the topic is not present, set to -1
-        partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count.positive?
+        partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, partitioner_name) if partition_count.positive?
       end
@@ -236,7 +326,7 @@ module Rdkafka
       DeliveryHandle.register(delivery_handle)

       args = [
-        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_TOPIC, :string, topic,
+        :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_RKT, :pointer, topic_ref,
         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_MSGFLAGS, :int, Rdkafka::Bindings::RD_KAFKA_MSG_F_COPY,
         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_VALUE, :buffer_in, payload, :size_t, payload_size,
         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_KEY, :buffer_in, key, :size_t, key_size,
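
A sketch of a per-dispatch topic config, mirroring the producer specs at the end of this diff (topic name and values are illustrative); each distinct config is cached under its own hash, so the same topic can be produced to with several configurations:

```ruby
handle = producer.produce(
  topic: "events",
  payload: "body",
  topic_config: {
    "compression.type": "gzip",
    "message.timeout.ms": 5_000
  }
)

# Waits for the delivery report of the message produced with the
# altered per-topic configuration
handle.wait
```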
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true

 module Rdkafka
-  VERSION = "0.16.0.beta1"
+  VERSION = "0.16.0.rc1"
   LIBRDKAFKA_VERSION = "2.3.0"
   LIBRDKAFKA_SOURCE_SHA256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12"
 end
data/lib/rdkafka.rb CHANGED
@@ -24,7 +24,13 @@ require "rdkafka/admin/delete_acl_handle"
 require "rdkafka/admin/delete_acl_report"
 require "rdkafka/admin/describe_acl_handle"
 require "rdkafka/admin/describe_acl_report"
+require "rdkafka/admin/describe_configs_handle"
+require "rdkafka/admin/describe_configs_report"
+require "rdkafka/admin/incremental_alter_configs_handle"
+require "rdkafka/admin/incremental_alter_configs_report"
 require "rdkafka/admin/acl_binding_result"
+require "rdkafka/admin/config_binding_result"
+require "rdkafka/admin/config_resource_binding_result"
 require "rdkafka/bindings"
 require "rdkafka/callbacks"
 require "rdkafka/config"
data/rdkafka.gemspec CHANGED
@@ -15,7 +15,7 @@ Gem::Specification.new do |gem|
   gem.name = 'rdkafka'
   gem.require_paths = ['lib']
   gem.version = Rdkafka::VERSION
-  gem.required_ruby_version = '>= 2.7'
+  gem.required_ruby_version = '>= 3.0'
   gem.extensions = %w(ext/Rakefile)
   gem.cert_chain = %w[certs/cert_chain.pem]

data/spec/rdkafka/admin_spec.rb CHANGED
@@ -16,12 +16,12 @@ describe Rdkafka::Admin do
     admin.close
   end

-  let(:topic_name) { "test-topic-#{Random.new.rand(0..1_000_000)}" }
+  let(:topic_name) { "test-topic-#{SecureRandom.uuid}" }
   let(:topic_partition_count) { 3 }
   let(:topic_replication_factor) { 1 }
   let(:topic_config) { {"cleanup.policy" => "compact", "min.cleanable.dirty.ratio" => 0.8} }
   let(:invalid_topic_config) { {"cleeeeenup.policee" => "campact"} }
-  let(:group_name) { "test-group-#{Random.new.rand(0..1_000_000)}" }
+  let(:group_name) { "test-group-#{SecureRandom.uuid}" }

   let(:resource_name) {"acl-test-topic"}
   let(:resource_type) {Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC}
@@ -31,6 +31,14 @@ describe Rdkafka::Admin do
   let(:operation) {Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ}
   let(:permission_type) {Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW}

+  describe '#describe_errors' do
+    let(:errors) { admin.class.describe_errors }
+
+    it { expect(errors.size).to eq(162) }
+    it { expect(errors[-184]).to eq(code: -184, description: 'Local: Queue full', name: '_QUEUE_FULL') }
+    it { expect(errors[21]).to eq(code: 21, description: 'Broker: Invalid required acks value', name: 'INVALID_REQUIRED_ACKS') }
+  end
+
   describe 'admin without auto-start' do
     let(:admin) { config.admin(native_kafka_auto_start: false) }

@@ -142,6 +150,275 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
     end
   end

+  describe "describe_configs" do
+    subject(:resources_results) { admin.describe_configs(resources).wait.resources }
+
+    before do
+      admin.create_topic(topic_name, 2, 1).wait
+      sleep(1)
+    end
+
+    context 'when describing the config of an existing topic' do
+      let(:resources) { [{ resource_type: 2, resource_name: topic_name }] }
+
+      it do
+        expect(resources_results.size).to eq(1)
+        expect(resources_results.first.type).to eq(2)
+        expect(resources_results.first.name).to eq(topic_name)
+        expect(resources_results.first.configs.size).to be > 25
+        expect(resources_results.first.configs.first.name).to eq('compression.type')
+        expect(resources_results.first.configs.first.value).to eq('producer')
+        expect(resources_results.first.configs.map(&:synonyms)).not_to be_empty
+      end
+    end
+
+    context 'when describing the config of a non-existing topic' do
+      let(:resources) { [{ resource_type: 2, resource_name: SecureRandom.uuid }] }
+
+      it 'expect to raise error' do
+        expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /unknown_topic_or_part/)
+      end
+    end
+
+    context 'when describing both existing and non-existing topics' do
+      let(:resources) do
+        [
+          { resource_type: 2, resource_name: topic_name },
+          { resource_type: 2, resource_name: SecureRandom.uuid }
+        ]
+      end
+
+      it 'expect to raise error' do
+        expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /unknown_topic_or_part/)
+      end
+    end
+
+    context 'when describing multiple existing topics' do
+      let(:resources) do
+        [
+          { resource_type: 2, resource_name: 'example_topic' },
+          { resource_type: 2, resource_name: topic_name }
+        ]
+      end
+
+      it do
+        expect(resources_results.size).to eq(2)
+        expect(resources_results.first.type).to eq(2)
+        expect(resources_results.first.name).to eq('example_topic')
+        expect(resources_results.last.type).to eq(2)
+        expect(resources_results.last.name).to eq(topic_name)
+      end
+    end
+
+    context 'when trying to describe an invalid resource type' do
+      let(:resources) { [{ resource_type: 0, resource_name: SecureRandom.uuid }] }
+
+      it 'expect to raise error' do
+        expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /invalid_request/)
+      end
+    end
+
+    context 'when trying to describe an invalid broker' do
+      let(:resources) { [{ resource_type: 4, resource_name: 'non-existing' }] }
+
+      it 'expect to raise error' do
+        expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /invalid_arg/)
+      end
+    end
+
+    context 'when trying to describe a valid broker' do
+      let(:resources) { [{ resource_type: 4, resource_name: '1' }] }
+
+      it do
+        expect(resources_results.size).to eq(1)
+        expect(resources_results.first.type).to eq(4)
+        expect(resources_results.first.name).to eq('1')
+        expect(resources_results.first.configs.size).to be > 230
+        expect(resources_results.first.configs.first.name).to eq('log.cleaner.min.compaction.lag.ms')
+        expect(resources_results.first.configs.first.value).to eq('0')
+        expect(resources_results.first.configs.map(&:synonyms)).not_to be_empty
+      end
+    end
+
+    context 'when describing a valid broker with topics in one request' do
+      let(:resources) do
+        [
+          { resource_type: 4, resource_name: '1' },
+          { resource_type: 2, resource_name: topic_name }
+        ]
+      end
+
+      it do
+        expect(resources_results.size).to eq(2)
+        expect(resources_results.first.type).to eq(4)
+        expect(resources_results.first.name).to eq('1')
+        expect(resources_results.first.configs.size).to be > 230
+        expect(resources_results.first.configs.first.name).to eq('log.cleaner.min.compaction.lag.ms')
+        expect(resources_results.first.configs.first.value).to eq('0')
+        expect(resources_results.last.type).to eq(2)
+        expect(resources_results.last.name).to eq(topic_name)
+        expect(resources_results.last.configs.size).to be > 25
+        expect(resources_results.last.configs.first.name).to eq('compression.type')
+        expect(resources_results.last.configs.first.value).to eq('producer')
+      end
+    end
+  end
+
+  describe "incremental_alter_configs" do
+    subject(:resources_results) { admin.incremental_alter_configs(resources_with_configs).wait.resources }
+
+    before do
+      admin.create_topic(topic_name, 2, 1).wait
+      sleep(1)
+    end
+
+    context 'when altering one topic with one valid config via set' do
+      let(:target_retention) { (86400002 + rand(10_000)).to_s }
+      let(:resources_with_configs) do
+        [
+          {
+            resource_type: 2,
+            resource_name: topic_name,
+            configs: [
+              {
+                name: 'delete.retention.ms',
+                value: target_retention,
+                op_type: 0
+              }
+            ]
+          }
+        ]
+      end
+
+      it do
+        expect(resources_results.size).to eq(1)
+        expect(resources_results.first.type).to eq(2)
+        expect(resources_results.first.name).to eq(topic_name)
+
+        ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
+          config.name == 'delete.retention.ms'
+        end
+
+        expect(ret_config.value).to eq(target_retention)
+      end
+    end
+
+    context 'when altering one topic with one valid config via delete' do
+      let(:target_retention) { (8640002 + rand(10_000)).to_s }
+      let(:resources_with_configs) do
+        [
+          {
+            resource_type: 2,
+            resource_name: topic_name,
+            configs: [
+              {
+                name: 'delete.retention.ms',
+                value: target_retention,
+                op_type: 1
+              }
+            ]
+          }
+        ]
+      end
+
+      it do
+        expect(resources_results.size).to eq(1)
+        expect(resources_results.first.type).to eq(2)
+        expect(resources_results.first.name).to eq(topic_name)
+
+        ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
+          config.name == 'delete.retention.ms'
+        end
+
+        expect(ret_config.value).to eq('86400000')
+      end
+    end
+
+    context 'when altering one topic with one valid config via append' do
+      let(:target_policy) { 'compact' }
+      let(:resources_with_configs) do
+        [
+          {
+            resource_type: 2,
+            resource_name: topic_name,
+            configs: [
+              {
+                name: 'cleanup.policy',
+                value: target_policy,
+                op_type: 2
+              }
+            ]
+          }
+        ]
+      end
+
+      it do
+        expect(resources_results.size).to eq(1)
+        expect(resources_results.first.type).to eq(2)
+        expect(resources_results.first.name).to eq(topic_name)
+
+        ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
+          config.name == 'cleanup.policy'
+        end
+
+        expect(ret_config.value).to eq("delete,#{target_policy}")
+      end
+    end
+
+    context 'when altering one topic with one valid config via subtract' do
+      let(:target_policy) { 'delete' }
+      let(:resources_with_configs) do
+        [
+          {
+            resource_type: 2,
+            resource_name: topic_name,
+            configs: [
+              {
+                name: 'cleanup.policy',
+                value: target_policy,
+                op_type: 3
+              }
+            ]
+          }
+        ]
+      end
+
+      it do
+        expect(resources_results.size).to eq(1)
+        expect(resources_results.first.type).to eq(2)
+        expect(resources_results.first.name).to eq(topic_name)
+
+        ret_config = admin.describe_configs(resources_with_configs).wait.resources.first.configs.find do |config|
+          config.name == 'cleanup.policy'
+        end
+
+        expect(ret_config.value).to eq('')
+      end
+    end
+
+    context 'when altering one topic with an invalid config' do
+      let(:target_retention) { '-10' }
+      let(:resources_with_configs) do
+        [
+          {
+            resource_type: 2,
+            resource_name: topic_name,
+            configs: [
+              {
+                name: 'delete.retention.ms',
+                value: target_retention,
+                op_type: 0
+              }
+            ]
+          }
+        ]
+      end
+
+      it 'expect to raise error' do
+        expect { resources_results }.to raise_error(Rdkafka::RdkafkaError, /invalid_config/)
+      end
+    end
+  end
+
   describe "#delete_topic" do
     describe "called with invalid input" do
       # https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L29
@@ -412,7 +689,10 @@ expect(ex.broker_message).to match(/Topic name.*is invalid: .* contains one or m
     end

     context 'when topic has less than the desired number of partitions' do
-      before { admin.create_topic(topic_name, 1, 1).wait }
+      before do
+        admin.create_topic(topic_name, 1, 1).wait
+        sleep(1)
+      end

       it 'expect to change number of partitions' do
         admin.create_partitions(topic_name, 10).wait
data/spec/rdkafka/producer_spec.rb CHANGED
@@ -31,6 +31,48 @@ describe Rdkafka::Producer do
   it { expect(producer.name).to include('rdkafka#producer-') }
   end

+  describe '#produce with topic config alterations' do
+    context 'when config is not valid' do
+      it 'expect to raise error' do
+        expect do
+          producer.produce(topic: 'test', payload: '', topic_config: { 'invalid': 'invalid' })
+        end.to raise_error(Rdkafka::Config::ConfigError)
+      end
+    end
+
+    context 'when config is valid' do
+      it 'expect not to raise error' do
+        expect do
+          producer.produce(topic: 'test', payload: '', topic_config: { 'acks': 1 }).wait
+        end.not_to raise_error
+      end
+
+      context 'when alteration should change behavior' do
+        # This is set incorrectly for a reason
+        # If the alteration did not work, this would hang the spec suite
+        let(:producer) do
+          rdkafka_producer_config(
+            'message.timeout.ms': 1_000_000,
+            :"bootstrap.servers" => "localhost:9094",
+          ).producer
+        end
+
+        it 'expect to give up on delivery fast based on the altered config' do
+          expect do
+            producer.produce(
+              topic: 'produce_config_test',
+              payload: 'test',
+              topic_config: {
+                'compression.type': 'gzip',
+                'message.timeout.ms': 1
+              }
+            ).wait
+          end.to raise_error(Rdkafka::RdkafkaError, /msg_timed_out/)
+        end
+      end
+    end
+  end
+
   context "delivery callback" do
     context "with a proc/lambda" do
       it "should set the callback" do
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.16.0.beta1
+  version: 0.16.0.rc1
 platform: ruby
 authors:
 - Thijs Cadier
@@ -36,7 +36,7 @@ cert_chain:
   AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
   msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
   -----END CERTIFICATE-----
-date: 2024-04-29 00:00:00.000000000 Z
+date: 2024-05-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi
@@ -193,6 +193,8 @@ files:
 - lib/rdkafka/abstract_handle.rb
 - lib/rdkafka/admin.rb
 - lib/rdkafka/admin/acl_binding_result.rb
+- lib/rdkafka/admin/config_binding_result.rb
+- lib/rdkafka/admin/config_resource_binding_result.rb
 - lib/rdkafka/admin/create_acl_handle.rb
 - lib/rdkafka/admin/create_acl_report.rb
 - lib/rdkafka/admin/create_partitions_handle.rb
@@ -207,6 +209,10 @@ files:
 - lib/rdkafka/admin/delete_topic_report.rb
 - lib/rdkafka/admin/describe_acl_handle.rb
 - lib/rdkafka/admin/describe_acl_report.rb
+- lib/rdkafka/admin/describe_configs_handle.rb
+- lib/rdkafka/admin/describe_configs_report.rb
+- lib/rdkafka/admin/incremental_alter_configs_handle.rb
+- lib/rdkafka/admin/incremental_alter_configs_report.rb
 - lib/rdkafka/bindings.rb
 - lib/rdkafka/callbacks.rb
 - lib/rdkafka/config.rb
@@ -272,7 +278,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: '2.7'
+      version: '3.0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
metadata.gz.sig CHANGED
Binary file