kafka 0.5.0

Files changed (56)
  1. checksums.yaml +7 -0
  2. data/.gitignore +14 -0
  3. data/.rubocop.yml +210 -0
  4. data/.travis.yml +45 -0
  5. data/CHANGELOG.md +3 -0
  6. data/CODE_OF_CONDUCT.md +74 -0
  7. data/Gemfile +5 -0
  8. data/LICENSE.txt +21 -0
  9. data/README.md +182 -0
  10. data/Rakefile +69 -0
  11. data/examples/consumer.rb +55 -0
  12. data/examples/producer.rb +46 -0
  13. data/ext/Rakefile +69 -0
  14. data/kafka.gemspec +39 -0
  15. data/lib/kafka/admin.rb +141 -0
  16. data/lib/kafka/config.rb +145 -0
  17. data/lib/kafka/consumer.rb +87 -0
  18. data/lib/kafka/error.rb +44 -0
  19. data/lib/kafka/ffi/admin/admin_options.rb +121 -0
  20. data/lib/kafka/ffi/admin/config_entry.rb +97 -0
  21. data/lib/kafka/ffi/admin/config_resource.rb +101 -0
  22. data/lib/kafka/ffi/admin/delete_topic.rb +19 -0
  23. data/lib/kafka/ffi/admin/new_partitions.rb +77 -0
  24. data/lib/kafka/ffi/admin/new_topic.rb +91 -0
  25. data/lib/kafka/ffi/admin/result.rb +66 -0
  26. data/lib/kafka/ffi/admin/topic_result.rb +32 -0
  27. data/lib/kafka/ffi/admin.rb +16 -0
  28. data/lib/kafka/ffi/broker_metadata.rb +32 -0
  29. data/lib/kafka/ffi/client.rb +640 -0
  30. data/lib/kafka/ffi/config.rb +382 -0
  31. data/lib/kafka/ffi/consumer.rb +342 -0
  32. data/lib/kafka/ffi/error.rb +25 -0
  33. data/lib/kafka/ffi/event.rb +215 -0
  34. data/lib/kafka/ffi/group_info.rb +75 -0
  35. data/lib/kafka/ffi/group_list.rb +27 -0
  36. data/lib/kafka/ffi/group_member_info.rb +52 -0
  37. data/lib/kafka/ffi/message/header.rb +205 -0
  38. data/lib/kafka/ffi/message.rb +205 -0
  39. data/lib/kafka/ffi/metadata.rb +58 -0
  40. data/lib/kafka/ffi/opaque.rb +81 -0
  41. data/lib/kafka/ffi/opaque_pointer.rb +73 -0
  42. data/lib/kafka/ffi/partition_metadata.rb +61 -0
  43. data/lib/kafka/ffi/producer.rb +144 -0
  44. data/lib/kafka/ffi/queue.rb +65 -0
  45. data/lib/kafka/ffi/topic.rb +32 -0
  46. data/lib/kafka/ffi/topic_config.rb +126 -0
  47. data/lib/kafka/ffi/topic_metadata.rb +42 -0
  48. data/lib/kafka/ffi/topic_partition.rb +43 -0
  49. data/lib/kafka/ffi/topic_partition_list.rb +167 -0
  50. data/lib/kafka/ffi.rb +624 -0
  51. data/lib/kafka/poller.rb +28 -0
  52. data/lib/kafka/producer/delivery_report.rb +120 -0
  53. data/lib/kafka/producer.rb +127 -0
  54. data/lib/kafka/version.rb +8 -0
  55. data/lib/kafka.rb +11 -0
  56. metadata +159 -0
data/examples/producer.rb ADDED
@@ -0,0 +1,46 @@
+ # frozen_string_literal: true
+
+ $LOAD_PATH.unshift File.expand_path("../lib", __dir__)
+
+ require "kafka"
+ require "securerandom"
+
+ config = Kafka::Config.new({
+   "bootstrap.servers": "127.0.0.1:9092",
+ })
+
+ producer = Kafka::Producer.new(config)
+
+ # Initialize a topic with 8 partitions and 1 replica per partition. This is
+ # only for testing; a replication factor of 1 is not generally recommended
+ # for production.
+ admin = Kafka::Admin.new(config)
+ admin.create_topic("ruby_test_topic", 8, 1)
+ admin.close
+
+ @run = true
+ trap("INT")  { @run = false }
+ trap("TERM") { @run = false }
+
+ # Create several threads to publish messages to the topic. Producers are
+ # thread safe and can be accessed from multiple threads.
+ workers = 8.times.map do |i|
+   Thread.new do
+     while @run
+       producer.produce("ruby_test_topic", "#{i}: #{SecureRandom.uuid}") do |report|
+         # Wait for delivery confirmation from the cluster
+         report.wait
+         puts report.inspect
+       end
+
+       sleep(SecureRandom.rand % 0.2)
+     end
+   end
+ end
+
+ # Wait for all worker threads to finish
+ workers.each(&:join)
+
+ # Gracefully close the producer, flushing any remaining messages and
+ # processing any remaining callbacks.
+ producer.close
data/ext/Rakefile ADDED
@@ -0,0 +1,69 @@
+ # frozen_string_literal: true
+
+ require_relative "../lib/kafka/version"
+
+ require "mini_portile2"
+ require "fileutils"
+
+ desc "Compile librdkafka v#{Kafka::LIBRDKAFKA_VERSION}"
+ task :default do
+   Rake::Task["build:release"].invoke(Kafka::LIBRDKAFKA_VERSION, Kafka::LIBRDKAFKA_CHECKSUM)
+ end
+
+ desc "Clean up and remove build artifacts"
+ task :clean do
+   Dir.chdir(__dir__) do
+     FileUtils.rm_rf([
+       "tmp",
+       "ports",
+       Dir.glob("librdkafka.*"),
+     ])
+   end
+ end
+
+ namespace :build do
+   # Build librdkafka and store the library into ext/
+   #
+   # NOTE: To speed up the build try:
+   #   MAKE="make -j${nproc}" rake
+   def build(version:, checksum: nil)
+     recipe = MiniPortile.new("librdkafka", version)
+     recipe.files << {
+       url: "https://github.com/edenhill/librdkafka/archive/#{version}.tar.gz",
+       sha256: checksum,
+     }
+     recipe.configure_options = ["--host=#{recipe.host}"]
+     recipe.cook
+
+     ext = recipe.host.include?("darwin") ? "dylib" : "so"
+     lib = File.join(recipe.path, "lib", "librdkafka.#{ext}")
+
+     # cp will copy the content, following any symlinks
+     FileUtils.cp(lib, __dir__)
+
+     # Remove build directories
+     FileUtils.rm_rf([
+       File.join(__dir__, "tmp"),
+       File.join(__dir__, "ports"),
+     ])
+   end
+
+   desc "Download and build an official release of librdkafka"
+   task :release, [:version, :checksum] do |_task, args|
+     version = args[:version]
+     checksum = args[:checksum]
+
+     raise ArgumentError, "version is required" if version.nil?
+     raise ArgumentError, "checksum is required" if checksum.nil?
+
+     # Prefix the version string to look like "v1.3.0" so the URL is correct.
+     version = version.start_with?("v") ? version : "v#{version}"
+
+     build(version: version, checksum: checksum)
+   end
+
+   desc "Build librdkafka at the given git sha or tag"
+   task :git, [:ref, :checksum] do |_task, args|
+     build(version: args[:ref], checksum: args[:checksum])
+   end
+ end
data/kafka.gemspec ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+
+ require_relative "lib/kafka/version"
+
+ Gem::Specification.new do |spec|
+   spec.name = "kafka"
+   spec.version = Kafka::VERSION
+   spec.authors = ["Chris Gaffney"]
+   spec.email = ["gaffneyc@gmail.com"]
+
+   spec.summary = "Kafka client bindings to librdkafka"
+   spec.description = <<~DESCRIPTION
+     Kafka provides bindings to librdkafka as well as a default producer and
+     consumer implementation.
+   DESCRIPTION
+
+   spec.homepage = "http://github.com/deadmanssnitch/kafka"
+   spec.license = "MIT"
+   spec.required_ruby_version = Gem::Requirement.new(">= 2.5.0")
+
+   spec.metadata["homepage_uri"] = spec.homepage
+   spec.metadata["source_code_uri"] = spec.homepage
+   spec.metadata["changelog_uri"] = "https://github.com/deadmanssnitch/kafka/blob/master/CHANGELOG.md"
+
+   # Specify which files should be added to the gem when it is released.
+   # `git ls-files -z` lists the files that have been added to git so they
+   # can be packaged into the gem.
+   spec.files = Dir.chdir(__dir__) do
+     `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
+   end
+
+   spec.require_paths = ["lib"]
+   spec.extensions = %w[ext/Rakefile]
+
+   spec.add_dependency "ffi"
+   spec.add_dependency "mini_portile2"
+
+   spec.add_development_dependency "rake"
+   spec.add_development_dependency "rspec"
+ end
data/lib/kafka/admin.rb ADDED
@@ -0,0 +1,141 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+ require "kafka/ffi"
+
+ module Kafka
+   # Admin provides a client for accessing the librdkafka Admin API to make
+   # changes to the cluster. The API provides ways to create topics, delete
+   # topics, add new partitions to a topic, and manage configs.
+   class Admin
+     # Create a new Admin client for accessing the librdkafka Admin API.
+     #
+     # @param config [Kafka::Config] Cluster config
+     def initialize(config = nil)
+       # Wrap a Producer since it appears to allocate the fewest resources.
+       @client = ::Kafka::FFI::Producer.new(config)
+     end
+
+     # Create a topic with the given name, number of partitions, and number
+     # of replicas per partition (replication factor). The total number of
+     # partition replicas will be partitions x replication_factor.
+     #
+     # @param name [String] Name of the topic to create
+     # @param partitions [Integer] Number of partitions the topic will have
+     # @param replication_factor [Integer] Number of replicas per partition
+     #   to have in the cluster.
+     #
+     # @param wait [Boolean] Wait up to timeout milliseconds for topic
+     #   creation to propagate to the cluster before returning.
+     # @param validate [Boolean] Only validate the request
+     # @param timeout [Integer] Time to wait in milliseconds for each
+     #   operation to complete. Total request execution time may be longer
+     #   than timeout due to multiple operations being done. Defaults to the
+     #   `socket.timeout.ms` config setting.
+     #
+     # @return [nil] Create timed out
+     # @return [TopicResult] Response from the cluster with details about
+     #   whether the topic was created or any errors.
+     def create_topic(name, partitions, replication_factor, wait: true, validate: false, timeout: nil)
+       req = ::Kafka::FFI::Admin::NewTopic.new(name, partitions, replication_factor)
+       opts = new_options(:create_topics, wait: wait, validate: validate, timeout: timeout)
+
+       res = @client.create_topics(req, options: opts)
+       if res
+         res[0]
+       end
+     ensure
+       opts.destroy
+       req.destroy
+     end
+
+     # Delete the topic with the given name
+     #
+     # @param name [String] Name of the topic to delete
+     #
+     # @param wait [Boolean] Wait up to timeout milliseconds for the topic
+     #   deletion to propagate to the cluster before returning.
+     # @param validate [Boolean] Only validate the request
+     # @param timeout [Integer] Time to wait in milliseconds for each
+     #   operation to complete. Total request execution time may be longer
+     #   than timeout due to multiple operations being done. Defaults to the
+     #   `socket.timeout.ms` config setting.
+     #
+     # @return [nil] Delete timed out
+     # @return [TopicResult] Response from the cluster with details about the
+     #   deletion or any errors.
+     def delete_topic(name, wait: true, validate: false, timeout: nil)
+       req = ::Kafka::FFI::Admin::DeleteTopic.new(name)
+       opts = new_options(:create_topics, wait: wait, validate: validate, timeout: timeout)
+
+       res = @client.delete_topics(req, options: opts)
+       if res
+         res[0]
+       end
+     ensure
+       opts.destroy
+       req.destroy
+     end
+
+     # Get current config settings for the resource.
+     #
+     # @example Get configuration for a topic
+     #   describe_config(:topic, "events")
+     #
+     # @param type [:broker, :topic, :group] Type of resource
+     # @param name [String] Name of the resource
+     #
+     # @return [ConfigResource]
+     def describe_config(type, name, wait: true, validate: false, timeout: nil)
+       req = ::Kafka::FFI::Admin::ConfigResource.new(type, name)
+       opts = new_options(:create_topics, wait: wait, validate: validate, timeout: timeout)
+
+       res = @client.describe_configs(req, options: opts)
+       if res
+         res[0]
+       end
+     ensure
+       opts.destroy
+       req.destroy
+     end
+
+     # Retrieve metadata for the cluster
+     #
+     # @see Kafka::FFI::Client#metadata
+     #
+     # @return [Metadata]
+     def metadata(local_only: false, topic: nil, timeout: 1000)
+       @client.metadata(local_only: local_only, topic: topic, timeout: timeout)
+     end
+
+     # Destroy the Client, releasing all used resources back to the system.
+     # It is the application's responsibility to call #destroy when done with
+     # the client.
+     def destroy
+       @client.destroy
+     end
+     alias close destroy
+
+     private
+
+     def new_options(api, wait: false, timeout: nil, validate: false)
+       options = ::Kafka::FFI::Admin::AdminOptions.new(@client, api)
+
+       # Request timeout defaults to socket.timeout.ms unless set. We use the
+       # timeout for both the request timeout and the operation timeout when
+       # not set. It simplifies the API even if it is a bad assumption.
+       if timeout.nil?
+         timeout = @client.config.get("socket.timeout.ms").to_i
+       end
+
+       options.set_request_timeout(timeout)
+       options.set_validate_only(validate)
+
+       if wait
+         options.set_operation_timeout(timeout)
+       end
+
+       options
+     end
+   end
+ end
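To illustrate the API above, here is a hedged sketch of creating and then inspecting a topic with Kafka::Admin; the broker address and topic name are illustrative assumptions:

```ruby
require "kafka"

config = Kafka::Config.new({ "bootstrap.servers": "127.0.0.1:9092" })
admin  = Kafka::Admin.new(config)

begin
  # Create a topic with 4 partitions and a replication factor of 1, waiting
  # (the default) for the creation to propagate to the cluster.
  result = admin.create_topic("example_topic", 4, 1)
  puts result.inspect if result # nil means the request timed out

  # Fetch the topic's current broker-side configuration.
  resource = admin.describe_config(:topic, "example_topic")
  puts resource.inspect if resource
ensure
  # The application is responsible for releasing the client's resources.
  admin.close
end
```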
data/lib/kafka/config.rb ADDED
@@ -0,0 +1,145 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   class Config
+     # Create a new Config for initializing a Kafka Consumer or Producer.
+     # This config is reusable and can be used to configure multiple
+     # Consumers or Producers.
+     #
+     # @see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+     #
+     # @param opts [Hash{[String, Symbol] => [String, Integer, nil, Boolean]}]
+     #
+     # @raise [TypeError] Value was not of the correct type
+     def initialize(opts = {})
+       @opts = {}
+       @callbacks = {}
+
+       # Use #set to rekey the options as strings and type check the value.
+       opts.each_pair do |key, val|
+         set(key, val)
+       end
+     end
+
+     # Retrieve the configured value for the key.
+     #
+     # @return [nil] Value is not set
+     # @return Configured value for the given key
+     def get(key)
+       @opts[key.to_s]
+     end
+
+     # Set configuration option `key` to `val`.
+     #
+     # @see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+     #
+     # @param key [#to_s] Configuration option
+     # @param val [String, Integer, Boolean, nil]
+     #
+     # @raise [TypeError] Value was not of the correct type
+     def set(key, val)
+       key = key.to_s
+
+       @opts[key] =
+         case val
+         when String, Integer, true, false, nil
+           val
+         else
+           raise TypeError, "#{key}'s value must be a String, Integer, true, false, or nil"
+         end
+
+       nil
+     end
+
+     # Callback for the delivery status of a message published to the Kafka
+     # cluster.
+     #
+     # @note Producer only
+     #
+     # @see Kafka::FFI::Config#set_dr_msg_cb
+     def on_delivery_report(&block)
+       @callbacks[:delivery_report] = block
+     end
+
+     # @note Consumer only
+     #
+     # @see Kafka::FFI::Config#set_consume_cb
+     def on_consume(&block)
+       @callbacks[:consume] = block
+     end
+
+     # Callback for the result of automatic or manual offset commits.
+     #
+     # @note Consumer only
+     #
+     # @see Kafka::FFI::Config#set_offset_commit_cb
+     def on_offset_commit(&block)
+       @callbacks[:offset_commit] = block
+     end
+
+     # Callback for errors from the cluster. Most errors are informational
+     # and should be ignored as librdkafka will attempt to recover. However,
+     # fatal errors can be reported which should cause the system to
+     # gracefully shut down.
+     #
+     # @see Kafka::FFI::Config#set_error_cb
+     def on_error(&block)
+       @callbacks[:error] = block
+     end
+
+     # Callback for when brokers throttle a client
+     #
+     # @see Kafka::FFI::Config#set_throttle_cb
+     def on_throttle(&block)
+       @callbacks[:throttle] = block
+     end
+
+     # Callback for log messages
+     #
+     # @see Kafka::FFI::Config#set_log_cb
+     def on_log(&block)
+       @callbacks[:log] = block
+     end
+
+     # Callback for connection stats
+     #
+     # @see Kafka::FFI::Config#set_stats_cb
+     def on_stats(&block)
+       @callbacks[:stats] = block
+     end
+
+     # Allocate and configure a new Kafka::FFI::Config that mirrors this
+     # Config. The returned Kafka::FFI::Config should either be passed to
+     # initialize a new Client or eventually be destroyed. Once passed to a
+     # Client, the Config is owned by the Client and should not be modified
+     # or destroyed.
+     #
+     # @return [Kafka::FFI::Config]
+     def to_ffi
+       conf = Kafka::FFI.rd_kafka_conf_new
+
+       @opts.each do |name, value|
+         conf.set(name, value)
+       end
+
+       # Omitted callbacks:
+       #   - background_event - Requires lower level usage
+       #   - rebalance - Requires knowing the rebalance semantics
+       #   - all socket - Unknown need at this level
+       #   - ssl_cert_verify - Currently not needed
+       #   - oauthbearer_token_refresh - Unable to test
+       @callbacks.each do |name, callback|
+         case name
+         when :delivery_report then conf.set_dr_msg_cb(&callback)
+         when :consume         then conf.set_consume_cb(&callback)
+         when :offset_commit   then conf.set_offset_commit_cb(&callback)
+         when :error           then conf.set_error_cb(&callback)
+         when :throttle        then conf.set_throttle_cb(&callback)
+         when :log             then conf.set_log_cb(&callback)
+         when :stats           then conf.set_stats_cb(&callback)
+         end
+       end
+
+       conf
+     end
+   end
+ end
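As a usage sketch, callbacks are registered on the Config before it is handed to a client; the block arguments below are assumptions (see the corresponding Kafka::FFI::Config setters for the authoritative signatures), and the option values are illustrative:

```ruby
require "kafka"

config = Kafka::Config.new({
  "bootstrap.servers": "127.0.0.1:9092",
  "statistics.interval.ms": 5_000, # emit periodic stats so on_stats fires
})

# Most errors are informational and librdkafka recovers on its own, so only
# treat fatal errors as a reason to shut down.
config.on_error do |error|
  warn "kafka error: #{error}"
end

# librdkafka delivers statistics as a JSON document.
config.on_stats do |stats|
  puts stats
end

# The callbacks are copied onto the Kafka::FFI::Config built by #to_ffi and
# take effect once a Producer or Consumer is created from this config.
producer = Kafka::Producer.new(config)
```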
data/lib/kafka/consumer.rb ADDED
@@ -0,0 +1,87 @@
+ # frozen_string_literal: true
+
+ require "kafka/poller"
+
+ module Kafka
+   class Consumer
+     # Returns the backing Kafka::FFI::Consumer.
+     #
+     # @DANGER Using the backing Consumer means being aware of memory
+     #   management and could leave the consumer in a bad state. Make sure
+     #   you know what you're doing.
+     #
+     # @return [Kafka::FFI::Consumer]
+     attr_reader :client
+
+     # @param config [Kafka::Config]
+     def initialize(config)
+       # Initialize the client
+       @client = Kafka::FFI::Consumer.new(config)
+
+       # Event loop polling for events so callbacks are fired.
+       @poller = Poller.new(@client)
+     end
+
+     # Subscribe the consumer to the given list of topics. Once the
+     # subscriptions have become active and partitions assigned, calls to
+     # #poll will yield messages for the subscribed topics.
+     #
+     # subscribe will _set_ the list of subscriptions, removing any that are
+     # not included in the most recent call.
+     #
+     # @param topic [String, Array<String>] Topics to subscribe to
+     def subscribe(topic, *rest)
+       @client.subscribe(topic, *rest)
+     end
+
+     # Retrieves the set of topic + partition assignments for the consumer.
+     #
+     # @example
+     #   consumer.assignments # => { "topic" => [1, 2, 3] }
+     #
+     # @return [Hash{String => Array<Integer>}] List of partition assignments
+     #   keyed by the topic name.
+     def assignments
+       @client.assignment
+     end
+
+     # Poll the consumer for waiting messages.
+     #
+     # @param timeout [Integer] Time to wait in milliseconds for a message to
+     #   be available.
+     def poll(timeout: 250, &block)
+       @client.consumer_poll(timeout, &block)
+     end
+
+     # @param msg [Consumer::Message]
+     def commit(msg, async: false)
+       list = Kafka::FFI::TopicPartitionList.new(1)
+
+       list.add(msg.topic, msg.partition)
+       list.set_offset(msg.topic, msg.partition, msg.offset + 1)
+
+       @client.commit(list, async)
+     ensure
+       list.destroy
+     end
+
+     # Gracefully shut down the consumer and its connections.
+     #
+     # @note After calling #close it is unsafe to call any other method on
+     #   the Consumer.
+     def close
+       # @see https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md#high-level-kafkaconsumer
+       @poller.stop
+
+       # Gracefully shut down the consumer, leaving the consumer group,
+       # committing any remaining offsets, and releasing resources back to
+       # the system.
+       #
+       # This will effectively call #close on the Client automatically.
+       # Trying to follow the documentation and calling #close before
+       # #destroy caused warnings due to brokers disconnecting, but just
+       # calling #destroy fixes that.
+       @client.destroy
+     end
+   end
+ end
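A hedged end-to-end sketch of the consumer above; the group id and topic are illustrative, and the yielded message is assumed to respond to #topic, #partition, and #offset, as #commit requires:

```ruby
require "kafka"

config = Kafka::Config.new({
  "bootstrap.servers": "127.0.0.1:9092",
  "group.id": "example-group",
})

consumer = Kafka::Consumer.new(config)
consumer.subscribe("ruby_test_topic")

@run = true
trap("INT") { @run = false }

while @run
  # Block up to 250ms waiting for a message on any subscribed topic.
  consumer.poll(timeout: 250) do |message|
    puts message.inspect

    # Synchronously commit the offset directly after this message.
    consumer.commit(message, async: false)
  end
end

# Leave the group, commit remaining offsets, and release resources.
consumer.close
```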
data/lib/kafka/error.rb ADDED
@@ -0,0 +1,44 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   class Error < StandardError; end
+
+   # ::Kafka::ResponseError is an Error that can be raised based on an
+   # :error_code as returned from the librdkafka API.
+   #
+   # @see rdkafka.h RD_KAFKA_RESP_ERR_*
+   # @see rdkafka.h rd_kafka_resp_err_t
+   class ::Kafka::ResponseError < Error
+     # @attr code [Integer] Error code as defined by librdkafka.
+     attr_reader :code
+
+     def initialize(code, message = nil)
+       @code = code
+       @message = message
+     end
+
+     # Returns the librdkafka error constant for this error.
+     # @return [String]
+     def name
+       "RD_KAFKA_RESP_ERR_#{::Kafka::FFI.rd_kafka_err2name(@code)}"
+     end
+
+     # Returns true when the error originated inside librdkafka (e.g. a local
+     # timeout) or false when the error was received from a broker.
+     #
+     # @see https://github.com/edenhill/librdkafka/blob/4818ecadee/src/rdkafka.h#L245
+     #
+     # @return [true] Error was internal to librdkafka
+     # @return [false] Error was returned by the cluster
+     def internal?
+       code < 0
+     end
+
+     # Returns a human readable error description
+     #
+     # @return [String] Human readable description of the error.
+     def to_s
+       @message || ::Kafka::FFI.rd_kafka_err2str(@code)
+     end
+   end
+ end
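For example, a caller might branch on #internal? to distinguish local failures from broker responses; a small hedged sketch (the admin call is illustrative):

```ruby
begin
  admin.create_topic("example_topic", 4, 1)
rescue Kafka::ResponseError => e
  if e.internal?
    # Negative codes (RD_KAFKA_RESP_ERR__*) are generated locally by
    # librdkafka, e.g. timeouts or transport failures.
    warn "local error #{e.name}: #{e}"
  else
    # Non-negative codes came back from the cluster.
    warn "broker error #{e.name}: #{e}"
  end
end
```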
data/lib/kafka/ffi/admin/admin_options.rb ADDED
@@ -0,0 +1,121 @@
+ # frozen_string_literal: true
+
+ require "kafka/ffi/opaque_pointer"
+
+ module Kafka::FFI::Admin
+   class AdminOptions < ::Kafka::FFI::OpaquePointer
+     def self.new(client, api)
+       ::Kafka::FFI.rd_kafka_AdminOptions_new(client, api)
+     end
+
+     # rubocop:disable Naming/AccessorMethodName
+
+     # Sets the overall request timeout, which includes broker lookup,
+     # request transmission, operation time, and response processing.
+     #
+     # Valid for all admin requests.
+     #
+     # @note The default request timeout is the `socket.timeout.ms` config
+     #   option.
+     #
+     # @param timeout [-1] Wait indefinitely for the request to finish
+     # @param timeout [Integer] Time to wait in milliseconds for the request
+     #   to be processed.
+     #
+     # @raise [ResponseError<RD_KAFKA_RESP_ERR__INVALID_ARG>] Timeout was out
+     #   of range.
+     def set_request_timeout(timeout)
+       error = ::FFI::MemoryPointer.new(:char, 512)
+
+       resp = ::Kafka::FFI.rd_kafka_AdminOptions_set_request_timeout(self, timeout, error, error.size)
+       if resp != :ok
+         raise ::Kafka::ResponseError.new(resp, error.read_string)
+       end
+
+       nil
+     ensure
+       error.free
+     end
+
+     # Set the broker's operation wait timeout for the request to be
+     # processed by the cluster.
+     #
+     # Only valid for :create_topics, :delete_topics, and :create_partitions
+     # operations.
+     #
+     # @param timeout [-1, 0] Return immediately after starting the
+     #   operation.
+     # @param timeout [Integer] Max time to wait in milliseconds for the
+     #   operation to propagate to the cluster.
+     #
+     # @raise [ResponseError<RD_KAFKA_RESP_ERR__INVALID_ARG>] Timeout was out
+     #   of range.
+     def set_operation_timeout(timeout)
+       error = ::FFI::MemoryPointer.new(:char, 512)
+
+       resp = ::Kafka::FFI.rd_kafka_AdminOptions_set_operation_timeout(self, timeout, error, error.size)
+       if resp != :ok
+         raise ::Kafka::ResponseError.new(resp, error.read_string)
+       end
+
+       nil
+     ensure
+       error.free
+     end
+
+     # Tell the broker to only validate the request without actually
+     # performing the operation.
+     #
+     # Only valid for :create_topics, :delete_topics, and :create_partitions
+     # operations.
+     #
+     # @param on [Boolean] True to validate the request without performing
+     #   it.
+     #
+     # @raise [Kafka::ResponseError]
+     def set_validate_only(on)
+       error = ::FFI::MemoryPointer.new(:char, 512)
+
+       resp = ::Kafka::FFI.rd_kafka_AdminOptions_set_validate_only(self, on, error, error.size)
+       if resp != :ok
+         raise ::Kafka::ResponseError.new(resp, error.read_string)
+       end
+
+       nil
+     ensure
+       error.free
+     end
+
+     # Override which broker the Admin request will be sent to. By default,
+     # requests are sent to the controller broker, with a couple of
+     # exceptions (see librdkafka).
+     #
+     # @note This API should typically not be used and primarily serves as a
+     #   workaround in some cases.
+     #
+     # @see rdkafka.h rd_kafka_AdminOptions_set_broker
+     #
+     # @param broker_id [Integer] ID of the Broker to receive the request.
+     def set_broker(broker_id)
+       error = ::FFI::MemoryPointer.new(:char, 512)
+
+       resp = ::Kafka::FFI.rd_kafka_AdminOptions_set_broker(self, broker_id, error, error.size)
+       if resp != :ok
+         raise ::Kafka::ResponseError.new(resp, error.read_string)
+       end
+
+       nil
+     ensure
+       error.free
+     end
+
+     # rubocop:enable Naming/AccessorMethodName
+
+     # Ruby-like aliases for librdkafka functions
+     alias request_timeout= set_request_timeout
+     alias operation_timeout= set_operation_timeout
+     alias validate_only= set_validate_only
+     alias broker= set_broker
+
+     def destroy
+       ::Kafka::FFI.rd_kafka_AdminOptions_destroy(self)
+     end
+   end
+ end
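Mirroring Kafka::Admin#new_options above, a hedged sketch of driving AdminOptions directly at the FFI layer; `client` is assumed to be an existing Kafka::FFI::Client, `new_topic` a Kafka::FFI::Admin::NewTopic request, and the timeout values are illustrative:

```ruby
options = Kafka::FFI::Admin::AdminOptions.new(client, :create_topics)

begin
  options.request_timeout   = 5_000 # total request budget in milliseconds
  options.operation_timeout = 5_000 # wait for the change to propagate
  options.validate_only     = true  # dry run: validate without applying

  client.create_topics(new_topic, options: options)
ensure
  # AdminOptions are owned by the caller and must be destroyed.
  options.destroy
end
```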