karafka-rdkafka 0.12.4 → 0.13.0.beta1
This diff compares the contents of the two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +1 -1
- data/CHANGELOG.md +21 -2
- data/Gemfile +2 -0
- data/README.md +26 -0
- data/Rakefile +2 -0
- data/certs/cert_chain.pem +21 -21
- data/certs/karafka-pro.pem +11 -0
- data/ext/Rakefile +26 -53
- data/karafka-rdkafka.gemspec +2 -0
- data/lib/rdkafka/abstract_handle.rb +2 -0
- data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
- data/lib/rdkafka/admin/create_topic_report.rb +2 -0
- data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
- data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
- data/lib/rdkafka/admin.rb +95 -73
- data/lib/rdkafka/bindings.rb +52 -37
- data/lib/rdkafka/callbacks.rb +2 -0
- data/lib/rdkafka/config.rb +13 -10
- data/lib/rdkafka/consumer/headers.rb +24 -7
- data/lib/rdkafka/consumer/message.rb +3 -1
- data/lib/rdkafka/consumer/partition.rb +2 -0
- data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
- data/lib/rdkafka/consumer.rb +100 -44
- data/lib/rdkafka/error.rb +9 -0
- data/lib/rdkafka/metadata.rb +25 -2
- data/lib/rdkafka/native_kafka.rb +83 -0
- data/lib/rdkafka/producer/delivery_handle.rb +2 -0
- data/lib/rdkafka/producer/delivery_report.rb +3 -1
- data/lib/rdkafka/producer.rb +75 -12
- data/lib/rdkafka/version.rb +3 -1
- data/lib/rdkafka.rb +3 -1
- data/spec/rdkafka/abstract_handle_spec.rb +2 -0
- data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
- data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
- data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
- data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
- data/spec/rdkafka/admin_spec.rb +4 -3
- data/spec/rdkafka/bindings_spec.rb +2 -0
- data/spec/rdkafka/callbacks_spec.rb +2 -0
- data/spec/rdkafka/config_spec.rb +17 -2
- data/spec/rdkafka/consumer/headers_spec.rb +62 -0
- data/spec/rdkafka/consumer/message_spec.rb +2 -0
- data/spec/rdkafka/consumer/partition_spec.rb +2 -0
- data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
- data/spec/rdkafka/consumer_spec.rb +124 -22
- data/spec/rdkafka/error_spec.rb +2 -0
- data/spec/rdkafka/metadata_spec.rb +2 -0
- data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -34
- data/spec/rdkafka/producer/delivery_handle_spec.rb +2 -0
- data/spec/rdkafka/producer/delivery_report_spec.rb +4 -2
- data/spec/rdkafka/producer_spec.rb +118 -17
- data/spec/spec_helper.rb +17 -1
- data.tar.gz.sig +0 -0
- metadata +33 -33
- metadata.gz.sig +0 -0
- data/bin/console +0 -11
- data/dist/librdkafka_2.0.2.tar.gz +0 -0
- data/lib/rdkafka/producer/client.rb +0 -47
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: [old checksum not captured in this extract]
-  data.tar.gz: [old checksum not captured in this extract]
+  metadata.gz: 419dcef5e71a6578202fde7eacd628054ac77bae0c91c57845fd6dbd6ade6715
+  data.tar.gz: 05e7e1ef8e07c1b2dddf02f1e3f8e8e0da7dfcb75af41ccc2f9dcba2e30bba1f
 SHA512:
-  metadata.gz: [old checksum not captured in this extract]
-  data.tar.gz: [old checksum not captured in this extract]
+  metadata.gz: 4643afd749af09b9bd2c8d16ae25b989eb03feb39b12c966059956cad47966a8a74e6d0c356bfdd4115a3a419aebbab1d8421410e0bef64bd2f1611d66ba2ab6
+  data.tar.gz: 7f526e3bb375126862a84b7824e60872454366cfa7a2b5abb8d6e09f978423dc1e56e1c2a488f36f28ff622f942834b7d576d6fe91e4263cb5864165597c881f
checksums.yaml.gz.sig
CHANGED
Binary file
data/.github/workflows/ci.yml
CHANGED
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,22 @@
-[two removed lines; contents not captured in this extract]
+# 0.13.0
+* Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
+* Support both string and symbol header keys (ColinDKelley)
+* Handle tombstone messages properly (kgalieva)
+* Add topic name to delivery report (maeve)
+* Allow string partitioner config (mollyegibson)
+* Fix documented type for DeliveryReport#error (jimmydo)
+* Bump librdkafka to 2.0.2 (lmaia)
+* Use finalizers to cleanly exit producer and admin (thijsc)
+* Lock access to the native kafka client (thijsc)
+* Fix potential race condition in multi-threaded producer (mensfeld)
+* Fix leaking FFI resources in specs (mensfeld)
+* Improve specs stability (mensfeld)
+* Make metadata request timeout configurable (mensfeld)
+* call_on_partitions_assigned and call_on_partitions_revoked only get a tpl passed in (thijsc)
+* Support `#assignment_lost?` on a consumer to check for involuntary assignment revocation (mensfeld)
+* Expose `#name` on the consumer and producer (mensfeld)
+* Introduce producer partitions count metadata cache (mensfeld)
+* Retry metadata fetches on certain errors with a backoff (mensfeld)
 
 # 0.12.3
 - Include backtrace in non-raised binded errors.
@@ -14,6 +31,8 @@
 
 # 0.12.0
 * Bumps librdkafka to 1.9.0
+* Fix crash on empty partition key (mensfeld)
+* Pass the delivery handle to the callback (gvisokinskas)
 
 # 0.11.0
 * Upgrade librdkafka to 1.8.2
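To make a few of the 0.13.0 entries above more concrete, here is a minimal sketch of how they surface in application code: the new `#name` reader on clients and the topic name now carried by delivery reports. The broker address and topic are illustrative placeholders, and the exact accessor names (for example `topic_name` on the delivery report) should be checked against the released API.

```ruby
require "rdkafka"

# Illustrative configuration; "localhost:9092" and "example" are placeholders.
producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

# 0.13.0 exposes #name on the consumer and producer.
puts producer.name

# Delivery reports now include the topic name alongside partition and offset.
report = producer.produce(topic: "example", payload: "hello").wait
puts "#{report.topic_name}[#{report.partition}] @ #{report.offset}"

producer.close
```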
data/Gemfile
CHANGED
data/README.md
CHANGED
@@ -23,6 +23,19 @@ The most important pieces of a Kafka client are implemented. We're
 working towards feature completeness, you can track that here:
 https://github.com/appsignal/rdkafka-ruby/milestone/1
 
+## Table of contents
+
+- [Installation](#installation)
+- [Usage](#usage)
+  * [Consuming messages](#consuming-messages)
+  * [Producing messages](#producing-messages)
+- [Higher level libraries](#higher-level-libraries)
+  * [Message processing frameworks](#message-processing-frameworks)
+  * [Message publishing libraries](#message-publishing-libraries)
+- [Development](#development)
+- [Example](#example)
+
 ## Installation
 
 This gem downloads and compiles librdkafka when it is installed. If you
@@ -77,6 +90,19 @@ Note that creating a producer consumes some resources that will not be
 released until `#close` is explicitly called, so be sure to call
 `Config#producer` only as necessary.
 
+## Higher level libraries
+
+Currently, there are two actively developed frameworks based on rdkafka-ruby that provide a higher-level API for working with Kafka messages, and one library for publishing messages.
+
+### Message processing frameworks
+
+* [Karafka](https://github.com/karafka/karafka) - Ruby and Rails efficient Kafka processing framework.
+* [Racecar](https://github.com/zendesk/racecar) - A simple framework for Kafka consumers in Ruby
+
+### Message publishing libraries
+
+* [WaterDrop](https://github.com/karafka/waterdrop) – Standalone Karafka library for producing Kafka messages.
+
 ## Development
 
 A Docker Compose file is included to run Kafka and Zookeeper. To run
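The note in the hunk above about producer resources is easiest to see in code. Below is a minimal sketch of the "create once, close explicitly" pattern it recommends, assuming a long-lived process; the wrapper class, broker address, and topic name are illustrative.

```ruby
require "rdkafka"

class EventPublisher
  # Reuse a single producer instead of calling Config#producer per message.
  PRODUCER = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

  def self.publish(payload)
    PRODUCER.produce(topic: "events", payload: payload)
  end

  # Release the native resources the producer holds; they are not freed
  # until #close is called.
  def self.shutdown
    PRODUCER.close
  end
end
```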
data/Rakefile
CHANGED
data/certs/cert_chain.pem
CHANGED
@@ -1,26 +1,26 @@
 -----BEGIN CERTIFICATE-----
 MIIEcDCCAtigAwIBAgIBATANBgkqhkiG9w0BAQsFADA/MRAwDgYDVQQDDAdjb250
 YWN0MRcwFQYKCZImiZPyLGQBGRYHa2FyYWZrYTESMBAGCgmSJomT8ixkARkWAmlv
-[old line 4 not captured in this extract]
+MB4XDTIyMDgxOTE3MjEzN1oXDTIzMDgxOTE3MjEzN1owPzEQMA4GA1UEAwwHY29u
 dGFjdDEXMBUGCgmSJomT8ixkARkWB2thcmFma2ExEjAQBgoJkiaJk/IsZAEZFgJp
-[old lines 6-25 not captured in this extract]
+bzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAODzeO3L6lxdATzMHKNW
+jFA/GGunoPuylO/BMzy8RiQHh7VIvysAKs0tHhTx3g2D0STDpF+hcQcPELFikiT2
+F+1wOHj/SsrK7VKqfA8+gq04hKc5sQoX2Egf9k3V0YJ3eZ6R/koHkQ8A0TVt0w6F
+ZQckoV4MqnEAx0g/FZN3mnHTlJ3VFLSBqJEIe+S6FZMl92mSv+hTrlUG8VaYxSfN
+lTCvnKk284F6QZq5XIENLRmcDd/3aPBLnLwNnyMyhB+6gK8cUO+CFlDO5tjo/aBA
+rUnl++wGG0JooF1ed0v+evOn9KoMBG6rHewcf79qJbVOscbD8qSAmo+sCXtcFryr
+KRMTB8gNbowJkFRJDEe8tfRy11u1fYzFg/qNO82FJd62rKAw2wN0C29yCeQOPRb1
+Cw9Y4ZwK9VFNEcV9L+3pHTHn2XfuZHtDaG198VweiF6raFO4yiEYccodH/USP0L5
+cbcCFtmu/4HDSxL1ByQXO84A0ybJuk3/+aPUSXe9C9U8fwIDAQABo3cwdTAJBgNV
+HRMEAjAAMAsGA1UdDwQEAwIEsDAdBgNVHQ4EFgQUSlcEakb7gfn/5E2WY6z73BF/
+iZkwHQYDVR0RBBYwFIESY29udGFjdEBrYXJhZmthLmlvMB0GA1UdEgQWMBSBEmNv
+bnRhY3RAa2FyYWZrYS5pbzANBgkqhkiG9w0BAQsFAAOCAYEA1aS+E7RXJ1w9g9mJ
+G0NzFxe64OEuENosNlvYQCbRKGCXAU1qqelYkBQHseRgRKxLICrnypRo9IEobyHa
+vDnJ4r7Tsb34dleqQW2zY/obG+cia3Ym2JsegXWF7dDOzCXJ4FN8MFoT2jHlqLLw
+yrap0YO5zx0GSQ0Dwy8h2n2v2vanMEeCx7iNm3ERgR5WuN5sjzWoz2A/JLEEcK0C
+EnAGKCWAd1fuG8IemDjT1edsd5FyYR4bIX0m+99oDuFZyPiiIbalmyYiSBBp59Yb
+Q0P8zeBi4OfwCZNcxqz0KONmw9JLNv6DgyEAH5xe/4JzhMEgvIRiPj0pHfA7oqQF
+KUNqvD1KlxbEC+bZfE5IZhnqYLdld/Ksqd22FI1RBhiS1Ejfsj99LVIm9cBuZEY2
+Qf04B9ceLUaC4fPVEz10FyobjaFoY4i32xRto3XnrzeAgfEe4swLq8bQsR3w/EF3
+MGU0FeSV2Yj7Xc2x/7BzLK8xQn5l7Yy75iPF+KP3vVmDHnNl
 -----END CERTIFICATE-----
data/certs/karafka-pro.pem
ADDED
@@ -0,0 +1,11 @@
+-----BEGIN RSA PUBLIC KEY-----
+MIIBigKCAYEApcd6ybskiNs9WUvBGVUE8GdWDehjZ9TyjSj/fDl/UcMYqY0R5YX9
+tnYxEwZZRMdVltKWxr88Qmshh1IQz6CpJVbcfYjt/158pSGPm+AUua6tkLqIvZDM
+ocFOMafmroI+BMuL+Zu5QH7HC2tkT16jclGYfMQkJjXVUQTk2UZr+94+8RlUz/CH
+Y6hPA7xPgIyPfyPCxz1VWzAwXwT++NCJQPBr5MqT84LNSEzUSlR9pFNShf3UCUT+
+8LWOvjFSNGmMMSsbo2T7/+dz9/FM02YG00EO0x04qteggwcaEYLFrigDN6/fM0ih
+BXZILnMUqC/qrfW2YFg4ZqKZJuxaALqqkPxrkBDYqoqcAloqn36jBSke6tc/2I/J
+2Afq3r53UoAbUH7h5I/L8YeaiA4MYjAuq724lHlrOmIr4D6yjYC0a1LGlPjLk869
+2nsVXNgomhVb071E6amR+rJJnfvkdZgCmEBFnqnBV5A1u4qgNsa2rVcD+gJRvb2T
+aQtjlQWKPx5xAgMBAAE=
+-----END RSA PUBLIC KEY-----
data/ext/Rakefile
CHANGED
@@ -1,67 +1,40 @@
 # frozen_string_literal: true
 
 require File.expand_path('../../lib/rdkafka/version', __FILE__)
+require "mini_portile2"
 require "fileutils"
 require "open-uri"
 
 task :default => :clean do
-  #
-  [old line 9 not captured in this extract]
-  #
-  # Mini_portile causes an issue because it's dependencies are downloaded on the fly and therefore don't exist/aren't
-  # accessible in the nix environment
-  if ENV.fetch('RDKAFKA_EXT_PATH', '').empty?
-    # Download and compile librdkafka if RDKAFKA_EXT_PATH is not set
-    require "mini_portile2"
-    recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
+  # Download and compile librdkafka
+  recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
 
-    [old lines 18-24 not captured in this extract]
-    releases = File.expand_path(File.join(File.dirname(__FILE__), '../dist'))
-
-    recipe.files << {
-      :url => "file://#{releases}/librdkafka_#{Rdkafka::LIBRDKAFKA_VERSION}.tar.gz",
-      :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
-    }
-    recipe.configure_options = ["--host=#{recipe.host}"]
-
-    # Disable using libc regex engine in favor of the embedded one
-    # The default regex engine of librdkafka does not always work exactly as most of the users
-    # would expect, hence this flag allows for changing it to the other one
-    if ENV.key?('RDKAFKA_DISABLE_REGEX_EXT')
-      recipe.configure_options << '--disable-regex-ext'
-    end
+  # Use default homebrew openssl if we're on mac and the directory exists
+  # and each of flags is not empty
+  if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}")
+    ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV["CPPFLAGS"]
+    ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV["LDFLAGS"]
+  end
 
-    [old lines 40-49 not captured in this extract]
-    FileUtils.mv(lib_path, File.join(File.dirname(__FILE__), "librdkafka.#{to_extension}"))
-    # Cleanup files created by miniportile we don't need in the gem
-    FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
-    FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
+  recipe.files << {
+    :url => "https://codeload.github.com/edenhill/librdkafka/tar.gz/v#{Rdkafka::LIBRDKAFKA_VERSION}",
+    :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
+  }
+  recipe.configure_options = ["--host=#{recipe.host}"]
+  recipe.cook
+  # Move dynamic library we're interested in
+  if recipe.host.include?('darwin')
+    from_extension = '1.dylib'
+    to_extension = 'dylib'
   else
-    [old lines 55-56 not captured in this extract]
-    raise "RDKAFKA_EXT_PATH must be set in your nix config when running under nix"
-    end
-    files = [
-      File.join(ENV['RDKAFKA_EXT_PATH'], 'lib', 'librdkafka.dylib'),
-      File.join(ENV['RDKAFKA_EXT_PATH'], 'lib', 'librdkafka.so')
-    ]
-    files.each { |ext| FileUtils.cp(ext, File.dirname(__FILE__)) if File.exist?(ext) }
+    from_extension = 'so.1'
+    to_extension = 'so'
   end
+  lib_path = File.join(File.dirname(__FILE__), "ports/#{recipe.host}/librdkafka/#{Rdkafka::LIBRDKAFKA_VERSION}/lib/librdkafka.#{from_extension}")
+  FileUtils.mv(lib_path, File.join(File.dirname(__FILE__), "librdkafka.#{to_extension}"))
+  # Cleanup files created by miniportile we don't need in the gem
+  FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
+  FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
 end
 
 task :clean do
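One practical consequence of the rewritten task above: the CPPFLAGS/LDFLAGS assignments only apply when those variables are unset, so a build can be pointed at a specific OpenSSL by exporting them before the Rakefile runs. A hypothetical sketch, with an illustrative /opt/openssl path:

```ruby
# Set these in the environment before the ext Rakefile's default task runs;
# the task leaves pre-existing values untouched.
ENV["CPPFLAGS"] ||= "-I/opt/openssl/include"
ENV["LDFLAGS"]  ||= "-L/opt/openssl/lib"
```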
data/karafka-rdkafka.gemspec
CHANGED
data/lib/rdkafka/admin.rb
CHANGED
@@ -1,33 +1,31 @@
+# frozen_string_literal: true
+
+require "objspace"
+
 module Rdkafka
   class Admin
     # @private
     def initialize(native_kafka)
       @native_kafka = native_kafka
-      [old lines 6-12 not captured in this extract]
-          if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
-            break
-          end
-        end
-      end
-      @polling_thread.abort_on_exception = true
+
+      # Makes sure, that native kafka gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+    end
+
+    def finalizer
+      ->(_) { close }
     end
 
     # Close this admin instance
     def close
-      return
-      [old lines 24-29 not captured in this extract]
-      @native_kafka
+      return if closed?
+      ObjectSpace.undefine_finalizer(self)
+      @native_kafka.close
+    end
+
+    # Whether this admin has closed
+    def closed?
+      @native_kafka.closed?
     end
 
     # Create a topic with the given partition count and replication factor
@@ -38,6 +36,7 @@ module Rdkafka
     #
     # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
     def create_topic(topic_name, partition_count, replication_factor, topic_config={})
+      closed_admin_check(__method__)
 
       # Create a rd_kafka_NewTopic_t representing the new topic
       error_buffer = FFI::MemoryPointer.from_string(" " * 256)
@@ -68,7 +67,9 @@ module Rdkafka
       topics_array_ptr.write_array_of_pointer(pointer_array)
 
       # Get a pointer to the queue that our request will be enqueued on
-      queue_ptr =
+      queue_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+      end
       if queue_ptr.null?
         Rdkafka::Bindings.rd_kafka_NewTopic_destroy(new_topic_ptr)
         raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -79,17 +80,21 @@ module Rdkafka
       create_topic_handle[:pending] = true
       create_topic_handle[:response] = -1
       CreateTopicHandle.register(create_topic_handle)
-      admin_options_ptr =
+      admin_options_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
+      end
       Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_topic_handle.to_ptr)
 
       begin
-        [old lines 86-87 not captured in this extract]
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_CreateTopics(
+            inner,
             topics_array_ptr,
            1,
            admin_options_ptr,
            queue_ptr
-        [old line 92 not captured in this extract]
+          )
+        end
       rescue Exception
         CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
         raise
@@ -108,6 +113,7 @@ module Rdkafka
     #
     # @return [DeleteTopicHandle] Delete topic handle that can be used to wait for the result of deleting the topic
     def delete_topic(topic_name)
+      closed_admin_check(__method__)
 
       # Create a rd_kafka_DeleteTopic_t representing the topic to be deleted
       delete_topic_ptr = Rdkafka::Bindings.rd_kafka_DeleteTopic_new(FFI::MemoryPointer.from_string(topic_name))
@@ -118,7 +124,9 @@ module Rdkafka
       topics_array_ptr.write_array_of_pointer(pointer_array)
 
       # Get a pointer to the queue that our request will be enqueued on
-      queue_ptr =
+      queue_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+      end
       if queue_ptr.null?
         Rdkafka::Bindings.rd_kafka_DeleteTopic_destroy(delete_topic_ptr)
         raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -129,17 +137,21 @@ module Rdkafka
       delete_topic_handle[:pending] = true
       delete_topic_handle[:response] = -1
       DeleteTopicHandle.register(delete_topic_handle)
-      admin_options_ptr =
+      admin_options_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
+      end
       Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, delete_topic_handle.to_ptr)
 
       begin
-        [old lines 136-137 not captured in this extract]
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_DeleteTopics(
+            inner,
            topics_array_ptr,
            1,
            admin_options_ptr,
            queue_ptr
-        [old line 142 not captured in this extract]
+          )
+        end
       rescue Exception
         DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
         raise
@@ -163,54 +175,64 @@ module Rdkafka
     #
     # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
     def create_partitions(topic_name, partition_count)
-      [old lines 166-175 not captured in this extract]
+      closed_admin_check(__method__)
+
+      @native_kafka.with_inner do |inner|
+        error_buffer = FFI::MemoryPointer.from_string(" " * 256)
+        new_partitions_ptr = Rdkafka::Bindings.rd_kafka_NewPartitions_new(
+          FFI::MemoryPointer.from_string(topic_name),
+          partition_count,
+          error_buffer,
+          256
+        )
+        if new_partitions_ptr.null?
+          raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
+        end
 
-      [old lines 177-179 not captured in this extract]
+        pointer_array = [new_partitions_ptr]
+        topics_array_ptr = FFI::MemoryPointer.new(:pointer)
+        topics_array_ptr.write_array_of_pointer(pointer_array)
 
-      [old lines 181-186 not captured in this extract]
+        # Get a pointer to the queue that our request will be enqueued on
+        queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+        if queue_ptr.null?
+          Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
+          raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
+        end
 
-      [old lines 188-194 not captured in this extract]
+        # Create and register the handle we will return to the caller
+        create_partitions_handle = CreatePartitionsHandle.new
+        create_partitions_handle[:pending] = true
+        create_partitions_handle[:response] = -1
+        CreatePartitionsHandle.register(create_partitions_handle)
+        admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS)
+        Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_partitions_handle.to_ptr)
+
+        begin
+          Rdkafka::Bindings.rd_kafka_CreatePartitions(
+            inner,
+            topics_array_ptr,
+            1,
+            admin_options_ptr,
+            queue_ptr
+          )
+        rescue Exception
+          CreatePartitionsHandle.remove(create_partitions_handle.to_ptr.address)
+          raise
+        ensure
+          Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
+          Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
+          Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
+        end
 
-      [old line 196 not captured in this extract]
-      Rdkafka::Bindings.rd_kafka_CreatePartitions(
-        @native_kafka,
-        topics_array_ptr,
-        1,
-        admin_options_ptr,
-        queue_ptr
-      )
-    rescue Exception
-      CreatePartitionsHandle.remove(create_partitions_handle.to_ptr.address)
-      raise
-    ensure
-      Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
-      Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
-      Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
+        create_partitions_handle
       end
+    end
+
+    private
 
-      [old line 213 not captured in this extract]
+    def closed_admin_check(method)
+      raise Rdkafka::ClosedAdminError.new(method) if closed?
     end
   end
 end
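The admin changes above lean on the new NativeKafka wrapper (see data/lib/rdkafka/native_kafka.rb in the file list): every librdkafka call goes through `#with_inner`, and `#finalizer`, `#closed?`, and `#close` manage the handle's lifecycle. The following is a simplified sketch of that pattern for orientation, not the gem's actual implementation; the mutex mirrors the "Lock access to the native kafka client" changelog entry.

```ruby
# Simplified, illustrative wrapper around a native librdkafka handle.
class NativeKafkaSketch
  def initialize(inner)
    @inner = inner            # FFI pointer to the librdkafka client
    @access_mutex = Mutex.new # serializes access across threads
  end

  # Yield the native handle while holding the lock, as with_inner does above.
  def with_inner
    return if @inner.nil?

    @access_mutex.synchronize { yield @inner }
  end

  # Used with ObjectSpace.define_finalizer so an unclosed client is still
  # cleaned up when its owner is garbage collected.
  def finalizer
    ->(_) { close }
  end

  def closed?
    @inner.nil?
  end

  def close
    return if closed?

    @access_mutex.synchronize do
      # The real implementation destroys the librdkafka handle here
      # (via Rdkafka::Bindings) before dropping the reference.
      @inner = nil
    end
  end
end
```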