jruby-kafka-lockjar 1.4.0.pre-java
- checksums.yaml +7 -0
- data/Jarfile +2 -0
- data/lib/jruby-kafka.rb +10 -0
- data/lib/jruby-kafka/consumer.rb +41 -0
- data/lib/jruby-kafka/error.rb +9 -0
- data/lib/jruby-kafka/group.rb +246 -0
- data/lib/jruby-kafka/kafka-producer.rb +76 -0
- data/lib/jruby-kafka/namespace.rb +3 -0
- data/lib/jruby-kafka/producer.rb +101 -0
- metadata +82 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: adedc59b55590e41c4a622bd463dec1ebf998d70
  data.tar.gz: b27119eee15c967e74be17c0ccf05d4e8002b246
SHA512:
  metadata.gz: 16e36cbb128ee4c4a2488f4142229a52af9949491844a52ebe646135ac1d38c8c64820808f2273382fe8922799b0bbe175dad266d473d82bab3e15d66b4c0d4e
  data.tar.gz: 10c1b52c9f8f3cfdc18d8b5bb1b4bf8adac9c0ab12b763efe41bb723d3537f49704bb3b60d28469c522852c68d2c5b80ea4857bdc2f28a6892fc2d2c584151cb
data/Jarfile
ADDED
data/lib/jruby-kafka.rb
ADDED
@@ -0,0 +1,10 @@
require 'lock_jar'
LockJar.lock File.join(File.dirname(__FILE__), '../Jarfile'), lockfile: 'Jarfile.jruby-kafka.lock'
LockJar.load 'Jarfile.jruby-kafka.lock'
require 'jruby-kafka/consumer'
require 'jruby-kafka/group'
require 'jruby-kafka/producer'
require 'jruby-kafka/kafka-producer'

module Kafka
end
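
Requiring the entry file above is the whole bootstrap: LockJar resolves the bundled Jarfile into Jarfile.jruby-kafka.lock, loads the Kafka jars onto the JVM classpath, and only then requires the wrapper classes. A minimal sketch of that flow from application code:

# Minimal sketch: one require performs the jar resolution and loading described above.
require 'jruby-kafka'

# The wrapper classes are now defined under the Kafka namespace, backed by the loaded jars.
Kafka::Group     # consumer-group wrapper
Kafka::Producer  # 0.8 producer wrapper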
data/lib/jruby-kafka/consumer.rb
ADDED
@@ -0,0 +1,41 @@
require 'java'
require 'jruby-kafka/namespace'

# noinspection JRubyStringImportInspection
class Kafka::Consumer
  java_import 'kafka.consumer.ConsumerIterator'
  java_import 'kafka.consumer.KafkaStream'
  java_import 'kafka.common.ConsumerRebalanceFailedException'
  java_import 'kafka.consumer.ConsumerTimeoutException'

  include Java::JavaLang::Runnable
  java_signature 'void run()'

  def initialize(a_stream, a_thread_number, a_queue, restart_on_exception, a_sleep_ms)
    @m_thread_number = a_thread_number
    @m_stream = a_stream
    @m_queue = a_queue
    @m_restart_on_exception = restart_on_exception
    @m_sleep_ms = 1.0 / 1000.0 * Float(a_sleep_ms)
  end

  def run
    it = @m_stream.iterator
    begin
      while it.hasNext
        begin
          @m_queue << it.next
        end
      end
    rescue Exception => e
      puts("#{self.class.name} caught exception: #{e.class.name}")
      puts(e.message) if e.message != ''
      if @m_restart_on_exception
        sleep(@m_sleep_ms)
        retry
      else
        raise e
      end
    end
  end
end
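
Because Kafka::Consumer includes java.lang.Runnable, it can be submitted to any Java ExecutorService; Kafka::Group#run below does exactly that. A hand-rolled sketch of the same wiring, assuming `stream` is a KafkaStream already obtained from a consumer connector:

# Hypothetical wiring: 'stream' must come from a real consumer connector (see Kafka::Group#run).
queue    = Queue.new
consumer = Kafka::Consumer.new(stream, 0, queue, true, 1000)  # restart on error, sleep 1000 ms between retries

executor = java.util.concurrent.Executors.newSingleThreadExecutor
submit   = executor.java_method(:submit, [Java::JavaLang::Runnable.java_class])
submit.call(consumer)

msg     = queue.pop                            # blocks; yields kafka.message.MessageAndMetadata
payload = String.from_java_bytes(msg.message)  # byte[] when using the DefaultDecoder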
data/lib/jruby-kafka/group.rb
ADDED
@@ -0,0 +1,246 @@
# basically we are porting this https://cwiki.apache.org/confluence/display/KAFKA/Consumer+Group+Example
require 'jruby-kafka/namespace'
require 'jruby-kafka/consumer'
require 'jruby-kafka/error'

# noinspection JRubyStringImportInspection
class Kafka::Group
  java_import 'java.util.concurrent.ExecutorService'
  java_import 'java.util.concurrent.Executors'
  java_import 'org.I0Itec.zkclient.exception.ZkException'

  # Create a Kafka client group
  #
  # options:
  # :zk_connect => "localhost:2181" - REQUIRED: The connection string for the
  #   zookeeper connection in the form host:port. Multiple URLS can be given to allow fail-over.
  # :zk_connect_timeout => "6000" - (optional) The max time that the client waits while establishing a connection to zookeeper.
  # :group_id => "group" - REQUIRED: The group id to consume on.
  # :topic_id => "topic" - REQUIRED: The topic id to consume on.
  # :reset_beginning => "from-beginning" - (optional) reset the consumer group to start at the
  #   earliest message present in the log by clearing any offsets for the group stored in Zookeeper.
  # :auto_offset_reset => "smallest" or "largest" - (optional, default 'largest') If the consumer does not already
  #   have an established offset to consume from, start with the earliest message present in the log (smallest) or
  #   after the last message in the log (largest).
  # :consumer_restart_on_error => "true" - (optional) Controls if consumer threads are to restart on caught exceptions.
  #   exceptions are logged.
  def initialize(options={})
    validate_required_arguments(options)

    @zk_connect = options[:zk_connect]
    @group_id = options[:group_id]
    @topic = options[:topic_id]
    @topics_allowed = options[:allow_topics]
    @topics_filtered = options[:filter_topics]
    @zk_session_timeout = '6000'
    @zk_connect_timeout = '6000'
    @zk_sync_time = '2000'
    @reset_beginning = nil
    @auto_offset_reset = 'largest'
    @auto_commit_interval = '1000'
    @running = false
    @rebalance_max_retries = '4'
    @rebalance_backoff_ms = '2000'
    @socket_timeout_ms = "#{30 * 1000}"
    @socket_receive_buffer_bytes = "#{64 * 1024}"
    @fetch_message_max_bytes = "#{1024 * 1024}"
    @auto_commit_enable = "#{true}"
    @queued_max_message_chunks = '10'
    @fetch_min_bytes = '1'
    @fetch_wait_max_ms = '100'
    @refresh_leader_backoff_ms = '200'
    @consumer_timeout_ms = '-1'
    @consumer_restart_on_error = "#{false}"
    @consumer_restart_sleep_ms = '0'
    @consumer_id = nil
    @key_decoder_class = "kafka.serializer.DefaultDecoder"
    @value_decoder_class = "kafka.serializer.DefaultDecoder"

    if options[:zk_connect_timeout]
      @zk_connect_timeout = "#{options[:zk_connect_timeout]}"
    end
    if options[:zk_session_timeout]
      @zk_session_timeout = "#{options[:zk_session_timeout]}"
    end
    if options[:zk_sync_time]
      @zk_sync_time = "#{options[:zk_sync_time]}"
    end
    if options[:auto_commit_interval]
      @auto_commit_interval = "#{options[:auto_commit_interval]}"
    end

    if options[:rebalance_max_retries]
      @rebalance_max_retries = "#{options[:rebalance_max_retries]}"
    end

    if options[:rebalance_backoff_ms]
      @rebalance_backoff_ms = "#{options[:rebalance_backoff_ms]}"
    end

    if options[:socket_timeout_ms]
      @socket_timeout_ms = "#{options[:socket_timeout_ms]}"
    end

    if options[:socket_receive_buffer_bytes]
      @socket_receive_buffer_bytes = "#{options[:socket_receive_buffer_bytes]}"
    end

    if options[:fetch_message_max_bytes]
      @fetch_message_max_bytes = "#{options[:fetch_message_max_bytes]}"
    end

    if options[:auto_commit_enable]
      @auto_commit_enable = "#{options[:auto_commit_enable]}"
    end

    if options[:queued_max_message_chunks]
      @queued_max_message_chunks = "#{options[:queued_max_message_chunks]}"
    end

    if options[:fetch_min_bytes]
      @fetch_min_bytes = "#{options[:fetch_min_bytes]}"
    end

    if options[:fetch_wait_max_ms]
      @fetch_wait_max_ms = "#{options[:fetch_wait_max_ms]}"
    end

    if options[:refresh_leader_backoff_ms]
      @refresh_leader_backoff_ms = "#{options[:refresh_leader_backoff_ms]}"
    end

    if options[:consumer_timeout_ms]
      @consumer_timeout_ms = "#{options[:consumer_timeout_ms]}"
    end

    if options[:consumer_restart_on_error]
      @consumer_restart_on_error = "#{options[:consumer_restart_on_error]}"
    end

    if options[:consumer_restart_sleep_ms]
      @consumer_restart_sleep_ms = "#{options[:consumer_restart_sleep_ms]}"
    end

    if options[:auto_offset_reset]
      @auto_offset_reset = "#{options[:auto_offset_reset]}"
    end

    if options[:key_decoder_class]
      @key_decoder_class = "#{options[:key_decoder_class]}"
    end

    if options[:value_decoder_class]
      @value_decoder_class = "#{options[:value_decoder_class]}"
    end

    if options[:reset_beginning]
      if not options[:auto_offset_reset] || options[:auto_offset_reset] != 'smallest'
        raise KafkaError.new('reset_beginning => from-beginning must be used with auto_offset_reset => smallest')
      end
      @reset_beginning = "#{options[:reset_beginning]}"
    end

    if options[:consumer_id]
      @consumer_id = options[:consumer_id]
    end
  end

  public

  def shutdown
    if @consumer
      @consumer.shutdown
    end
    if @executor
      @executor.shutdown
    end
    @running = false
  end

  def run(a_num_threads, a_queue)
    begin
      if @reset_beginning == 'from-beginning'
        Java::kafka::utils::ZkUtils.maybeDeletePath(@zk_connect, "/consumers/#{@group_id}")
      end

      @consumer = Java::kafka::consumer::Consumer.createJavaConsumerConnector(create_consumer_config)
    rescue ZkException => e
      raise KafkaError.new(e), "Got ZkException: #{e}"
    end

    thread_value = a_num_threads.to_java Java::int
    streams = get_streams(thread_value)

    @executor = Executors.newFixedThreadPool(a_num_threads)
    @executor_submit = @executor.java_method(:submit, [Java::JavaLang::Runnable.java_class])

    thread_number = 0
    streams.each do |stream|
      @executor_submit.call(Kafka::Consumer.new(stream, thread_number, a_queue, @consumer_restart_on_error, @consumer_restart_sleep_ms))
      thread_number += 1
    end
    @running = true
  end

  def running?
    @running
  end

  private

  def validate_required_arguments(options={})
    [:zk_connect, :group_id].each do |opt|
      raise(ArgumentError, "#{opt} is required.") unless options[opt]
    end
    unless [ options[:topic_id],
             options[:allow_topics],
             options[:filter_topics] ].compact.length == 1
      raise(ArgumentError,
            "exactly one of topic_id, allow_topics, filter_topics is required.")
    end
  end

  def get_streams(threads)
    constructor_param_class_name = "kafka.utils.VerifiableProperties"
    key_decoder_instance = Java::JavaClass.for_name(@key_decoder_class).constructor(constructor_param_class_name).new_instance(nil)
    value_decoder_instance = Java::JavaClass.for_name(@value_decoder_class).constructor(constructor_param_class_name).new_instance(nil)
    if @topic
      topic_count_map = java.util.HashMap.new
      topic_count_map.put(@topic, threads)
      consumer_map = @consumer.createMessageStreams(topic_count_map, key_decoder_instance, value_decoder_instance)
      Array.new(consumer_map[@topic])
    elsif @topics_allowed
      filter = Java::kafka::consumer::Whitelist.new(@topics_allowed)
      Array.new(@consumer.createMessageStreamsByFilter(filter, threads, key_decoder_instance, value_decoder_instance))
    else # @topics_filtered
      filter = Java::kafka::consumer::Blacklist.new(@topics_filtered)
      Array.new(@consumer.createMessageStreamsByFilter(filter, threads, key_decoder_instance, value_decoder_instance))
    end
  end

  def create_consumer_config
    properties = java.util.Properties.new
    properties.put('zookeeper.connect', @zk_connect)
    properties.put('group.id', @group_id)
    properties.put('zookeeper.connection.timeout.ms', @zk_connect_timeout)
    properties.put('zookeeper.session.timeout.ms', @zk_session_timeout)
    properties.put('zookeeper.sync.time.ms', @zk_sync_time)
    properties.put('auto.commit.interval.ms', @auto_commit_interval)
    properties.put('auto.offset.reset', @auto_offset_reset)
    properties.put('rebalance.max.retries', @rebalance_max_retries)
    properties.put('rebalance.backoff.ms', @rebalance_backoff_ms)
    properties.put('socket.timeout.ms', @socket_timeout_ms)
    properties.put('socket.receive.buffer.bytes', @socket_receive_buffer_bytes)
    properties.put('fetch.message.max.bytes', @fetch_message_max_bytes)
    properties.put('auto.commit.enable', @auto_commit_enable)
    properties.put('queued.max.message.chunks', @queued_max_message_chunks)
    properties.put('fetch.min.bytes', @fetch_min_bytes)
    properties.put('fetch.wait.max.ms', @fetch_wait_max_ms)
    properties.put('refresh.leader.backoff.ms', @refresh_leader_backoff_ms)
    properties.put('consumer.timeout.ms', @consumer_timeout_ms)
    unless @consumer_id.nil?
      properties.put('consumer.id', @consumer_id)
    end
    Java::kafka::consumer::ConsumerConfig.new(properties)
  end
end
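
Putting the pieces together, a hedged usage sketch based on the options documented above (the ZooKeeper endpoint, group id and topic are placeholders): the group connects through ZooKeeper, fans the topic out over consumer threads, and each thread pushes MessageAndMetadata objects onto the Ruby Queue you supply.

# Illustrative only; endpoints and names are placeholders.
require 'jruby-kafka'

group = Kafka::Group.new(
  zk_connect: 'localhost:2181',
  group_id:   'my_consumer_group',
  topic_id:   'my_topic'
)

queue = Queue.new
group.run(2, queue)                               # two consumer threads feeding one Ruby Queue

msg = queue.pop                                   # blocks; yields kafka.message.MessageAndMetadata
puts "#{msg.topic}/#{msg.partition}@#{msg.offset}: #{String.from_java_bytes(msg.message)}"

group.shutdown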
data/lib/jruby-kafka/kafka-producer.rb
ADDED
@@ -0,0 +1,76 @@
require 'jruby-kafka/namespace'
require 'jruby-kafka/error'

# noinspection JRubyStringImportInspection
class Kafka::KafkaProducer
  java_import 'org.apache.kafka.clients.producer.ProducerRecord'
  KAFKA_PRODUCER = Java::org.apache.kafka.clients.producer.KafkaProducer

  VALIDATIONS = {
    :'required.codecs' => %w[
      none gzip snappy lz4
    ]
  }

  REQUIRED = %w[
    bootstrap.servers key.serializer
  ]

  KNOWN = %w[
    acks batch.size block.on.buffer.full
    bootstrap.servers buffer.memory client.id
    compression.type key.serializer linger.ms
    max.in.flight.requests.per.connection max.request.size
    metadata.fetch.timeout.ms metadata.max.age.ms metric.reporters
    metrics.num.samples metrics.sample.window.ms receive.buffer.bytes
    reconnect.backoff.ms retries retry.backoff.ms
    send.buffer.bytes timeout.ms value.serializer
  ]

  attr_reader :producer, :send_method, :options

  def initialize(opts = {})
    @options = opts.reduce({}) do |opts_array, (k, v)|
      unless v.nil?
        opts_array[k.to_s.gsub(/_/, '.')] = v
      end
      opts_array
    end
    validate_arguments
    @send_method = proc { throw StandardError.new 'Producer is not connected' }
  end

  def connect
    @producer = KAFKA_PRODUCER.new(create_producer_config)
    @send_method = producer.java_method :send, [ProducerRecord]
  end

  # throws FailedToSendMessageException or if not connected, StandardError.
  def send_msg(topic, partition, key, value)
    send_method.call(ProducerRecord.new(topic, partition, key, value))
  end

  def close
    @producer.close
  end

  private

  def validate_arguments
    errors = []
    missing = REQUIRED.reject { |opt| options[opt] }
    errors = ["Required settings: #{ missing.join(', ')}"] if missing.any?
    invalid = VALIDATIONS.reject { |opt, valid| options[opt].nil? or valid.include? options[opt].to_s }
    errors += invalid.map { |opt, valid| "#{ opt } should be one of: [#{ valid.join(', ')}]" }
    fail StandardError.new "Invalid configuration arguments: #{ errors.join('; ') }" if errors.any?
    options.keys.each do |opt|
      STDERR.puts "WARNING: Unknown configuration key: #{opt}" unless KNOWN.include? opt
    end
  end

  def create_producer_config
    properties = java.util.Properties.new
    options.each { |opt, value| properties.put opt, value.to_s }
    properties
  end
end
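
A hedged usage sketch for this new-producer wrapper (broker address and topic are placeholders). Option keys may be passed as Ruby symbols with underscores; initialize rewrites underscores to dots, so :bootstrap_servers becomes bootstrap.servers before validation.

# Illustrative only; broker address and topic are placeholders.
producer = Kafka::KafkaProducer.new(
  bootstrap_servers: 'localhost:9092',
  key_serializer:    'org.apache.kafka.common.serialization.StringSerializer',
  value_serializer:  'org.apache.kafka.common.serialization.StringSerializer'
)

producer.connect
producer.send_msg('my_topic', nil, 'some-key', 'hello from jruby')  # nil partition lets the client choose
producer.close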
data/lib/jruby-kafka/producer.rb
ADDED
@@ -0,0 +1,101 @@
# basically we are porting this https://cwiki.apache.org/confluence/display/KAFKA/0.8.0+Producer+Example
require 'jruby-kafka/namespace'
require 'jruby-kafka/error'

# noinspection JRubyStringImportInspection
class Kafka::Producer
  extend Gem::Deprecate
  java_import 'kafka.producer.ProducerConfig'
  java_import 'kafka.producer.KeyedMessage'
  KAFKA_PRODUCER = Java::kafka.javaapi.producer.Producer
  java_import 'kafka.message.NoCompressionCodec'
  java_import 'kafka.message.GZIPCompressionCodec'
  java_import 'kafka.message.SnappyCompressionCodec'

  VALIDATIONS = {
    :'request.required.acks' => %w[ 0 1 -1 ],
    :'required.codecs' => [NoCompressionCodec.name, GZIPCompressionCodec.name, SnappyCompressionCodec.name],
    :'producer.type' => %w[ sync async ]
  }

  REQUIRED = %w[
    metadata.broker.list
  ]

  # List of all available options extracted from http://kafka.apache.org/documentation.html#producerconfigs Apr. 27, 2014
  # If new options are added, they should just work. Please add them to the list so that we can get handy warnings.
  KNOWN = %w[
    metadata.broker.list request.required.acks request.timeout.ms
    producer.type serializer.class key.serializer.class
    partitioner.class compression.codec compressed.topics
    message.send.max.retries retry.backoff.ms topic.metadata.refresh.interval.ms
    queue.buffering.max.ms queue.buffering.max.messages queue.enqueue.timeout.ms
    batch.num.messages send.buffer.bytes client.id
    broker.list serializer.encoding
  ]

  attr_reader :producer, :send_method, :options

  # Create a Kafka Producer
  #
  # options:
  # metadata_broker_list: ["localhost:9092"] - REQUIRED: a seed list of kafka brokers
  def initialize(opts = {})
    @options = opts.reduce({}) do |opts_array, (k, v)|
      unless v.nil?
        opts_array[k.to_s.gsub(/_/, '.')] = v
      end
      opts_array
    end
    if options['broker.list']
      options['metadata.broker.list'] = options.delete 'broker.list'
    end
    if options['metadata.broker.list'].is_a? Array
      options['metadata.broker.list'] = options['metadata.broker.list'].join(',')
    end
    if options['compressed.topics'].is_a? Array
      options['compressed.topics'] = options['compressed.topics'].join(',')
    end
    validate_arguments
    @send_method = proc { throw StandardError.new 'Producer is not connected' }
  end

  def connect
    @producer = KAFKA_PRODUCER.new(create_producer_config)
    @send_method = producer.java_method :send, [KeyedMessage]
  end

  # throws FailedToSendMessageException or if not connected, StandardError.
  def send_msg(topic, key, msg)
    send_method.call(KeyedMessage.new(topic, key, msg))
  end

  def sendMsg(topic, key, msg)
    send_msg(topic, key, msg)
  end
  deprecate :sendMsg, :send_msg, 2015, 01

  def close
    @producer.close
  end

  private

  def validate_arguments
    errors = []
    missing = REQUIRED.reject { |opt| options[opt] }
    errors = ["Required settings: #{ missing.join(', ')}"] if missing.any?
    invalid = VALIDATIONS.reject { |opt, valid| options[opt].nil? or valid.include? options[opt].to_s }
    errors += invalid.map { |opt, valid| "#{ opt } should be one of: [#{ valid.join(', ')}]" }
    fail StandardError.new "Invalid configuration arguments: #{ errors.join('; ') }" if errors.any?
    options.keys.each do |opt|
      STDERR.puts "WARNING: Unknown configuration key: #{opt}" unless KNOWN.include? opt
    end
  end

  def create_producer_config
    properties = java.util.Properties.new
    options.each { |opt, value| properties.put opt, value.to_s }
    ProducerConfig.new(properties)
  end
end
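
And a hedged sketch for the older 0.8 producer API documented in the comment above (broker list and topic are placeholders): broker_list may be an Array, which is normalized to metadata.broker.list and joined with commas, and plain string payloads work once serializer_class names a string encoder.

# Illustrative only; brokers and topic are placeholders.
producer = Kafka::Producer.new(
  broker_list:      ['localhost:9092'],             # rewritten to metadata.broker.list internally
  serializer_class: 'kafka.serializer.StringEncoder'
)

producer.connect
producer.send_msg('my_topic', nil, 'hello from jruby')  # nil key leaves partitioning to the default partitioner
producer.close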
metadata
ADDED
@@ -0,0 +1,82 @@
--- !ruby/object:Gem::Specification
name: jruby-kafka-lockjar
version: !ruby/object:Gem::Version
  version: 1.4.0.pre
platform: java
authors:
- Joseph Lawson
- Darrick Wiebe
autorequire:
bindir: bin
cert_chain: []
date: 2015-05-20 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: lock_jar
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - '>='
      - !ruby/object:Gem::Version
        version: '0'
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - '>='
      - !ruby/object:Gem::Version
        version: '0'
  prerelease: false
  type: :runtime
- !ruby/object:Gem::Dependency
  name: rake
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ~>
      - !ruby/object:Gem::Version
        version: '10.4'
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ~>
      - !ruby/object:Gem::Version
        version: '10.4'
  prerelease: false
  type: :development
description: Fork of jruby-kafka that uses lockjar instead of ruby-maven
email:
- joe@joekiller.com
- dw@xnlogic.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- Jarfile
- lib/jruby-kafka.rb
- lib/jruby-kafka/consumer.rb
- lib/jruby-kafka/error.rb
- lib/jruby-kafka/group.rb
- lib/jruby-kafka/kafka-producer.rb
- lib/jruby-kafka/namespace.rb
- lib/jruby-kafka/producer.rb
homepage: https://github.com/xnlogic/jruby-kafka-lockjar
licenses:
- Apache 2.0
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - '>'
    - !ruby/object:Gem::Version
      version: 1.3.1
requirements: []
rubyforge_project:
rubygems_version: 2.4.5
signing_key:
specification_version: 4
summary: JRuby Kafka wrapper
test_files: []
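
For completeness, pulling the prerelease build described by this gemspec into a JRuby project would look roughly like the following; the exact pin is illustrative.

# From the command line (prerelease flag required): gem install jruby-kafka-lockjar --pre
# Or in a Gemfile, restricted to JRuby:
gem 'jruby-kafka-lockjar', '1.4.0.pre', platforms: :jruby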