sorceror_jruby-kafka 2.0.0-java
- checksums.yaml +7 -0
- data/lib/jruby-kafka.rb +9 -0
- data/lib/jruby-kafka/consumer.rb +43 -0
- data/lib/jruby-kafka/error.rb +9 -0
- data/lib/jruby-kafka/ext.rb +11 -0
- data/lib/jruby-kafka/group.rb +252 -0
- data/lib/jruby-kafka/kafka-producer.rb +76 -0
- data/lib/jruby-kafka/namespace.rb +3 -0
- data/lib/jruby-kafka/producer.rb +101 -0
- data/lib/jruby-kafka_jars.rb +18 -0
- metadata +97 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: 3679846ac802a28cdaf2877c88923d21534823f5
  data.tar.gz: b80a3d5555c9a3b48a3a522a51427f50c3a87955
SHA512:
  metadata.gz: b55f63552e194b7e4e22dc50849a8bea2b11b836af9f0e95bc4fe68901a73417272830152ae11a7764a20d082a944d80e252d2d934f91280d4a744ae4599604d
  data.tar.gz: 458955a4bfe49f396ac7b115da6d37dc68a601e24c515ac8bad5f30ab3f8238fd1e1e11463ae286c8c76d73694bf50d650d907e811749ad0bb6b20dfe44c3cd7
data/lib/jruby-kafka/consumer.rb
ADDED
@@ -0,0 +1,43 @@
require 'java'
require 'jruby-kafka/namespace'

# noinspection JRubyStringImportInspection
class Kafka::Consumer
  java_import 'kafka.consumer.ConsumerIterator'
  java_import 'kafka.consumer.KafkaStream'
  java_import 'kafka.common.ConsumerRebalanceFailedException'
  java_import 'kafka.consumer.ConsumerTimeoutException'

  include Java::JavaLang::Runnable
  java_signature 'void run()'

  def initialize(a_stream, a_thread_number, restart_on_exception, a_sleep_ms, callback)
    @m_thread_number = a_thread_number
    @m_stream = a_stream
    @m_restart_on_exception = restart_on_exception
    @m_sleep_ms = 1.0 / 1000.0 * Float(a_sleep_ms)
    @m_callback = callback
  end

  def run
    it = @m_stream.iterator
    begin
      while it.hasNext
        begin
          message = it.next
          @m_callback.call(message.message.to_s, MetaData.new(message.topic, message.partition, message.offset))
        end
      end
    rescue Exception => e
      # Log exception (or only retry if consumer timed out)
      if @m_restart_on_exception
        sleep(@m_sleep_ms)
        retry
      else
        raise e
      end
    end
  end

  class MetaData < Struct.new(:topic, :partition, :offset); end
end
data/lib/jruby-kafka/ext.rb
ADDED
@@ -0,0 +1,11 @@
require 'java'
java_import "kafka.common.TopicAndPartition"
java_import "kafka.javaapi.consumer.ZookeeperConsumerConnector"

class ZookeeperConsumerConnector
  field_reader :underlying

  def commitOffset(topic, partition, offset)
    self.underlying.commitOffsetToZooKeeper(TopicAndPartition.new(topic, partition), offset)
  end
end
data/lib/jruby-kafka/group.rb
ADDED
@@ -0,0 +1,252 @@
# basically we are porting this https://cwiki.apache.org/confluence/display/KAFKA/Consumer+Group+Example
require 'jruby-kafka/namespace'
require 'jruby-kafka/consumer'
require 'jruby-kafka/error'

# noinspection JRubyStringImportInspection
class Kafka::Group
  java_import 'java.util.concurrent.ExecutorService'
  java_import 'java.util.concurrent.Executors'
  java_import 'org.I0Itec.zkclient.exception.ZkException'

  attr_reader :topic

  # Create a Kafka client group
  #
  # options:
  # :zk_connect => "localhost:2181" - REQUIRED: The connection string for the
  #   zookeeper connection in the form host:port. Multiple URLS can be given to allow fail-over.
  # :zk_connect_timeout => "6000" - (optional) The max time that the client waits while establishing a connection to zookeeper.
  # :group_id => "group" - REQUIRED: The group id to consume on.
  # :topic_id => "topic" - REQUIRED: The topic id to consume on.
  # :reset_beginning => "from-beginning" - (optional) reset the consumer group to start at the
  #   earliest message present in the log by clearing any offsets for the group stored in Zookeeper.
  # :auto_offset_reset => "smallest" or "largest" - (optional, default 'largest') If the consumer does not already
  #   have an established offset to consume from, start with the earliest message present in the log (smallest) or
  #   after the last message in the log (largest).
  # :consumer_restart_on_error => "true" - (optional) Controls if consumer threads are to restart on caught exceptions.
  #   exceptions are logged.
  def initialize(options={})
    validate_required_arguments(options)

    @zk_connect = options[:zk_connect]
    @group_id = options[:group_id]
    @topic = options[:topic_id]
    @topics_allowed = options[:allow_topics]
    @topics_filtered = options[:filter_topics]
    @zk_session_timeout = '6000'
    @zk_connect_timeout = '6000'
    @zk_sync_time = '2000'
    @reset_beginning = nil
    @auto_offset_reset = 'largest'
    @auto_commit_interval = '1000'
    @running = false
    @rebalance_max_retries = '4'
    @rebalance_backoff_ms = '2000'
    @socket_timeout_ms = "#{30 * 1000}"
    @socket_receive_buffer_bytes = "#{64 * 1024}"
    @fetch_message_max_bytes = "#{1024 * 1024}"
    @auto_commit_enable = "#{true}"
    @queued_max_message_chunks = '10'
    @fetch_min_bytes = '1'
    @fetch_wait_max_ms = '100'
    @refresh_leader_backoff_ms = '200'
    @consumer_timeout_ms = '-1'
    @consumer_restart_on_error = "#{false}"
    @consumer_restart_sleep_ms = '0'
    @consumer_id = nil
    @key_decoder_class = "kafka.serializer.DefaultDecoder"
    @value_decoder_class = "kafka.serializer.DefaultDecoder"

    if options[:zk_connect_timeout]
      @zk_connect_timeout = "#{options[:zk_connect_timeout]}"
    end
    if options[:zk_session_timeout]
      @zk_session_timeout = "#{options[:zk_session_timeout]}"
    end
    if options[:zk_sync_time]
      @zk_sync_time = "#{options[:zk_sync_time]}"
    end
    if options[:auto_commit_interval]
      @auto_commit_interval = "#{options[:auto_commit_interval]}"
    end

    if options[:rebalance_max_retries]
      @rebalance_max_retries = "#{options[:rebalance_max_retries]}"
    end

    if options[:rebalance_backoff_ms]
      @rebalance_backoff_ms = "#{options[:rebalance_backoff_ms]}"
    end

    if options[:socket_timeout_ms]
      @socket_timeout_ms = "#{options[:socket_timeout_ms]}"
    end

    if options[:socket_receive_buffer_bytes]
      @socket_receive_buffer_bytes = "#{options[:socket_receive_buffer_bytes]}"
    end

    if options[:fetch_message_max_bytes]
      @fetch_message_max_bytes = "#{options[:fetch_message_max_bytes]}"
    end

    if options[:auto_commit_enable]
      @auto_commit_enable = "#{options[:auto_commit_enable]}"
    end

    if options[:queued_max_message_chunks]
      @queued_max_message_chunks = "#{options[:queued_max_message_chunks]}"
    end

    if options[:fetch_min_bytes]
      @fetch_min_bytes = "#{options[:fetch_min_bytes]}"
    end

    if options[:fetch_wait_max_ms]
      @fetch_wait_max_ms = "#{options[:fetch_wait_max_ms]}"
    end

    if options[:refresh_leader_backoff_ms]
      @refresh_leader_backoff_ms = "#{options[:refresh_leader_backoff_ms]}"
    end

    if options[:consumer_timeout_ms]
      @consumer_timeout_ms = "#{options[:consumer_timeout_ms]}"
    end

    if options[:consumer_restart_on_error]
      @consumer_restart_on_error = "#{options[:consumer_restart_on_error]}"
    end

    if options[:consumer_restart_sleep_ms]
      @consumer_restart_sleep_ms = "#{options[:consumer_restart_sleep_ms]}"
    end

    if options[:auto_offset_reset]
      @auto_offset_reset = "#{options[:auto_offset_reset]}"
    end

    if options[:key_decoder_class]
      @key_decoder_class = "#{options[:key_decoder_class]}"
    end

    if options[:value_decoder_class]
      @value_decoder_class = "#{options[:value_decoder_class]}"
    end

    if options[:reset_beginning]
      if not options[:auto_offset_reset] || options[:auto_offset_reset] != 'smallest'
        raise KafkaError.new('reset_beginning => from-beginning must be used with auto_offset_reset => smallest')
      end
      @reset_beginning = "#{options[:reset_beginning]}"
    end

    if options[:consumer_id]
      @consumer_id = options[:consumer_id]
    end
  end

  public

  def shutdown
    if @consumer
      @consumer.shutdown
    end
    if @executor
      @executor.shutdown
    end
    @running = false
  end

  def run(a_num_threads, &block)
    begin
      if @reset_beginning == 'from-beginning'
        Java::kafka::utils::ZkUtils.maybeDeletePath(@zk_connect, "/consumers/#{@group_id}")
      end

      @consumer = Java::kafka::consumer::Consumer.createJavaConsumerConnector(create_consumer_config)
    rescue ZkException => e
      raise KafkaError.new(e), "Got ZkException: #{e}"
    end

    thread_value = a_num_threads.to_java Java::int
    streams = get_streams(thread_value)

    @executor = Executors.newFixedThreadPool(a_num_threads)
    @executor_submit = @executor.java_method(:submit, [Java::JavaLang::Runnable.java_class])

    thread_number = 0
    streams.each do |stream|
      @executor_submit.call(Kafka::Consumer.new(stream, thread_number, @consumer_restart_on_error, @consumer_restart_sleep_ms, block))
      thread_number += 1
    end
    @running = true
  end

  def running?
    @running
  end

  def commit(metadata)
    @consumer.commitOffset(metadata.topic, metadata.partition, metadata.offset+1)
  end

  private

  def validate_required_arguments(options={})
    [:zk_connect, :group_id].each do |opt|
      raise(ArgumentError, "#{opt} is required.") unless options[opt]
    end
    unless [ options[:topic_id],
             options[:allow_topics],
             options[:filter_topics] ].compact.length == 1
      raise(ArgumentError,
            "exactly one of topic_id, allow_topics, filter_topics is required.")
    end
  end

  def get_streams(threads)
    constructor_param_class_name = "kafka.utils.VerifiableProperties"
    key_decoder_instance = Java::JavaClass.for_name(@key_decoder_class).constructor(constructor_param_class_name).new_instance(nil)
    value_decoder_instance = Java::JavaClass.for_name(@value_decoder_class).constructor(constructor_param_class_name).new_instance(nil)
    if @topic
      topic_count_map = java.util.HashMap.new
      topic_count_map.put(@topic, threads)
      consumer_map = @consumer.createMessageStreams(topic_count_map, key_decoder_instance, value_decoder_instance)
      Array.new(consumer_map[@topic])
    elsif @topics_allowed
      filter = Java::kafka::consumer::Whitelist.new(@topics_allowed)
      Array.new(@consumer.createMessageStreamsByFilter(filter, threads, key_decoder_instance, value_decoder_instance))
    else # @topics_filtered
      filter = Java::kafka::consumer::Blacklist.new(@topics_filtered)
      Array.new(@consumer.createMessageStreamsByFilter(filter, threads, key_decoder_instance, value_decoder_instance))
    end
  end

  def create_consumer_config
    properties = java.util.Properties.new
    properties.put('zookeeper.connect', @zk_connect)
    properties.put('group.id', @group_id)
    properties.put('zookeeper.connection.timeout.ms', @zk_connect_timeout)
    properties.put('zookeeper.session.timeout.ms', @zk_session_timeout)
    properties.put('zookeeper.sync.time.ms', @zk_sync_time)
    properties.put('auto.commit.interval.ms', @auto_commit_interval)
    properties.put('auto.offset.reset', @auto_offset_reset)
    properties.put('rebalance.max.retries', @rebalance_max_retries)
    properties.put('rebalance.backoff.ms', @rebalance_backoff_ms)
    properties.put('socket.timeout.ms', @socket_timeout_ms)
    properties.put('socket.receive.buffer.bytes', @socket_receive_buffer_bytes)
    properties.put('fetch.message.max.bytes', @fetch_message_max_bytes)
    properties.put('auto.commit.enable', @auto_commit_enable)
    properties.put('queued.max.message.chunks', @queued_max_message_chunks)
    properties.put('fetch.min.bytes', @fetch_min_bytes)
    properties.put('fetch.wait.max.ms', @fetch_wait_max_ms)
    properties.put('refresh.leader.backoff.ms', @refresh_leader_backoff_ms)
    properties.put('consumer.timeout.ms', @consumer_timeout_ms)
    unless @consumer_id.nil?
      properties.put('consumer.id', @consumer_id)
    end
    Java::kafka::consumer::ConsumerConfig.new(properties)
  end
end
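To make the documented options concrete, here is a minimal consumption sketch, not part of the gem itself; the ZooKeeper address, group id, and topic name are placeholder values, and it assumes the top-level require loads the group and consumer classes:

require 'jruby-kafka'

group = Kafka::Group.new(
  :zk_connect => 'localhost:2181',   # placeholder ZooKeeper address
  :group_id   => 'example_group',
  :topic_id   => 'example_topic'
)

# run(n) creates n streams and submits one Kafka::Consumer (a java.lang.Runnable)
# per stream to a fixed thread pool; the block receives each payload plus its metadata.
group.run(1) do |message, metadata|
  puts "#{metadata.topic}/#{metadata.partition}@#{metadata.offset}: #{message}"
end

sleep 10          # consume for a while
group.shutdown    # stops the consumer connector and the executor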
data/lib/jruby-kafka/kafka-producer.rb
ADDED
@@ -0,0 +1,76 @@
require 'jruby-kafka/namespace'
require 'jruby-kafka/error'

# noinspection JRubyStringImportInspection
class Kafka::KafkaProducer
  java_import 'org.apache.kafka.clients.producer.ProducerRecord'
  KAFKA_PRODUCER = Java::org.apache.kafka.clients.producer.KafkaProducer

  VALIDATIONS = {
    :'required.codecs' => %w[
      none gzip snappy lz4
    ]
  }

  REQUIRED = %w[
    bootstrap.servers key.serializer
  ]

  KNOWN = %w[
    acks batch.size block.on.buffer.full
    bootstrap.servers buffer.memory client.id
    compression.type key.serializer linger.ms
    max.in.flight.requests.per.connection max.request.size
    metadata.fetch.timeout.ms metadata.max.age.ms metric.reporters
    metrics.num.samples metrics.sample.window.ms receive.buffer.bytes
    reconnect.backoff.ms retries retry.backoff.ms
    send.buffer.bytes timeout.ms value.serializer
  ]

  attr_reader :producer, :send_method, :options

  def initialize(opts = {})
    @options = opts.reduce({}) do |opts_array, (k, v)|
      unless v.nil?
        opts_array[k.to_s.gsub(/_/, '.')] = v
      end
      opts_array
    end
    validate_arguments
    @send_method = proc { throw StandardError.new 'Producer is not connected' }
  end

  def connect
    @producer = KAFKA_PRODUCER.new(create_producer_config)
    @send_method = producer.java_method :send, [ProducerRecord]
  end

  # throws FailedToSendMessageException or if not connected, StandardError.
  def send_msg(topic, partition, key, value)
    send_method.call(ProducerRecord.new(topic, partition, key, value))
  end

  def close
    @producer.close
  end

  private

  def validate_arguments
    errors = []
    missing = REQUIRED.reject { |opt| options[opt] }
    errors = ["Required settings: #{ missing.join(', ')}"] if missing.any?
    invalid = VALIDATIONS.reject { |opt, valid| options[opt].nil? or valid.include? options[opt].to_s }
    errors += invalid.map { |opt, valid| "#{ opt } should be one of: [#{ valid.join(', ')}]" }
    fail StandardError.new "Invalid configuration arguments: #{ errors.join('; ') }" if errors.any?
    options.keys.each do |opt|
      STDERR.puts "WARNING: Unknown configuration key: #{opt}" unless KNOWN.include? opt
    end
  end

  def create_producer_config
    properties = java.util.Properties.new
    options.each { |opt, value| properties.put opt, value.to_s }
    properties
  end
end
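A usage sketch for this new-style producer, again not part of the gem: the broker address and topic are placeholders, and the serializer class names are the stock string serializers shipped with kafka-clients 0.8.2.1. Underscored option keys are converted to the dotted Java property names by initialize:

require 'jruby-kafka'

producer = Kafka::KafkaProducer.new(
  :bootstrap_servers => 'localhost:9092',   # becomes 'bootstrap.servers'
  :key_serializer    => 'org.apache.kafka.common.serialization.StringSerializer',
  :value_serializer  => 'org.apache.kafka.common.serialization.StringSerializer'
)
producer.connect                                        # builds the underlying Java KafkaProducer
producer.send_msg('example_topic', nil, 'key-1', 'hi')  # topic, partition (nil lets Kafka pick), key, value
producer.close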
data/lib/jruby-kafka/producer.rb
ADDED
@@ -0,0 +1,101 @@
# basically we are porting this https://cwiki.apache.org/confluence/display/KAFKA/0.8.0+Producer+Example
require 'jruby-kafka/namespace'
require 'jruby-kafka/error'

# noinspection JRubyStringImportInspection
class Kafka::Producer
  extend Gem::Deprecate
  java_import 'kafka.producer.ProducerConfig'
  java_import 'kafka.producer.KeyedMessage'
  KAFKA_PRODUCER = Java::kafka.javaapi.producer.Producer
  java_import 'kafka.message.NoCompressionCodec'
  java_import 'kafka.message.GZIPCompressionCodec'
  java_import 'kafka.message.SnappyCompressionCodec'

  VALIDATIONS = {
    :'request.required.acks' => %w[ 0 1 -1 ],
    :'required.codecs' => [NoCompressionCodec.name, GZIPCompressionCodec.name, SnappyCompressionCodec.name],
    :'producer.type' => %w[ sync async ]
  }

  REQUIRED = %w[
    metadata.broker.list
  ]

  # List of all available options extracted from http://kafka.apache.org/documentation.html#producerconfigs Apr. 27, 2014
  # If new options are added, they should just work. Please add them to the list so that we can get handy warnings.
  KNOWN = %w[
    metadata.broker.list request.required.acks request.timeout.ms
    producer.type serializer.class key.serializer.class
    partitioner.class compression.codec compressed.topics
    message.send.max.retries retry.backoff.ms topic.metadata.refresh.interval.ms
    queue.buffering.max.ms queue.buffering.max.messages queue.enqueue.timeout.ms
    batch.num.messages send.buffer.bytes client.id
    broker.list serializer.encoding
  ]

  attr_reader :producer, :send_method, :options

  # Create a Kafka Producer
  #
  # options:
  # metadata_broker_list: ["localhost:9092"] - REQUIRED: a seed list of kafka brokers
  def initialize(opts = {})
    @options = opts.reduce({}) do |opts_array, (k, v)|
      unless v.nil?
        opts_array[k.to_s.gsub(/_/, '.')] = v
      end
      opts_array
    end
    if options['broker.list']
      options['metadata.broker.list'] = options.delete 'broker.list'
    end
    if options['metadata.broker.list'].is_a? Array
      options['metadata.broker.list'] = options['metadata.broker.list'].join(',')
    end
    if options['compressed.topics'].is_a? Array
      options['compressed.topics'] = options['compressed.topics'].join(',')
    end
    validate_arguments
    @send_method = proc { throw StandardError.new 'Producer is not connected' }
  end

  def connect
    @producer = KAFKA_PRODUCER.new(create_producer_config)
    @send_method = producer.java_method :send, [KeyedMessage]
  end

  # throws FailedToSendMessageException or if not connected, StandardError.
  def send_msg(topic, key, msg)
    send_method.call(KeyedMessage.new(topic, key, msg))
  end

  def sendMsg(topic, key, msg)
    send_msg(topic, key, msg)
  end
  deprecate :sendMsg, :send_msg, 2015, 01

  def close
    @producer.close
  end

  private

  def validate_arguments
    errors = []
    missing = REQUIRED.reject { |opt| options[opt] }
    errors = ["Required settings: #{ missing.join(', ')}"] if missing.any?
    invalid = VALIDATIONS.reject { |opt, valid| options[opt].nil? or valid.include? options[opt].to_s }
    errors += invalid.map { |opt, valid| "#{ opt } should be one of: [#{ valid.join(', ')}]" }
    fail StandardError.new "Invalid configuration arguments: #{ errors.join('; ') }" if errors.any?
    options.keys.each do |opt|
      STDERR.puts "WARNING: Unknown configuration key: #{opt}" unless KNOWN.include? opt
    end
  end

  def create_producer_config
    properties = java.util.Properties.new
    options.each { |opt, value| properties.put opt, value.to_s }
    ProducerConfig.new(properties)
  end
end
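An equivalent sketch for the legacy 0.8 producer above (broker list and topic are placeholders; kafka.serializer.StringEncoder is the standard string encoder in Kafka 0.8):

require 'jruby-kafka'

producer = Kafka::Producer.new(
  :metadata_broker_list => ['localhost:9092'],              # arrays are joined into 'metadata.broker.list'
  :serializer_class     => 'kafka.serializer.StringEncoder'
)
producer.connect
producer.send_msg('example_topic', nil, 'hello')  # topic, key (nil), message
producer.close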
data/lib/jruby-kafka_jars.rb
ADDED
@@ -0,0 +1,18 @@
# this is a generated file, to avoid over-writing it just delete this comment
require 'jar_dependencies'

require_jar( 'io.netty', 'netty', '3.7.0.Final' )
require_jar( 'org.slf4j', 'slf4j-api', '1.7.10' )
require_jar( 'org.slf4j', 'slf4j-log4j12', '1.7.10' )
require_jar( 'log4j', 'log4j', '1.2.17' )
require_jar( 'jline', 'jline', '0.9.94' )
require_jar( 'net.sf.jopt-simple', 'jopt-simple', '3.2' )
require_jar( 'org.xerial.snappy', 'snappy-java', '1.1.1.6' )
require_jar( 'junit', 'junit', '3.8.1' )
require_jar( 'com.yammer.metrics', 'metrics-core', '2.2.0' )
require_jar( 'org.apache.zookeeper', 'zookeeper', '3.4.6' )
require_jar( 'net.jpountz.lz4', 'lz4', '1.2.0' )
require_jar( 'org.apache.kafka', 'kafka-clients', '0.8.2.1' )
require_jar( 'org.apache.kafka', 'kafka_2.10', '0.8.2.1' )
require_jar( 'com.101tec', 'zkclient', '0.3' )
require_jar( 'org.scala-lang', 'scala-library', '2.10.4' )
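This generated file is what puts the Kafka jars on the JVM classpath. A minimal sketch of how it is typically consumed (the require name simply matches the file path above):

require 'jruby-kafka_jars'                     # loads every jar listed above via jar_dependencies
java_import 'kafka.javaapi.producer.Producer'  # Kafka's Java/Scala classes are then importable from JRuby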
metadata
ADDED
@@ -0,0 +1,97 @@
--- !ruby/object:Gem::Specification
name: sorceror_jruby-kafka
version: !ruby/object:Gem::Version
  version: 2.0.0
platform: java
authors:
- Kareem Kouddous
autorequire:
bindir: bin
cert_chain: []
date: 2015-06-12 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0'
  name: jar-dependencies
  prerelease: false
  type: :runtime
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.1'
  name: ruby-maven
  prerelease: false
  type: :runtime
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.1'
- !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '10.4'
  name: rake
  prerelease: false
  type: :development
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '10.4'
description: Ruby wrapper around java kafka high-level consumer
email:
- kareemknyc@gmail.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- lib/jruby-kafka.rb
- lib/jruby-kafka/consumer.rb
- lib/jruby-kafka/error.rb
- lib/jruby-kafka/ext.rb
- lib/jruby-kafka/group.rb
- lib/jruby-kafka/kafka-producer.rb
- lib/jruby-kafka/namespace.rb
- lib/jruby-kafka/producer.rb
- lib/jruby-kafka_jars.rb
homepage: https://github.com/itskoko/jruby-kafka
licenses:
- Apache 2.0
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements:
- jar 'org.apache.kafka:kafka_2.10', '0.8.2.1'
- jar 'org.slf4j:slf4j-log4j12', '1.7.10'
rubyforge_project:
rubygems_version: 2.4.5
signing_key:
specification_version: 4
summary: jruby Kafka wrapper
test_files: []