jruby-kafka 0.0.1 → 0.0.3
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/jruby-kafka/client.rb +6 -0
- data/lib/jruby-kafka/error.rb +9 -0
- data/lib/jruby-kafka/group.rb +55 -22
- data/lib/kafka.rb +5 -0
- metadata +5 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: f29e4e146f6d94e9aa3622e67897435f6303f7fe
|
4
|
+
data.tar.gz: 6d588c93d8349ed20c27d6d24997acbaaf0effa8
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: af2d509b566451f0a37b36ec87e1929bf5e2ea34a01b9ff57d9a561e9f8d12ae512b01a346794f74d19cd094dd4a32fe3c26266027b6584387421de0597b5167
|
7
|
+
data.tar.gz: 040f39ffb72f187efce499345212b67155d07a9552067d9456fc85697701413f9953c3cf142dc6ce7d35b30bddcaf3563b616d02c76463ffb0a90d050633c84b
|
data/lib/jruby-kafka/group.rb
CHANGED
@@ -4,9 +4,11 @@ require "java"
|
|
4
4
|
|
5
5
|
require "jruby-kafka/namespace"
|
6
6
|
require "jruby-kafka/consumer"
|
7
|
+
require "jruby-kafka/error"
|
7
8
|
|
8
9
|
java_import 'java.util.concurrent.ExecutorService'
|
9
10
|
java_import 'java.util.concurrent.Executors'
|
11
|
+
java_import 'org.I0Itec.zkclient.exception.ZkException'
|
10
12
|
|
11
13
|
class Kafka::Group
|
12
14
|
@consumer
|
@@ -19,40 +21,53 @@ class Kafka::Group
|
|
19
21
|
# Create a Kafka client group
|
20
22
|
#
|
21
23
|
# options:
|
22
|
-
# :
|
24
|
+
# :zk_connect => "localhost:2181" - REQUIRED: The connection string for the
|
23
25
|
# zookeeper connection in the form host:port. Multiple URLS can be given to allow fail-over.
|
24
|
-
# :
|
25
|
-
# :
|
26
|
-
# :
|
26
|
+
# :zk_connect_timeout => "6000" - (optional) The max time that the client waits while establishing a connection to zookeeper.
|
27
|
+
# :group_id => "group" - REQUIRED: The group id to consume on.
|
28
|
+
# :topic_id => "topic" - REQUIRED: The topic id to consume on.
|
29
|
+
# :reset_beginning => "from-beginning" - (optional) If the consumer does not already have an established offset
|
27
30
|
# to consume from, start with the earliest message present in the log rather than the latest message.
|
31
|
+
#
|
28
32
|
def initialize(options={})
|
29
33
|
validate_required_arguments(options)
|
30
34
|
|
31
|
-
@zk_connect = options[:
|
32
|
-
@group_id = options[:
|
33
|
-
@topic = options[:
|
35
|
+
@zk_connect = options[:zk_connect]
|
36
|
+
@group_id = options[:group_id]
|
37
|
+
@topic = options[:topic_id]
|
38
|
+
@zk_session_timeout = '6000'
|
39
|
+
@zk_connect_timeout = '6000'
|
40
|
+
@zk_sync_time = '2000'
|
41
|
+
@auto_offset_reset = 'largest'
|
42
|
+
@auto_commit_interval = '1000'
|
43
|
+
@running = false
|
44
|
+
|
45
|
+
if options[:zk_connect_timeout]
|
46
|
+
@zk_connect_timeout = options[:zk_connect_timeout]
|
47
|
+
end
|
48
|
+
if options[:zk_session_timeout]
|
49
|
+
@zk_session_timeout = options[:zk_session_timeout]
|
50
|
+
end
|
51
|
+
if options[:zk_sync_time]
|
52
|
+
@zk_sync_time = options[:zk_sync_time]
|
53
|
+
end
|
54
|
+
if options[:auto_commit_interval]
|
55
|
+
@auto_commit_interval = options[:auto_commit_interval]
|
56
|
+
end
|
34
57
|
|
35
58
|
|
36
|
-
if options[:
|
37
|
-
if options[:
|
59
|
+
if options[:reset_beginning]
|
60
|
+
if options[:reset_beginning] == 'from-beginning'
|
38
61
|
@auto_offset_reset = 'smallest'
|
39
62
|
else
|
40
63
|
@auto_offset_reset = 'largest'
|
41
64
|
end
|
42
|
-
else
|
43
|
-
@auto_offset_reset = 'largest'
|
44
|
-
end
|
45
|
-
|
46
|
-
if @auto_offset_reset == 'smallest'
|
47
|
-
Java::kafka::utils::ZkUtils.maybeDeletePath(@zk_connect, "/consumers/#{@group_id}")
|
48
65
|
end
|
49
|
-
|
50
|
-
@consumer = Java::kafka::consumer::Consumer.createJavaConsumerConnector(createConsumerConfig())
|
51
66
|
end
|
52
67
|
|
53
68
|
private
|
54
69
|
def validate_required_arguments(options={})
|
55
|
-
[:
|
70
|
+
[:zk_connect, :group_id, :topic_id].each do |opt|
|
56
71
|
raise(ArgumentError, "#{opt} is required.") unless options[opt]
|
57
72
|
end
|
58
73
|
end
|
@@ -65,10 +80,20 @@ class Kafka::Group
|
|
65
80
|
if @executor
|
66
81
|
@executor.shutdown()
|
67
82
|
end
|
83
|
+
@running = false
|
68
84
|
end
|
69
85
|
|
70
86
|
public
|
71
87
|
def run(a_numThreads, a_queue)
|
88
|
+
begin
|
89
|
+
if @auto_offset_reset == 'smallest'
|
90
|
+
Java::kafka::utils::ZkUtils.maybeDeletePath(@zk_connect, "/consumers/#{@group_id}")
|
91
|
+
end
|
92
|
+
|
93
|
+
@consumer = Java::kafka::consumer::Consumer.createJavaConsumerConnector(createConsumerConfig())
|
94
|
+
rescue ZkException => e
|
95
|
+
raise KafkaError.new(e), "Got ZkException: #{e}"
|
96
|
+
end
|
72
97
|
topicCountMap = java.util.HashMap.new()
|
73
98
|
thread_value = a_numThreads.to_java Java::int
|
74
99
|
topicCountMap.put(@topic, thread_value)
|
@@ -76,12 +101,19 @@ class Kafka::Group
|
|
76
101
|
streams = Array.new(consumerMap[@topic])
|
77
102
|
|
78
103
|
@executor = Executors.newFixedThreadPool(a_numThreads)
|
104
|
+
@executor_submit = @executor.java_method(:submit, [Java::JavaLang::Runnable.java_class])
|
79
105
|
|
80
106
|
threadNumber = 0
|
81
107
|
for stream in streams
|
82
|
-
@
|
108
|
+
@executor_submit.call(Kafka::Consumer.new(stream, threadNumber, a_queue))
|
83
109
|
threadNumber += 1
|
84
110
|
end
|
111
|
+
@running = true
|
112
|
+
end
|
113
|
+
|
114
|
+
public
|
115
|
+
def running?
|
116
|
+
@running
|
85
117
|
end
|
86
118
|
|
87
119
|
private
|
@@ -89,9 +121,10 @@ class Kafka::Group
|
|
89
121
|
properties = java.util.Properties.new()
|
90
122
|
properties.put("zookeeper.connect", @zk_connect)
|
91
123
|
properties.put("group.id", @group_id)
|
92
|
-
properties.put("zookeeper.
|
93
|
-
properties.put("zookeeper.
|
94
|
-
properties.put("
|
124
|
+
properties.put("zookeeper.connection.timeout.ms", @zk_connect_timeout)
|
125
|
+
properties.put("zookeeper.session.timeout.ms", @zk_session_timeout)
|
126
|
+
properties.put("zookeeper.sync.time.ms", @zk_sync_time)
|
127
|
+
properties.put("auto.commit.interval.ms", @auto_commit_interval)
|
95
128
|
properties.put("auto.offset.reset", @auto_offset_reset)
|
96
129
|
return Java::kafka::consumer::ConsumerConfig.new(properties)
|
97
130
|
end
|
data/lib/kafka.rb
ADDED
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: jruby-kafka
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.0.1
|
4
|
+
version: 0.0.3
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Joseph Lawson
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2013-10-
|
11
|
+
date: 2013-10-16 00:00:00.000000000 Z
|
12
12
|
dependencies: []
|
13
13
|
description: this is primarily to be used as an interface for logstash
|
14
14
|
email:
|
@@ -18,8 +18,11 @@ extensions: []
|
|
18
18
|
extra_rdoc_files: []
|
19
19
|
files:
|
20
20
|
- lib/jruby-kafka.rb
|
21
|
+
- lib/kafka.rb
|
22
|
+
- lib/jruby-kafka/client.rb
|
21
23
|
- lib/jruby-kafka/config.rb
|
22
24
|
- lib/jruby-kafka/consumer.rb
|
25
|
+
- lib/jruby-kafka/error.rb
|
23
26
|
- lib/jruby-kafka/group.rb
|
24
27
|
- lib/jruby-kafka/namespace.rb
|
25
28
|
homepage: https://github.com/joekiller/jruby-kafka
|