jruby-kafka 0.0.3 → 0.0.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: f29e4e146f6d94e9aa3622e67897435f6303f7fe
-  data.tar.gz: 6d588c93d8349ed20c27d6d24997acbaaf0effa8
+  metadata.gz: 80e5d2eb808795e2f28d2df2e264840c25073ca9
+  data.tar.gz: 8483ec0b6fcc88dfadd0424d84fdefc63ab42897
 SHA512:
-  metadata.gz: af2d509b566451f0a37b36ec87e1929bf5e2ea34a01b9ff57d9a561e9f8d12ae512b01a346794f74d19cd094dd4a32fe3c26266027b6584387421de0597b5167
-  data.tar.gz: 040f39ffb72f187efce499345212b67155d07a9552067d9456fc85697701413f9953c3cf142dc6ce7d35b30bddcaf3563b616d02c76463ffb0a90d050633c84b
+  metadata.gz: 0d84128636a3da96081bf0354851abf2ddc079ce35fe45c2f726d6f4d1f332df6cfeaaba4f13f04477d21e2c9e1649c0e8593753ca9e8b91b1d62f5237ae2281
+  data.tar.gz: 8d79aa88dc013327e5137eab9ec59dad24d54927540c24d88467bcb8edb6fa864266d76d53624d1f90486db5086371c26844e65790bf32ec9294bff97b6f0d34
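Both digest sets can be recomputed locally to verify a downloaded gem. A minimal Ruby sketch using only the standard library; it assumes metadata.gz and data.tar.gz have already been extracted from the .gem archive (a .gem file is a tar containing both):

    require 'digest'

    # Recompute the digests recorded in checksums.yaml for the two
    # files inside the .gem archive.
    %w[metadata.gz data.tar.gz].each do |name|
      bytes = File.binread(name)
      puts "#{name}:"
      puts "  SHA1:   #{Digest::SHA1.hexdigest(bytes)}"
      puts "  SHA512: #{Digest::SHA512.hexdigest(bytes)}"
    end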
lib/jruby-kafka/consumer.rb CHANGED
@@ -3,6 +3,7 @@ require "jruby-kafka/namespace"
 
 java_import 'kafka.consumer.ConsumerIterator'
 java_import 'kafka.consumer.KafkaStream'
+java_import 'kafka.common.ConsumerRebalanceFailedException'
 
 class Kafka::Consumer
   include Java::JavaLang::Runnable
@@ -21,7 +22,13 @@ class Kafka::Consumer
   def run
     it = @m_stream.iterator()
     while it.hasNext()
-      @m_queue << it.next().message()
+      begin
+        begin
+          @m_queue << it.next().message()
+        rescue ConsumerRebalanceFailedException => e
+          raise KafkaError.new(e), "Got ConsumerRebalanceFailedException: #{e}"
+        end
+      end
     end
   end
 end
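The new rescue re-raises via raise KafkaError.new(e), "...". When raise is given an exception instance plus a separate message, Ruby calls exception(message) on that instance, so the underlying Java ConsumerRebalanceFailedException can travel inside a Ruby-level error while the message is replaced. A standalone sketch of the same pattern, using a stand-in class since KafkaError itself is defined elsewhere in the gem:

    # Stand-in for the gem's KafkaError: carries the underlying exception.
    class WrappedKafkaError < StandardError
      attr_reader :wrapped

      def initialize(cause)
        @wrapped = cause
        super(cause.to_s)
      end
    end

    begin
      begin
        raise IOError, 'rebalance failed'   # stands in for ConsumerRebalanceFailedException
      rescue IOError => e
        # Same shape as the consumer loop above: wrap, then replace the message.
        raise WrappedKafkaError.new(e), "Got ConsumerRebalanceFailedException: #{e}"
      end
    rescue WrappedKafkaError => e
      puts e.message   # => Got ConsumerRebalanceFailedException: rebalance failed
      puts e.wrapped   # => rebalance failed (the original error, still accessible)
    end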
lib/jruby-kafka/group.rb CHANGED
@@ -41,6 +41,17 @@ class Kafka::Group
     @auto_offset_reset = 'largest'
     @auto_commit_interval = '1000'
     @running = false
+    @rebalance_max_retries = '4'
+    @rebalance_backoff_ms = '2000'
+    @socket_timeout_ms = "#{30 * 1000}"
+    @socket_receive_buffer_bytes = "#{64 * 1024}"
+    @fetch_message_max_bytes = "#{1024 * 1024}"
+    @auto_commit_enable = true
+    @queued_max_message_chunks = '10'
+    @fetch_min_bytes = '1'
+    @fetch_wait_max_ms = '100'
+    @refresh_leader_backoff_ms = '200'
+    @consumer_timeout_ms = '-1'
 
     if options[:zk_connect_timeout]
       @zk_connect_timeout = options[:zk_connect_timeout]
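Every new default except the boolean @auto_commit_enable is kept as a string, because the values are later handed to a java.util.Properties object that Kafka parses from strings; the interpolated forms just do the arithmetic up front:

    "#{30 * 1000}"    # => "30000"   (socket.timeout.ms)
    "#{64 * 1024}"    # => "65536"   (socket.receive.buffer.bytes)
    "#{1024 * 1024}"  # => "1048576" (fetch.message.max.bytes)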
@@ -55,6 +66,49 @@ class Kafka::Group
       @auto_commit_interval = options[:auto_commit_interval]
     end
 
+    if options[:rebalance_max_retries]
+      @rebalance_max_retries = options[:rebalance_max_retries]
+    end
+
+    if options[:rebalance_backoff_ms]
+      @rebalance_backoff_ms = options[:rebalance_backoff_ms]
+    end
+
+    if options[:socket_timeout_ms]
+      @socket_timeout_ms = options[:socket_timeout_ms]
+    end
+
+    if options[:socket_receive_buffer_bytes]
+      @socket_receive_buffer_bytes = options[:socket_receive_buffer_bytes]
+    end
+
+    if options[:fetch_message_max_bytes]
+      @fetch_message_max_bytes = options[:fetch_message_max_bytes]
+    end
+
+    if options[:auto_commit_enable]
+      @auto_commit_enable = options[:auto_commit_enable]
+    end
+
+    if options[:queued_max_message_chunks]
+      @queued_max_message_chunks = options[:queued_max_message_chunks]
+    end
+
+    if options[:fetch_min_bytes]
+      @fetch_min_bytes = options[:fetch_min_bytes]
+    end
+
+    if options[:fetch_wait_max_ms]
+      @fetch_wait_max_ms = options[:fetch_wait_max_ms]
+    end
+
+    if options[:refresh_leader_backoff_ms]
+      @refresh_leader_backoff_ms = options[:refresh_leader_backoff_ms]
+    end
+
+    if options[:consumer_timeout_ms]
+      @consumer_timeout_ms = options[:consumer_timeout_ms]
+    end
 
     if options[:reset_beginning]
       if options[:reset_beginning] == 'from-beginning'
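Each new option overrides its default only when present in the options hash. A hedged usage sketch; the connection keys (:zk_connect, :group_id, :topic_id) do not appear in this diff and are assumed from the rest of the class:

    require 'jruby-kafka'

    group = Kafka::Group.new(
      :zk_connect              => 'localhost:2181',      # assumed key, not in this diff
      :group_id                => 'logstash',            # assumed key, not in this diff
      :topic_id                => 'events',              # assumed key, not in this diff
      :rebalance_max_retries   => '10',                  # retry rebalancing longer
      :rebalance_backoff_ms    => '5000',                # wait longer between retries
      :fetch_message_max_bytes => "#{2 * 1024 * 1024}",  # accept messages up to 2 MB
      :consumer_timeout_ms     => '5000'                 # stop blocking after 5 s idle
    )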
@@ -126,6 +180,17 @@ class Kafka::Group
     properties.put("zookeeper.sync.time.ms", @zk_sync_time)
     properties.put("auto.commit.interval.ms", @auto_commit_interval)
     properties.put("auto.offset.reset", @auto_offset_reset)
+    properties.put("rebalance.max.retries", @rebalance_max_retries)
+    properties.put("rebalance.backoff.ms", @rebalance_backoff_ms)
+    properties.put("socket.timeout.ms", @socket_timeout_ms)
+    properties.put("socket.receive.buffer.bytes", @socket_receive_buffer_bytes)
+    properties.put("fetch.message.max.bytes", @fetch_message_max_bytes)
+    properties.put("auto.commit.enable", @auto_commit_enable)
+    properties.put("queued.max.message.chunks", @queued_max_message_chunks)
+    properties.put("fetch.min.bytes", @fetch_min_bytes)
+    properties.put("fetch.wait.max.ms", @fetch_wait_max_ms)
+    properties.put("refresh.leader.backoff.ms", @refresh_leader_backoff_ms)
+    properties.put("consumer.timeout.ms", @consumer_timeout_ms)
     return Java::kafka::consumer::ConsumerConfig.new(properties)
   end
 end
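Each instance variable maps one-to-one onto a Kafka 0.8 high-level consumer property. For reference, the equivalent hand-built config under JRuby looks roughly like this (a sketch, assuming the Kafka broker jars are already on the classpath; values stay strings because ConsumerConfig parses them out of the Properties object):

    require 'java'

    properties = java.util.Properties.new
    properties.put('zookeeper.connect',     'localhost:2181')
    properties.put('group.id',              'example')
    # A couple of the tunables added above, with their shipped defaults:
    properties.put('rebalance.max.retries', '4')
    properties.put('rebalance.backoff.ms',  '2000')
    properties.put('consumer.timeout.ms',   '-1')   # -1 blocks forever on an empty stream

    config = Java::kafka::consumer::ConsumerConfig.new(properties)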
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: jruby-kafka
 version: !ruby/object:Gem::Version
-  version: 0.0.3
+  version: 0.0.4
 platform: ruby
 authors:
 - Joseph Lawson
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2013-10-16 00:00:00.000000000 Z
+date: 2013-10-28 00:00:00.000000000 Z
 dependencies: []
 description: this is primarily to be used as an interface for logstash
 email: