logstash-kafka 0.7.0-java → 0.7.1-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 00adb861cf25fabd675f94d437b0e4dc36a7fbad
-  data.tar.gz: ce5d97a0e3b9081bb60f595364da29125e2b6a5d
+  metadata.gz: 6d4335bb55722df77c77e125ed449af1314267d6
+  data.tar.gz: e30d2c5517d8d33fafc354f00996eee9bd1bbb19
 SHA512:
-  metadata.gz: 1b0675791f6af79c8cc64371787cb322ba108bdc2d431cf55bf49a543b92a8d4cab09efeb593803b50b05d7dd88f6f3404d76f3207de665c816d5b061183abd2
-  data.tar.gz: 919df956e078237ee9f01de276227b5f61ab454faf830e794962c76fc2132ce0cd928137aeaa648ef36e4fc8e2e9b66a3e893101cde6f150595ac017d401edec
+  metadata.gz: f98d93c2e93a03884331f7931b4b4b76b3eb67600fc3420cbbca2d81581fa9c0876df5d8c51078aae95f1a0e8354febb545b34f37c911f8f98e7f6dd5250c32c
+  data.tar.gz: 944d346a9daf69dbe79cf701abe66d62362acbc7df3d47d94d000f7dee7cecb42727476ebe31d568432e01867c1869c0c774bdfcb34bcf5149f7c36e9dc5abdd
lib/logstash/inputs/kafka.rb CHANGED
@@ -6,7 +6,7 @@ require 'jruby-kafka'
 # by Kafka to read messages from the broker. It also maintains the state of what has been
 # consumed using Zookeeper. The default input codec is json
 #
-# The only required configuration is the topic name. By default it will connect to a Zookeeper
+# You must configure topic_id, white_list or black_list. By default it will connect to a Zookeeper
 # running on localhost. All the broker information is read from Zookeeper state
 #
 # Ideally you should have as many threads as the number of partitions for a perfect balance --
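As the revised doc comment states, topic selection is now one of three mutually exclusive options. A minimal sketch of each in Logstash config syntax (the connection string and topic names are illustrative, not taken from this diff):

  input {
    kafka {
      zk_connect => "localhost:2181"   # the documented default, shown for clarity
      topic_id   => "logs"             # consume one named topic, as before
    }
  }

  # or, instead of topic_id, filter topics by pattern:
  #   white_list => "logs-.*"       # consume only topics matching the pattern
  #   black_list => "internal-.*"   # consume everything except matching topics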
@@ -37,16 +37,19 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   # the same consumer group.
   config :group_id, :validate => :string, :default => 'logstash'
   # The topic to consume messages from
-  config :topic_id, :validate => :string, :required => true
-  # Specify whether to jump to beginning of the queue when there is no initial offset in
-  # ZooKeeper, or if an offset is out of range. If this is false, messages are consumed
-  # from the latest offset
-  #
-  # If reset_beginning is true, the consumer will check ZooKeeper to see if any other group members
-  # are present and active. If not, the consumer deletes any offset information in the ZooKeeper
-  # and starts at the smallest offset. If other group members are present reset_beginning will not
-  # work and the consumer threads will rejoin the consumer group.
+  config :topic_id, :validate => :string, :default => nil
+  # Whitelist of topics to include for consumption.
+  config :white_list, :validate => :string, :default => nil
+  # Blacklist of topics to exclude from consumption.
+  config :black_list, :validate => :string, :default => nil
+  # Reset the consumer group to start at the earliest message present in the log by clearing any
+  # offsets for the group stored in Zookeeper. This is destructive! Must be used in conjunction
+  # with auto_offset_reset => 'smallest'
   config :reset_beginning, :validate => :boolean, :default => false
+  # "smallest" or "largest" - (optional, default 'largest') If the consumer does not already
+  # have an established offset or offset is invalid, start with the earliest message present in the log (smallest) or
+  # after the last message in the log (largest).
+  config :auto_offset_reset, :validate => %w( largest smallest ), :default => 'largest'
   # Number of threads to read from the partitions. Ideally you should have as many threads as the
   # number of partitions for a perfect balance. More threads than partitions means that some
   # threads will be idle. Less threads means a single thread could be consuming from more than
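The new reset_beginning doc comment ties it to auto_offset_reset: clearing the group's Zookeeper offsets only rewinds consumption if the offset-reset policy is 'smallest'. A hedged usage sketch (values illustrative):

  input {
    kafka {
      zk_connect        => "localhost:2181"
      topic_id          => "logs"
      group_id          => "logstash"
      auto_offset_reset => "smallest"   # with no valid offset, start at the earliest message
      reset_beginning   => true         # destructive: deletes the group's stored offsets first
    }
  }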
@@ -86,19 +89,28 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
         :zk_connect => @zk_connect,
         :group_id => @group_id,
         :topic_id => @topic_id,
+        :auto_offset_reset => @auto_offset_reset,
         :rebalance_max_retries => @rebalance_max_retries,
         :rebalance_backoff_ms => @rebalance_backoff_ms,
         :consumer_timeout_ms => @consumer_timeout_ms,
         :consumer_restart_on_error => @consumer_restart_on_error,
         :consumer_restart_sleep_ms => @consumer_restart_sleep_ms,
         :consumer_id => @consumer_id,
-        :fetch_message_max_bytes => @fetch_message_max_bytes
+        :fetch_message_max_bytes => @fetch_message_max_bytes,
+        :allow_topics => @white_list,
+        :filter_topics => @black_list
     }
     if @reset_beginning
       options[:reset_beginning] = 'from-beginning'
     end # if :reset_beginning
+    topic_or_filter = [@topic_id, @white_list, @black_list].compact
+    if topic_or_filter.count == 0
+      raise('topic_id, white_list or black_list required.')
+    elsif topic_or_filter.count > 1
+      raise('Invalid combination of topic_id, white_list or black_list. Use only one.')
+    end
     @kafka_client_queue = SizedQueue.new(@queue_size)
-    @consumer_group = Kafka::Group.new(options)
+    @consumer_group = create_consumer_group(options)
     @logger.info('Registering kafka', :group_id => @group_id, :topic_id => @topic_id, :zk_connect => @zk_connect)
   end # def register
 
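The new guard enforces that exactly one topic-selection option is set when the plugin registers. For example, a hypothetical config that sets two of them now fails at startup with 'Invalid combination of topic_id, white_list or black_list. Use only one.':

  input {
    kafka {
      zk_connect => "localhost:2181"
      topic_id   => "logs"      # setting both of these
      white_list => "logs-.*"   # is rejected at register time
    }
  }

Setting none of the three raises 'topic_id, white_list or black_list required.' instead.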
@@ -111,14 +123,14 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
       begin
         while true
           event = @kafka_client_queue.pop
-          queue_event("#{event}", logstash_queue)
+          queue_event(event, logstash_queue)
         end
       rescue LogStash::ShutdownSignal
         @logger.info('Kafka got shutdown signal')
         @consumer_group.shutdown
       end
       until @kafka_client_queue.empty?
-        queue_event("#{@kafka_client_queue.pop}", logstash_queue)
+        queue_event(@kafka_client_queue.pop, logstash_queue)
       end
       @logger.info('Done running kafka input')
     rescue => e
@@ -134,17 +146,26 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   end # def run
 
   private
-  def queue_event(msg, output_queue)
+  def create_consumer_group(options)
+    Kafka::Group.new(options)
+  end
+
+  private
+  def queue_event(message_and_metadata, output_queue)
     begin
-      @codec.decode(msg) do |event|
+      @codec.decode(message_and_metadata.message) do |event|
         decorate(event)
         if @decorate_events
-          event['kafka'] = {'msg_size' => msg.bytesize, 'topic' => @topic_id, 'consumer_group' => @group_id}
+          event['kafka'] = {'msg_size' => message_and_metadata.message.bytesize,
+                            'topic' => message_and_metadata.topic,
+                            'consumer_group' => @group_id,
+                            'partition' => message_and_metadata.partition,
+                            'key' => message_and_metadata.key}
         end
         output_queue << event
       end # @codec.decode
     rescue => e # parse or event creation error
-      @logger.error('Failed to create event', :message => msg, :exception => e,
+      @logger.error('Failed to create event', :message => message_and_metadata.message, :exception => e,
                     :backtrace => e.backtrace)
     end # begin
   end # def queue_event
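Because queue_event now receives the consumer's message-and-metadata object rather than a pre-stringified payload, decorate_events can report the topic, partition, and key Kafka actually delivered instead of the static @topic_id. A sketch of enabling it (values illustrative; the field names come from the diff):

  input {
    kafka {
      zk_connect      => "localhost:2181"
      white_list      => "logs-.*"
      decorate_events => true
      # each event gains a 'kafka' field containing:
      #   msg_size, topic, consumer_group, partition, key
    }
  }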
lib/logstash/outputs/kafka.rb CHANGED
@@ -131,9 +131,9 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 
     @logger.info('Registering kafka producer', :topic_id => @topic_id, :broker_list => @broker_list)
 
-    @codec.on_event do |event|
+    @codec.on_event do |event, data|
       begin
-        @producer.send_msg(@topic_id, nil, event)
+        @producer.send_msg(event.sprintf(@topic_id), nil, data)
       rescue LogStash::ShutdownSignal
         @logger.info('Kafka producer got shutdown signal')
       rescue => e
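On the producer side, passing topic_id through event.sprintf means the destination topic can be computed per event with Logstash's %{field} interpolation, and the codec's serialized output (data) is sent rather than the raw event object. An illustrative config (the 'type' field is an assumption):

  output {
    kafka {
      broker_list => "localhost:9092"
      topic_id    => "logs-%{type}"   # topic name interpolated from each event
    }
  }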
metadata CHANGED
@@ -1,30 +1,106 @@
 --- !ruby/object:Gem::Specification
 name: logstash-kafka
 version: !ruby/object:Gem::Version
-  version: 0.7.0
+  version: 0.7.1
 platform: java
 authors:
 - Joseph Lawson
-autorequire:
+autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-01-20 00:00:00.000000000 Z
+date: 2015-02-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
-  name: jruby-kafka
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: 1.4.0
+    - - <
+      - !ruby/object:Gem::Version
+        version: 2.0.0
+  name: logstash
+  prerelease: false
+  type: :runtime
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: 1.4.0
+    - - <
+      - !ruby/object:Gem::Version
+        version: 2.0.0
+- !ruby/object:Gem::Dependency
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  name: logstash-codec-plain
+  prerelease: false
+  type: :runtime
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  name: logstash-codec-json
+  prerelease: false
+  type: :runtime
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  name: jar-dependencies
+  prerelease: false
+  type: :runtime
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ~>
       - !ruby/object:Gem::Version
-        version: 1.0.0.beta
+        version: 1.1.0.beta
+  name: jruby-kafka
+  prerelease: false
   type: :runtime
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 1.1.0.beta
+- !ruby/object:Gem::Dependency
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  name: logstash-devutils
   prerelease: false
+  type: :development
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - '>='
       - !ruby/object:Gem::Version
-        version: 1.0.0.beta
-description: this is primarily to be used as an interface for logstash
+        version: '0'
+description: Kafka input and output plugins for Logstash
 email:
 - joe@joekiller.com
 executables: []
@@ -33,28 +109,28 @@ extra_rdoc_files: []
 files:
 - lib/logstash/inputs/kafka.rb
 - lib/logstash/outputs/kafka.rb
-homepage: https://github.com/joekiller/jruby-kafka
+homepage: https://github.com/joekiller/logstash-kafka
 licenses:
 - Apache 2.0
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
 required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ">="
+  - - '>='
     - !ruby/object:Gem::Version
       version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ">="
+  - - '>='
     - !ruby/object:Gem::Version
      version: '0'
 requirements: []
-rubyforge_project:
-rubygems_version: 2.2.2
-signing_key:
+rubyforge_project:
+rubygems_version: 2.1.9
+signing_key:
 specification_version: 4
-summary: jruby Kafka wrapper
+summary: Provides input and output plugin functionality for Logstash
 test_files: []