logstash-output-kafka 2.0.5 → 3.0.0.beta1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 7b5b8efdd636a8bb4e471b679485d416f945db02
- data.tar.gz: c01e2b497f395cbbcba1337f27b84bb018ad3aea
+ metadata.gz: 4566180256b76a147bc313ba93a53ea87f0dd6b5
+ data.tar.gz: 01199234963282f640588f6e13ca5fa725d18313
  SHA512:
- metadata.gz: a856a0c4fe61dead3491d53fd10eaae3bd9582efbb77392f4649a01b88e08934e765cb7eb0dfb55486f3e832af7fc5c248770dfcaa5226bc47d4485fd1cca999
- data.tar.gz: 261084bc6d8b42e46e202e50b7f419515be42206d57415d00b9f9e00ede642c238ec8154c6348a14d0313d1d82646d0d44ad5e9df63cdbadc5996931186f2178
+ metadata.gz: f438f87c576958d1d7d1bcf1aeae82be404120b26e27cb64ccbd659b13fa149bfc88b74cfe75df0c9727b6f776bdf2ca3339c9ff0e7574027b54bc5a2d8318b8
+ data.tar.gz: 1297979da8c0d1bcad6f739f5c6e7fb8d5af9cafb64c441ba08ec1dbf8c6ef194c7c231dec9d23a39fd371f162c9594faca3a5733c4497fdb68b2a6e98ef743b
data/CHANGELOG.md CHANGED
@@ -1,9 +1,13 @@
- # 2.0.4
- - [Internal] Pin jruby-kafka to v1.6 to match input
-
- # 2.0.3
- - New dependency requirements for logstash-core for the 5.0 release
- - [Internal] Pin jruby-kafka to v2.2.2
+ ## 3.0.0.beta1
+ - Note: breaking changes in this version, and not backward compatible with Kafka 0.8 broker.
+ Please read carefully before installing
+ - Breaking: Changed default codec from json to plain. Json codec is really slow when used
+ with inputs because inputs by default are single threaded. This makes it a bad
+ first user experience. Plain codec is a much better default.
+ - Moved internal APIs to use Kafka's Java API directly instead of jruby-kafka. This
+ makes it consistent with logstash-input-kafka
+ - Breaking: Change in configuration options
+ - Added SSL options so you can connect securely to a 0.9 Kafka broker

  ## 2.0.2
  - [Internal] Pin jruby-kafka to v1.5
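For orientation, a minimal sketch of how the new SSL options added in this release might be exercised; the broker address, truststore path, and password are illustrative placeholders, and a Kafka 0.9 broker with an SSL listener is assumed:

    # Sketch only: uses the ssl_* options introduced in 3.0.0.beta1 (see kafka.rb below).
    config = {
      'topic_id'                => 'logs',
      'bootstrap_servers'       => 'broker1:9093',                      # illustrative SSL listener
      'ssl'                     => true,
      'ssl_truststore_location' => '/etc/kafka/client.truststore.jks',  # illustrative path
      'ssl_truststore_password' => 'changeit'                           # illustrative password
    }
    kafka = LogStash::Outputs::Kafka.new(config)
    kafka.register  # raises LogStash::ConfigurationError if ssl is true but no truststore is given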
lib/logstash/outputs/kafka.rb CHANGED
@@ -1,6 +1,7 @@
  require 'logstash/namespace'
  require 'logstash/outputs/base'
- require 'jruby-kafka'
+ require 'java'
+ require 'logstash-output-kafka_jars.rb'

  # Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
  # the broker.
@@ -24,25 +25,8 @@ require 'jruby-kafka'
  class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  config_name 'kafka'

- default :codec, 'json'
+ default :codec, 'plain'

- # The topic to produce messages to
- config :topic_id, :validate => :string, :required => true
- # This is for bootstrapping and the producer will only use it for getting metadata (topics,
- # partitions and replicas). The socket connections for sending the actual data will be
- # established based on the broker information returned in the metadata. The format is
- # `host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
- # subset of brokers.
- config :bootstrap_servers, :validate => :string, :default => 'localhost:9092'
- # Serializer class for the key of the message
- config :key_serializer, :validate => :string, :default => 'org.apache.kafka.common.serialization.StringSerializer'
- # Serializer class for the value of the message
- config :value_serializer, :validate => :string, :default => 'org.apache.kafka.common.serialization.StringSerializer'
- # The key that will be included with the record
- #
- # If a `message_key` is present, a partition will be chosen using a hash of the key.
- # If not present, a partition for the message will be assigned in a round-robin fashion.
- config :message_key, :validate => :string
  # The number of acknowledgments the producer requires the leader to have received
  # before considering a request complete.
  #
@@ -51,22 +35,31 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  # will respond without awaiting full acknowledgement from all followers.
  # acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record.
  config :acks, :validate => ["0", "1", "all"], :default => "1"
+ # The producer will attempt to batch records together into fewer requests whenever multiple
+ # records are being sent to the same partition. This helps performance on both the client
+ # and the server. This configuration controls the default batch size in bytes.
+ config :batch_size, :validate => :number, :default => 16384
+ # This is for bootstrapping and the producer will only use it for getting metadata (topics,
+ # partitions and replicas). The socket connections for sending the actual data will be
+ # established based on the broker information returned in the metadata. The format is
+ # `host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
+ # subset of brokers.
+ config :bootstrap_servers, :validate => :string, :default => 'localhost:9092'
+ # When our memory buffer is exhausted we must either stop accepting new
+ # records (block) or throw errors. By default this setting is true and we block,
+ # however in some scenarios blocking is not desirable and it is better to immediately give an error.
+ config :block_on_buffer_full, :validate => :boolean, :default => true, :deprecated => "This config will be removed in a future release"
  # The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
  config :buffer_memory, :validate => :number, :default => 33554432
  # The compression type for all data generated by the producer.
  # The default is none (i.e. no compression). Valid values are none, gzip, or snappy.
  config :compression_type, :validate => ["none", "gzip", "snappy"], :default => "none"
- # Setting a value greater than zero will cause the client to
- # resend any record whose send fails with a potentially transient error.
- config :retries, :validate => :number, :default => 0
- # The producer will attempt to batch records together into fewer requests whenever multiple
- # records are being sent to the same partition. This helps performance on both the client
- # and the server. This configuration controls the default batch size in bytes.
- config :batch_size, :validate => :number, :default => 16384
  # The id string to pass to the server when making requests.
  # The purpose of this is to be able to track the source of requests beyond just
  # ip/port by allowing a logical application name to be included with the request
  config :client_id, :validate => :string
+ # Serializer class for the key of the message
+ config :key_serializer, :validate => :string, :default => 'org.apache.kafka.common.serialization.StringSerializer'
  # The producer groups together any records that arrive in between request
  # transmissions into a single batched request. Normally this occurs only under
  # load when records arrive faster than they can be sent out. However in some circumstances
@@ -77,63 +70,61 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  config :linger_ms, :validate => :number, :default => 0
  # The maximum size of a request
  config :max_request_size, :validate => :number, :default => 1048576
+ # The key for the message
+ config :message_key, :validate => :string
+ # the timeout setting for initial metadata request to fetch topic metadata.
+ config :metadata_fetch_timeout_ms, :validate => :number, :default => 60000
+ # the max time in milliseconds before a metadata refresh is forced.
+ config :metadata_max_age_ms, :validate => :number, :default => 300000
  # The size of the TCP receive buffer to use when reading data
  config :receive_buffer_bytes, :validate => :number, :default => 32768
+ # The amount of time to wait before attempting to reconnect to a given host when a connection fails.
+ config :reconnect_backoff_ms, :validate => :number, :default => 10
+ # The configuration controls the maximum amount of time the client will wait
+ # for the response of a request. If the response is not received before the timeout
+ # elapses the client will resend the request if necessary or fail the request if
+ # retries are exhausted.
+ config :request_timeout_ms, :validate => :string
+ # Setting a value greater than zero will cause the client to
+ # resend any record whose send fails with a potentially transient error.
+ config :retries, :validate => :number, :default => 0
+ # The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
+ config :retry_backoff_ms, :validate => :number, :default => 100
  # The size of the TCP send buffer to use when sending data.
  config :send_buffer_bytes, :validate => :number, :default => 131072
+ # Enable SSL/TLS secured communication to Kafka broker. Note that secure communication
+ # is only available with a broker running v0.9 of Kafka.
+ config :ssl, :validate => :boolean, :default => false
+ # The JKS truststore path to validate the Kafka broker's certificate.
+ config :ssl_truststore_location, :validate => :path
+ # The truststore password
+ config :ssl_truststore_password, :validate => :password
+ # If client authentication is required, this setting stores the keystore path.
+ config :ssl_keystore_location, :validate => :path
+ # If client authentication is required, this setting stores the keystore password
+ config :ssl_keystore_password, :validate => :password
  # The configuration controls the maximum amount of time the server will wait for acknowledgments
  # from followers to meet the acknowledgment requirements the producer has specified with the
  # acks configuration. If the requested number of acknowledgments are not met when the timeout
  # elapses an error will be returned. This timeout is measured on the server side and does not
  # include the network latency of the request.
- config :timeout_ms, :validate => :number, :default => 30000
- # When our memory buffer is exhausted we must either stop accepting new
- # records (block) or throw errors. By default this setting is true and we block,
- # however in some scenarios blocking is not desirable and it is better to immediately give an error.
- config :block_on_buffer_full, :validate => :boolean, :default => true
- # the timeout setting for initial metadata request to fetch topic metadata.
- config :metadata_fetch_timeout_ms, :validate => :number, :default => 60000
- # the max time in milliseconds before a metadata refresh is forced.
- config :metadata_max_age_ms, :validate => :number, :default => 300000
- # The amount of time to wait before attempting to reconnect to a given host when a connection fails.
- config :reconnect_backoff_ms, :validate => :number, :default => 10
- # The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
- config :retry_backoff_ms, :validate => :number, :default => 100
+ config :timeout_ms, :validate => :number, :default => 30000, :deprecated => "This config will be removed in a future release. Please use request_timeout_ms"
+ # The topic to produce messages to
+ config :topic_id, :validate => :string, :required => true
+ # Serializer class for the value of the message
+ config :value_serializer, :validate => :string, :default => 'org.apache.kafka.common.serialization.StringSerializer'

  public
  def register
- LogStash::Logger.setup_log4j(@logger)
-
- options = {
- :key_serializer => @key_serializer,
- :value_serializer => @value_serializer,
- :bootstrap_servers => @bootstrap_servers,
- :acks => @acks,
- :buffer_memory => @buffer_memory,
- :compression_type => @compression_type,
- :retries => @retries,
- :batch_size => @batch_size,
- :client_id => @client_id,
- :linger_ms => @linger_ms,
- :max_request_size => @max_request_size,
- :receive_buffer_bytes => @receive_buffer_bytes,
- :send_buffer_bytes => @send_buffer_bytes,
- :timeout_ms => @timeout_ms,
- :block_on_buffer_full => @block_on_buffer_full,
- :metadata_fetch_timeout_ms => @metadata_fetch_timeout_ms,
- :metadata_max_age_ms => @metadata_max_age_ms,
- :reconnect_backoff_ms => @reconnect_backoff_ms,
- :retry_backoff_ms => @retry_backoff_ms
- }
- @producer = Kafka::KafkaProducer.new(options)
- @producer.connect
-
- @logger.info('Registering kafka producer', :topic_id => @topic_id, :bootstrap_servers => @bootstrap_servers)
-
+ @producer = create_producer
  @codec.on_event do |event, data|
  begin
- key = if @message_key.nil? then nil else event.sprintf(@message_key) end
- @producer.send_msg(event.sprintf(@topic_id), nil, key, data)
+ if @message_key.nil?
+ record = org.apache.kafka.clients.producer.ProducerRecord.new(event.sprintf(@topic_id), data)
+ else
+ record = org.apache.kafka.clients.producer.ProducerRecord.new(event.sprintf(@topic_id), event.sprintf(@message_key), data)
+ end
+ @producer.send(record)
  rescue LogStash::ShutdownSignal
  @logger.info('Kafka producer got shutdown signal')
  rescue => e
@@ -141,10 +132,10 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  :exception => e)
  end
  end
+
  end # def register

  def receive(event)
-
  if event == LogStash::SHUTDOWN
  return
  end
@@ -154,4 +145,47 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
  def close
  @producer.close
  end
+
+ private
+ def create_producer
+ begin
+ props = java.util.Properties.new
+ kafka = org.apache.kafka.clients.producer.ProducerConfig
+
+ props.put(kafka::ACKS_CONFIG, acks)
+ props.put(kafka::BATCH_SIZE_CONFIG, batch_size.to_s)
+ props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, bootstrap_servers)
+ props.put(kafka::BUFFER_MEMORY_CONFIG, buffer_memory.to_s)
+ props.put(kafka::COMPRESSION_TYPE_CONFIG, compression_type)
+ props.put(kafka::CLIENT_ID_CONFIG, client_id) unless client_id.nil?
+ props.put(kafka::KEY_SERIALIZER_CLASS_CONFIG, key_serializer)
+ props.put(kafka::LINGER_MS_CONFIG, linger_ms.to_s)
+ props.put(kafka::MAX_REQUEST_SIZE_CONFIG, max_request_size.to_s)
+ props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms) unless reconnect_backoff_ms.nil?
+ props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms) unless request_timeout_ms.nil?
+ props.put(kafka::RETRIES_CONFIG, retries.to_s)
+ props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
+ props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s)
+ props.put(kafka::VALUE_SERIALIZER_CLASS_CONFIG, value_serializer)
+
+ if ssl
+ if ssl_truststore_location.nil?
+ raise LogStash::ConfigurationError, "ssl_truststore_location must be set when SSL is enabled"
+ end
+ props.put("security.protocol", "SSL")
+ props.put("ssl.truststore.location", ssl_truststore_location)
+ props.put("ssl.truststore.password", ssl_truststore_password.value) unless ssl_truststore_password.nil?
+
+ #Client auth stuff
+ props.put("ssl.keystore.location", ssl_keystore_location) unless ssl_keystore_location.nil?
+ props.put("ssl.keystore.password", ssl_keystore_password.value) unless ssl_keystore_password.nil?
+ end
+
+ org.apache.kafka.clients.producer.KafkaProducer.new(props)
+ rescue => e
+ logger.error("Unable to create Kafka producer from given configuration", :kafka_error_message => e)
+ raise e
+ end
+ end
+
  end #class LogStash::Outputs::Kafka
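Since `register` resolves both `topic_id` and `message_key` through `event.sprintf`, either setting can reference event fields, as the unit specs later in this diff also exercise. A minimal sketch (field values are illustrative only):

    # Sketch only: topic and partition key are derived per event via sprintf placeholders.
    kafka = LogStash::Outputs::Kafka.new(
      'topic_id'    => '%{type}',  # resolves to 'apache' for the event below
      'message_key' => '%{host}'   # records sharing a key hash to the same partition
    )
    kafka.register
    kafka.receive(LogStash::Event.new('type' => 'apache', 'host' => '172.0.0.1', 'message' => 'hello'))
    kafka.close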
lib/logstash-output-kafka_jars.rb ADDED
@@ -0,0 +1,5 @@
+ # encoding: utf-8
+ require 'logstash/environment'
+
+ root_dir = File.expand_path(File.join(File.dirname(__FILE__), ".."))
+ LogStash::Environment.load_runtime_jars! File.join(root_dir, "vendor")
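A hedged illustration of what this loader provides: once the gem's vendored kafka-clients jar is on the JRuby load path, the producer classes and their config constants can be referenced directly from Ruby (the constant value shown comes from the Kafka client API):

    # Sketch only: assumes the plugin and its vendored jars are installed.
    require 'java'
    require 'logstash-output-kafka_jars.rb'

    producer_config = org.apache.kafka.clients.producer.ProducerConfig
    puts producer_config::BOOTSTRAP_SERVERS_CONFIG  # => "bootstrap.servers"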
logstash-output-kafka.gemspec CHANGED
@@ -1,7 +1,8 @@
  Gem::Specification.new do |s|

  s.name = 'logstash-output-kafka'
- s.version = '2.0.5'
+ s.version = '2.0.2'
+ s.version = '3.0.0.beta1'
  s.licenses = ['Apache License (2.0)']
  s.summary = 'Output events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on the broker'
  s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program"
@@ -20,12 +21,10 @@ Gem::Specification.new do |s|
  s.metadata = { 'logstash_plugin' => 'true', 'group' => 'output'}

  # Gem dependencies
- s.add_runtime_dependency "logstash-core-plugin-api", "~> 1.0"
+ s.add_runtime_dependency "logstash-core", ">= 2.0.0", "< 3.0.0"
  s.add_runtime_dependency 'logstash-codec-plain'
  s.add_runtime_dependency 'logstash-codec-json'

- s.add_runtime_dependency 'jruby-kafka', '1.5.0'
-
  s.add_development_dependency 'logstash-devutils'
  s.add_development_dependency 'poseidon'
  end
spec/integration/outputs/kafka_spec.rb CHANGED
@@ -2,20 +2,22 @@

  require "logstash/devutils/rspec/spec_helper"
  require 'logstash/outputs/kafka'
- require 'jruby-kafka'
  require 'json'
  require 'poseidon'

  describe "outputs/kafka", :integration => true do
- let(:test_topic) { 'test' }
- let(:base_config) { {'client_id' => 'spectest' } }
- let(:event) { LogStash::Event.new({'message' => 'hello', '@timestamp' => LogStash::Timestamp.at(0) }) }
+ let(:kafka_host) { 'localhost' }
+ let(:kafka_port) { 9092 }
+ let(:num_events) { 10 }
+ let(:base_config) { {'client_id' => 'kafkaoutputspec'} }
+ let(:event) { LogStash::Event.new({'message' => '183.60.215.50 - - [11/Sep/2014:22:00:00 +0000] "GET /scripts/netcat-webserver HTTP/1.1" 200 182 "-" "Mozilla/5.0 (compatible; EasouSpider; +http://www.easou.com/search/spider.html)"', '@timestamp' => LogStash::Timestamp.at(0) }) }


  context 'when outputting messages' do
+ let(:test_topic) { 'topic1' }
  let(:num_events) { 3 }
  let(:consumer) do
- Poseidon::PartitionConsumer.new("my_test_consumer", "localhost", 9092,
+ Poseidon::PartitionConsumer.new("my_test_consumer", kafka_host, kafka_port,
  test_topic, 0, :earliest_offset)
  end
  subject do
@@ -24,42 +26,107 @@ describe "outputs/kafka", :integration => true do

  before :each do
  config = base_config.merge({"topic_id" => test_topic})
- kafka = LogStash::Outputs::Kafka.new(config)
- kafka.register
- num_events.times do kafka.receive(event) end
- kafka.close
+ load_kafka_data(config)
  end

  it 'should have data integrity' do
  expect(subject.size).to eq(num_events)
  subject.each do |m|
- expect(m.value).to eq(event.to_json)
+ expect(m.value).to eq(event.to_s)
  end
  end
+
  end

  context 'when setting message_key' do
  let(:num_events) { 10 }
- let(:test_topic) { 'test2' }
+ let(:test_topic) { 'topic2' }
  let!(:consumer0) do
- Poseidon::PartitionConsumer.new("my_test_consumer2", "localhost", 9092,
+ Poseidon::PartitionConsumer.new("my_test_consumer", kafka_host, kafka_port,
  test_topic, 0, :earliest_offset)
  end
  let!(:consumer1) do
- Poseidon::PartitionConsumer.new("my_test_consumer2", "localhost", 9092,
+ Poseidon::PartitionConsumer.new("my_test_consumer", kafka_host, kafka_port,
  test_topic, 1, :earliest_offset)
  end

  before :each do
  config = base_config.merge({"topic_id" => test_topic, "message_key" => "static_key"})
- kafka = LogStash::Outputs::Kafka.new(config)
- kafka.register
- num_events.times do kafka.receive(event) end
- kafka.close
+ load_kafka_data(config)
  end

  it 'should send all events to one partition' do
  expect(consumer0.fetch.size == num_events || consumer1.fetch.size == num_events).to be true
  end
  end
+
+ context 'when using gzip compression' do
+ let(:test_topic) { 'gzip_topic' }
+ let!(:consumer) do
+ Poseidon::PartitionConsumer.new("my_test_consumer", kafka_host, kafka_port,
+ test_topic, 0, :earliest_offset)
+ end
+ subject do
+ consumer.fetch
+ end
+
+ before :each do
+ config = base_config.merge({"topic_id" => test_topic, "compression_type" => "gzip"})
+ load_kafka_data(config)
+ end
+
+ it 'should have data integrity' do
+ expect(subject.size).to eq(num_events)
+ subject.each do |m|
+ expect(m.value).to eq(event.to_s)
+ end
+ end
+ end
+
+ context 'when using multi partition topic' do
+ let(:num_events) { 10 }
+ let(:test_topic) { 'topic3' }
+ let!(:consumer0) do
+ Poseidon::PartitionConsumer.new("my_test_consumer", kafka_host, kafka_port,
+ test_topic, 0, :earliest_offset)
+ end
+ let!(:consumer1) do
+ Poseidon::PartitionConsumer.new("my_test_consumer", kafka_host, kafka_port,
+ test_topic, 1, :earliest_offset)
+ end
+
+ let!(:consumer2) do
+ Poseidon::PartitionConsumer.new("my_test_consumer", kafka_host, kafka_port,
+ test_topic, 2, :earliest_offset)
+ end
+
+ before :each do
+ config = base_config.merge({"topic_id" => test_topic})
+ load_kafka_data(config)
+ end
+
+ it 'should distribute events to all partition' do
+ consumer0_records = consumer0.fetch
+ consumer1_records = consumer1.fetch
+ consumer2_records = consumer2.fetch
+
+ expect(consumer0_records.size > 1 &&
+ consumer1_records.size > 1 &&
+ consumer2_records.size > 1).to be true
+
+ all_records = consumer0_records + consumer1_records + consumer2_records
+ expect(all_records.size).to eq(num_events)
+ all_records.each do |m|
+ expect(m.value).to eq(event.to_s)
+ end
+ end
+ end
+
+ def load_kafka_data(config)
+ kafka = LogStash::Outputs::Kafka.new(config)
+ kafka.register
+ num_events.times do kafka.receive(event) end
+ kafka.close
+ end
+
  end
spec/unit/outputs/kafka_spec.rb CHANGED
@@ -1,7 +1,6 @@
  # encoding: utf-8
  require "logstash/devutils/rspec/spec_helper"
  require 'logstash/outputs/kafka'
- require 'jruby-kafka'
  require 'json'

  describe "outputs/kafka" do
@@ -25,8 +24,8 @@ describe "outputs/kafka" do

  context 'when outputting messages' do
  it 'should send logstash event to kafka broker' do
- expect_any_instance_of(Kafka::KafkaProducer).to receive(:send_msg)
- .with(simple_kafka_config['topic_id'], nil, nil, event.to_hash.to_json)
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
+ .with(an_instance_of(org.apache.kafka.clients.producer.ProducerRecord))
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config)
  kafka.register
  kafka.receive(event)
@@ -34,19 +33,26 @@ describe "outputs/kafka" do

  it 'should support Event#sprintf placeholders in topic_id' do
  topic_field = 'topic_name'
- expect_any_instance_of(Kafka::KafkaProducer).to receive(:send_msg)
- .with(event[topic_field], nil, nil, event.to_hash.to_json)
+ expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
+ .with("my_topic", event.to_s)
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
  kafka = LogStash::Outputs::Kafka.new({'topic_id' => "%{#{topic_field}}"})
  kafka.register
  kafka.receive(event)
  end

  it 'should support field referenced message_keys' do
- expect_any_instance_of(Kafka::KafkaProducer).to receive(:send_msg)
- .with(simple_kafka_config['topic_id'], nil, event['host'], event.to_hash.to_json)
+ expect(org.apache.kafka.clients.producer.ProducerRecord).to receive(:new)
+ .with("test", "172.0.0.1", event.to_s)
+ expect_any_instance_of(org.apache.kafka.clients.producer.KafkaProducer).to receive(:send)
  kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge({"message_key" => "%{host}"}))
  kafka.register
  kafka.receive(event)
  end
+
+ it 'should raise config error when truststore location is not set and ssl is enabled' do
+ kafka = LogStash::Outputs::Kafka.new(simple_kafka_config.merge({"ssl" => "true"}))
+ expect { kafka.register }.to raise_error(LogStash::ConfigurationError, /ssl_truststore_location must be set when SSL is enabled/)
+ end
  end
  end
metadata CHANGED
@@ -1,99 +1,91 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-kafka
  version: !ruby/object:Gem::Version
- version: 2.0.5
+ version: 3.0.0.beta1
  platform: ruby
  authors:
  - Elasticsearch
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-04-26 00:00:00.000000000 Z
+ date: 2016-03-11 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
- name: logstash-core-plugin-api
- version_requirements: !ruby/object:Gem::Requirement
- requirements:
- - - ~>
- - !ruby/object:Gem::Version
- version: '1.0'
  requirement: !ruby/object:Gem::Requirement
  requirements:
- - - ~>
+ - - '>='
+ - !ruby/object:Gem::Version
+ version: 2.0.0
+ - - <
  - !ruby/object:Gem::Version
- version: '1.0'
+ version: 3.0.0
+ name: logstash-core
  prerelease: false
  type: :runtime
- - !ruby/object:Gem::Dependency
- name: logstash-codec-plain
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
- version: '0'
+ version: 2.0.0
+ - - <
+ - !ruby/object:Gem::Version
+ version: 3.0.0
+ - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ name: logstash-codec-plain
  prerelease: false
  type: :runtime
- - !ruby/object:Gem::Dependency
- name: logstash-codec-json
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ name: logstash-codec-json
  prerelease: false
  type: :runtime
- - !ruby/object:Gem::Dependency
- name: jruby-kafka
- version_requirements: !ruby/object:Gem::Requirement
- requirements:
- - - '='
- - !ruby/object:Gem::Version
- version: 1.5.0
- requirement: !ruby/object:Gem::Requirement
- requirements:
- - - '='
- - !ruby/object:Gem::Version
- version: 1.5.0
- prerelease: false
- type: :runtime
- - !ruby/object:Gem::Dependency
- name: logstash-devutils
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ name: logstash-devutils
  prerelease: false
  type: :development
- - !ruby/object:Gem::Dependency
- name: poseidon
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - '>='
  - !ruby/object:Gem::Version
  version: '0'
+ name: poseidon
  prerelease: false
  type: :development
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - '>='
+ - !ruby/object:Gem::Version
+ version: '0'
  description: This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program
  email: info@elastic.co
  executables: []
@@ -107,10 +99,14 @@ files:
  - LICENSE
  - NOTICE.TXT
  - README.md
+ - lib/logstash-output-kafka_jars.rb
  - lib/logstash/outputs/kafka.rb
  - logstash-output-kafka.gemspec
  - spec/integration/outputs/kafka_spec.rb
  - spec/unit/outputs/kafka_spec.rb
+ - vendor/jar-dependencies/runtime-jars/kafka-clients-0.9.0.1.jar
+ - vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.13.jar
+ - vendor/jar-dependencies/runtime-jars/slf4j-noop-1.7.13.jar
  homepage: http://www.elastic.co/guide/en/logstash/current/index.html
  licenses:
  - Apache License (2.0)
@@ -128,12 +124,12 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: '0'
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
- - - '>='
+ - - '>'
  - !ruby/object:Gem::Version
- version: '0'
+ version: 1.3.1
  requirements: []
  rubyforge_project:
- rubygems_version: 2.4.8
+ rubygems_version: 2.4.5
  signing_key:
  specification_version: 4
  summary: Output events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on the broker