logstash-integration-kafka 11.0.0-java → 11.2.0-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: c0060f7684d8dd0787c1e106fdcc1b1b9673ec42cf2ffc01141dc5a2d9351967
- data.tar.gz: a0d878319e3ffde777330f9a15c64c23e898f16f8cde797622a804e9f4cbf89c
+ metadata.gz: 5f4d4eeff4c13ac6151467cb7998742151a2829a18b05d6a74377edd7acab7fc
+ data.tar.gz: 22fc71adaf29c11b9f0090436dc2e330a1a21eefc8fceb3aa4b4063f2adad626
  SHA512:
- metadata.gz: 721c864dff1a72f31cea49f7e3c674bdd827347b7b7a60c8ab1c2f5e88839205e3e94836cc4b664a55fd16107e0412b6885d15949c2ac1fd20798e82124fa502
- data.tar.gz: 49823c88d015acbdaf7b91d0fe64a9975ff22c9c8691c79c9270bed33aa174d3ea4f770c841e074c64ed82a1b092814817a63447d62fa17c84f9e3666bf89203
+ metadata.gz: 4ae9306a90795f0d877e896ee79e2c7ecd46b1c7033bca2d3182a5b0b4dbc66b50502d23c39a70451b5a7b83013763232e8e7a8a5dba8958d237831e2808ce6a
+ data.tar.gz: 5e4e48213df61f86cabf5e3535fc4f2c424714b904e12b0076812c832793b243ebbf54376edd90f9490e67f0a8268f290296be9570fa71704b626acd51ee92de
data/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+ ## 11.2.0
+ - Added TLS truststore and keystore settings specifically to access the schema registry [#137](https://github.com/logstash-plugins/logstash-integration-kafka/pull/137)
+
+ ## 11.1.0
+ - Added config `group_instance_id` to use Kafka's consumer static membership feature [#135](https://github.com/logstash-plugins/logstash-integration-kafka/pull/135)
+
  ## 11.0.0
  - Changed Kafka client to 3.3.1; requires Logstash >= 8.3.0.
  - Deprecated the `default` value for setting `client_dns_lookup`, forcing it to `use_all_dns_ips` when explicitly used [#130](https://github.com/logstash-plugins/logstash-integration-kafka/pull/130)
@@ -113,6 +113,7 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more details
  | <<plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-fetch_min_bytes>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-group_id>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-group_instance_id>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<<number,number>>|No
  | <<plugins-{type}s-{plugin}-isolation_level>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
@@ -134,6 +135,12 @@ See the https://kafka.apache.org/{kafka_client_doc}/documentation for more details
  | <<plugins-{type}s-{plugin}-schema_registry_key>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-schema_registry_proxy>> |<<uri,uri>>|No
  | <<plugins-{type}s-{plugin}-schema_registry_secret>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-schema_registry_ssl_keystore_location>> |a valid filesystem path|No
+ | <<plugins-{type}s-{plugin}-schema_registry_ssl_keystore_password>> |<<password,password>>|No
+ | <<plugins-{type}s-{plugin}-schema_registry_ssl_keystore_type>> |<<string,string>>, one of `["jks", "PKCS12"]`|No
+ | <<plugins-{type}s-{plugin}-schema_registry_ssl_truststore_location>> |a valid filesystem path|No
+ | <<plugins-{type}s-{plugin}-schema_registry_ssl_truststore_password>> |<<password,password>>|No
+ | <<plugins-{type}s-{plugin}-schema_registry_ssl_truststore_type>> |<<string,string>>, one of `["jks", "PKCS12"]`|No
  | <<plugins-{type}s-{plugin}-schema_registry_url>> |<<uri,uri>>|No
  | <<plugins-{type}s-{plugin}-schema_registry_validation>> |<<string,string>>|No
  | <<plugins-{type}s-{plugin}-security_protocol>> |<<string,string>>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
@@ -344,6 +351,28 @@ NOTE: In cases when multiple inputs are being used in a single pipeline, reading
  it's essential to set a different `group_id => ...` for each input. Setting a unique `client_id => ...`
  is also recommended.
 
+ [id="plugins-{type}s-{plugin}-group_instance_id"]
+ ===== `group_instance_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ The static membership identifier for this Logstash Kafka consumer. The static membership feature was introduced in
+ https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances[KIP-345]
+ and is exposed through the Kafka property `group.instance.id`.
+ Its purpose is to avoid rebalances in situations in which a lot of data
+ has to be forwarded after a consumer goes offline.
+ It mitigates cases where the service state is heavy and the rebalance of one topic partition from instance
+ A to B would cause a huge amount of data to be transferred.
+ A client that goes offline/online frequently can avoid frequent and heavy rebalances by using this option.
+
+ NOTE: The `group_instance_id` setting must be unique across all the clients belonging to the same <<plugins-{type}s-{plugin}-group_id>>.
+ Otherwise, another client connecting with the same `group.instance.id` value would cause the oldest instance to be disconnected.
+ You can set this value to a hostname, an IP, or anything else that uniquely identifies the client application.
+
+ NOTE: When `consumer_threads` is set greater than one, a per-thread suffix is appended to
+ the `group_instance_id` to avoid collisions, as shown in the sketch below.
+
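For illustration, a minimal editorial sketch of these settings in a pipeline (the broker address, topic, and id value are hypothetical):

[source,ruby]
----
input {
  kafka {
    bootstrap_servers => "kafka1:9092"    # hypothetical broker
    topics            => ["orders"]       # hypothetical topic
    group_id          => "logstash"
    group_instance_id => "ls-node-1"      # must be unique per client in the group
    consumer_threads  => 2                # consumers get suffixed ids: ls-node-1-0, ls-node-1-1
  }
}
----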
  [id="plugins-{type}s-{plugin}-heartbeat_interval_ms"]
  ===== `heartbeat_interval_ms`
 
@@ -575,6 +604,54 @@ Set the address of a forward HTTP proxy. An empty string is treated as if proxy
 
  Set the password for basic authorization to access remote Schema Registry.
 
+ [id="plugins-{type}s-{plugin}-schema_registry_ssl_keystore_location"]
+ ===== `schema_registry_ssl_keystore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+ If schema registry client authentication is required, this setting stores the keystore path.
+
+ [id="plugins-{type}s-{plugin}-schema_registry_ssl_keystore_password"]
+ ===== `schema_registry_ssl_keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+ If schema registry authentication is required, this setting stores the keystore password.
+
+ [id="plugins-{type}s-{plugin}-schema_registry_ssl_keystore_type"]
+ ===== `schema_registry_ssl_keystore_type`
+
+ * Value type is <<string,string>>
+ * Default value is `"jks"`
+
+ The format of the keystore file. It must be either `jks` or `PKCS12`.
+
+ [id="plugins-{type}s-{plugin}-schema_registry_ssl_truststore_location"]
+ ===== `schema_registry_ssl_truststore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+ The truststore path used to validate the schema registry's certificate.
+
+ [id="plugins-{type}s-{plugin}-schema_registry_ssl_truststore_password"]
+ ===== `schema_registry_ssl_truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+ The schema registry truststore password.
+
+ [id="plugins-{type}s-{plugin}-schema_registry_ssl_truststore_type"]
+ ===== `schema_registry_ssl_truststore_type`
+
+ * Value type is <<string,string>>
+ * Default value is `"jks"`
+
+ The format of the schema registry's truststore file. It must be either `jks` or `PKCS12`.
+
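As an editorial sketch (URLs, paths, and passwords are placeholders), the TLS settings above combine like this when reading from an HTTPS schema registry:

[source,ruby]
----
input {
  kafka {
    topics => ["topic_avro"]
    schema_registry_url => "https://schema-registry.example.com:8083"   # hypothetical registry
    schema_registry_ssl_truststore_location => "/etc/logstash/tls/clienttruststore.jks"
    schema_registry_ssl_truststore_password => "changeit"
    schema_registry_ssl_truststore_type => "jks"
    # Only needed when the registry requires client (mutual TLS) authentication:
    schema_registry_ssl_keystore_location => "/etc/logstash/tls/clientkeystore.jks"
    schema_registry_ssl_keystore_password => "changeit"
    schema_registry_ssl_keystore_type => "jks"
  }
}
----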
  [id="plugins-{type}s-{plugin}-schema_registry_url"]
  ===== `schema_registry_url`
 
@@ -124,6 +124,11 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  # that happens to be made up of multiple processors. Messages in a topic will be distributed to all
  # Logstash instances with the same `group_id`
  config :group_id, :validate => :string, :default => "logstash"
+ # Set a static group instance id, used in the static membership feature to avoid rebalancing when a
+ # consumer goes offline. If set and `consumer_threads` is greater than 1, then for the
+ # consumer created by each thread an artificial suffix is appended to the user-provided `group_instance_id`
+ # to avoid clashing.
+ config :group_instance_id, :validate => :string
  # The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
  # that the consumer's session stays active and to facilitate rebalancing when new
  # consumers join or leave the group. The value must be set lower than
@@ -136,7 +141,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  # been aborted. Non-transactional messages will be returned unconditionally in either mode.
  config :isolation_level, :validate => ["read_uncommitted", "read_committed"], :default => "read_uncommitted" # Kafka default
  # Java Class used to deserialize the record's key
- config :key_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
+ config :key_deserializer_class, :validate => :string, :default => DEFAULT_DESERIALIZER_CLASS
  # The maximum delay between invocations of poll() when using consumer group management. This places
  # an upper bound on the amount of time that the consumer can be idle before fetching more records.
  # If poll() is not called before expiration of this timeout, then the consumer is considered failed and
@@ -287,7 +292,10 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 
  public
  def run(logstash_queue)
-   @runner_consumers = consumer_threads.times.map { |i| subscribe(create_consumer("#{client_id}-#{i}")) }
+   @runner_consumers = consumer_threads.times.map do |i|
+     thread_group_instance_id = consumer_threads > 1 && group_instance_id ? "#{group_instance_id}-#{i}" : group_instance_id
+     subscribe(create_consumer("#{client_id}-#{i}", thread_group_instance_id))
+   end
    @runner_threads = @runner_consumers.map.with_index { |consumer, i| thread_runner(logstash_queue, consumer,
                                                                                     "kafka-input-worker-#{client_id}-#{i}") }
    @runner_threads.each(&:start)
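To make the suffixing rule concrete, a hedged illustration (values are made up): with multiple threads each consumer receives a distinct static member id, while a single thread uses the id unchanged.

[source,ruby]
----
# Editorial sketch of the thread_group_instance_id expression above.
consumer_threads  = 3
group_instance_id = "ls-node-1"   # hypothetical user-provided id
ids = consumer_threads.times.map do |i|
  consumer_threads > 1 && group_instance_id ? "#{group_instance_id}-#{i}" : group_instance_id
end
# ids => ["ls-node-1-0", "ls-node-1-1", "ls-node-1-2"]
----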
@@ -335,6 +343,9 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  rescue org.apache.kafka.common.errors.WakeupException => e
    logger.debug("Wake up from poll", :kafka_error_message => e)
    raise e unless stop?
+ rescue org.apache.kafka.common.errors.FencedInstanceIdException => e
+   logger.error("Another consumer with same group.instance.id has connected", :original_error_message => e.message)
+   raise e unless stop?
  rescue => e
    logger.error("Unable to poll Kafka consumer",
                 :kafka_error_message => e,
@@ -389,7 +400,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
  end
 
  private
- def create_consumer(client_id)
+ def create_consumer(client_id, group_instance_id)
    begin
      props = java.util.Properties.new
      kafka = org.apache.kafka.clients.consumer.ConsumerConfig
@@ -407,6 +418,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
      props.put(kafka::FETCH_MAX_WAIT_MS_CONFIG, fetch_max_wait_ms.to_s) unless fetch_max_wait_ms.nil?
      props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes.to_s) unless fetch_min_bytes.nil?
      props.put(kafka::GROUP_ID_CONFIG, group_id)
+     props.put(kafka::GROUP_INSTANCE_ID_CONFIG, group_instance_id) unless group_instance_id.nil?
      props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms.to_s) unless heartbeat_interval_ms.nil?
      props.put(kafka::ISOLATION_LEVEL_CONFIG, isolation_level)
      props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
@@ -448,6 +460,17 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
        set_trustore_keystore_config(props)
        set_sasl_config(props)
      end
+     if schema_registry_ssl_truststore_location
+       props.put('schema.registry.ssl.truststore.location', schema_registry_ssl_truststore_location)
+       props.put('schema.registry.ssl.truststore.password', schema_registry_ssl_truststore_password.value)
+       props.put('schema.registry.ssl.truststore.type', schema_registry_ssl_truststore_type)
+     end
+
+     if schema_registry_ssl_keystore_location
+       props.put('schema.registry.ssl.keystore.location', schema_registry_ssl_keystore_location)
+       props.put('schema.registry.ssl.keystore.password', schema_registry_ssl_keystore_password.value)
+       props.put('schema.registry.ssl.keystore.type', schema_registry_ssl_keystore_type)
+     end
 
      org.apache.kafka.clients.consumer.KafkaConsumer.new(props)
    rescue => e
@@ -24,6 +24,24 @@ module LogStash module PluginMixins module Kafka
  # This option permits defining a proxy to be used to reach the schema registry service instance.
  config :schema_registry_proxy, :validate => :uri
 
+ # If schema registry client authentication is required, this setting stores the keystore path.
+ config :schema_registry_ssl_keystore_location, :validate => :string
+
+ # The keystore password.
+ config :schema_registry_ssl_keystore_password, :validate => :password
+
+ # The keystore type.
+ config :schema_registry_ssl_keystore_type, :validate => ['jks', 'PKCS12'], :default => "jks"
+
+ # The JKS truststore path used to validate the Schema Registry's certificate.
+ config :schema_registry_ssl_truststore_location, :validate => :string
+
+ # The truststore password.
+ config :schema_registry_ssl_truststore_password, :validate => :password
+
+ # The truststore type.
+ config :schema_registry_ssl_truststore_type, :validate => ['jks', 'PKCS12'], :default => "jks"
+
  # Option to skip validating the schema registry during registration. This can be useful when using
  # certificate-based auth
  config :schema_registry_validation, :validate => ['auto', 'skip'], :default => 'auto'
@@ -68,6 +86,19 @@ module LogStash module PluginMixins module Kafka
  if schema_registry_key and !schema_registry_key.empty?
    options[:auth] = {:user => schema_registry_key, :password => schema_registry_secret.value}
  end
+ if schema_registry_ssl_truststore_location and !schema_registry_ssl_truststore_location.empty?
+   options[:ssl] = {} unless options.key?(:ssl)
+   options[:ssl][:truststore] = schema_registry_ssl_truststore_location
+   options[:ssl][:truststore_password] = schema_registry_ssl_truststore_password.value unless schema_registry_ssl_truststore_password.nil?
+   options[:ssl][:truststore_type] = schema_registry_ssl_truststore_type unless schema_registry_ssl_truststore_type.nil?
+ end
+ if schema_registry_ssl_keystore_location and !schema_registry_ssl_keystore_location.empty?
+   options[:ssl] = {} unless options.key?(:ssl)
+   options[:ssl][:keystore] = schema_registry_ssl_keystore_location
+   options[:ssl][:keystore_password] = schema_registry_ssl_keystore_password.value unless schema_registry_ssl_keystore_password.nil?
+   options[:ssl][:keystore_type] = schema_registry_ssl_keystore_type unless schema_registry_ssl_keystore_type.nil?
+ end
+
  client = Manticore::Client.new(options)
  begin
    response = client.get(@schema_registry_url.uri.to_s + '/subjects').body
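For reference, a hedged sketch of the Manticore client options the branches above can produce (values are placeholders; the key names mirror the code):

[source,ruby]
----
require 'manticore'

options = {
  :auth => { :user => "barney", :password => "changeme" },   # basic auth, if configured
  :ssl  => {
    :truststore          => "/etc/logstash/tls/clienttruststore.jks",
    :truststore_password => "changeit",
    :truststore_type     => "jks"
  }
}
client = Manticore::Client.new(options)
subjects = client.get("https://schema-registry.example.com:8083/subjects").body
----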
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
    s.name = 'logstash-integration-kafka'
-   s.version = '11.0.0'
+   s.version = '11.2.0'
    s.licenses = ['Apache-2.0']
    s.summary = "Integration with Kafka - input and output plugins"
    s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
@@ -79,6 +79,7 @@ describe "inputs/kafka", :integration => true do
    producer = org.apache.kafka.clients.producer.KafkaProducer.new(props)
 
    producer.send(record)
+   producer.flush
    producer.close
  end
 
@@ -185,10 +186,105 @@ describe "inputs/kafka", :integration => true do
      end
    end
  end
+
+ context "static membership 'group.instance.id' setting" do
+   let(:base_config) do
+     {
+       "topics" => ["logstash_integration_static_membership_topic"],
+       "group_id" => "logstash",
+       "consumer_threads" => 1,
+       # this is needed because the worker thread could be executed a little after the producer sent the "up" message
+       "auto_offset_reset" => "earliest",
+       "group_instance_id" => "test_static_group_id"
+     }
+   end
+   let(:consumer_config) { base_config }
+   let(:logger) { double("logger") }
+   let(:queue) { java.util.concurrent.ArrayBlockingQueue.new(10) }
+   let(:kafka_input) { LogStash::Inputs::Kafka.new(consumer_config) }
+   before :each do
+     allow(LogStash::Inputs::Kafka).to receive(:logger).and_return(logger)
+     [:error, :warn, :info, :debug].each do |level|
+       allow(logger).to receive(level)
+     end
+
+     kafka_input.register
+   end
+
+   it "input plugin disconnects from the broker when another client with the same static membership connects" do
+     expect(logger).to receive(:error).with("Another consumer with same group.instance.id has connected", anything)
+
+     input_worker = java.lang.Thread.new { kafka_input.run(queue) }
+     begin
+       input_worker.start
+       wait_kafka_input_is_ready("logstash_integration_static_membership_topic", queue)
+       saboteur_kafka_consumer = create_consumer_and_start_consuming("test_static_group_id")
+       saboteur_kafka_consumer.run # ask to be scheduled
+       saboteur_kafka_consumer.join
+
+       expect(saboteur_kafka_consumer.value).to eq("saboteur exited")
+     ensure
+       input_worker.join(30_000)
+     end
+   end
+
+   context "when the plugin is configured with multiple consumer threads" do
+     let(:consumer_config) { base_config.merge({"consumer_threads" => 2}) }
+
+     it "does not connect with the same 'group.instance.id'" do
+       expect(logger).to_not receive(:error).with("Another consumer with same group.instance.id has connected", anything)
+
+       input_worker = java.lang.Thread.new { kafka_input.run(queue) }
+       begin
+         input_worker.start
+         wait_kafka_input_is_ready("logstash_integration_static_membership_topic", queue)
+       ensure
+         kafka_input.stop
+         input_worker.join(1_000)
+       end
+     end
+   end
+ end
+ end
+
+ # returns the consumer's Ruby Thread
+ def create_consumer_and_start_consuming(static_group_id)
+   props = java.util.Properties.new
+   kafka = org.apache.kafka.clients.consumer.ConsumerConfig
+   props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
+   props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, LogStash::Inputs::Kafka::DEFAULT_DESERIALIZER_CLASS)
+   props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, LogStash::Inputs::Kafka::DEFAULT_DESERIALIZER_CLASS)
+   props.put(kafka::GROUP_ID_CONFIG, "logstash")
+   props.put(kafka::GROUP_INSTANCE_ID_CONFIG, static_group_id)
+   consumer = org.apache.kafka.clients.consumer.KafkaConsumer.new(props)
+
+   Thread.new do
+     LogStash::Util::set_thread_name("integration_test_simple_consumer")
+     begin
+       consumer.subscribe(["logstash_integration_static_membership_topic"])
+       records = consumer.poll(java.time.Duration.ofSeconds(3))
+       "saboteur exited"
+     rescue => e
+       e # return the exception, surfaced via thread.value
+     ensure
+       consumer.close
+     end
+   end
  end
 
  private
 
+ def wait_kafka_input_is_ready(topic, queue)
+   # this is needed to give the kafka input time to be up and running
+   header = org.apache.kafka.common.header.internals.RecordHeader.new("name", "Ping Up".to_java_bytes)
+   record = org.apache.kafka.clients.producer.ProducerRecord.new(topic, 0, "key", "value", [header])
+   send_message(record)
+
+   # Wait until the message is processed
+   message = queue.poll(1, java.util.concurrent.TimeUnit::MINUTES)
+   expect(message).to_not eq(nil)
+ end
+
  def consume_messages(config, queue: Queue.new, timeout:, event_count:)
    kafka_input = LogStash::Inputs::Kafka.new(config)
    kafka_input.register
@@ -257,10 +353,13 @@ describe "schema registry connection options" do
    end
  end
 
- def save_avro_schema_to_schema_registry(schema_file, subject_name)
+ def save_avro_schema_to_schema_registry(schema_file, subject_name, proto = 'http', port = 8081, manticore_options = {})
    raw_schema = File.readlines(schema_file).map(&:chomp).join
    raw_schema_quoted = raw_schema.gsub('"', '\"')
-   response = Manticore.post("http://localhost:8081/subjects/#{subject_name}/versions",
+
+   client = Manticore::Client.new(manticore_options)
+
+   response = client.post("#{proto}://localhost:#{port}/subjects/#{subject_name}/versions",
                              body: '{"schema": "' + raw_schema_quoted + '"}',
                              headers: {"Content-Type" => "application/vnd.schemaregistry.v1+json"})
    response
@@ -282,8 +381,17 @@ def startup_schema_registry(schema_registry, auth=false)
    end
  end
 
- describe "Schema registry API", :integration => true do
-   schema_registry = Manticore::Client.new
+ shared_examples 'it has endpoints available to' do |tls|
+   let(:port) { tls ? 8083 : 8081 }
+   let(:proto) { tls ? 'https' : 'http' }
+
+   manticore_options = {
+     :ssl => {
+       :truststore => File.join(Dir.pwd, "tls_repository/clienttruststore.jks"),
+       :truststore_password => "changeit"
+     }
+   }
+   schema_registry = Manticore::Client.new(manticore_options)
 
    before(:all) do
      startup_schema_registry(schema_registry)
@@ -295,36 +403,53 @@ describe "Schema registry API", :integration => true do
 
    context 'listing subject on clean instance' do
      it "returns an empty set" do
-       subjects = JSON.parse schema_registry.get('http://localhost:8081/subjects').body
+       subjects = JSON.parse schema_registry.get("#{proto}://localhost:#{port}/subjects").body
        expect( subjects ).to be_empty
      end
    end
 
    context 'send a schema definition' do
      it "saves the definition" do
-       response = save_avro_schema_to_schema_registry(File.join(Dir.pwd, "spec", "unit", "inputs", "avro_schema_fixture_payment.asvc"), "schema_test_1")
+       response = save_avro_schema_to_schema_registry(File.join(Dir.pwd, "spec", "unit", "inputs", "avro_schema_fixture_payment.asvc"), "schema_test_1", proto, port, manticore_options)
        expect( response.code ).to be(200)
        delete_remote_schema(schema_registry, "schema_test_1")
      end
 
      it "deletes the schema just added" do
-       response = save_avro_schema_to_schema_registry(File.join(Dir.pwd, "spec", "unit", "inputs", "avro_schema_fixture_payment.asvc"), "schema_test_1")
+       response = save_avro_schema_to_schema_registry(File.join(Dir.pwd, "spec", "unit", "inputs", "avro_schema_fixture_payment.asvc"), "schema_test_1", proto, port, manticore_options)
        expect( response.code ).to be(200)
 
-       expect( schema_registry.delete('http://localhost:8081/subjects/schema_test_1?permanent=false').code ).to be(200)
+       expect( schema_registry.delete("#{proto}://localhost:#{port}/subjects/schema_test_1?permanent=false").code ).to be(200)
        sleep(1)
-       subjects = JSON.parse schema_registry.get('http://localhost:8081/subjects').body
+       subjects = JSON.parse schema_registry.get("#{proto}://localhost:#{port}/subjects").body
        expect( subjects ).to be_empty
      end
    end
  end
 
+ describe "Schema registry API", :integration => true do
+
+   context "when exposed with HTTPS" do
+     it_behaves_like 'it has endpoints available to', true
+   end
+
+   context "when exposed with plain HTTP" do
+     it_behaves_like 'it has endpoints available to', false
+   end
+ end
+
  def shutdown_schema_registry
    system('./stop_schema_registry.sh')
  end
 
  describe "Deserializing with the schema registry", :integration => true do
-   schema_registry = Manticore::Client.new
+   manticore_options = {
+     :ssl => {
+       :truststore => File.join(Dir.pwd, "tls_repository/clienttruststore.jks"),
+       :truststore_password => "changeit"
+     }
+   }
+   schema_registry = Manticore::Client.new(manticore_options)
 
    shared_examples 'it reads from a topic using a schema registry' do |with_auth|
 
@@ -423,28 +548,57 @@ describe "Deserializing with the schema registry", :integration => true do
      end
    end
 
-  context 'with an unauthed schema registry' do
+  shared_examples 'with an unauthed schema registry' do |tls|
+    let(:port) { tls ? 8083 : 8081 }
+    let(:proto) { tls ? 'https' : 'http' }
+
     let(:auth) { false }
     let(:avro_topic_name) { "topic_avro" }
-    let(:subject_url) { "http://localhost:8081/subjects" }
-    let(:plain_config) { base_config.merge!({'schema_registry_url' => "http://localhost:8081"}) }
+    let(:subject_url) { "#{proto}://localhost:#{port}/subjects" }
+    let(:plain_config) { base_config.merge!({
+      'schema_registry_url' => "#{proto}://localhost:#{port}",
+      'schema_registry_ssl_truststore_location' => File.join(Dir.pwd, "tls_repository/clienttruststore.jks"),
+      'schema_registry_ssl_truststore_password' => 'changeit',
+    }) }
 
     it_behaves_like 'it reads from a topic using a schema registry', false
   end
 
-  context 'with an authed schema registry' do
+  context 'with an unauthed schema registry' do
+    context "accessed through HTTPS" do
+      it_behaves_like 'with an unauthed schema registry', true
+    end
+
+    context "accessed through plain HTTP" do
+      it_behaves_like 'with an unauthed schema registry', false
+    end
+  end
+
+  shared_examples 'with an authed schema registry' do |tls|
+    let(:port) { tls ? 8083 : 8081 }
+    let(:proto) { tls ? 'https' : 'http' }
     let(:auth) { true }
     let(:user) { "barney" }
     let(:password) { "changeme" }
     let(:avro_topic_name) { "topic_avro_auth" }
-    let(:subject_url) { "http://#{user}:#{password}@localhost:8081/subjects" }
+    let(:subject_url) { "#{proto}://#{user}:#{password}@localhost:#{port}/subjects" }
+    let(:tls_base_config) do
+      if tls
+        base_config.merge({
+          'schema_registry_ssl_truststore_location' => ::File.join(Dir.pwd, "tls_repository/clienttruststore.jks"),
+          'schema_registry_ssl_truststore_password' => 'changeit',
+        })
+      else
+        base_config
+      end
+    end
 
     context 'using schema_registry_key' do
       let(:plain_config) do
-        base_config.merge!({
-          'schema_registry_url' => "http://localhost:8081",
+        tls_base_config.merge!({
+          'schema_registry_url' => "#{proto}://localhost:#{port}",
           'schema_registry_key' => user,
-          'schema_registry_secret' => password
+          'schema_registry_secret' => password,
        })
      end
 
@@ -453,12 +607,22 @@ describe "Deserializing with the schema registry", :integration => true do
    end
 
    context 'using schema_registry_url' do
      let(:plain_config) do
-       base_config.merge!({
-         'schema_registry_url' => "http://#{user}:#{password}@localhost:8081"
+       tls_base_config.merge!({
+         'schema_registry_url' => "#{proto}://#{user}:#{password}@localhost:#{port}",
        })
      end
 
      it_behaves_like 'it reads from a topic using a schema registry', true
    end
  end
+
+ context 'with an authed schema registry' do
+   context "accessed through HTTPS" do
+     it_behaves_like 'with an authed schema registry', true
+   end
+
+   context "accessed through plain HTTP" do
+     it_behaves_like 'with an authed schema registry', false
+   end
+ end
  end
@@ -297,7 +297,7 @@ describe LogStash::Inputs::Kafka do
    to receive(:new).with(hash_including('client.rack' => 'EU-R1')).
    and_return kafka_client = double('kafka-consumer')
 
-   expect( subject.send(:create_consumer, 'sample_client-0') ).to be kafka_client
+   expect( subject.send(:create_consumer, 'sample_client-0', 'group_instance_id') ).to be kafka_client
  end
 end
 
@@ -309,7 +309,7 @@ describe LogStash::Inputs::Kafka do
    to receive(:new).with(hash_including('session.timeout.ms' => '25000', 'max.poll.interval.ms' => '345000')).
    and_return kafka_client = double('kafka-consumer')
 
-   expect( subject.send(:create_consumer, 'sample_client-1') ).to be kafka_client
+   expect( subject.send(:create_consumer, 'sample_client-1', 'group_instance_id') ).to be kafka_client
  end
 end
 
@@ -321,7 +321,7 @@ describe LogStash::Inputs::Kafka do
    to receive(:new).with(hash_including('session.timeout.ms' => '25200', 'max.poll.interval.ms' => '123000')).
    and_return kafka_client = double('kafka-consumer')
 
-   expect( subject.send(:create_consumer, 'sample_client-2') ).to be kafka_client
+   expect( subject.send(:create_consumer, 'sample_client-2', 'group_instance_id') ).to be kafka_client
  end
 end
 
@@ -333,7 +333,7 @@ describe LogStash::Inputs::Kafka do
    to receive(:new).with(hash_including('enable.auto.commit' => 'false', 'check.crcs' => 'true')).
    and_return kafka_client = double('kafka-consumer')
 
-   expect( subject.send(:create_consumer, 'sample_client-3') ).to be kafka_client
+   expect( subject.send(:create_consumer, 'sample_client-3', 'group_instance_id') ).to be kafka_client
    expect( subject.enable_auto_commit ).to be false
  end
 end
@@ -346,7 +346,7 @@ describe LogStash::Inputs::Kafka do
    to receive(:new).with(hash_including('enable.auto.commit' => 'true', 'check.crcs' => 'false')).
    and_return kafka_client = double('kafka-consumer')
 
-   expect( subject.send(:create_consumer, 'sample_client-4') ).to be kafka_client
+   expect( subject.send(:create_consumer, 'sample_client-4', 'group_instance_id') ).to be kafka_client
    expect( subject.enable_auto_commit ).to be true
  end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-integration-kafka
  version: !ruby/object:Gem::Version
-   version: 11.0.0
+   version: 11.2.0
  platform: java
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-12-16 00:00:00.000000000 Z
+ date: 2023-02-14 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement