fluent-plugin-elasticsearch 3.6.0 → 3.6.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 22474d12620f4cc8c2a2224b19e5e98534bd38d86f9489ace710b50139ea1422
-   data.tar.gz: 85c5043ca7ab0905efb9e2093c3b04d83dadae645e5aa2e580dbd601db77fe88
+   metadata.gz: 1073f1770d5c380f496b57f111e1792812b870392c5996111465ce9cd2cfa0c6
+   data.tar.gz: 1f8e70ec6516ba252d7f2b722ec7c85d3830e3eb0b616b67c909663ac5db9461
  SHA512:
-   metadata.gz: f22d3fe7adbb44c8b38f9c61757857937c3293c762dfba6842081e08df65ceddd3c75a0bfbf791c325fa1d82380e6d5b09ed7a3a774f03706a095cb8553323ac
-   data.tar.gz: 5f418e39091ac5bdceb2869604480e9b822c7654ba35da44c0ac636b88148168bee352c8bd7c3197fc5642019ab2a145430b4a88a722902c609457f1930eed2d
+   metadata.gz: 291e4a936373fc096718c4fa9db816874a7e53d2f950b9a8e5bfa3f7a6830d98a4d79ab4b36d16b5615cfd831e364b5e545ff91a49866f26805a5aae73670338
+   data.tar.gz: ea66679ef5406d950923e9f255532116f5378619b712f063c64b01997313aecf5ec53c3e04a7c1313125cda4370e058c200d539bd741c2afd55a08015a42c26b
data/History.md CHANGED
@@ -1,6 +1,11 @@
  ## Changelog [[tags]](https://github.com/uken/fluent-plugin-elasticsearch/tags)

  ### [Unreleased]
+ ### 3.6.1
+ - retry upsert on recoverable error. (#667)
+ - Allow `_index` in chunk_keys (#665)
+ - Support compression feature (#664)
+
  ### 3.6.0
  - Set order in newly created templates (#660)
  - Merge Support index lifecycle management into master (#659)
data/README.md CHANGED
@@ -66,6 +66,7 @@ Current maintainers: @cosmo0920
  + [include_index_in_url](#include_index_in_url)
  + [http_backend](#http_backend)
  + [prefer_oj_serializer](#prefer_oj_serializer)
+ + [compression_level](#compression_level)
  + [Client/host certificate options](#clienthost-certificate-options)
  + [Proxy Support](#proxy-support)
  + [Buffer options](#buffer-options)
@@ -792,6 +793,13 @@ Default value is `excon` which is default http_backend of elasticsearch plugin.
  http_backend typhoeus
  ```

+ ### compression_level
+ You can enable gzip compression of output data by setting this option to `default_compression`, `best_compression`, or `best_speed`.
+ By default there is no compression; the default value for this option is `no_compression`.
+ ```
+ compression_level best_compression
+ ```
+
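To see where the new option fits in practice, here is a minimal Fluentd configuration sketch built around this diff; the match pattern, host, and port are illustrative placeholders, not part of the change:

```
<match my.logs.**>
  @type elasticsearch
  host localhost
  port 9200
  # gzip-compress bulk request bodies; per the version check later
  # in this diff, this requires elasticsearch-transport >= 7.2.0
  compression_level best_compression
</match>
```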
  ### prefer_oj_serializer

  With default behavior, Elasticsearch client uses `Yajl` as JSON encoder/decoder.
fluent-plugin-elasticsearch.gemspec CHANGED
@@ -3,7 +3,7 @@ $:.push File.expand_path('../lib', __FILE__)

  Gem::Specification.new do |s|
    s.name = 'fluent-plugin-elasticsearch'
-   s.version = '3.6.0'
+   s.version = '3.6.1'
    s.authors = ['diogo', 'pitr']
    s.email = ['pitr.vern@gmail.com', 'me@diogoterror.com']
    s.description = %q{Elasticsearch output plugin for Fluent event collector}
@@ -16,6 +16,10 @@ Gem::Specification.new do |s|
    s.test_files = s.files.grep(%r{^(test|spec|features)/})
    s.require_paths = ['lib']

+   if s.respond_to?(:metadata)
+     s.metadata["changelog_uri"] = "https://github.com/uken/fluent-plugin-elasticsearch/blob/master/History.md"
+   end
+
    s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze)

    s.add_runtime_dependency 'fluentd', '>= 0.14.22'
lib/fluent/plugin/elasticsearch_error_handler.rb CHANGED
@@ -60,6 +60,8 @@ class Fluent::Plugin::ElasticsearchErrorHandler
      write_operation = @plugin.write_operation
    elsif INDEX_OP == @plugin.write_operation && item.is_a?(Hash) && item.has_key?(CREATE_OP)
      write_operation = CREATE_OP
+   elsif UPSERT_OP == @plugin.write_operation && item.is_a?(Hash) && item.has_key?(UPDATE_OP)
+     write_operation = UPDATE_OP
    elsif item.nil?
      stats[:errors_nil_resp] += 1
      next
lib/fluent/plugin/out_elasticsearch.rb CHANGED
@@ -18,6 +18,7 @@ require 'fluent/event'
  require 'fluent/error'
  require 'fluent/time'
  require 'fluent/log-ext'
+ require 'zlib'
  require_relative 'elasticsearch_constants'
  require_relative 'elasticsearch_error'
  require_relative 'elasticsearch_error_handler'
@@ -148,6 +149,7 @@ EOC
    config_param :ignore_exceptions, :array, :default => [], value_type: :string, :desc => "Ignorable exception list"
    config_param :exception_backup, :bool, :default => true, :desc => "Chunk backup flag when ignore exception occured"
    config_param :bulk_message_request_threshold, :size, :default => TARGET_BULK_BYTES
+   config_param :compression_level, :enum, {list: [:no_compression, :best_speed, :best_compression, :default_compression], :default => :no_compression}
    config_param :enable_ilm, :bool, :default => false
    config_param :ilm_policy_id, :string, :default => DEFAULT_POLICY_ID
    config_param :ilm_policy, :hash, :default => {}
@@ -170,7 +172,7 @@ EOC
    compat_parameters_convert(conf, :buffer)

    super
-   raise Fluent::ConfigError, "'tag' in chunk_keys is required." if not @chunk_key_tag
+   raise Fluent::ConfigError, "'tag' or '_index' in chunk_keys is required." if not @buffer_config.chunk_keys.include? "tag" and not @buffer_config.chunk_keys.include? "_index"

    @time_parser = create_time_parser
    @backend_options = backend_options
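With the relaxed check above, a buffer may now be chunked by `_index` instead of `tag`. A minimal buffer-section sketch under that assumption; the surrounding match block and its settings are illustrative placeholders:

```
<match my.logs.**>
  @type elasticsearch
  # chunk the buffer by the target index rather than by event tag
  <buffer _index>
    @type memory
  </buffer>
</match>
```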
@@ -327,6 +329,18 @@ EOC
      alias_method :split_request?, :split_request_size_check?
    end
  end
+
+ version_arr = Elasticsearch::Transport::VERSION.split('.')
+
+ if (version_arr[0].to_i < 7) || (version_arr[0].to_i == 7 && version_arr[1].to_i < 2)
+   if compression
+     raise Fluent::ConfigError, <<-EOC
+       Cannot use compression with elasticsearch-transport plugin version < 7.2.0
+       Your elasticsearch-transport plugin version is #{Elasticsearch::Transport::VERSION}.
+       Please consider upgrading the ES client.
+     EOC
+   end
+ end
  end

  def placeholder?(name, param)
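If this `Fluent::ConfigError` fires, one quick way to confirm the installed client version is to print the same constant the check reads (assuming the elasticsearch-transport gem is installed):

```
require 'elasticsearch/transport'

# the plugin's configure step compares this against 7.2.0
puts Elasticsearch::Transport::VERSION
```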
@@ -338,6 +352,23 @@ EOC
    end
  end

+ def compression
+   !(@compression_level == :no_compression)
+ end
+
+ def compression_strategy
+   case @compression_level
+   when :default_compression
+     Zlib::DEFAULT_COMPRESSION
+   when :best_compression
+     Zlib::BEST_COMPRESSION
+   when :best_speed
+     Zlib::BEST_SPEED
+   else
+     Zlib::NO_COMPRESSION
+   end
+ end
+
  def backend_options
    case @http_backend
    when :excon
@@ -438,7 +469,14 @@ EOC
    if local_reload_connections && @reload_after > DEFAULT_RELOAD_AFTER
      local_reload_connections = @reload_after
    end
-   headers = { 'Content-Type' => @content_type.to_s }.merge(@custom_headers)
+
+   gzip_headers = if compression
+                    {'Content-Encoding' => 'gzip'}
+                  else
+                    {}
+                  end
+   headers = { 'Content-Type' => @content_type.to_s }.merge(@custom_headers).merge(gzip_headers)
+
    transport = Elasticsearch::Transport::Transport::HTTP::Faraday.new(connection_options.merge(
      options: {
        reload_connections: local_reload_connections,
@@ -456,6 +494,7 @@ EOC
        },
        sniffer_class: @sniffer_class,
        serializer_class: @serializer_class,
+       compression: compression,
      }), &adapter_conf)
    Elasticsearch::Client.new transport: transport
  end
@@ -768,6 +807,15 @@ EOC
    [parent_object, path[-1]]
  end

+ # gzip compress data
+ def gzip(string)
+   wio = StringIO.new("w")
+   w_gz = Zlib::GzipWriter.new(wio, strategy = compression_strategy)
+   w_gz.write(string)
+   w_gz.close
+   wio.string
+ end
+
  # send_bulk given a specific bulk request, the original tag,
  # chunk, and bulk_message_count
  def send_bulk(data, tag, chunk, bulk_message_count, extracted_values, info)
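As a standalone sanity check on the helper above, a short Ruby sketch (independent of the plugin; all names are local to the example) showing that a payload compressed this way round-trips through `Zlib::GzipReader`:

```
require 'zlib'
require 'stringio'

# mirror the plugin's helper: GzipWriter's second argument is the
# compression level, e.g. Zlib::BEST_COMPRESSION
def gzip(string, level)
  wio = StringIO.new
  w_gz = Zlib::GzipWriter.new(wio, level)
  w_gz.write(string)
  w_gz.close
  wio.string
end

body = %({"index":{"_index":"fluentd"}}\n{"message":"hello"}\n)
compressed = gzip(body, Zlib::BEST_COMPRESSION)

# decompressing yields the original bulk payload
raise 'mismatch' unless Zlib::GzipReader.new(StringIO.new(compressed)).read == body
puts "ok: #{body.bytesize} -> #{compressed.bytesize} bytes"
```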
@@ -791,7 +839,14 @@ EOC
    begin

      log.on_trace { log.trace "bulk request: #{data}" }
-     response = client(info.host).bulk body: data, index: info.index
+
+     prepared_data = if compression
+                       gzip(data)
+                     else
+                       data
+                     end
+
+     response = client(info.host).bulk body: prepared_data, index: info.index
      log.on_trace { log.trace "bulk response: #{response}" }

      if response['errors']
lib/fluent/plugin/out_elasticsearch_dynamic.rb CHANGED
@@ -44,6 +44,12 @@ module Fluent::Plugin
    @_es ||= begin
      @current_config = connection_options[:hosts].clone
      adapter_conf = lambda {|f| f.adapter @http_backend, @backend_options }
+     gzip_headers = if compression
+                      {'Content-Encoding' => 'gzip'}
+                    else
+                      {}
+                    end
+     headers = { 'Content-Type' => @content_type.to_s, }.merge(gzip_headers)
      transport = Elasticsearch::Transport::Transport::HTTP::Faraday.new(connection_options.merge(
        options: {
          reload_connections: @reload_connections,
@@ -51,14 +57,15 @@ module Fluent::Plugin
          resurrect_after: @resurrect_after,
          logger: @transport_logger,
          transport_options: {
-           headers: { 'Content-Type' => @content_type.to_s },
+           headers: headers,
            request: { timeout: @request_timeout },
            ssl: { verify: @ssl_verify, ca_file: @ca_file, version: @ssl_version }
          },
          http: {
            user: @user,
            password: @password
-         }
+         },
+         compression: compression,
        }), &adapter_conf)
      Elasticsearch::Client.new transport: transport
    end
@@ -210,7 +217,12 @@ module Fluent::Plugin

  def send_bulk(data, host, index)
    begin
-     response = client(host).bulk body: data, index: index
+     prepared_data = if compression
+                       gzip(data)
+                     else
+                       data
+                     end
+     response = client(host).bulk body: prepared_data, index: index
      if response['errors']
        log.error "Could not push log to Elasticsearch: #{response}"
      end
test/plugin/test_elasticsearch_error_handler.rb CHANGED
@@ -7,9 +7,10 @@ class TestElasticsearchErrorHandler < Test::Unit::TestCase

  class TestPlugin
    attr_reader :log
-   attr_reader :write_operation, :error_events
+   attr_reader :error_events
    attr_accessor :unrecoverable_error_types
    attr_accessor :log_es_400_reason
+   attr_accessor :write_operation
    def initialize(log, log_es_400_reason = false)
      @log = log
      @write_operation = 'index'
@@ -522,4 +523,125 @@ class TestElasticsearchErrorHandler < Test::Unit::TestCase

  end

+ def test_retry_error_upsert
+   @plugin.write_operation = 'upsert'
+   records = []
+   error_records = Hash.new(false)
+   error_records.merge!({0=>true, 4=>true, 9=>true})
+   10.times do |i|
+     records << {time: 12345, record: {"message"=>"record #{i}","_id"=>i,"raise"=>error_records[i]}}
+   end
+   chunk = MockChunk.new(records)
+
+   response = parse_response(%({
+     "took" : 1,
+     "errors" : true,
+     "items" : [
+       {
+         "update" : {
+           "_index" : "foo",
+           "_type" : "bar",
+           "_id" : "1",
+           "status" : 201
+         }
+       },
+       {
+         "update" : {
+           "_index" : "foo",
+           "_type" : "bar",
+           "_id" : "2",
+           "status" : 500,
+           "error" : {
+             "type" : "some unrecognized type",
+             "reason":"unrecognized error"
+           }
+         }
+       },
+       {
+         "update" : {
+           "_index" : "foo",
+           "_type" : "bar",
+           "_id" : "3",
+           "status" : 409,
+           "error" : {
+             "type":"version_conflict_engine_exception",
+             "reason":"document already exists"
+           }
+         }
+       },
+       {
+         "update" : {
+           "_index" : "foo",
+           "_type" : "bar",
+           "_id" : "5",
+           "status" : 500,
+           "error" : {
+             "reason":"unrecognized error - no type field"
+           }
+         }
+       },
+       {
+         "update" : {
+           "_index" : "foo",
+           "_type" : "bar",
+           "_id" : "6",
+           "status" : 400,
+           "error" : {
+             "type" : "mapper_parsing_exception",
+             "reason":"failed to parse"
+           }
+         }
+       },
+       {
+         "update" : {
+           "_index" : "foo",
+           "_type" : "bar",
+           "_id" : "7",
+           "status" : 400,
+           "error" : {
+             "type" : "some unrecognized type",
+             "reason":"unrecognized error"
+           }
+         }
+       },
+       {
+         "update" : {
+           "_index" : "foo",
+           "_type" : "bar",
+           "_id" : "8",
+           "status" : 500,
+           "error" : {
+             "type" : "some unrecognized type",
+             "reason":"unrecognized error"
+           }
+         }
+       }
+     ]
+   }))
+
+   begin
+     failed = false
+     dummy_extracted_values = []
+     @handler.handle_error(response, 'atag', chunk, response['items'].length, dummy_extracted_values)
+   rescue Fluent::Plugin::ElasticsearchErrorHandler::ElasticsearchRequestAbortError, Fluent::Plugin::ElasticsearchOutput::RetryStreamError=>e
+     failed = true
+     records = [].tap do |records|
+       next unless e.respond_to?(:retry_stream)
+       e.retry_stream.each {|time, record| records << record}
+     end
+     puts records
+     assert_equal 3, records.length
+     assert_equal 2, records[0]['_id']
+     # upsert is retried in case of conflict error.
+     assert_equal 3, records[1]['_id']
+     assert_equal 8, records[2]['_id']
+     error_ids = @plugin.error_events.collect {|h| h[:record]['_id']}
+     assert_equal 3, error_ids.length
+     assert_equal [5, 6, 7], error_ids
+     @plugin.error_events.collect {|h| h[:error]}.each do |e|
+       assert_true e.respond_to?(:backtrace)
+     end
+   end
+   assert_true failed
+ end
  end
test/plugin/test_out_elasticsearch.rb CHANGED
@@ -232,6 +232,44 @@ class ElasticsearchOutput < Test::Unit::TestCase
    assert_equal Fluent::Plugin::ElasticsearchOutput::DEFAULT_ELASTICSEARCH_VERSION, instance.default_elasticsearch_version
    assert_false instance.log_es_400_reason
    assert_equal 20 * 1024 * 1024, Fluent::Plugin::ElasticsearchOutput::TARGET_BULK_BYTES
+   assert_false instance.compression
+   assert_equal :no_compression, instance.compression_level
+ end
+
+ test 'configure compression' do
+   config = %{
+     compression_level best_compression
+   }
+   instance = driver(config).instance
+
+   assert_equal true, instance.compression
+ end
+
+ test 'check compression strategy' do
+   config = %{
+     compression_level best_speed
+   }
+   instance = driver(config).instance
+
+   assert_equal Zlib::BEST_SPEED, instance.compression_strategy
+ end
+
+ test 'check content-encoding header with compression' do
+   config = %{
+     compression_level best_compression
+   }
+   instance = driver(config).instance
+
+   assert_equal "gzip", instance.client.transport.options[:transport_options][:headers]["Content-Encoding"]
+ end
+
+ test 'check compression option is passed to transport' do
+   config = %{
+     compression_level best_compression
+   }
+   instance = driver(config).instance
+
+   assert_equal true, instance.client.transport.options[:compression]
  end

  test 'configure Content-Type' do
@@ -341,23 +379,65 @@ class ElasticsearchOutput < Test::Unit::TestCase
    end
  end

- test 'lack of tag in chunk_keys' do
-   assert_raise_message(/'tag' in chunk_keys is required./) do
-     driver(Fluent::Config::Element.new(
-       'ROOT', '', {
-         '@type' => 'elasticsearch',
-         'host' => 'log.google.com',
-         'port' => 777,
-         'scheme' => 'https',
-         'path' => '/es/',
-         'user' => 'john',
-         'password' => 'doe',
-       }, [
-         Fluent::Config::Element.new('buffer', 'mykey', {
-           'chunk_keys' => 'mykey'
-         }, [])
-       ]
-     ))
+ sub_test_case 'chunk_keys requirement' do
+   test 'tag in chunk_keys' do
+     assert_nothing_raised do
+       driver(Fluent::Config::Element.new(
+         'ROOT', '', {
+           '@type' => 'elasticsearch',
+           'host' => 'log.google.com',
+           'port' => 777,
+           'scheme' => 'https',
+           'path' => '/es/',
+           'user' => 'john',
+           'password' => 'doe',
+         }, [
+           Fluent::Config::Element.new('buffer', 'tag', {
+             'chunk_keys' => 'tag'
+           }, [])
+         ]
+       ))
+     end
+   end
+
+   test '_index in chunk_keys' do
+     assert_nothing_raised do
+       driver(Fluent::Config::Element.new(
+         'ROOT', '', {
+           '@type' => 'elasticsearch',
+           'host' => 'log.google.com',
+           'port' => 777,
+           'scheme' => 'https',
+           'path' => '/es/',
+           'user' => 'john',
+           'password' => 'doe',
+         }, [
+           Fluent::Config::Element.new('buffer', '_index', {
+             'chunk_keys' => '_index'
+           }, [])
+         ]
+       ))
+     end
+   end
+
+   test 'lack of tag and _index in chunk_keys' do
+     assert_raise_message(/'tag' or '_index' in chunk_keys is required./) do
+       driver(Fluent::Config::Element.new(
+         'ROOT', '', {
+           '@type' => 'elasticsearch',
+           'host' => 'log.google.com',
+           'port' => 777,
+           'scheme' => 'https',
+           'path' => '/es/',
+           'user' => 'john',
+           'password' => 'doe',
+         }, [
+           Fluent::Config::Element.new('buffer', 'mykey', {
+             'chunk_keys' => 'mykey'
+           }, [])
+         ]
+       ))
+     end
    end
  end

@@ -1762,6 +1842,47 @@ class ElasticsearchOutput < Test::Unit::TestCase
    assert_equal(index_name, index_cmds.first['index']['_index'])
  end

+ # gzip compress data
+ def gzip(string, strategy)
+   wio = StringIO.new("w")
+   w_gz = Zlib::GzipWriter.new(wio, strategy = strategy)
+   w_gz.write(string)
+   w_gz.close
+   wio.string
+ end
+
+ def test_writes_to_default_index_with_compression
+   config = %[
+     compression_level default_compression
+   ]
+
+   bodystr = %({
+     "took" : 500,
+     "errors" : false,
+     "items" : [
+       {
+         "create": {
+           "_index" : "fluentd",
+           "_type" : "fluentd"
+         }
+       }
+     ]
+   })
+
+   compressed_body = gzip(bodystr, Zlib::DEFAULT_COMPRESSION)
+
+   elastic_request = stub_request(:post, "http://localhost:9200/_bulk").
+     to_return(:status => 200, :headers => {'Content-Type' => 'Application/json'}, :body => compressed_body)
+
+   driver(config)
+   driver.run(default_tag: 'test') do
+     driver.feed(sample_record)
+   end
+
+   assert_requested(elastic_request)
+ end
+
  data('Elasticsearch 6' => [6, Fluent::Plugin::ElasticsearchOutput::DEFAULT_TYPE_NAME],
       'Elasticsearch 7' => [7, Fluent::Plugin::ElasticsearchOutput::DEFAULT_TYPE_NAME_ES_7x],
       'Elasticsearch 8' => [8, nil],
test/plugin/test_out_elasticsearch_dynamic.rb CHANGED
@@ -104,6 +104,44 @@ class ElasticsearchOutputDynamic < Test::Unit::TestCase
    assert_false instance.with_transporter_log
    assert_equal :"application/json", instance.content_type
    assert_equal :excon, instance.http_backend
+   assert_false instance.compression
+   assert_equal :no_compression, instance.compression_level
+ end
+
+ test 'configure compression' do
+   config = %{
+     compression_level best_compression
+   }
+   instance = driver(config).instance
+
+   assert_equal true, instance.compression
+ end
+
+ test 'check compression strategy' do
+   config = %{
+     compression_level best_speed
+   }
+   instance = driver(config).instance
+
+   assert_equal Zlib::BEST_SPEED, instance.compression_strategy
+ end
+
+ test 'check content-encoding header with compression' do
+   config = %{
+     compression_level best_compression
+   }
+   instance = driver(config).instance
+
+   assert_equal "gzip", instance.client.transport.options[:transport_options][:headers]["Content-Encoding"]
+ end
+
+ test 'check compression option is passed to transport' do
+   config = %{
+     compression_level best_compression
+   }
+   instance = driver(config).instance
+
+   assert_equal true, instance.client.transport.options[:compression]
  end

  test 'configure Content-Type' do
@@ -305,6 +343,46 @@ class ElasticsearchOutputDynamic < Test::Unit::TestCase
    assert_equal('fluentd', index_cmds.first['index']['_index'])
  end

+ # gzip compress data
+ def gzip(string, strategy)
+   wio = StringIO.new("w")
+   w_gz = Zlib::GzipWriter.new(wio, strategy = strategy)
+   w_gz.write(string)
+   w_gz.close
+   wio.string
+ end
+
+ def test_writes_to_default_index_with_compression
+   config = %[
+     compression_level default_compression
+   ]
+
+   bodystr = %({
+     "took" : 500,
+     "errors" : false,
+     "items" : [
+       {
+         "create": {
+           "_index" : "fluentd",
+           "_type" : "fluentd"
+         }
+       }
+     ]
+   })
+
+   compressed_body = gzip(bodystr, Zlib::DEFAULT_COMPRESSION)
+
+   elastic_request = stub_request(:post, "http://localhost:9200/_bulk").
+     to_return(:status => 200, :headers => {'Content-Type' => 'Application/json'}, :body => compressed_body)
+
+   driver(config)
+   driver.run(default_tag: 'test') do
+     driver.feed(sample_record)
+   end
+
+   assert_requested(elastic_request)
+ end
+
  def test_writes_to_default_type
    stub_elastic
    driver.run(default_tag: 'test') do
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-elasticsearch
  version: !ruby/object:Gem::Version
-   version: 3.6.0
+   version: 3.6.1
  platform: ruby
  authors:
  - diogo
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2019-11-06 00:00:00.000000000 Z
+ date: 2019-11-11 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: fluentd
@@ -170,7 +170,8 @@ files:
  homepage: https://github.com/uken/fluent-plugin-elasticsearch
  licenses:
  - Apache-2.0
- metadata: {}
+ metadata:
+   changelog_uri: https://github.com/uken/fluent-plugin-elasticsearch/blob/master/History.md
  post_install_message:
  rdoc_options: []
  require_paths: