fluent-plugin-webhdfs 1.2.4 → 1.4.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: fcefccfd606717624933c095b4450489b3a24bd40cebf8af180e824e77ece775
-  data.tar.gz: 2d307f89de1c026443bcef00b7e821d884b8df2d64a79c37561617cc74ca6a5e
+  metadata.gz: 2d96f9304470f4d3409a1209b96c56722a62acfefc55633d81525e21103ec7e9
+  data.tar.gz: 8c6982670e30e112815a3abec28a2865609e4ca2cccbe097a42b6abb9080af21
 SHA512:
-  metadata.gz: f318228d5b577be1151fa78951e317c68c862ef8981425d148d55d2e4df648004638fe8373ce6e7028553b6f06143e4dda8cc2318b27223090be9e454c88c5ac
-  data.tar.gz: 7ff49536fe66c00f4888bb2266e78c9f58534e3789891c41db1c9a33300fcd3218966cb3d8e88398dcdca4a2e41136ced6f1452a7e9d095d12297a5ddd07a842
+  metadata.gz: a56a3b8ac2e7bf279ddb23d5a4fafb187289883442b01355f86cf3d626332aff35a78f4fee38c32767a7162741b550143fbfc50f9d953db0ce8a8220d022d35f
+  data.tar.gz: 7110d25391fc90d0e0aa8042014596994b3fe700737a08febd1f1d69485fd9eef212f0882e51832e2a3f95d64a7e21e0091f8f8e37444151404104230645029d
data/.gitignore CHANGED
@@ -15,3 +15,4 @@ spec/reports
 test/tmp
 test/version_tmp
 tmp
+vendor/
data/.travis.yml CHANGED
@@ -2,10 +2,11 @@ sudo: false
 language: ruby
 
 rvm:
-  - 2.1
-  - 2.2
-  - 2.3.3
-  - 2.4.0
+  - 2.4
+  - 2.5
+  - 2.6
+  - 2.7
+  - ruby-head
 
 branches:
   only:
@@ -23,3 +24,7 @@ script: bundle exec rake test
 
 gemfile:
   - Gemfile
+
+matrix:
+  allow_failures:
+    - rvm: ruby-head
data/README.md CHANGED
@@ -157,14 +157,43 @@ If you want to compress data before storing it:
       host namenode.your.cluster.local
       port 50070
       path /path/on/hdfs/access.log.%Y%m%d_%H
-      compress gzip # or 'bzip2', 'snappy', 'lzo_command'
+      compress gzip # or 'bzip2', 'snappy', 'hadoop_snappy', 'lzo_command', 'zstd'
     </match>
 
-Note that if you set `compress gzip`, then the suffix `.gz` will be added to path (or `.bz2`, `sz`, `.lzo`).
+Note that if you set `compress gzip`, then the suffix `.gz` will be added to path (or `.bz2`, `.sz`, `.snappy`, `.lzo`, `.zst`).
 Note that you have to install additional gem for several compress algorithms:
 
 - snappy: install snappy gem
+- hadoop_snappy: install snappy gem
 - bzip2: install bzip2-ffi gem
+- zstd: install zstandard gem
+
+Note that zstd will require installation of the libzstd native library. See the [zstandard-ruby](https://github.com/msievers/zstandard-ruby#examples-for-installing-libzstd) repo for information on the required packages for your operating system.
+
+You can also specify compression block size (currently supported only for Snappy codecs):
+
+    <match access.**>
+      @type webhdfs
+      host namenode.your.cluster.local
+      port 50070
+      path /path/on/hdfs/access.log.%Y%m%d_%H
+      compress hadoop_snappy
+      block_size 32768
+    </match>
+
+If you want to explicitly specify file extensions in HDFS (override default compressor extensions):
+
+    <match access.**>
+      @type webhdfs
+      host namenode.your.cluster.local
+      port 50070
+      path /path/on/hdfs/access.log.%Y%m%d_%H
+      compress snappy
+      extension ".snappy"
+    </match>
+
+With this configuration paths in HDFS will be like `/path/on/hdfs/access.log.20201003_12.snappy`.
+This one may be useful when (for example) you need to use snappy codec but `.sz` files are not recognized as snappy files in HDFS.
 
 ### Namenode HA / Auto retry for WebHDFS known errors
 
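The optional codec gems called out above can be pulled in together; a minimal Gemfile sketch, with gem names taken from the README and gemspec (the comments and grouping are illustrative):

    # Gemfile sketch: optional compression codecs for fluent-plugin-webhdfs
    source "https://rubygems.org"

    gem "fluentd"
    gem "fluent-plugin-webhdfs"
    gem "snappy", ">= 0.0.13"  # for 'compress snappy' / 'compress hadoop_snappy'
    gem "bzip2-ffi"            # for 'compress bzip2'
    gem "zstandard"            # for 'compress zstd'; needs the libzstd native library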
data/fluent-plugin-webhdfs.gemspec CHANGED
@@ -2,7 +2,7 @@
 
 Gem::Specification.new do |gem|
   gem.name = "fluent-plugin-webhdfs"
-  gem.version = "1.2.4"
+  gem.version = "1.4.0"
   gem.authors = ["TAGOMORI Satoshi"]
   gem.email = ["tagomoris@gmail.com"]
   gem.summary = %q{Fluentd plugin to write data on HDFS over WebHDFS, with flexible formatting}
@@ -21,6 +21,7 @@ Gem::Specification.new do |gem|
   gem.add_development_dependency "appraisal"
   gem.add_development_dependency "snappy", '>= 0.0.13'
   gem.add_development_dependency "bzip2-ffi"
+  gem.add_development_dependency "zstandard"
   gem.add_runtime_dependency "fluentd", '>= 0.14.22'
   gem.add_runtime_dependency "webhdfs", '>= 0.6.0'
 end
data/lib/fluent/plugin/out_webhdfs.rb CHANGED
@@ -67,10 +67,13 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
   desc 'kerberos keytab file'
   config_param :kerberos_keytab, :string, default: nil
 
-  SUPPORTED_COMPRESS = [:gzip, :bzip2, :snappy, :lzo_command, :text]
-  desc "Compress method (#{SUPPORTED_COMPRESS.join(',')})"
+  SUPPORTED_COMPRESS = [:gzip, :bzip2, :snappy, :hadoop_snappy, :lzo_command, :zstd, :text]
+  desc "Compression method (#{SUPPORTED_COMPRESS.join(',')})"
   config_param :compress, :enum, list: SUPPORTED_COMPRESS, default: :text
 
+  desc 'HDFS file extensions (overrides default compressor extensions)'
+  config_param :extension, :string, default: nil
+
   config_param :remove_prefix, :string, default: nil, deprecated: "use @label for routing"
   config_param :default_tag, :string, default: nil, deprecated: "use @label for routing"
   config_param :null_value, :string, default: nil, deprecated: "use filter plugins to convert null values into any specified string"
@@ -153,6 +156,7 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
     end
 
     @compressor = COMPRESSOR_REGISTRY.lookup(@compress.to_s).new
+    @compressor.configure(conf)
 
     if @host
       @namenode_host = @host
@@ -267,17 +271,22 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
   end
 
   def send_data(path, data)
-    if @append
-      begin
-        @client.append(path, data)
-      rescue WebHDFS::FileNotFoundError
-        @client.create(path, data)
-      end
+    return @client.create(path, data, {'overwrite' => 'true'}) unless @append
+
+    if path_exists?(path)
+      @client.append(path, data)
     else
-      @client.create(path, data, {'overwrite' => 'true'})
+      @client.create(path, data)
     end
   end
 
+  def path_exists?(path)
+    @client.stat(path)
+    true
+  rescue WebHDFS::FileNotFoundError
+    false
+  end
+
   HOSTNAME_PLACEHOLDERS_DEPRECATED = ['${hostname}', '%{hostname}', '__HOSTNAME__']
   UUID_RANDOM_PLACEHOLDERS_DEPRECATED = ['${uuid}', '${uuid:random}', '__UUID__', '__UUID_RANDOM__']
   UUID_OTHER_PLACEHOLDERS_OBSOLETED = ['${uuid:hostname}', '%{uuid:hostname}', '__UUID_HOSTNAME__', '${uuid:timestamp}', '%{uuid:timestamp}', '__UUID_TIMESTAMP__']
@@ -319,7 +328,8 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
     else
       extract_placeholders(@path.gsub(CHUNK_ID_PLACE_HOLDER, dump_unique_id_hex(chunk.unique_id)), chunk)
     end
-    hdfs_path = "#{hdfs_path}#{@compressor.ext}"
+    hdfs_ext = @extension || @compressor.ext
+    hdfs_path = "#{hdfs_path}#{hdfs_ext}"
     if @replace_random_uuid
       uuid_random = SecureRandom.uuid
       hdfs_path = hdfs_path.gsub('%{uuid}', uuid_random).gsub('%{uuid_flush}', uuid_random)
@@ -502,7 +512,7 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
     begin
       Open3.capture3("#{command} -V")
     rescue Errno::ENOENT
-      raise ConfigError, "'#{command}' utility must be in PATH for #{algo} compression"
+      raise Fluent::ConfigError, "'#{command}' utility must be in PATH for #{algo} compression"
     end
   end
 end
@@ -518,4 +528,6 @@ require 'fluent/plugin/webhdfs_compressor_text'
 require 'fluent/plugin/webhdfs_compressor_gzip'
 require 'fluent/plugin/webhdfs_compressor_bzip2'
 require 'fluent/plugin/webhdfs_compressor_snappy'
+require 'fluent/plugin/webhdfs_compressor_hadoop_snappy'
 require 'fluent/plugin/webhdfs_compressor_lzo_command'
+require 'fluent/plugin/webhdfs_compressor_zstd'
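The new `@compressor.configure(conf)` call above is what lets codec-specific settings such as `block_size` travel from the match section into the compressor instance; a rough sketch of the effect, assuming Fluentd's standard Configurable semantics (not verbatim plugin code):

    # conf is the plugin's parsed <match> configuration
    compressor = COMPRESSOR_REGISTRY.lookup("hadoop_snappy").new
    compressor.configure(conf)  # picks up "block_size 32768" if present
    compressor.block_size       # => 32768, instead of the 256 KiB default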
data/lib/fluent/plugin/webhdfs_compressor_hadoop_snappy.rb ADDED
@@ -0,0 +1,32 @@
+module Fluent::Plugin
+  class WebHDFSOutput < Output
+    class HadoopSnappyCompressor < Compressor
+      WebHDFSOutput.register_compressor('hadoop_snappy', self)
+
+      DEFAULT_BLOCK_SIZE = 256 * 1024
+
+      desc 'Block size for compression algorithm'
+      config_param :block_size, :integer, default: DEFAULT_BLOCK_SIZE
+
+      def initialize(options = {})
+        super()
+        begin
+          require "snappy"
+        rescue LoadError
+          raise Fluent::ConfigError, "Install snappy before using snappy compressor"
+        end
+      end
+
+      def ext
+        ".snappy"
+      end
+
+      def compress(chunk, tmp)
+        Snappy::Hadoop::Writer.new(tmp, @block_size) do |w|
+          w << chunk.read
+          w.flush
+        end
+      end
+    end
+  end
+end
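The file above also illustrates the compressor registry pattern: any subclass of Compressor that calls register_compressor and defines ext/compress slots into the same lookup used by the compress parameter. A hypothetical sketch (the my_codec name and pass-through body are invented for illustration):

    module Fluent::Plugin
      class WebHDFSOutput < Output
        class MyCodecCompressor < Compressor
          WebHDFSOutput.register_compressor('my_codec', self)

          def ext
            ".mc"                  # suffix appended to generated HDFS paths
          end

          def compress(chunk, tmp)
            tmp.binmode
            tmp.write chunk.read   # a real codec would encode here
          end
        end
      end
    end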
data/lib/fluent/plugin/webhdfs_compressor_lzo_command.rb CHANGED
@@ -5,8 +5,8 @@ module Fluent::Plugin
 
     config_param :command_parameter, :string, default: '-qf1'
 
-    def configure(conf)
-      super
+    def initialize(options = {})
+      super()
       check_command('lzop', 'LZO')
     end
 
data/lib/fluent/plugin/webhdfs_compressor_snappy.rb CHANGED
@@ -3,11 +3,17 @@ module Fluent::Plugin
   class SnappyCompressor < Compressor
     WebHDFSOutput.register_compressor('snappy', self)
 
+    DEFAULT_BLOCK_SIZE = 32 * 1024
+
+    desc 'Block size for compression algorithm'
+    config_param :block_size, :integer, default: DEFAULT_BLOCK_SIZE
+
     def initialize(options = {})
+      super()
       begin
         require "snappy"
       rescue LoadError
-        raise Fluent::ConfigError, "Install snappy before use snappy compressor"
+        raise Fluent::ConfigError, "Install snappy before using snappy compressor"
       end
     end
 
@@ -16,9 +22,10 @@ module Fluent::Plugin
     end
 
     def compress(chunk, tmp)
-      w = Snappy::Writer.new(tmp)
-      chunk.write_to(w)
-      w.close
+      Snappy::Writer.new(tmp, @block_size) do |w|
+        w << chunk.read
+        w.flush
+      end
     end
   end
 end
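For context, the writer-with-block form used in compress above round-trips with the snappy gem's framed reader; a standalone sketch (requires the snappy gem; StringIO stands in for the plugin's tempfile):

    require "snappy"
    require "stringio"

    io = StringIO.new
    Snappy::Writer.new(io, 32 * 1024) do |w|  # same form as the compressor above
      w << "hello snappy\n" * 1024
      w.flush
    end
    io.rewind
    puts Snappy::Reader.new(io).read.start_with?("hello snappy")  # => true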
data/lib/fluent/plugin/webhdfs_compressor_zstd.rb ADDED
@@ -0,0 +1,24 @@
+module Fluent::Plugin
+  class WebHDFSOutput < Output
+    class ZstdCompressor < Compressor
+      WebHDFSOutput.register_compressor('zstd', self)
+
+      def initialize(options = {})
+        begin
+          require "zstandard"
+        rescue LoadError
+          raise Fluent::ConfigError, "Install zstandard gem before use of zstd compressor"
+        end
+      end
+
+      def ext
+        ".zst"
+      end
+
+      def compress(chunk, tmp)
+        tmp.binmode
+        tmp.write Zstandard.deflate(chunk.read)
+      end
+    end
+  end
+end
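A minimal match-section sketch exercising the new codec (host and path are placeholders in the style of the README examples):

    <match access.**>
      @type webhdfs
      host namenode.your.cluster.local
      port 50070
      path /path/on/hdfs/access.log.%Y%m%d_%H
      compress zstd    # files get the ".zst" suffix from the compressor above
    </match>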
data/test/plugin/test_out_webhdfs.rb CHANGED
@@ -107,6 +107,7 @@ class WebHDFSOutputTest < Test::Unit::TestCase
   data(gzip: [:gzip, Fluent::Plugin::WebHDFSOutput::GzipCompressor],
        bzip2: [:bzip2, Fluent::Plugin::WebHDFSOutput::Bzip2Compressor],
        snappy: [:snappy, Fluent::Plugin::WebHDFSOutput::SnappyCompressor],
+       hadoop_snappy: [:hadoop_snappy, Fluent::Plugin::WebHDFSOutput::HadoopSnappyCompressor],
        lzo: [:lzo_command, Fluent::Plugin::WebHDFSOutput::LZOCommandCompressor])
   def test_compress(data)
     compress_type, compressor_class = data
@@ -126,6 +127,43 @@ class WebHDFSOutputTest < Test::Unit::TestCase
     assert_equal '/hdfs/path/file.%Y%m%d.%H%M.log', d.instance.path
     assert_equal compress_type, d.instance.compress
     assert_equal compressor_class, d.instance.compressor.class
+
+    time = event_time("2020-10-03 15:07:00 +0300")
+    metadata = d.instance.metadata("test", time, {})
+    chunk = d.instance.buffer.generate_chunk(metadata)
+    assert_equal "/hdfs/path/file.20201003.1507.log#{d.instance.compressor.ext}", d.instance.generate_path(chunk)
+  end
+
+  def test_explicit_extensions
+    conf = config_element(
+      "ROOT", "", {
+        "host" => "namenode.local",
+        "path" => "/hdfs/path/file.%Y%m%d.log",
+        "compress" => "snappy",
+        "extension" => ".snappy"
+      })
+    d = create_driver(conf)
+    time = event_time("2020-10-07 15:15:00 +0300")
+    metadata = d.instance.metadata("test", time, {})
+    chunk = d.instance.buffer.generate_chunk(metadata)
+    assert_equal "/hdfs/path/file.20201007.log.snappy", d.instance.generate_path(chunk)
+  end
+
+  data(snappy: [:snappy, Fluent::Plugin::WebHDFSOutput::SnappyCompressor],
+       hadoop_snappy: [:hadoop_snappy, Fluent::Plugin::WebHDFSOutput::HadoopSnappyCompressor])
+  def test_compression_block_size(data)
+    compress_type, compressor_class = data
+    conf = config_element(
+      "ROOT", "", {
+        "host" => "namenode.local",
+        "path" => "/hdfs/path/file.%Y%m%d.log",
+        "compress" => compress_type,
+        "block_size" => 16384
+      })
+    d = create_driver(conf)
+
+    assert_equal compress_type, d.instance.compress
+    assert_equal 16384, d.instance.compressor.block_size
   end
 
   def test_placeholders_old_style
data/test/plugin/test_snappy_compressors.rb ADDED
@@ -0,0 +1,72 @@
+require "helper"
+require "fluent/plugin/buf_memory"
+begin
+  require "snappy"
+rescue LoadError
+end
+
+class SnappyCompressorsTest < Test::Unit::TestCase
+  class Snappy < self
+
+    CONFIG = %[
+      host namenode.local
+      path /hdfs/path/file.%Y%m%d.log
+    ]
+
+    def setup
+      omit unless Object.const_defined?(:Snappy)
+      Fluent::Test.setup
+
+      @compressors_size = 2
+      @compressors = [
+        Fluent::Plugin::WebHDFSOutput::SnappyCompressor.new,
+        Fluent::Plugin::WebHDFSOutput::HadoopSnappyCompressor.new
+      ]
+      @readers = [
+        ::Snappy::Reader,
+        ::Snappy::Hadoop::Reader
+      ]
+      @exts = [".sz", ".snappy"]
+    end
+
+    def create_driver(conf = CONFIG)
+      Fluent::Test::Driver::Output.new(Fluent::Plugin::WebHDFSOutput).configure(conf)
+    end
+
+    def test_ext
+      for i in 0...@compressors_size do
+        assert_equal(@exts[i], @compressors[i].ext)
+      end
+    end
+
+    def test_compress
+      d = create_driver
+      if d.instance.respond_to?(:buffer)
+        buffer = d.instance.buffer
+      else
+        buffer = d.instance.instance_variable_get(:@buffer)
+      end
+
+      if buffer.respond_to?(:generate_chunk)
+        chunk = buffer.generate_chunk("test")
+        chunk.concat("hello snappy\n" * 32 * 1024, 1)
+      else
+        chunk = buffer.new_chunk("test")
+        chunk << "hello snappy\n" * 32 * 1024
+      end
+
+      for i in 0...@compressors_size do
+        io = Tempfile.new("snappy-")
+        @compressors[i].compress(chunk, io)
+        io.open
+        chunk_bytesize = chunk.respond_to?(:bytesize) ? chunk.bytesize : chunk.size
+        assert(chunk_bytesize > io.read.bytesize)
+        io.rewind
+        reader = @readers[i].new(io)
+        assert_equal(chunk.read, reader.read)
+        io.close
+      end
+    end
+  end
+end
+
data/test/plugin/test_compressor.rb → data/test/plugin/test_zstd_compressor.rb RENAMED
@@ -1,12 +1,9 @@
 require "helper"
 require "fluent/plugin/buf_memory"
-begin
-  require "snappy"
-rescue LoadError
-end
+require 'zstandard'
 
-class CompressorTest < Test::Unit::TestCase
-  class Snappy < self
+class ZstdCompressorTest < Test::Unit::TestCase
+  class Zstd < self
 
     CONFIG = %[
       host namenode.local
@@ -14,9 +11,9 @@ class CompressorTest < Test::Unit::TestCase
     ]
 
     def setup
-      omit unless Object.const_defined?(:Snappy)
+      omit unless Object.const_defined?(:Zstandard)
       Fluent::Test.setup
-      @compressor = Fluent::Plugin::WebHDFSOutput::SnappyCompressor.new
+      @compressor = Fluent::Plugin::WebHDFSOutput::ZstdCompressor.new
     end
 
     def create_driver(conf = CONFIG)
@@ -24,7 +21,7 @@ class CompressorTest < Test::Unit::TestCase
     end
 
     def test_ext
-      assert_equal(".sz", @compressor.ext)
+      assert_equal(".zst", @compressor.ext)
     end
 
     def test_compress
@@ -37,22 +34,22 @@ class CompressorTest < Test::Unit::TestCase
 
       if buffer.respond_to?(:generate_chunk)
         chunk = buffer.generate_chunk("test")
-        chunk.concat("hello snappy\n" * 32 * 1024, 1)
+        chunk.concat("hello gzip\n" * 32 * 1024, 1)
       else
         chunk = buffer.new_chunk("test")
-        chunk << "hello snappy\n" * 32 * 1024
+        chunk << "hello gzip\n" * 32 * 1024
       end
 
-      io = Tempfile.new("snappy-")
+      io = Tempfile.new("zstd-")
       @compressor.compress(chunk, io)
-      io.open
+      assert !io.closed?
       chunk_bytesize = chunk.respond_to?(:bytesize) ? chunk.bytesize : chunk.size
       assert(chunk_bytesize > io.read.bytesize)
       io.rewind
-      reader = ::Snappy::Reader.new(io)
-      assert_equal(chunk.read, reader.read)
+      reader = Zstandard.inflate(io.read)
+      io.rewind
+      assert_equal(chunk.read, reader)
       io.close
     end
   end
 end
-
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-webhdfs
 version: !ruby/object:Gem::Version
-  version: 1.2.4
+  version: 1.4.0
 platform: ruby
 authors:
 - TAGOMORI Satoshi
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-07-09 00:00:00.000000000 Z
+date: 2020-12-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake
@@ -94,6 +94,20 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: zstandard
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: fluentd
   requirement: !ruby/object:Gem::Requirement
@@ -140,13 +154,16 @@ files:
 - lib/fluent/plugin/out_webhdfs.rb
 - lib/fluent/plugin/webhdfs_compressor_bzip2.rb
 - lib/fluent/plugin/webhdfs_compressor_gzip.rb
+- lib/fluent/plugin/webhdfs_compressor_hadoop_snappy.rb
 - lib/fluent/plugin/webhdfs_compressor_lzo_command.rb
 - lib/fluent/plugin/webhdfs_compressor_snappy.rb
 - lib/fluent/plugin/webhdfs_compressor_text.rb
+- lib/fluent/plugin/webhdfs_compressor_zstd.rb
 - test/helper.rb
-- test/plugin/test_compressor.rb
 - test/plugin/test_gzip_compressor.rb
 - test/plugin/test_out_webhdfs.rb
+- test/plugin/test_snappy_compressors.rb
+- test/plugin/test_zstd_compressor.rb
 homepage: https://github.com/fluent/fluent-plugin-webhdfs
 licenses:
 - Apache-2.0
@@ -172,6 +189,7 @@ specification_version: 4
 summary: Fluentd plugin to write data on HDFS over WebHDFS, with flexible formatting
 test_files:
 - test/helper.rb
-- test/plugin/test_compressor.rb
 - test/plugin/test_gzip_compressor.rb
 - test/plugin/test_out_webhdfs.rb
+- test/plugin/test_snappy_compressors.rb
+- test/plugin/test_zstd_compressor.rb