fluent-plugin-webhdfs 1.2.1 → 1.2.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 6289f2441877bae9eee9aa392bcb9953518e0648
-  data.tar.gz: 6ba27fa2dda31b18234ce9658a2b19cce1dbd3ff
+  metadata.gz: 4637e563a9e04c877cd6d897d013ac57a63d1a80
+  data.tar.gz: 305a8e89b883175c47b4fc9811fd1a27f9773141
 SHA512:
-  metadata.gz: bb586a6832defd2f355f61fb0886b077877cd11926b875d45065bae7ad84128510782cc509c7b07b999a8409b23aeebb4e3fcebc816fc1b040c863ce76378e54
-  data.tar.gz: 6ed07a2cad48bb9e58c1b4a9c1e8ecb8aabed2d19bedb4c5965ae45da794514c3b2b4ce5bcc248be3597b2745df871a6050af87b82e51c1c15af97b27d100738
+  metadata.gz: ded6f6b4ba2ca28fa49be0a4a23127ec643d1adfc046c7cb14d37cc91ad90705252ac7cc986439c0793849c876ffcf88c5b6abc21a4ba9f7284c6933e07a8632
+  data.tar.gz: 1e1d5b936463bfab84baeec2cf9b6064dcd25890712f8d315b5a18457dc87087c4270a2a2f1cdf0a9e390c91345bb9ac1eb739088644537c0a6bae1564cf1325
data/README.md CHANGED
@@ -225,7 +225,7 @@ For high load cluster nodes, you can specify timeouts for HTTP requests.
 
 With default configuration, fluent-plugin-webhdfs checks HDFS filesystem status and raise error for inacive NameNodes.
 
-If you were usging unstable NameNodes and have wanted to ignore NameNode errors on startup of fluentd, enable `ignore_start_check_error` option like below:
+If you were using unstable NameNodes and have wanted to ignore NameNode errors on startup of fluentd, enable `ignore_start_check_error` option like below:
 
 <match access.**>
   @type webhdfs
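
For reference, a minimal sketch of the kind of configuration this README passage points at; the host, port, and path values below are illustrative placeholders and are not part of the diff:

    <match access.**>
      @type webhdfs
      host namenode.your.cluster.local
      port 50070
      path /log/access/%Y%m%d/access.log
      ignore_start_check_error true
    </match>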
fluent-plugin-webhdfs.gemspec CHANGED
@@ -2,7 +2,7 @@
 
 Gem::Specification.new do |gem|
   gem.name = "fluent-plugin-webhdfs"
-  gem.version = "1.2.1"
+  gem.version = "1.2.2"
   gem.authors = ["TAGOMORI Satoshi"]
   gem.email = ["tagomoris@gmail.com"]
   gem.summary = %q{Fluentd plugin to write data on HDFS over WebHDFS, with flexible formatting}
lib/fluent/plugin/out_webhdfs.rb CHANGED
@@ -310,7 +310,7 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
     hdfs_path = if @append
                   extract_placeholders(@path, chunk.metadata)
                 else
-                  extract_placeholders(@path, chunk.metadata).gsub(CHUNK_ID_PLACE_HOLDER, dump_unique_id_hex(chunk.unique_id))
+                  extract_placeholders(@path.gsub(CHUNK_ID_PLACE_HOLDER, dump_unique_id_hex(chunk.unique_id)), chunk.metadata)
                 end
     hdfs_path = "#{hdfs_path}#{@compressor.ext}"
     if @replace_random_uuid
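
The change swaps which step runs first: `${chunk_id}` is now substituted into the raw `@path` template before the result is handed to `extract_placeholders`, so placeholder extraction no longer encounters the `${chunk_id}` token it does not recognize (the test change below asserts that no log output is produced). A standalone Ruby sketch of the idea, not the plugin's actual code; `extract_placeholders` is stubbed here only to mimic a warning on unknown tokens:

    CHUNK_ID_PLACE_HOLDER = "${chunk_id}"

    # Stand-in for Fluentd's Output#extract_placeholders: resolves known
    # placeholders and warns when it meets a token it does not recognize.
    def extract_placeholders(template, metadata)
      warn "unknown placeholder in #{template}" if template.include?(CHUNK_ID_PLACE_HOLDER)
      template
    end

    chunk_id_hex = "abc123"
    path = "/hdfs/path/file.${chunk_id}.log"

    # old order: extract first, substitute after -- the extractor still sees ${chunk_id}
    old_path = extract_placeholders(path, nil).gsub(CHUNK_ID_PLACE_HOLDER, chunk_id_hex)

    # new order: substitute first, so the extractor gets a fully resolved template
    new_path = extract_placeholders(path.gsub(CHUNK_ID_PLACE_HOLDER, chunk_id_hex), nil)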
test/plugin/test_out_webhdfs.rb CHANGED
@@ -168,6 +168,7 @@ class WebHDFSOutputTest < Test::Unit::TestCase
     metadata = d.instance.metadata("test", nil, {})
     chunk = d.instance.buffer.generate_chunk(metadata)
     assert_equal "/hdfs/path/file.#{dump_unique_id_hex(chunk.unique_id)}.log", d.instance.generate_path(chunk)
+    assert_empty d.instance.log.out.logs
   end
 
   data(path: { "append" => false },
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-webhdfs
 version: !ruby/object:Gem::Version
-  version: 1.2.1
+  version: 1.2.2
 platform: ruby
 authors:
 - TAGOMORI Satoshi
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-06-28 00:00:00.000000000 Z
+date: 2017-08-23 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake