fluent-plugin-webhdfs 1.5.0 → 1.6.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: bc42357da759e1c34ec12b3994bdd96b9f56cc1b093bb890f0ec4bccf929362d
-  data.tar.gz: e63cb6a5df15e5cf2fe8228d9e0e21ff5adf6db9cc5c4e11138aaac77429dc85
+  metadata.gz: 73ccae52314663de476e1dd1de3a9e57e134b925be65b9d9fa093870c6c03371
+  data.tar.gz: 594b0eea372de63d9c8d64af590514b07dd2e01652ab1c1b37fa6303ad141f68
 SHA512:
-  metadata.gz: e613ca241b2624ac77c1b1651de28aad3b4e7060086067d1eefc4874241e0a60437bddf61f3775c834a94fdaf1cd374fad1cb5b60e16909db49cf9dc7663770b
-  data.tar.gz: 7eb4b39ab4763f661e1e213d736eb3544d88e1a73b24a0b0d59b6a715fa3fdbeceb62f5a47d14be6d06126747d4751f0b73fa7267591c6d2713d2dac35901f5e
+  metadata.gz: a73865d4ee2f052fbacda6d30a6fe24d1ea7c6d701358d3742bba04395daf4989f6b0538f7afc988c8c2cae70ddd94a0143151159231a6c96f479219dc43467f
+  data.tar.gz: 5483f28a5b3ba3e7b64726c6974ddb05c0900bf000021e0d92bfb1c45850db928be278d375a45a1dfbb0d8da74ee14e0855a34336cbae0531c585dc15b391643
.github/dependabot.yml ADDED
@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: 'github-actions'
+    directory: '/'
+    schedule:
+      interval: 'weekly'
.github/workflows/test-ruby-head.yml ADDED
@@ -0,0 +1,28 @@
+name: Testing with Ruby head
+on:
+  schedule:
+    - cron: '32 14 * * 0'
+  workflow_dispatch:
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: ['ubuntu-latest']
+        ruby: [ 'head' ]
+
+    name: Ruby ${{ matrix.ruby }} on ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install dependencies
+        run: sudo apt-get install libsnappy-dev libzstd-dev
+      - uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: ${{ matrix.ruby }}
+      - name: unit testing
+        env:
+          CI: true
+        run: |
+          bundle install --jobs 4 --retry 3
+          bundle exec rake test
.github/workflows/test.yml ADDED
@@ -0,0 +1,29 @@
+name: Test
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: ['ubuntu-latest']
+        ruby: [ '3.3', '3.2', '3.1', '3.0', '2.7' ]
+
+    name: Ruby ${{ matrix.ruby }} on ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install dependencies
+        run: sudo apt-get install libsnappy-dev libzstd-dev
+      - uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: ${{ matrix.ruby }}
+      - name: unit testing
+        env:
+          CI: true
+        run: |
+          bundle install --jobs 4 --retry 3
+          bundle exec rake test
fluent-plugin-webhdfs.gemspec CHANGED
@@ -2,7 +2,7 @@
 
 Gem::Specification.new do |gem|
   gem.name = "fluent-plugin-webhdfs"
-  gem.version = "1.5.0"
+  gem.version = "1.6.0"
   gem.authors = ["TAGOMORI Satoshi"]
   gem.email = ["tagomoris@gmail.com"]
   gem.summary = %q{Fluentd plugin to write data on HDFS over WebHDFS, with flexible formatting}
@@ -23,5 +23,5 @@ Gem::Specification.new do |gem|
   gem.add_development_dependency "bzip2-ffi"
   gem.add_development_dependency "zstandard"
   gem.add_runtime_dependency "fluentd", '>= 0.14.22'
-  gem.add_runtime_dependency "webhdfs", '>= 0.10.0'
+  gem.add_runtime_dependency "webhdfs", '>= 0.11.0'
 end
lib/fluent/plugin/out_webhdfs.rb CHANGED
@@ -70,6 +70,8 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
   config_param :renew_kerberos_delegation_token, :bool, default: false
   desc 'delegation token reuse timer (default 8h)'
   config_param :renew_kerberos_delegation_token_interval, :time, default: 8 * 60 * 60
+  desc 'delegation token max-lifetime (default 7d)'
+  config_param :kerberos_delegation_token_max_lifetime, :time, default: 7 * 24 * 60 * 60
 
   SUPPORTED_COMPRESS = [:gzip, :bzip2, :snappy, :hadoop_snappy, :lzo_command, :zstd, :text]
   desc "Compression method (#{SUPPORTED_COMPRESS.join(',')})"
@@ -114,7 +116,7 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
               else 86400
               end
     if buffer_config = conf.elements(name: "buffer").first
-      timekey = buffer_config["timekey"] || timekey
+      timekey = buffer_config["timekey"] || timekey
     end
 
     compat_parameters_convert(conf, :buffer, default_chunk_key: "time")
@@ -189,7 +191,9 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
     end
 
     @renew_kerberos_delegation_token_interval_hour = nil
+    @kerberos_delegation_token_max_lifetime_hour = nil
     if @renew_kerberos_delegation_token
+      @kerberos_delegation_token_max_lifetime_hour = @kerberos_delegation_token_max_lifetime / 60 / 60
       unless @username
         raise Fluent::ConfigError, "username is missing. If you want to reuse delegation token, follow with kerberos accounts"
       end
@@ -215,7 +219,7 @@ class Fluent::Plugin::WebHDFSOutput < Fluent::Plugin::Output
   end
 
   def prepare_client(host, port, username)
-    client = WebHDFS::Client.new(host, port, username, nil, nil, nil, {}, @renew_kerberos_delegation_token_interval_hour)
+    client = WebHDFS::Client.new(host, port, username, nil, nil, nil, {}, @renew_kerberos_delegation_token_interval_hour, @kerberos_delegation_token_max_lifetime_hour)
     if @httpfs
       client.httpfs_mode = true
     end
@@ -542,4 +546,4 @@ require 'fluent/plugin/webhdfs_compressor_bzip2'
 require 'fluent/plugin/webhdfs_compressor_snappy'
 require 'fluent/plugin/webhdfs_compressor_hadoop_snappy'
 require 'fluent/plugin/webhdfs_compressor_lzo_command'
-require 'fluent/plugin/webhdfs_compressor_zstd'
+require 'fluent/plugin/webhdfs_compressor_zstd'
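The new kerberos_delegation_token_max_lifetime option takes a Fluentd :time value (seconds), is converted to hours in configure, and is passed to the webhdfs client right after the renewal interval. A minimal Ruby sketch of that flow, not part of the diff; the host, port, and user are the placeholder values used in the test suite, and the numbers are the plugin defaults:

  require "webhdfs"  # webhdfs >= 0.11.0, matching the gemspec bump in this release

  # "7d" (the default max-lifetime) is parsed by the :time type into seconds.
  max_lifetime      = 7 * 24 * 60 * 60        # 604800 seconds
  max_lifetime_hour = max_lifetime / 60 / 60  # => 168, the 7 * 24 expected in the tests
  renew_hour        = 8                       # default renew_kerberos_delegation_token_interval in hours

  # prepare_client now forwards both hour values as the last two positional arguments.
  client = WebHDFS::Client.new("server.local", 14000, "hdfs_user",
                               nil, nil, nil, {}, renew_hour, max_lifetime_hour)
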
test/plugin/test_out_webhdfs.rb CHANGED
@@ -328,7 +328,7 @@ class WebHDFSOutputTest < Test::Unit::TestCase
        })
 
    test "renew_kerberos_delegation_token default" do
-      mock.proxy(WebHDFS::Client).new("server.local", 14000, "hdfs_user", nil, nil, nil, {}, nil).once
+      mock.proxy(WebHDFS::Client).new("server.local", 14000, "hdfs_user", nil, nil, nil, {}, nil, nil).once
 
      d = create_driver(CONFIG_KERBEROS)
 
@@ -337,18 +337,20 @@ class WebHDFSOutputTest < Test::Unit::TestCase
          kerberos: true,
          renew_kerberos_delegation_token: false,
          renew_kerberos_delegation_token_interval_hour: nil,
+          kerberos_delegation_token_max_lifetime_hour: nil,
        },
        {
          kerberos: d.instance.kerberos,
          renew_kerberos_delegation_token: d.instance.instance_eval("@renew_kerberos_delegation_token"),
          renew_kerberos_delegation_token_interval_hour: d.instance.instance_eval("@renew_kerberos_delegation_token_interval_hour"),
+          kerberos_delegation_token_max_lifetime_hour: d.instance.instance_eval("@kerberos_delegation_token_max_lifetime_hour"),
        })
    end
 
    test "default renew_kerberos_delegation_token_interval" do
      expected_hour = 8
-
-      mock.proxy(WebHDFS::Client).new("server.local", 14000, "hdfs_user", nil, nil, nil, {}, expected_hour).once
+      expected_delegation_token_max_lifetime_hour = 7 * 24
+      mock.proxy(WebHDFS::Client).new("server.local", 14000, "hdfs_user", nil, nil, nil, {}, expected_hour, expected_delegation_token_max_lifetime_hour).once
 
      d = create_driver(CONFIG_KERBEROS +
                        config_element("", "", { "renew_kerberos_delegation_token" => true }))
@@ -359,19 +361,24 @@ class WebHDFSOutputTest < Test::Unit::TestCase
          renew_kerberos_delegation_token: true,
          renew_kerberos_delegation_token_interval: expected_hour * 60 * 60,
          renew_kerberos_delegation_token_interval_hour: expected_hour,
+          kerberos_delegation_token_max_lifetime: expected_delegation_token_max_lifetime_hour * 60 * 60,
+          kerberos_delegation_token_max_lifetime_hour: expected_delegation_token_max_lifetime_hour,
        },
        {
          kerberos: d.instance.kerberos,
          renew_kerberos_delegation_token: d.instance.instance_eval("@renew_kerberos_delegation_token"),
          renew_kerberos_delegation_token_interval: d.instance.instance_eval("@renew_kerberos_delegation_token_interval"),
          renew_kerberos_delegation_token_interval_hour: d.instance.instance_eval("@renew_kerberos_delegation_token_interval_hour"),
+          kerberos_delegation_token_max_lifetime: d.instance.instance_eval("@kerberos_delegation_token_max_lifetime"),
+          kerberos_delegation_token_max_lifetime_hour: d.instance.instance_eval("@kerberos_delegation_token_max_lifetime_hour"),
        })
    end
 
    test "renew_kerberos_delegation_token_interval" do
      expected_hour = 10
+      expected_delegation_token_max_lifetime_hour = 24
 
-      mock.proxy(WebHDFS::Client).new("server.local", 14000, "hdfs_user", nil, nil, nil, {}, expected_hour).once
+      mock.proxy(WebHDFS::Client).new("server.local", 14000, "hdfs_user", nil, nil, nil, {}, expected_hour,expected_delegation_token_max_lifetime_hour).once
 
      d = create_driver(
        CONFIG_KERBEROS +
@@ -380,6 +387,7 @@ class WebHDFSOutputTest < Test::Unit::TestCase
        {
          "renew_kerberos_delegation_token" => true,
          "renew_kerberos_delegation_token_interval" => "#{expected_hour}h",
+          "kerberos_delegation_token_max_lifetime" => "#{expected_delegation_token_max_lifetime_hour}h"
        }))
 
      assert_equal(
@@ -388,12 +396,16 @@ class WebHDFSOutputTest < Test::Unit::TestCase
          renew_kerberos_delegation_token: true,
          renew_kerberos_delegation_token_interval: expected_hour * 60 * 60,
          renew_kerberos_delegation_token_interval_hour: expected_hour,
+          kerberos_delegation_token_max_lifetime: expected_delegation_token_max_lifetime_hour * 60 * 60,
+          kerberos_delegation_token_max_lifetime_hour: expected_delegation_token_max_lifetime_hour
        },
        {
          kerberos: d.instance.kerberos,
          renew_kerberos_delegation_token: d.instance.instance_eval("@renew_kerberos_delegation_token"),
          renew_kerberos_delegation_token_interval: d.instance.instance_eval("@renew_kerberos_delegation_token_interval"),
          renew_kerberos_delegation_token_interval_hour: d.instance.instance_eval("@renew_kerberos_delegation_token_interval_hour"),
+          kerberos_delegation_token_max_lifetime: d.instance.instance_eval("@kerberos_delegation_token_max_lifetime"),
+          kerberos_delegation_token_max_lifetime_hour: d.instance.instance_eval("@kerberos_delegation_token_max_lifetime_hour"),
        })
    end
 
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-webhdfs
 version: !ruby/object:Gem::Version
-  version: 1.5.0
+  version: 1.6.0
 platform: ruby
 authors:
 - TAGOMORI Satoshi
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-08-04 00:00:00.000000000 Z
+date: 2024-03-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake
@@ -128,14 +128,14 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 0.10.0
+        version: 0.11.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 0.10.0
+        version: 0.11.0
 description: For WebHDFS and HttpFs of Hadoop HDFS
 email:
 - tagomoris@gmail.com
@@ -143,7 +143,9 @@ executables: []
 extensions: []
 extra_rdoc_files: []
 files:
-- ".github/workflows/linux.yml"
+- ".github/dependabot.yml"
+- ".github/workflows/test-ruby-head.yml"
+- ".github/workflows/test.yml"
 - ".gitignore"
 - ".travis.yml"
 - Appraisals
@@ -184,7 +186,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.2.5
+rubygems_version: 3.3.5
 signing_key:
 specification_version: 4
 summary: Fluentd plugin to write data on HDFS over WebHDFS, with flexible formatting
.github/workflows/linux.yml DELETED
@@ -1,35 +0,0 @@
-name: Testing on Ubuntu
-on:
-  - push
-  - pull_request
-jobs:
-  build:
-    runs-on: ${{ matrix.os }}
-    continue-on-error: ${{ matrix.experimental }}
-    strategy:
-      fail-fast: false
-      matrix:
-        ruby: [ '2.5', '2.6', '2.7', '3.0' ]
-        os:
-          - ubuntu-latest
-        experimental: [false]
-        include:
-          - ruby: head
-            os: ubuntu-latest
-            experimental: true
-
-    name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v2
-      - name: Install dependencies
-        run: sudo apt-get install libsnappy-dev libzstd-dev
-      - uses: ruby/setup-ruby@v1
-        with:
-          ruby-version: ${{ matrix.ruby }}
-      - name: unit testing
-        env:
-          CI: true
-        run: |
-          gem install bundler rake
-          bundle install --jobs 4 --retry 3
-          bundle exec rake test