fluent-plugin-kubernetes_metadata_filter 2.4.1 → 2.4.2

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 636af222c31e7e4db80d96594f76d3f4918f981120e9a4006e414ddd6b94df15
4
- data.tar.gz: a4edbb9c153da167210ee0fc879d38e37428839ed640264ce81ad824e7991004
3
+ metadata.gz: 15b2d6f98c168e196ed582fda2cdc02775e66725c7f827c0b2e8aec361fea719
4
+ data.tar.gz: 3cef1e6ce88263b9202ffc897ddf833dd4056cc4ebbe73e211901957e71d08b2
5
5
  SHA512:
6
- metadata.gz: a7b667937578e95e259752e9ad64391774103d1ad2ea7f569259ee6f0d6addfbb0e0960fe95379300052d8863342bc6cb0235f95af8f0346a03f6dba3dffffb2
7
- data.tar.gz: b8e0a412669d5fe57ce3c9cb8ed641e75c0c67d42305cdf88b91838621e781d172a19e079a4e312117e3e1dfd411f5c91c92a0e1626ed6a9c152e0633f64bcb2
6
+ metadata.gz: 9a44d475049124488353e2e87dd6124eb05eff10d0f3f5dc2234f947379a5050bf7240e0b371b0e825097bacce38e056a0873f135b4cbae216c5a391fbce2bc4
7
+ data.tar.gz: ec40671937051616e5a57d89397d67a5dcc85cc6040ac53871602eee9b7947d9d0ab8337800ac51ca73aec1086ff8157e311edf299e502bac96e764cc5185549
data/.circleci/config.yml CHANGED
@@ -2,13 +2,15 @@ version: 2.1
2
2
 
3
3
  install: &install
4
4
  name: Install bundle
5
- command: bundle install --path vendor/bundle
5
+ command: |
6
+ gem install bundler
7
+ bundle install --path vendor/bundle
6
8
 
7
9
  missingdeps: &missingdeps
8
10
  name: Install missing dependencies
9
11
  command: |
10
12
  cat /etc/os-release
11
- printf "deb http://archive.debian.org/debian/ jessie main\ndeb-src http://archive.debian.org/debian/ jessie main\ndeb http://security.debian.org jessie/updates main\ndeb-src http://security.debian.org jessie/updates main" > /tmp/sources.list
13
+ printf "deb http://deb.debian.org/debian buster main\ndeb http://security.debian.org buster/updates main\ndeb-src http://security.debian.org buster/updates main" > /tmp/sources.list
12
14
  sudo cp /tmp/sources.list /etc/apt/sources.list
13
15
  sudo apt-get update
14
16
  sudo apt-get install cmake libicu-dev libssl-dev
@@ -53,4 +55,3 @@ workflows:
53
55
  ruby-version: ruby-2-5
54
56
  - ruby-test:
55
57
  ruby-version: ruby-2-6
56
-
data/README.md CHANGED
@@ -61,6 +61,7 @@ when true (default: `true`)
61
61
  * `skip_container_metadata` - Skip some of the container data of the metadata. The metadata will not contain the container_image and container_image_id fields.
62
62
  * `skip_master_url` - Skip the master_url field from the metadata.
63
63
  * `skip_namespace_metadata` - Skip the namespace_id field from the metadata. The fetch_namespace_metadata function will be skipped. The plugin will be faster and cpu consumption will be less.
64
+ * `watch_retry_interval` - The time interval in seconds for retry backoffs when watch connections fail. (default: `10`)
64
65
 
65
66
  **NOTE:** As of the release 2.1.x of this plugin, it no longer supports parsing the source message into JSON and attaching it to the
66
67
  payload. The following configuration options are removed:
@@ -80,16 +81,36 @@ then the plugin will parse those values using `container_name_to_kubernetes_rege
80
81
  - Otherwise, if the tag matches `tag_to_kubernetes_name_regexp`, the plugin will parse the tag and use those values to
81
82
  lookup the metadata
82
83
 
83
- Reading from the JSON formatted log files with `in_tail` and wildcard filenames:
84
+ Reading from the JSON formatted log files with `in_tail` and wildcard filenames while respecting the CRI-o log format with the same config you need the fluent-plugin "multi-format-parser":
85
+
86
+ ```
87
+ fluent-gem install fluent-plugin-multi-format-parser
88
+ ```
89
+
90
+ The config block could look like this:
84
91
  ```
85
92
  <source>
86
93
  @type tail
87
94
  path /var/log/containers/*.log
88
95
  pos_file fluentd-docker.pos
89
- time_format %Y-%m-%dT%H:%M:%S
90
- tag kubernetes.*
91
- format json
92
96
  read_from_head true
97
+ tag kubernetes.*
98
+ <parse>
99
+ @type multi_format
100
+ <pattern>
101
+ format json
102
+ time_key time
103
+ time_type string
104
+ time_format "%Y-%m-%dT%H:%M:%S.%NZ"
105
+ keep_time_key false
106
+ </pattern>
107
+ <pattern>
108
+ format regexp
109
+ expression /^(?<time>.+) (?<stream>stdout|stderr)( (?<logtag>.))? (?<log>.*)$/
110
+ time_format '%Y-%m-%dT%H:%M:%S.%N%:z'
111
+ keep_time_key false
112
+ </pattern>
113
+ </parse>
93
114
  </source>
94
115
 
95
116
  <filter kubernetes.var.log.containers.**.log>
@@ -128,6 +149,22 @@ Reading from the systemd journal (requires the fluentd `fluent-plugin-systemd` a
128
149
  @type stdout
129
150
  </match>
130
151
  ```
152
+ ## Log content as JSON
153
 + In former versions this plugin parsed the value of the key log as JSON. In the current version this feature was removed, to avoid duplicate features in the fluentd plugin ecosystem. It can be parsed with the parser plugin like this:
154
+ ```
155
+ <filter kubernetes.**>
156
+ @type parser
157
+ key_name log
158
+ <parse>
159
+ @type json
160
+ json_parser json
161
+ </parse>
162
+ replace_invalid_sequence true
163
+ reserve_data true # this preserves unparsable log lines
164
+ emit_invalid_record_to_error false # In case of unparsable log lines keep the error log clean
165
+ reserve_time # the time was already parsed in the source, we don't want to overwrite it with current time.
166
+ </filter>
167
+ ```
131
168
 
132
169
  ## Environment variables for Kubernetes
133
170
 
@@ -4,7 +4,7 @@ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
4
4
 
5
5
  Gem::Specification.new do |gem|
6
6
  gem.name = "fluent-plugin-kubernetes_metadata_filter"
7
- gem.version = "2.4.1"
7
+ gem.version = "2.4.2"
8
8
  gem.authors = ["Jimmi Dyson"]
9
9
  gem.email = ["jimmidyson@gmail.com"]
10
10
  gem.description = %q{Filter plugin to add Kubernetes metadata}
@@ -23,7 +23,7 @@ Gem::Specification.new do |gem|
23
23
  gem.add_runtime_dependency "lru_redux"
24
24
  gem.add_runtime_dependency "kubeclient", '< 5'
25
25
 
26
- gem.add_development_dependency "bundler", "~> 2.0.2"
26
+ gem.add_development_dependency "bundler", "~> 2.0"
27
27
  gem.add_development_dependency "rake"
28
28
  gem.add_development_dependency "minitest", "~> 4.0"
29
29
  gem.add_development_dependency "test-unit", "~> 3.0.2"
@@ -22,6 +22,8 @@ require_relative 'kubernetes_metadata_common'
22
22
  require_relative 'kubernetes_metadata_stats'
23
23
  require_relative 'kubernetes_metadata_watch_namespaces'
24
24
  require_relative 'kubernetes_metadata_watch_pods'
25
+
26
+ require 'fluent/plugin_helper/thread'
25
27
  require 'fluent/plugin/filter'
26
28
  require 'resolv'
27
29
 
@@ -37,6 +39,8 @@ module Fluent::Plugin
37
39
 
38
40
  Fluent::Plugin.register_filter('kubernetes_metadata', self)
39
41
 
42
+ helpers :thread
43
+
40
44
  config_param :kubernetes_url, :string, default: nil
41
45
  config_param :cache_size, :integer, default: 1000
42
46
  config_param :cache_ttl, :integer, default: 60 * 60
@@ -80,6 +84,12 @@ module Fluent::Plugin
80
84
  config_param :skip_container_metadata, :bool, default: false
81
85
  config_param :skip_master_url, :bool, default: false
82
86
  config_param :skip_namespace_metadata, :bool, default: false
87
+ # The time interval in seconds for retry backoffs when watch connections fail.
88
+ config_param :watch_retry_interval, :bool, default: 1
89
+ # The base number of exponential backoff for retries.
90
+ config_param :watch_retry_exponential_backoff_base, :bool, default: 2
91
+ # The maximum number of times to retry pod and namespace watches.
92
+ config_param :watch_retry_max_times, :bool, default: 10
83
93
 
84
94
  def fetch_pod_metadata(namespace_name, pod_name)
85
95
  log.trace("fetching pod metadata: #{namespace_name}/#{pod_name}") if log.trace?
@@ -264,9 +274,14 @@ module Fluent::Plugin
264
274
  end
265
275
 
266
276
  if @watch
267
- thread = Thread.new(self) { |this| this.start_pod_watch }
268
- thread.abort_on_exception = true
269
- namespace_thread = Thread.new(self) { |this| this.start_namespace_watch }
277
+ pod_thread = thread_create :"pod_watch_thread" do
278
+ set_up_pod_thread
279
+ end
280
+ pod_thread.abort_on_exception = true
281
+
282
+ namespace_thread = thread_create :"namespace_watch_thread" do
283
+ set_up_namespace_thread
284
+ end
270
285
  namespace_thread.abort_on_exception = true
271
286
  end
272
287
  end
@@ -23,16 +23,81 @@ module KubernetesMetadata
23
23
 
24
24
  include ::KubernetesMetadata::Common
25
25
 
26
+ def set_up_namespace_thread
27
+ # Any failures / exceptions in the initial setup should raise
28
 + # Fluent::ConfigError, so that users can inspect potential errors in
29
+ # the configuration.
30
+ namespace_watcher = start_namespace_watch
31
+ Thread.current[:namespace_watch_retry_backoff_interval] = @watch_retry_interval
32
+ Thread.current[:namespace_watch_retry_count] = 0
33
+
34
+ # Any failures / exceptions in the followup watcher notice
35
+ # processing will be swallowed and retried. These failures /
36
+ # exceptions could be caused by Kubernetes API being temporarily
37
+ # down. We assume the configuration is correct at this point.
38
+ while thread_current_running?
39
+ begin
40
+ namespace_watcher ||= get_namespaces_and_start_watcher
41
+ process_namespace_watcher_notices(namespace_watcher)
42
+ rescue Exception => e
43
+ @stats.bump(:namespace_watch_failures)
44
+ if Thread.current[:namespace_watch_retry_count] < @watch_retry_max_times
45
+ # Instead of raising exceptions and crashing Fluentd, swallow
46
+ # the exception and reset the watcher.
47
+ log.info(
48
+ "Exception encountered parsing namespace watch event. " \
49
+ "The connection might have been closed. Sleeping for " \
50
+ "#{Thread.current[:namespace_watch_retry_backoff_interval]} " \
51
+ "seconds and resetting the namespace watcher.", e)
52
+ sleep(Thread.current[:namespace_watch_retry_backoff_interval])
53
+ Thread.current[:namespace_watch_retry_count] += 1
54
+ Thread.current[:namespace_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
55
+ namespace_watcher = nil
56
+ else
57
+ # Since retries failed for many times, log as errors instead
58
+ # of info and raise exceptions and trigger Fluentd to restart.
59
+ message =
60
+ "Exception encountered parsing namespace watch event. The " \
61
+ "connection might have been closed. Retried " \
62
+ "#{@watch_retry_max_times} times yet still failing. Restarting."
63
+ log.error(message, e)
64
+ raise Fluent::UnrecoverableError.new(message)
65
+ end
66
+ end
67
+ end
68
+ end
69
+
26
70
  def start_namespace_watch
27
- begin
28
- resource_version = @client.get_namespaces.resourceVersion
29
- watcher = @client.watch_namespaces(resource_version)
30
- rescue Exception=>e
31
- message = "start_namespace_watch: Exception encountered setting up namespace watch from Kubernetes API #{@apiVersion} endpoint #{@kubernetes_url}: #{e.message}"
32
- message += " (#{e.response})" if e.respond_to?(:response)
33
- log.debug(message)
34
- raise Fluent::ConfigError, message
71
+ return get_namespaces_and_start_watcher
72
+ rescue Exception => e
73
+ message = "start_namespace_watch: Exception encountered setting up " \
74
+ "namespace watch from Kubernetes API #{@apiVersion} endpoint " \
75
+ "#{@kubernetes_url}: #{e.message}"
76
+ message += " (#{e.response})" if e.respond_to?(:response)
77
+ log.debug(message)
78
+
79
+ raise Fluent::ConfigError, message
80
+ end
81
+
82
+ # List all namespaces, record the resourceVersion and return a watcher
83
+ # starting from that resourceVersion.
84
+ def get_namespaces_and_start_watcher
85
+ options = {
86
+ resource_version: '0' # Fetch from API server.
87
+ }
88
+ namespaces = @client.get_namespaces(options)
89
+ namespaces.each do |namespace|
90
+ cache_key = namespace.metadata['uid']
91
+ @namespace_cache[cache_key] = parse_namespace_metadata(namespace)
92
+ @stats.bump(:namespace_cache_host_updates)
35
93
  end
94
+ options[:resource_version] = namespaces.resourceVersion
95
+ watcher = @client.watch_namespaces(options)
96
+ watcher
97
+ end
98
+
99
+ # Process a watcher notice and potentially raise an exception.
100
+ def process_namespace_watcher_notices(watcher)
36
101
  watcher.each do |notice|
37
102
  case notice.type
38
103
  when 'MODIFIED'
@@ -45,16 +110,15 @@ module KubernetesMetadata
45
110
  @stats.bump(:namespace_cache_watch_misses)
46
111
  end
47
112
  when 'DELETED'
48
- # ignore and let age out for cases where
113
+ # ignore and let age out for cases where
49
114
  # deleted but still processing logs
50
115
  @stats.bump(:namespace_cache_watch_deletes_ignored)
51
116
  else
52
117
  # Don't pay attention to creations, since the created namespace may not
53
- # be used by any pod on this node.
118
+ # be used by any namespace on this node.
54
119
  @stats.bump(:namespace_cache_watch_ignored)
55
120
  end
56
121
  end
57
122
  end
58
-
59
123
  end
60
124
  end
@@ -23,29 +23,84 @@ module KubernetesMetadata
23
23
 
24
24
  include ::KubernetesMetadata::Common
25
25
 
26
- def start_pod_watch
27
- begin
28
- options = {
29
- resource_version: '0' # Fetch from API server.
30
- }
31
- if ENV['K8S_NODE_NAME']
32
- options[:field_selector] = 'spec.nodeName=' + ENV['K8S_NODE_NAME']
33
- end
34
- pods = @client.get_pods(options)
35
- pods.each do |pod|
36
- cache_key = pod.metadata['uid']
37
- @cache[cache_key] = parse_pod_metadata(pod)
38
- @stats.bump(:pod_cache_host_updates)
26
+ def set_up_pod_thread
27
+ # Any failures / exceptions in the initial setup should raise
28
+ # Fluent:ConfigError, so that users can inspect potential errors in
29
+ # the configuration.
30
+ pod_watcher = start_pod_watch
31
+ Thread.current[:pod_watch_retry_backoff_interval] = @watch_retry_interval
32
+ Thread.current[:pod_watch_retry_count] = 0
33
+
34
+ # Any failures / exceptions in the followup watcher notice
35
+ # processing will be swallowed and retried. These failures /
36
+ # exceptions could be caused by Kubernetes API being temporarily
37
+ # down. We assume the configuration is correct at this point.
38
+ while thread_current_running?
39
+ begin
40
+ pod_watcher ||= get_pods_and_start_watcher
41
+ process_pod_watcher_notices(pod_watcher)
42
+ rescue Exception => e
43
+ @stats.bump(:pod_watch_failures)
44
+ if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
45
+ # Instead of raising exceptions and crashing Fluentd, swallow
46
+ # the exception and reset the watcher.
47
+ log.info(
48
+ "Exception encountered parsing pod watch event. The " \
49
+ "connection might have been closed. Sleeping for " \
50
+ "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
51
+ "seconds and resetting the pod watcher.", e)
52
+ sleep(Thread.current[:pod_watch_retry_backoff_interval])
53
+ Thread.current[:pod_watch_retry_count] += 1
54
+ Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
55
+ pod_watcher = nil
56
+ else
57
+ # Since retries failed for many times, log as errors instead
58
+ # of info and raise exceptions and trigger Fluentd to restart.
59
+ message =
60
+ "Exception encountered parsing pod watch event. The " \
61
+ "connection might have been closed. Retried " \
62
+ "#{@watch_retry_max_times} times yet still failing. Restarting."
63
+ log.error(message, e)
64
+ raise Fluent::UnrecoverableError.new(message)
65
+ end
39
66
  end
40
- options[:resource_version] = pods.resourceVersion
41
- watcher = @client.watch_pods(options)
42
- rescue Exception => e
43
- message = "Exception encountered fetching metadata from Kubernetes API endpoint: #{e.message}"
44
- message += " (#{e.response})" if e.respond_to?(:response)
67
+ end
68
+ end
45
69
 
46
- raise Fluent::ConfigError, message
70
+ def start_pod_watch
71
+ get_pods_and_start_watcher
72
+ rescue Exception => e
73
+ message = "start_pod_watch: Exception encountered setting up pod watch " \
74
+ "from Kubernetes API #{@apiVersion} endpoint " \
75
+ "#{@kubernetes_url}: #{e.message}"
76
+ message += " (#{e.response})" if e.respond_to?(:response)
77
+ log.debug(message)
78
+
79
+ raise Fluent::ConfigError, message
80
+ end
81
+
82
+ # List all pods, record the resourceVersion and return a watcher starting
83
+ # from that resourceVersion.
84
+ def get_pods_and_start_watcher
85
+ options = {
86
+ resource_version: '0' # Fetch from API server.
87
+ }
88
+ if ENV['K8S_NODE_NAME']
89
+ options[:field_selector] = 'spec.nodeName=' + ENV['K8S_NODE_NAME']
90
+ end
91
+ pods = @client.get_pods(options)
92
+ pods.each do |pod|
93
+ cache_key = pod.metadata['uid']
94
+ @cache[cache_key] = parse_pod_metadata(pod)
95
+ @stats.bump(:pod_cache_host_updates)
47
96
  end
97
+ options[:resource_version] = pods.resourceVersion
98
+ watcher = @client.watch_pods(options)
99
+ watcher
100
+ end
48
101
 
102
+ # Process a watcher notice and potentially raise an exception.
103
+ def process_pod_watcher_notices(watcher)
49
104
  watcher.each do |notice|
50
105
  case notice.type
51
106
  when 'MODIFIED'
@@ -25,6 +25,24 @@ class WatchNamespacesTestTest < WatchTest
25
25
  include KubernetesMetadata::WatchNamespaces
26
26
 
27
27
  setup do
28
+ @initial = Kubeclient::Common::EntityList.new(
29
+ 'NamespaceList',
30
+ '123',
31
+ [
32
+ Kubeclient::Resource.new({
33
+ 'metadata' => {
34
+ 'name' => 'initial',
35
+ 'uid' => 'initial_uid'
36
+ }
37
+ }),
38
+ Kubeclient::Resource.new({
39
+ 'metadata' => {
40
+ 'name' => 'modified',
41
+ 'uid' => 'modified_uid'
42
+ }
43
+ })
44
+ ])
45
+
28
46
  @created = OpenStruct.new(
29
47
  type: 'CREATED',
30
48
  object: {
@@ -54,9 +72,31 @@ class WatchNamespacesTestTest < WatchTest
54
72
  )
55
73
  end
56
74
 
75
+ test 'namespace list caches namespaces' do
76
+ @client.stub :get_namespaces, @initial do
77
+ process_namespace_watcher_notices(start_namespace_watch)
78
+ assert_equal(true, @namespace_cache.key?('initial_uid'))
79
+ assert_equal(true, @namespace_cache.key?('modified_uid'))
80
+ assert_equal(2, @stats[:namespace_cache_host_updates])
81
+ end
82
+ end
83
+
84
+ test 'namespace list caches namespaces and watch updates' do
85
+ orig_env_val = ENV['K8S_NODE_NAME']
86
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
87
+ @client.stub :get_namespaces, @initial do
88
+ @client.stub :watch_namespaces, [@modified] do
89
+ process_namespace_watcher_notices(start_namespace_watch)
90
+ assert_equal(2, @stats[:namespace_cache_host_updates])
91
+ assert_equal(1, @stats[:namespace_cache_watch_updates])
92
+ end
93
+ end
94
+ ENV['K8S_NODE_NAME'] = orig_env_val
95
+ end
96
+
57
97
  test 'namespace watch ignores CREATED' do
58
98
  @client.stub :watch_namespaces, [@created] do
59
- start_namespace_watch
99
+ process_namespace_watcher_notices(start_namespace_watch)
60
100
  assert_equal(false, @namespace_cache.key?('created_uid'))
61
101
  assert_equal(1, @stats[:namespace_cache_watch_ignored])
62
102
  end
@@ -64,7 +104,7 @@ class WatchNamespacesTestTest < WatchTest
64
104
 
65
105
  test 'namespace watch ignores MODIFIED when info not in cache' do
66
106
  @client.stub :watch_namespaces, [@modified] do
67
- start_namespace_watch
107
+ process_namespace_watcher_notices(start_namespace_watch)
68
108
  assert_equal(false, @namespace_cache.key?('modified_uid'))
69
109
  assert_equal(1, @stats[:namespace_cache_watch_misses])
70
110
  end
@@ -73,7 +113,7 @@ class WatchNamespacesTestTest < WatchTest
73
113
  test 'namespace watch updates cache when MODIFIED is received and info is cached' do
74
114
  @namespace_cache['modified_uid'] = {}
75
115
  @client.stub :watch_namespaces, [@modified] do
76
- start_namespace_watch
116
+ process_namespace_watcher_notices(start_namespace_watch)
77
117
  assert_equal(true, @namespace_cache.key?('modified_uid'))
78
118
  assert_equal(1, @stats[:namespace_cache_watch_updates])
79
119
  end
@@ -82,10 +122,22 @@ class WatchNamespacesTestTest < WatchTest
82
122
  test 'namespace watch ignores DELETED' do
83
123
  @namespace_cache['deleted_uid'] = {}
84
124
  @client.stub :watch_namespaces, [@deleted] do
85
- start_namespace_watch
125
+ process_namespace_watcher_notices(start_namespace_watch)
86
126
  assert_equal(true, @namespace_cache.key?('deleted_uid'))
87
127
  assert_equal(1, @stats[:namespace_cache_watch_deletes_ignored])
88
128
  end
89
129
  end
90
130
 
131
+ test 'namespace watch retries when exceptions are encountered' do
132
+ @client.stub :get_namespaces, @initial do
133
+ @client.stub :watch_namespaces, [[@created, @exception_raised]] do
134
+ assert_raise Fluent::UnrecoverableError do
135
+ set_up_namespace_thread
136
+ end
137
+ assert_equal(3, @stats[:namespace_watch_failures])
138
+ assert_equal(2, Thread.current[:namespace_watch_retry_count])
139
+ assert_equal(4, Thread.current[:namespace_watch_retry_backoff_interval])
140
+ end
141
+ end
142
+ end
91
143
  end
@@ -142,7 +142,7 @@ class DefaultPodWatchStrategyTest < WatchTest
142
142
  orig_env_val = ENV['K8S_NODE_NAME']
143
143
  ENV['K8S_NODE_NAME'] = 'aNodeName'
144
144
  @client.stub :get_pods, @initial do
145
- start_pod_watch
145
+ process_pod_watcher_notices(start_pod_watch)
146
146
  assert_equal(true, @cache.key?('initial_uid'))
147
147
  assert_equal(true, @cache.key?('modified_uid'))
148
148
  assert_equal(2, @stats[:pod_cache_host_updates])
@@ -155,7 +155,7 @@ class DefaultPodWatchStrategyTest < WatchTest
155
155
  ENV['K8S_NODE_NAME'] = 'aNodeName'
156
156
  @client.stub :get_pods, @initial do
157
157
  @client.stub :watch_pods, [@modified] do
158
- start_pod_watch
158
+ process_pod_watcher_notices(start_pod_watch)
159
159
  assert_equal(2, @stats[:pod_cache_host_updates])
160
160
  assert_equal(1, @stats[:pod_cache_watch_updates])
161
161
  end
@@ -166,7 +166,7 @@ class DefaultPodWatchStrategyTest < WatchTest
166
166
  test 'pod watch notice ignores CREATED' do
167
167
  @client.stub :get_pods, @initial do
168
168
  @client.stub :watch_pods, [@created] do
169
- start_pod_watch
169
+ process_pod_watcher_notices(start_pod_watch)
170
170
  assert_equal(false, @cache.key?('created_uid'))
171
171
  assert_equal(1, @stats[:pod_cache_watch_ignored])
172
172
  end
@@ -175,7 +175,7 @@ class DefaultPodWatchStrategyTest < WatchTest
175
175
 
176
176
  test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
177
177
  @client.stub :watch_pods, [@modified] do
178
- start_pod_watch
178
+ process_pod_watcher_notices(start_pod_watch)
179
179
  assert_equal(false, @cache.key?('modified_uid'))
180
180
  assert_equal(1, @stats[:pod_cache_watch_misses])
181
181
  end
@@ -185,7 +185,7 @@ class DefaultPodWatchStrategyTest < WatchTest
185
185
  orig_env_val = ENV['K8S_NODE_NAME']
186
186
  ENV['K8S_NODE_NAME'] = 'aNodeName'
187
187
  @client.stub :watch_pods, [@modified] do
188
- start_pod_watch
188
+ process_pod_watcher_notices(start_pod_watch)
189
189
  assert_equal(true, @cache.key?('modified_uid'))
190
190
  assert_equal(1, @stats[:pod_cache_host_updates])
191
191
  end
@@ -195,7 +195,7 @@ class DefaultPodWatchStrategyTest < WatchTest
195
195
  test 'pod watch notice is updated when MODIFIED is received' do
196
196
  @cache['modified_uid'] = {}
197
197
  @client.stub :watch_pods, [@modified] do
198
- start_pod_watch
198
+ process_pod_watcher_notices(start_pod_watch)
199
199
  assert_equal(true, @cache.key?('modified_uid'))
200
200
  assert_equal(1, @stats[:pod_cache_watch_updates])
201
201
  end
@@ -204,10 +204,22 @@ class DefaultPodWatchStrategyTest < WatchTest
204
204
  test 'pod watch notice is ignored when delete is received' do
205
205
  @cache['deleted_uid'] = {}
206
206
  @client.stub :watch_pods, [@deleted] do
207
- start_pod_watch
207
+ process_pod_watcher_notices(start_pod_watch)
208
208
  assert_equal(true, @cache.key?('deleted_uid'))
209
209
  assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
210
210
  end
211
211
  end
212
212
 
213
+ test 'pod watch retries when exceptions are encountered' do
214
+ @client.stub :get_pods, @initial do
215
+ @client.stub :watch_pods, [[@created, @exception_raised]] do
216
+ assert_raise Fluent::UnrecoverableError do
217
+ set_up_pod_thread
218
+ end
219
+ assert_equal(3, @stats[:pod_watch_failures])
220
+ assert_equal(2, Thread.current[:pod_watch_retry_count])
221
+ assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
222
+ end
223
+ end
224
+ end
213
225
  end
@@ -20,38 +20,54 @@ require_relative '../helper'
20
20
  require 'ostruct'
21
21
 
22
22
  class WatchTest < Test::Unit::TestCase
23
-
24
- setup do
25
- @annotations_regexps = []
26
- @namespace_cache = {}
27
- @cache = {}
28
- @stats = KubernetesMetadata::Stats.new
29
- @client = OpenStruct.new
30
- def @client.resourceVersion
31
- '12345'
32
- end
33
- def @client.watch_pods(options = {})
34
- []
35
- end
36
- def @client.watch_namespaces(options = {})
37
- []
38
- end
39
- def @client.get_namespaces(options = {})
40
- self
41
- end
42
- def @client.get_pods(options = {})
43
- self
44
- end
45
- end
46
23
 
47
- def watcher=(value)
24
+ def thread_current_running?
25
+ true
26
+ end
27
+
28
+ setup do
29
+ @annotations_regexps = []
30
+ @namespace_cache = {}
31
+ @watch_retry_max_times = 2
32
+ @watch_retry_interval = 1
33
+ @watch_retry_exponential_backoff_base = 2
34
+ @cache = {}
35
+ @stats = KubernetesMetadata::Stats.new
36
+
37
+ @client = OpenStruct.new
38
+ def @client.resourceVersion
39
+ '12345'
40
+ end
41
+ def @client.watch_pods(options = {})
42
+ []
43
+ end
44
+ def @client.watch_namespaces(options = {})
45
+ []
46
+ end
47
+ def @client.get_namespaces(options = {})
48
+ self
49
+ end
50
+ def @client.get_pods(options = {})
51
+ self
48
52
  end
49
53
 
50
- def log
51
- logger = {}
52
- def logger.debug(message)
53
- end
54
- logger
54
+ @exception_raised = OpenStruct.new
55
+ def @exception_raised.each
56
+ raise Exception
55
57
  end
58
+ end
59
+
60
+ def watcher=(value)
61
+ end
56
62
 
63
+ def log
64
+ logger = {}
65
+ def logger.debug(message)
66
+ end
67
+ def logger.info(message, error)
68
+ end
69
+ def logger.error(message, error)
70
+ end
71
+ logger
72
+ end
57
73
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: fluent-plugin-kubernetes_metadata_filter
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.4.1
4
+ version: 2.4.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Jimmi Dyson
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2019-12-09 00:00:00.000000000 Z
11
+ date: 2020-01-24 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: fluentd
@@ -64,14 +64,14 @@ dependencies:
64
64
  requirements:
65
65
  - - "~>"
66
66
  - !ruby/object:Gem::Version
67
- version: 2.0.2
67
+ version: '2.0'
68
68
  type: :development
69
69
  prerelease: false
70
70
  version_requirements: !ruby/object:Gem::Requirement
71
71
  requirements:
72
72
  - - "~>"
73
73
  - !ruby/object:Gem::Version
74
- version: 2.0.2
74
+ version: '2.0'
75
75
  - !ruby/object:Gem::Dependency
76
76
  name: rake
77
77
  requirement: !ruby/object:Gem::Requirement
@@ -258,7 +258,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
258
258
  - !ruby/object:Gem::Version
259
259
  version: '0'
260
260
  requirements: []
261
- rubygems_version: 3.0.3
261
+ rubygems_version: 3.0.6
262
262
  signing_key:
263
263
  specification_version: 4
264
264
  summary: Fluentd filter plugin to add Kubernetes metadata