fluent-plugin-kubernetes_metadata_filter 2.1.0 → 3.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +53 -0
  3. data/.gitignore +0 -2
  4. data/.rubocop.yml +57 -0
  5. data/Gemfile +4 -2
  6. data/Gemfile.lock +159 -0
  7. data/README.md +49 -60
  8. data/Rakefile +15 -11
  9. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/cpu.png +0 -0
  10. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/latency.png +0 -0
  11. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/loss.png +0 -0
  12. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/mem.png +0 -0
  13. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/readme.md +88 -0
  14. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/results.html +127 -0
  15. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/cpu.png +0 -0
  16. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/latency.png +0 -0
  17. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/loss.png +0 -0
  18. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/mem.png +0 -0
  19. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/readme.md +97 -0
  20. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/results.html +136 -0
  21. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/cpu.png +0 -0
  22. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/latency.png +0 -0
  23. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/loss.png +0 -0
  24. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/mem.png +0 -0
  25. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/readme.md +97 -0
  26. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/results.html +136 -0
  27. data/fluent-plugin-kubernetes_metadata_filter.gemspec +25 -28
  28. data/lib/fluent/plugin/filter_kubernetes_metadata.rb +207 -187
  29. data/lib/fluent/plugin/kubernetes_metadata_cache_strategy.rb +30 -23
  30. data/lib/fluent/plugin/kubernetes_metadata_common.rb +66 -24
  31. data/lib/fluent/plugin/kubernetes_metadata_stats.rb +21 -5
  32. data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb +68 -0
  33. data/lib/fluent/plugin/kubernetes_metadata_util.rb +33 -0
  34. data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb +154 -27
  35. data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb +171 -29
  36. data/release_notes.md +42 -0
  37. data/test/cassettes/kubernetes_docker_metadata_annotations.yml +0 -34
  38. data/test/cassettes/{kubernetes_docker_metadata_dotted_labels.yml → kubernetes_docker_metadata_dotted_slashed_labels.yml} +0 -34
  39. data/test/cassettes/kubernetes_get_api_v1.yml +193 -0
  40. data/test/cassettes/kubernetes_get_api_v1_using_token.yml +195 -0
  41. data/test/cassettes/kubernetes_get_namespace_default.yml +72 -0
  42. data/test/cassettes/kubernetes_get_namespace_default_using_token.yml +71 -0
  43. data/test/cassettes/{kubernetes_docker_metadata.yml → kubernetes_get_pod.yml} +0 -82
  44. data/test/cassettes/kubernetes_get_pod_container_init.yml +145 -0
  45. data/test/cassettes/{metadata_with_namespace_id.yml → kubernetes_get_pod_using_token.yml} +2 -130
  46. data/test/cassettes/{kubernetes_docker_metadata_using_bearer_token.yml → kubernetes_get_pod_with_ownerrefs.yml} +17 -109
  47. data/test/cassettes/metadata_from_tag_and_journald_fields.yml +153 -0
  48. data/test/cassettes/metadata_from_tag_journald_and_kubernetes_fields.yml +285 -0
  49. data/test/cassettes/{non_kubernetes_docker_metadata.yml → valid_kubernetes_api_server_using_token.yml} +4 -44
  50. data/test/helper.rb +20 -2
  51. data/test/plugin/test_cache_stats.rb +10 -13
  52. data/test/plugin/test_cache_strategy.rb +158 -160
  53. data/test/plugin/test_filter_kubernetes_metadata.rb +451 -314
  54. data/test/plugin/test_watch_namespaces.rb +209 -55
  55. data/test/plugin/test_watch_pods.rb +302 -71
  56. data/test/plugin/watch_test.rb +52 -33
  57. metadata +91 -70
  58. data/circle.yml +0 -17
data/lib/fluent/plugin/kubernetes_metadata_common.rb

@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -18,6 +20,11 @@
 #
 module KubernetesMetadata
   module Common
+    class GoneError < StandardError
+      def initialize(msg = '410 Gone')
+        super
+      end
+    end
 
     def match_annotations(annotations)
       result = {}
@@ -32,43 +39,79 @@ module KubernetesMetadata
     end
 
     def parse_namespace_metadata(namespace_object)
-      labels = syms_to_strs(namespace_object['metadata']['labels'].to_h)
-      annotations = match_annotations(syms_to_strs(namespace_object['metadata']['annotations'].to_h))
-      if @de_dot
-        self.de_dot!(labels)
-        self.de_dot!(annotations)
-      end
+      labels = ''
+      labels = syms_to_strs(namespace_object[:metadata][:labels].to_h) unless (@skip_labels || @skip_namespace_labels)
+
+      annotations = match_annotations(syms_to_strs(namespace_object[:metadata][:annotations].to_h))
+
       kubernetes_metadata = {
-        'namespace_id' => namespace_object['metadata']['uid'],
-        'creation_timestamp' => namespace_object['metadata']['creationTimestamp']
+        'namespace_id' => namespace_object[:metadata][:uid],
+        'creation_timestamp' => namespace_object[:metadata][:creationTimestamp]
       }
       kubernetes_metadata['namespace_labels'] = labels unless labels.empty?
       kubernetes_metadata['namespace_annotations'] = annotations unless annotations.empty?
-      return kubernetes_metadata
+      kubernetes_metadata
     end
 
     def parse_pod_metadata(pod_object)
-      labels = syms_to_strs(pod_object['metadata']['labels'].to_h)
-      annotations = match_annotations(syms_to_strs(pod_object['metadata']['annotations'].to_h))
-      if @de_dot
-        self.de_dot!(labels)
-        self.de_dot!(annotations)
+      labels = ''
+      labels = syms_to_strs(pod_object[:metadata][:labels].to_h) unless (@skip_labels || @skip_pod_labels)
+
+      annotations = match_annotations(syms_to_strs(pod_object[:metadata][:annotations].to_h))
+
+      # collect container information
+      container_meta = {}
+      begin
+        pod_object[:status][:containerStatuses].each do |container_status|
+          container_id = (container_status[:containerID]||"").sub(%r{^[-_a-zA-Z0-9]+://}, '')
+          key = container_status[:name]
+          container_meta[key] = if @skip_container_metadata
+                                  {
+                                    'name' => container_status[:name]
+                                  }
+                                else
+                                  {
+                                    'name' => container_status[:name],
+                                    'image' => container_status[:image],
+                                    'image_id' => container_status[:imageID],
+                                    :containerID => container_id
+                                  }
+                                end
+        end if pod_object[:status] && pod_object[:status][:containerStatuses]
+      rescue StandardError=>e
+        log.warn("parsing container meta information failed for: #{pod_object[:metadata][:namespace]}/#{pod_object[:metadata][:name]}: #{e}")
       end
+
+      ownerrefs_meta = []
+      begin
+        pod_object[:metadata][:ownerReferences].each do |owner_reference|
+          ownerrefs_meta.append({
+                                  'kind' => owner_reference[:kind],
+                                  'name' => owner_reference[:name]
+                                })
+        end
+      rescue StandardError => e
+        log.warn("parsing ownerrefs meta information failed for: #{pod_object[:metadata][:namespace]}/#{pod_object[:metadata][:name]}: #{e}")
+      end if @include_ownerrefs_metadata && pod_object[:metadata][:ownerReferences]
+
       kubernetes_metadata = {
-        'namespace_name' => pod_object['metadata']['namespace'],
-        'pod_id' => pod_object['metadata']['uid'],
-        'pod_name' => pod_object['metadata']['name'],
-        'labels' => labels,
-        'host' => pod_object['spec']['nodeName'],
-        'master_url' => @kubernetes_url
-      }
+        'namespace_name' => pod_object[:metadata][:namespace],
+        'pod_id' => pod_object[:metadata][:uid],
+        'pod_name' => pod_object[:metadata][:name],
+        'pod_ip' => pod_object[:status][:podIP],
+        'containers' => syms_to_strs(container_meta),
+        'host' => pod_object[:spec][:nodeName],
+        'ownerrefs' => (ownerrefs_meta if @include_ownerrefs_metadata)
+      }.compact
       kubernetes_metadata['annotations'] = annotations unless annotations.empty?
-      return kubernetes_metadata
+      kubernetes_metadata['labels'] = labels unless labels.empty?
+      kubernetes_metadata['master_url'] = @kubernetes_url unless @skip_master_url
+      kubernetes_metadata
    end

     def syms_to_strs(hsh)
       newhsh = {}
-      hsh.each_pair do |kk,vv|
+      hsh.each_pair do |kk, vv|
         if vv.is_a?(Hash)
           vv = syms_to_strs(vv)
         end
@@ -80,6 +123,5 @@ module KubernetesMetadata
       end
       newhsh
     end
-
   end
 end
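To make the new pod-record shape concrete, here is a small self-contained sketch that mirrors (but does not call) the rewritten parse_pod_metadata; the pod hash, names, and IDs below are invented for illustration:

```ruby
# Illustrative only: approximates the record the new parse_pod_metadata emits
# for a symbol-keyed pod hash. All values here are hypothetical.
pod = {
  metadata: { namespace: 'default', uid: 'abc-123', name: 'mypod-0' },
  spec:     { nodeName: 'node-1' },
  status:   {
    podIP: '10.0.0.5',
    containerStatuses: [
      { name: 'app', image: 'busybox', imageID: 'sha256:deadbeef',
        containerID: 'containerd://0123abcd' }
    ]
  }
}

containers = pod[:status][:containerStatuses].to_h do |cs|
  [cs[:name], {
    'name'        => cs[:name],
    'image'       => cs[:image],
    'image_id'    => cs[:imageID],
    # the runtime prefix (docker://, containerd://, cri-o://) is stripped
    'containerID' => cs[:containerID].sub(%r{^[-_a-zA-Z0-9]+://}, '')
  }]
end

pp({
  'namespace_name' => pod[:metadata][:namespace],
  'pod_id'         => pod[:metadata][:uid],
  'pod_name'       => pod[:metadata][:name],
  'pod_ip'         => pod[:status][:podIP],
  'containers'     => containers,
  'host'           => pod[:spec][:nodeName]
}.compact)
```

Compared with 2.1.0, labels and master_url are now appended conditionally, while pod_ip, containers, and (optionally) ownerrefs are new fields.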
data/lib/fluent/plugin/kubernetes_metadata_stats.rb

@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -19,17 +21,16 @@
 require 'lru_redux'
 module KubernetesMetadata
   class Stats
-
     def initialize
       @stats = ::LruRedux::TTL::ThreadSafeCache.new(1000, 3600)
     end
 
     def bump(key)
-      @stats[key] = @stats.getset(key) { 0 } + 1
+      @stats[key] = @stats.getset(key) { 0 } + 1
     end
 
     def set(key, value)
-      @stats[key] = value
+      @stats[key] = value
     end
 
     def [](key)
@@ -37,10 +38,25 @@ module KubernetesMetadata
     end
 
     def to_s
-      "stats - " + [].tap do |a|
-        @stats.each {|k,v| a << "#{k.to_s}: #{v}"}
+      'stats - ' + [].tap do |a|
+        @stats.each { |k, v| a << "#{k}: #{v}" }
       end.join(', ')
     end
+  end
+  class NoOpStats
+    def initialize
+    end
+
+    def bump(key)
+    end
 
+    def set(key, value)
+    end
+
+    def [](key)
+    end
+
+    def to_s
+    end
   end
 end
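The new NoOpStats class is a null object with the same interface as Stats, so callers can swap it in when statistics are not wanted. A usage sketch, assuming the gem (and its lru_redux dependency) is installed; the build_stats helper and its flag are hypothetical, the real filter decides this from its own configuration:

```ruby
require 'fluent/plugin/kubernetes_metadata_stats' # from this gem; pulls in lru_redux

# Hypothetical helper: choose the real collector or the null object.
def build_stats(collect_stats)
  collect_stats ? KubernetesMetadata::Stats.new : KubernetesMetadata::NoOpStats.new
end

stats = build_stats(true)
stats.bump(:pod_cache_miss)        # increments a counter held in a TTL LRU cache
stats.bump(:pod_cache_miss)
puts stats                         # => stats - pod_cache_miss: 2

noop = build_stats(false)
noop.bump(:pod_cache_miss)         # every method is a no-op
puts noop[:pod_cache_miss].inspect # => nil
```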
data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb (new file)

@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+#
+# Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
+# Kubernetes metadata
+#
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+require 'kubeclient'
+
+module KubernetesMetadata
+  module TestApiAdapter
+
+    def api_valid?
+      true
+    end
+    def get_namespace(namespace_name, unused, options)
+      return {
+        metadata: {
+          name: namespace_name,
+          uid: namespace_name + 'uuid',
+          labels: {
+            foo_ns: 'bar_ns'
+          }
+        }
+      }
+    end
+
+    def get_pod(pod_name, namespace_name, options)
+      return {
+        metadata: {
+          name: pod_name,
+          namespace: namespace_name,
+          uid: namespace_name + namespace_name + "uuid",
+          labels: {
+            foo: 'bar'
+          }
+        },
+        spec: {
+          nodeName: 'aNodeName',
+          containers: [{
+            name: 'foo',
+            image: 'bar'
+          }, {
+            name: 'bar',
+            image: 'foo'
+          }]
+        },
+        status: {
+          podIP: '172.17.0.8'
+        }
+      }
+    end
+
+  end
+end
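The adapter stubs out the kubeclient calls the filter makes (api_valid?, get_namespace, get_pod) with canned, symbol-keyed responses. One way it could be used in a test, shown only as a sketch (the test suite may wire it differently, and the endpoint below is a placeholder):

```ruby
require 'kubeclient'
require 'fluent/plugin/kubernetes_metadata_test_api_adapter'

# Build a client that never needs to be reachable, then override its API
# calls with the adapter's canned responses.
client = Kubeclient::Client.new('https://localhost:8443/api', 'v1')
client.extend(KubernetesMetadata::TestApiAdapter)

client.api_valid?                            # => true, no network call
pod = client.get_pod('mypod', 'default', {}) # canned pod hash
puts pod[:spec][:nodeName]                   # => aNodeName
```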
data/lib/fluent/plugin/kubernetes_metadata_util.rb (new file)

@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+#
+# Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
+# Kubernetes metadata
+#
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#https://stackoverflow.com/questions/5622435/how-do-i-convert-a-ruby-class-name-to-a-underscore-delimited-symbol
+class String
+  def underscore
+    word = self.dup
+    word.gsub!(/::/, '_')
+    word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2')
+    word.gsub!(/([a-z\d])([A-Z])/,'\1_\2')
+    word.tr!("-", "_")
+    word.downcase!
+    word
+  end
+end
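The String#underscore monkey patch (borrowed from the linked Stack Overflow answer) turns CamelCase and namespaced constant names into snake_case. A few examples of what the shown regexes produce:

```ruby
require 'fluent/plugin/kubernetes_metadata_util' # defines String#underscore

'WatchNamespaces'.underscore      # => "watch_namespaces"
'Kubeclient::Resource'.underscore # => "kubeclient_resource"
'HTTPError'.underscore            # => "http_error"
```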
data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb

@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -16,45 +18,170 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+# TODO: this is mostly copy-paste from kubernetes_metadata_watch_pods.rb unify them
 require_relative 'kubernetes_metadata_common'
 
 module KubernetesMetadata
   module WatchNamespaces
-
     include ::KubernetesMetadata::Common
 
+    def set_up_namespace_thread
+      # Any failures / exceptions in the initial setup should raise
+      # Fluent:ConfigError, so that users can inspect potential errors in
+      # the configuration.
+      namespace_watcher = start_namespace_watch
+      Thread.current[:namespace_watch_retry_backoff_interval] = @watch_retry_interval
+      Thread.current[:namespace_watch_retry_count] = 0
+
+      # Any failures / exceptions in the followup watcher notice
+      # processing will be swallowed and retried. These failures /
+      # exceptions could be caused by Kubernetes API being temporarily
+      # down. We assume the configuration is correct at this point.
+      loop do
+        namespace_watcher ||= get_namespaces_and_start_watcher
+        process_namespace_watcher_notices(namespace_watcher)
+      rescue GoneError => e
+        # Expected error. Quietly go back through the loop in order to
+        # start watching from the latest resource versions
+        @stats.bump(:namespace_watch_gone_errors)
+        log.info('410 Gone encountered. Restarting namespace watch to reset resource versions.', e)
+        namespace_watcher = nil
+      rescue KubeException => e
+        if e.error_code == 401
+          # recreate client to refresh token
+          log.info("Encountered '401 Unauthorized' exception in watch, recreating client to refresh token")
+          create_client()
+          namespace_watcher = nil
+        else
+          # treat all other errors the same as StandardError, log, swallow and reset
+          @stats.bump(:namespace_watch_failures)
+          if Thread.current[:namespace_watch_retry_count] < @watch_retry_max_times
+            # Instead of raising exceptions and crashing Fluentd, swallow
+            # the exception and reset the watcher.
+            log.info(
+              'Exception encountered parsing namespace watch event. ' \
+              'The connection might have been closed. Sleeping for ' \
+              "#{Thread.current[:namespace_watch_retry_backoff_interval]} " \
+              'seconds and resetting the namespace watcher.', e
+            )
+            sleep(Thread.current[:namespace_watch_retry_backoff_interval])
+            Thread.current[:namespace_watch_retry_count] += 1
+            Thread.current[:namespace_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
+            namespace_watcher = nil
+          else
+            # Since retries failed for many times, log as errors instead
+            # of info and raise exceptions and trigger Fluentd to restart.
+            message =
+              'Exception encountered parsing namespace watch event. The ' \
+              'connection might have been closed. Retried ' \
+              "#{@watch_retry_max_times} times yet still failing. Restarting."
+            log.error(message, e)
+            raise Fluent::UnrecoverableError, message
+          end
+        end
+      rescue StandardError => e
+        @stats.bump(:namespace_watch_failures)
+        if Thread.current[:namespace_watch_retry_count] < @watch_retry_max_times
+          # Instead of raising exceptions and crashing Fluentd, swallow
+          # the exception and reset the watcher.
+          log.info(
+            'Exception encountered parsing namespace watch event. ' \
+            'The connection might have been closed. Sleeping for ' \
+            "#{Thread.current[:namespace_watch_retry_backoff_interval]} " \
+            'seconds and resetting the namespace watcher.', e
+          )
+          sleep(Thread.current[:namespace_watch_retry_backoff_interval])
+          Thread.current[:namespace_watch_retry_count] += 1
+          Thread.current[:namespace_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
+          namespace_watcher = nil
+        else
+          # Since retries failed for many times, log as errors instead
+          # of info and raise exceptions and trigger Fluentd to restart.
+          message =
+            'Exception encountered parsing namespace watch event. The ' \
+            'connection might have been closed. Retried ' \
+            "#{@watch_retry_max_times} times yet still failing. Restarting."
+          log.error(message, e)
+          raise Fluent::UnrecoverableError, message
+        end
+      end
+    end
+
     def start_namespace_watch
-      begin
-        resource_version = @client.get_namespaces.resourceVersion
-        watcher = @client.watch_namespaces(resource_version)
-      rescue Exception=>e
-        message = "start_namespace_watch: Exception encountered setting up namespace watch from Kubernetes API #{@apiVersion} endpoint #{@kubernetes_url}: #{e.message}"
-        message += " (#{e.response})" if e.respond_to?(:response)
-        log.debug(message)
-        raise Fluent::ConfigError, message
+      get_namespaces_and_start_watcher
+    rescue StandardError => e
+      message = 'start_namespace_watch: Exception encountered setting up ' \
+                "namespace watch from Kubernetes API #{@apiVersion} endpoint " \
+                "#{@kubernetes_url}: #{e.message}"
+      message += " (#{e.response})" if e.respond_to?(:response)
+      log.debug(message)
+
+      raise Fluent::ConfigError, message
+    end
+
+    # List all namespaces, record the resourceVersion and return a watcher
+    # starting from that resourceVersion.
+    def get_namespaces_and_start_watcher
+      options = {
+        resource_version: '0' # Fetch from API server cache instead of etcd quorum read
+      }
+      namespaces = @client.get_namespaces(options)
+      namespaces[:items].each do |namespace|
+        cache_key = namespace[:metadata][:uid]
+        @namespace_cache[cache_key] = parse_namespace_metadata(namespace)
+        @stats.bump(:namespace_cache_host_updates)
       end
+
+      # continue watching from most recent resourceVersion
+      options[:resource_version] = namespaces[:metadata][:resourceVersion]
+
+      watcher = @client.watch_namespaces(options)
+      reset_namespace_watch_retry_stats
+      watcher
+    end
+
+    # Reset namespace watch retry count and backoff interval as there is a
+    # successful watch notice.
+    def reset_namespace_watch_retry_stats
+      Thread.current[:namespace_watch_retry_count] = 0
+      Thread.current[:namespace_watch_retry_backoff_interval] = @watch_retry_interval
+    end
+
+    # Process a watcher notice and potentially raise an exception.
+    def process_namespace_watcher_notices(watcher)
       watcher.each do |notice|
-        case notice.type
-        when 'MODIFIED'
-          cache_key = notice.object['metadata']['uid']
-          cached = @namespace_cache[cache_key]
-          if cached
-            @namespace_cache[cache_key] = parse_namespace_metadata(notice.object)
-            @stats.bump(:namespace_cache_watch_updates)
-          else
-            @stats.bump(:namespace_cache_watch_misses)
-          end
-        when 'DELETED'
-          # ignore and let age out for cases where
-          # deleted but still processing logs
-          @stats.bump(:namespace_cache_watch_deletes_ignored)
+        case notice[:type]
+        when 'MODIFIED'
+          reset_namespace_watch_retry_stats
+          cache_key = notice[:object][:metadata][:uid]
+          cached = @namespace_cache[cache_key]
+          if cached
+            @namespace_cache[cache_key] = parse_namespace_metadata(notice[:object])
+            @stats.bump(:namespace_cache_watch_updates)
           else
-          # Don't pay attention to creations, since the created namespace may not
-          # be used by any pod on this node.
-          @stats.bump(:namespace_cache_watch_ignored)
+            @stats.bump(:namespace_cache_watch_misses)
+          end
+        when 'DELETED'
+          reset_namespace_watch_retry_stats
+          # ignore and let age out for cases where
+          # deleted but still processing logs
+          @stats.bump(:namespace_cache_watch_deletes_ignored)
+        when 'ERROR'
+          if notice[:object] && notice[:object][:code] == 410
+            @stats.bump(:namespace_watch_gone_notices)
+            raise GoneError
+          else
+            @stats.bump(:namespace_watch_error_type_notices)
+            message = notice[:object][:message] if notice[:object] && notice[:object][:message]
+            raise "Error while watching namespaces: #{message}"
+          end
+        else
+          reset_namespace_watch_retry_stats
+          # Don't pay attention to creations, since the created namespace may not
+          # be used by any namespace on this node.
+          @stats.bump(:namespace_cache_watch_ignored)
         end
       end
     end
-
   end
 end
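The rewritten watcher replaces the single begin/rescue with a list-then-watch loop: list namespaces from the API server cache (resource_version: '0'), start the watch from the returned resourceVersion, and on a 410 Gone notice re-list to obtain a fresh version instead of crashing. A condensed, standalone sketch of that flow against a bare kubeclient client; the endpoint, the missing auth, and the local GoneSignal class are placeholders, not the plugin's code:

```ruby
require 'kubeclient'

class GoneSignal < StandardError; end # stand-in for KubernetesMetadata::Common::GoneError

client = Kubeclient::Client.new('https://kubernetes.default.svc/api', 'v1')

begin
  # List from the API server cache and remember where the watch should start.
  namespaces = client.get_namespaces(resource_version: '0')
  version = namespaces.resourceVersion

  client.watch_namespaces(resource_version: version).each do |notice|
    # A 410 ERROR notice means the resourceVersion is too old: re-list, re-watch.
    raise GoneSignal if notice.type == 'ERROR' && notice.object && notice.object.code == 410

    # ... update the local namespace cache from notice.object here ...
  end
rescue GoneSignal
  retry
end
```

The plugin layers retry counters, exponential backoff, and 401 token refresh on top of this basic loop, as shown in the diff above.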