fluent-plugin-kubernetes_metadata_filter 2.5.0 → 2.7.0

This diff shows the content changes between publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -16,11 +18,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+# TODO: this is mostly copy-paste from kubernetes_metadata_watch_namespaces.rb unify them
 require_relative 'kubernetes_metadata_common'
 
 module KubernetesMetadata
   module WatchPods
-
     include ::KubernetesMetadata::Common
 
     def set_up_pod_thread
@@ -28,6 +30,7 @@ module KubernetesMetadata
       # Fluent:ConfigError, so that users can inspect potential errors in
       # the configuration.
       pod_watcher = start_pod_watch
+
       Thread.current[:pod_watch_retry_backoff_interval] = @watch_retry_interval
       Thread.current[:pod_watch_retry_count] = 0
 
@@ -35,42 +38,47 @@ module KubernetesMetadata
       # processing will be swallowed and retried. These failures /
       # exceptions could be caused by Kubernetes API being temporarily
       # down. We assume the configuration is correct at this point.
-      while true
-        begin
-          pod_watcher ||= get_pods_and_start_watcher
-          process_pod_watcher_notices(pod_watcher)
-        rescue Exception => e
-          @stats.bump(:pod_watch_failures)
-          if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
-            # Instead of raising exceptions and crashing Fluentd, swallow
-            # the exception and reset the watcher.
-            log.info(
-              "Exception encountered parsing pod watch event. The " \
-              "connection might have been closed. Sleeping for " \
-              "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
-              "seconds and resetting the pod watcher.", e)
-            sleep(Thread.current[:pod_watch_retry_backoff_interval])
-            Thread.current[:pod_watch_retry_count] += 1
-            Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
-            pod_watcher = nil
-          else
-            # Since retries failed for many times, log as errors instead
-            # of info and raise exceptions and trigger Fluentd to restart.
-            message =
-              "Exception encountered parsing pod watch event. The " \
-              "connection might have been closed. Retried " \
-              "#{@watch_retry_max_times} times yet still failing. Restarting."
-            log.error(message, e)
-            raise Fluent::UnrecoverableError.new(message)
-          end
+      loop do
+        pod_watcher ||= get_pods_and_start_watcher
+        process_pod_watcher_notices(pod_watcher)
+      rescue GoneError => e
+        # Expected error. Quietly go back through the loop in order to
+        # start watching from the latest resource versions
+        @stats.bump(:pod_watch_gone_errors)
+        log.info('410 Gone encountered. Restarting pod watch to reset resource versions.', e)
+        pod_watcher = nil
+      rescue StandardError => e
+        @stats.bump(:pod_watch_failures)
+        if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
+          # Instead of raising exceptions and crashing Fluentd, swallow
+          # the exception and reset the watcher.
+          log.info(
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Sleeping for ' \
+            "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
+            'seconds and resetting the pod watcher.', e
+          )
+          sleep(Thread.current[:pod_watch_retry_backoff_interval])
+          Thread.current[:pod_watch_retry_count] += 1
+          Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
+          pod_watcher = nil
+        else
+          # Since retries failed for many times, log as errors instead
+          # of info and raise exceptions and trigger Fluentd to restart.
+          message =
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Retried ' \
+            "#{@watch_retry_max_times} times yet still failing. Restarting."
+          log.error(message, e)
+          raise Fluent::UnrecoverableError, message
         end
       end
     end
 
     def start_pod_watch
       get_pods_and_start_watcher
-    rescue Exception => e
-      message = "start_pod_watch: Exception encountered setting up pod watch " \
+    rescue StandardError => e
+      message = 'start_pod_watch: Exception encountered setting up pod watch ' \
                 "from Kubernetes API #{@apiVersion} endpoint " \
                 "#{@kubernetes_url}: #{e.message}"
       message += " (#{e.response})" if e.respond_to?(:response)
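The rewritten loop is more than a rubocop cleanup: narrowing `rescue Exception` to `rescue StandardError` stops the watcher from swallowing signals and fatal VM errors (`SignalException` and `NoMemoryError` are not `StandardError` descendants), and the `while true`/`begin` nesting collapses into `loop do` with a block-level `rescue`, which Ruby allows in `do`/`end` blocks since 2.5. A minimal, self-contained sketch of the retry-with-backoff shape used here (the constants and the simulated failure are illustrative, not plugin code):

    attempts = 0
    max_retries = 3
    loop do
      attempts += 1
      raise IOError, 'connection closed' if attempts <= 2 # simulate transient watch failures
      puts "watch established after #{attempts} attempts"
      break
    rescue IOError
      raise if attempts > max_retries # exhausted: surface the error and let the process restart
      sleep(0.01 * (2**attempts))     # exponential backoff between reconnect attempts
    end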
@@ -83,19 +91,27 @@ module KubernetesMetadata
     # from that resourceVersion.
     def get_pods_and_start_watcher
       options = {
-        resource_version: '0' # Fetch from API server.
+        resource_version: '0' # Fetch from API server cache instead of etcd quorum read
       }
       if ENV['K8S_NODE_NAME']
         options[:field_selector] = 'spec.nodeName=' + ENV['K8S_NODE_NAME']
       end
-      pods = @client.get_pods(options)
-      pods.each do |pod|
-        cache_key = pod.metadata['uid']
-        @cache[cache_key] = parse_pod_metadata(pod)
-        @stats.bump(:pod_cache_host_updates)
+      if @last_seen_resource_version
+        options[:resource_version] = @last_seen_resource_version
+      else
+        pods = @client.get_pods(options)
+        pods[:items].each do |pod|
+          cache_key = pod[:metadata][:uid]
+          @cache[cache_key] = parse_pod_metadata(pod)
+          @stats.bump(:pod_cache_host_updates)
+        end
+
+        # continue watching from most recent resourceVersion
+        options[:resource_version] = pods[:metadata][:resourceVersion]
       end
-      options[:resource_version] = pods.resourceVersion
+
       watcher = @client.watch_pods(options)
+      reset_pod_watch_retry_stats
       watcher
     end
 
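Two changes in `get_pods_and_start_watcher` carry the semantics: `resource_version: '0'` tells the API server it may answer the initial pod list from its internal cache rather than performing a quorum read against etcd, and once a watch has reported a version, `@last_seen_resource_version` lets a reconnect resume from that point instead of re-listing and re-parsing every pod on the node. A hypothetical standalone sketch of the list-then-watch handshake with kubeclient (the client construction is an assumption for illustration; the plugin configures its own client elsewhere):

    require 'kubeclient'

    # Illustrative client; auth and SSL options omitted.
    client = Kubeclient::Client.new(
      'https://kubernetes.default.svc/api', 'v1',
      as: :parsed_symbolized # plain hashes with symbol keys, matching pods[:items] access
    )

    options = { resource_version: '0' } # '0' = any cached version is acceptable
    pods = client.get_pods(options)
    puts "listed #{pods[:items].size} pods at version #{pods[:metadata][:resourceVersion]}"

    # A watch started from the list's version sees only subsequent changes:
    options[:resource_version] = pods[:metadata][:resourceVersion]
    watcher = client.watch_pods(options)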
@@ -109,34 +125,46 @@ module KubernetesMetadata
     # Process a watcher notice and potentially raise an exception.
     def process_pod_watcher_notices(watcher)
       watcher.each do |notice|
-        case notice.type
-        when 'MODIFIED'
-          reset_pod_watch_retry_stats
-          cache_key = notice.object['metadata']['uid']
-          cached = @cache[cache_key]
-          if cached
-            @cache[cache_key] = parse_pod_metadata(notice.object)
-            @stats.bump(:pod_cache_watch_updates)
-          elsif ENV['K8S_NODE_NAME'] == notice.object['spec']['nodeName'] then
-            @cache[cache_key] = parse_pod_metadata(notice.object)
-            @stats.bump(:pod_cache_host_updates)
-          else
-            @stats.bump(:pod_cache_watch_misses)
-          end
-        when 'DELETED'
-          reset_pod_watch_retry_stats
-          # ignore and let age out for cases where pods
-          # deleted but still processing logs
-          @stats.bump(:pod_cache_watch_delete_ignored)
-        when 'ERROR'
+        # store version we processed to not reprocess it ... do not unset when there is no version in response
+        version = ( # TODO: replace with &.dig once we are on ruby 2.5+
+          notice[:object] && notice[:object][:metadata] && notice[:object][:metadata][:resourceVersion]
+        )
+        @last_seen_resource_version = version if version
+
+        case notice[:type]
+        when 'MODIFIED'
+          reset_pod_watch_retry_stats
+          cache_key = notice.dig(:object, :metadata, :uid)
+          cached = @cache[cache_key]
+          if cached
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_watch_updates)
+          elsif ENV['K8S_NODE_NAME'] == notice[:object][:spec][:nodeName]
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_host_updates)
+          else
+            @stats.bump(:pod_cache_watch_misses)
+          end
+        when 'DELETED'
+          reset_pod_watch_retry_stats
+          # ignore and let age out for cases where pods
+          # deleted but still processing logs
+          @stats.bump(:pod_cache_watch_delete_ignored)
+        when 'ERROR'
+          if notice[:object] && notice[:object][:code] == 410
+            @last_seen_resource_version = nil # requested resourceVersion was too old, need to reset
+            @stats.bump(:pod_watch_gone_notices)
+            raise GoneError
+          else
            @stats.bump(:pod_watch_error_type_notices)
-          message = notice['object']['message'] if notice['object'] && notice['object']['message']
+            message = notice[:object][:message] if notice[:object] && notice[:object][:message]
            raise "Error while watching pods: #{message}"
-        else
-          reset_pod_watch_retry_stats
-          # Don't pay attention to creations, since the created pod may not
-          # end up on this node.
-          @stats.bump(:pod_cache_watch_ignored)
+          end
+        else
+          reset_pod_watch_retry_stats
+          # Don't pay attention to creations, since the created pod may not
+          # end up on this node.
+          @stats.bump(:pod_cache_watch_ignored)
         end
       end
     end
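The new `when 'ERROR'` branch closes the loop on the Kubernetes watch contract: if the resourceVersion a client asks for has already been compacted out of etcd, the server answers with an ERROR notice carrying code 410 (Gone), and the only recovery is to discard the stale version and re-list. That is what raising `GoneError` back up to `set_up_pod_thread` accomplishes. A reduced sketch of the round trip (the `GoneError` class and notice shape below are stand-ins, not the plugin's definitions):

    # Stand-in for the error class the watch loop rescues.
    class GoneError < StandardError; end

    state = { last_seen_resource_version: '12345' }

    def handle_notice(notice, state)
      return unless notice[:type] == 'ERROR'

      if notice[:object] && notice[:object][:code] == 410
        state[:last_seen_resource_version] = nil # force a fresh list on reconnect
        raise GoneError
      end
      raise "Error while watching pods: #{notice.dig(:object, :message)}"
    end

    begin
      handle_notice({ type: 'ERROR', object: { code: 410 } }, state)
    rescue GoneError
      puts "resource version reset to #{state[:last_seen_resource_version].inspect}; re-listing"
    end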
data/test/helper.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -16,6 +18,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+require 'bundler/setup'
 require 'codeclimate-test-reporter'
 SimpleCov.start do
   formatter SimpleCov::Formatter::MultiFormatter.new [
@@ -31,8 +34,14 @@ require 'fileutils'
 require 'fluent/log'
 require 'fluent/test'
 require 'minitest/autorun'
-require 'webmock/test_unit'
 require 'vcr'
+require 'ostruct'
+require 'fluent/plugin/filter_kubernetes_metadata'
+require 'fluent/test/driver/filter'
+require 'kubeclient'
+
+require 'webmock/test_unit'
+WebMock.disable_net_connect!
 
 VCR.configure do |config|
   config.cassette_library_dir = 'test/cassettes'
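Moving `require 'webmock/test_unit'` and `WebMock.disable_net_connect!` into the shared helper makes the no-real-HTTP policy global instead of per-file (the per-file copies are deleted further down). Sketched in isolation (the URL is illustrative):

    require 'webmock'
    require 'net/http'

    WebMock.enable!
    WebMock.disable_net_connect!

    begin
      Net::HTTP.get(URI('http://example.com/'))
    rescue WebMock::NetConnectNotAllowedError
      puts 'unstubbed HTTP request blocked, as every test file now assumes'
    end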
@@ -58,7 +67,16 @@ def ipv6_enabled?
   begin
     TCPServer.open('::1', 0)
     true
-  rescue
+  rescue StandardError
     false
   end
 end
+
+# TEST_NAME='foo' ruby test_file.rb to run a single test case
+if ENV['TEST_NAME']
+  (class << Test::Unit::TestCase; self; end).prepend(Module.new do
+    def test(name)
+      super if name == ENV['TEST_NAME']
+    end
+  end)
+end
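The `TEST_NAME` hook works by prepending a module to the singleton class of `Test::Unit::TestCase`, so the `test` class macro consults the filter before defining a case; only a matching name reaches `super`. With it in place, `TEST_NAME='watch stats' ruby test_file.rb` runs just that case. The same prepend mechanics in a self-contained example (class `C` is hypothetical):

    class C
      def self.test(name)
        puts "defining test: #{name}"
      end
    end

    # Prepending to the singleton class intercepts C's *class* methods,
    # so the filtering module runs before the original `test` macro.
    (class << C; self; end).prepend(Module.new do
      def test(name)
        super if name == 'keep me'
      end
    end)

    C.test('keep me') # => defining test: keep me
    C.test('drop me') # silently skipped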
data/test/plugin/test_cache_stats.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,20 +19,15 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require 'fluent/plugin/kubernetes_metadata_stats'
-require 'webmock/test_unit'
-WebMock.disable_net_connect!
 
 class KubernetesMetadataCacheStatsTest < Test::Unit::TestCase
-
-  test 'watch stats' do
-    require 'lru_redux'
-    stats = KubernetesMetadata::Stats.new
-    stats.bump(:missed)
-    stats.bump(:deleted)
-    stats.bump(:deleted)
+  test 'watch stats' do
+    require 'lru_redux'
+    stats = KubernetesMetadata::Stats.new
+    stats.bump(:missed)
+    stats.bump(:deleted)
+    stats.bump(:deleted)
 
-    assert_equal("stats - deleted: 2, missed: 1", stats.to_s)
-  end
-
+    assert_equal('stats - deleted: 2, missed: 1', stats.to_s)
+  end
 end
data/test/plugin/test_cache_strategy.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,180 +19,176 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require_relative '../../lib/fluent/plugin/kubernetes_metadata_cache_strategy'
-require_relative '../../lib/fluent/plugin/kubernetes_metadata_stats'
-require 'lru_redux'
-require 'webmock/test_unit'
-WebMock.disable_net_connect!
 
 class TestCacheStrategy
-  include KubernetesMetadata::CacheStrategy
-
-  def initialize
-    @stats = KubernetesMetadata::Stats.new
-    @cache = LruRedux::TTL::ThreadSafeCache.new(100,3600)
-    @id_cache = LruRedux::TTL::ThreadSafeCache.new(100,3600)
-    @namespace_cache = LruRedux::TTL::ThreadSafeCache.new(100,3600)
-    @orphaned_namespace_name = '.orphaned'
-    @orphaned_namespace_id = 'orphaned'
+  include KubernetesMetadata::CacheStrategy
+
+  def initialize
+    @stats = KubernetesMetadata::Stats.new
+    @cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @id_cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @namespace_cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @orphaned_namespace_name = '.orphaned'
+    @orphaned_namespace_id = 'orphaned'
+  end
+
+  attr_accessor :stats, :cache, :id_cache, :namespace_cache, :allow_orphans
+
+  def fetch_pod_metadata(_namespace_name, _pod_name)
+    {}
+  end
+
+  def fetch_namespace_metadata(_namespace_name)
+    {}
+  end
+
+  def log
+    logger = {}
+    def logger.trace?
+      true
     end
 
-  attr_accessor :stats, :cache, :id_cache, :namespace_cache, :allow_orphans
-
-  def fetch_pod_metadata(namespace_name, pod_name)
+    def logger.trace(message)
     end
-
-  def fetch_namespace_metadata(namespace_name)
-  end
-
-  def log
-    logger = {}
-    def logger.trace?
-      true
-    end
-    def logger.trace(message)
-    end
-    logger
-  end
-
+    logger
+  end
 end
 
 class KubernetesMetadataCacheStrategyTest < Test::Unit::TestCase
-
-  def setup
-    @strategy = TestCacheStrategy.new
-    @cache_key = 'some_long_container_id'
-    @namespace_name = 'some_namespace_name'
-    @namespace_uuid = 'some_namespace_uuid'
-    @pod_name = 'some_pod_name'
-    @pod_uuid = 'some_pod_uuid'
-    @time = Time.now
-    @pod_meta = {'pod_id'=> @pod_uuid, 'labels'=> {'meta'=>'pod'}}
-    @namespace_meta = {'namespace_id'=> @namespace_uuid, 'creation_timestamp'=>@time.to_s}
+  def setup
+    @strategy = TestCacheStrategy.new
+    @cache_key = 'some_long_container_id'
+    @namespace_name = 'some_namespace_name'
+    @namespace_uuid = 'some_namespace_uuid'
+    @pod_name = 'some_pod_name'
+    @pod_uuid = 'some_pod_uuid'
+    @time = Time.now
+    @pod_meta = { 'pod_id' => @pod_uuid, 'labels' => { 'meta' => 'pod' } }
+    @namespace_meta = { 'namespace_id' => @namespace_uuid, 'creation_timestamp' => @time.to_s }
+  end
+
+  test 'when cached metadata is found' do
+    exp = @pod_meta.merge(@namespace_meta)
+    exp.delete('creation_timestamp')
+    @strategy.id_cache[@cache_key] = {
+      pod_id: @pod_uuid,
+      namespace_id: @namespace_uuid
+    }
+    @strategy.cache[@pod_uuid] = @pod_meta
+    @strategy.namespace_cache[@namespace_uuid] = @namespace_meta
+    assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+  end
+
+  test 'when previously processed record for pod but metadata is not cached and can not be fetched' do
+    exp = {
+      'pod_id' => @pod_uuid,
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.id_cache[@cache_key] = {
+      pod_id: @pod_uuid,
+      namespace_id: @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, nil do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+      end
     end
-
-  test 'when cached metadata is found' do
-    exp = @pod_meta.merge(@namespace_meta)
-    exp.delete('creation_timestamp')
-    @strategy.id_cache[@cache_key] = {
-      pod_id: @pod_uuid,
-      namespace_id: @namespace_uuid
-    }
-    @strategy.cache[@pod_uuid] = @pod_meta
-    @strategy.namespace_cache[@namespace_uuid] = @namespace_meta
-    assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
+  end
+
+  test 'when metadata is not cached and is fetched' do
+    exp = @pod_meta.merge(@namespace_meta)
+    exp.delete('creation_timestamp')
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
-
-  test 'when previously processed record for pod but metadata is not cached and can not be fetched' do
-    exp = {
-      'pod_id'=> @pod_uuid,
-      'namespace_id'=> @namespace_uuid
-    }
-    @strategy.id_cache[@cache_key] = {
-      pod_id: @pod_uuid,
-      namespace_id: @namespace_uuid
-    }
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, nil do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
-      end
-    end
+  end
+
+  test 'when metadata is not cached and pod is deleted and namespace metadata is fetched' do
+    # this is the case for a record from a deleted pod where no other
+    # records were read. using the container hash since that is all
+    # we ever will have and should allow us to process all the deleted
+    # pod records
+    exp = {
+      'pod_id' => @cache_key,
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
-
-  test 'when metadata is not cached and is fetched' do
-    exp = @pod_meta.merge(@namespace_meta)
-    exp.delete('creation_timestamp')
-    @strategy.stub :fetch_pod_metadata, @pod_meta do
-      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
-        assert_true(@strategy.id_cache.key?(@cache_key))
-      end
-    end
-  end
-
-  test 'when metadata is not cached and pod is deleted and namespace metadata is fetched' do
-    # this is the case for a record from a deleted pod where no other
-    # records were read. using the container hash since that is all
-    # we ever will have and should allow us to process all the deleted
-    # pod records
-    exp = {
-      'pod_id'=> @cache_key,
-      'namespace_id'=> @namespace_uuid
-    }
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
-        assert_true(@strategy.id_cache.key?(@cache_key))
-      end
-    end
-  end
-
-  test 'when metadata is not cached and pod is deleted and namespace is for a different namespace with the same name' do
-    # this is the case for a record from a deleted pod from a deleted namespace
-    # where new namespace was created with the same name
-    exp = {
-      'namespace_id'=> @namespace_uuid
-    }
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time - 1*86400, {}))
-        assert_true(@strategy.id_cache.key?(@cache_key))
-      end
-    end
+  end
+
+  test 'when metadata is not cached and pod is deleted and namespace is for a different namespace with the same name' do
+    # this is the case for a record from a deleted pod from a deleted namespace
+    # where new namespace was created with the same name
+    exp = {
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
-
-  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans' do
-    # we should never see this since pod meta should not be retrievable
-    # unless the namespace exists
-    @strategy.stub :fetch_pod_metadata, @pod_meta do
-      @strategy.stub :fetch_namespace_metadata, {} do
-        assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time - 1*86400, {}))
-      end
-    end
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+      end
     end
-
-  test 'when metadata is not cached and no metadata can be fetched and allowing orphans' do
-    # we should never see this since pod meta should not be retrievable
-    # unless the namespace exists
-    @strategy.allow_orphans = true
-    exp = {
-      'orphaned_namespace' => 'namespace',
-      'namespace_name' => '.orphaned',
-      'namespace_id' => 'orphaned'
-    }
-    @strategy.stub :fetch_pod_metadata, @pod_meta do
-      @strategy.stub :fetch_namespace_metadata, {} do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time - 1*86400, {}))
-      end
-    end
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and allowing orphans' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.allow_orphans = true
+    exp = {
+      'orphaned_namespace' => 'namespace',
+      'namespace_name' => '.orphaned',
+      'namespace_id' => 'orphaned'
+    }
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+      end
    end
-
-  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans for multiple records' do
-    # processing a batch of records with no meta. ideally we only hit the api server once
-    batch_miss_cache = {}
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, {} do
-        assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
-      end
-    end
-    assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans for multiple records' do
+    # processing a batch of records with no meta. ideally we only hit the api server once
+    batch_miss_cache = {}
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+      end
    end
-
-  test 'when metadata is not cached and no metadata can be fetched and allowing orphans for multiple records' do
-    # we should never see this since pod meta should not be retrievable
-    # unless the namespace exists
-    @strategy.allow_orphans = true
-    exp = {
-      'orphaned_namespace' => 'namespace',
-      'namespace_name' => '.orphaned',
-      'namespace_id' => 'orphaned'
-    }
-    batch_miss_cache = {}
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, {} do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
-      end
-    end
-    assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
+    assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and allowing orphans for multiple records' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.allow_orphans = true
+    exp = {
+      'orphaned_namespace' => 'namespace',
+      'namespace_name' => '.orphaned',
+      'namespace_id' => 'orphaned'
+    }
+    batch_miss_cache = {}
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+      end
    end
+    assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+  end
 end
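Throughout these rewritten tests the scenario control comes from Minitest's `Object#stub`, which replaces a method with a canned return value for the duration of a block and restores it afterwards; that is how `fetch_pod_metadata` and `fetch_namespace_metadata` are swapped per scenario without any HTTP traffic. A reduced example of the same pattern (class names are illustrative):

    require 'minitest/autorun'

    class Fetcher
      def fetch_pod_metadata(_ns, _pod)
        raise 'would hit the API server'
      end
    end

    class FetcherTest < Minitest::Test
      def test_stubbed_fetch
        f = Fetcher.new
        # Within the block the stub answers; afterwards the original method is back.
        f.stub :fetch_pod_metadata, { 'pod_id' => 'abc' } do
          assert_equal({ 'pod_id' => 'abc' }, f.fetch_pod_metadata('ns', 'pod'))
        end
      end
    end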