fluent-plugin-kubernetes_metadata_filter 2.6.0 → 2.7.0

data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
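
Note: the `# frozen_string_literal: true` magic comment, added at the top of every file in this release, freezes all string literals in that file, so Ruby can deduplicate them and accidental mutation surfaces early as a `FrozenError`. A standalone illustration of the behavior (not code from this gem):

```ruby
# frozen_string_literal: true

greeting = 'hello'
greeting.frozen?      # => true with the magic comment above
# greeting << ' world' would raise FrozenError

mutable = +'hello'    # unary + returns an unfrozen copy when mutation is needed
mutable << ' world'   # => "hello world"
```
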
@@ -20,9 +22,7 @@
 require_relative 'kubernetes_metadata_common'
 
 module KubernetesMetadata
-
   module WatchPods
-
     include ::KubernetesMetadata::Common
 
     def set_up_pod_thread
@@ -38,48 +38,47 @@ module KubernetesMetadata
       # processing will be swallowed and retried. These failures /
       # exceptions could be caused by Kubernetes API being temporarily
       # down. We assume the configuration is correct at this point.
-      while true
-        begin
-          pod_watcher ||= get_pods_and_start_watcher
-          process_pod_watcher_notices(pod_watcher)
-        rescue GoneError => e
-          # Expected error. Quietly go back through the loop in order to
-          # start watching from the latest resource versions
-          @stats.bump(:pod_watch_gone_errors)
-          log.info("410 Gone encountered. Restarting pod watch to reset resource versions.", e)
+      loop do
+        pod_watcher ||= get_pods_and_start_watcher
+        process_pod_watcher_notices(pod_watcher)
+      rescue GoneError => e
+        # Expected error. Quietly go back through the loop in order to
+        # start watching from the latest resource versions
+        @stats.bump(:pod_watch_gone_errors)
+        log.info('410 Gone encountered. Restarting pod watch to reset resource versions.', e)
+        pod_watcher = nil
+      rescue StandardError => e
+        @stats.bump(:pod_watch_failures)
+        if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
+          # Instead of raising exceptions and crashing Fluentd, swallow
+          # the exception and reset the watcher.
+          log.info(
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Sleeping for ' \
+            "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
+            'seconds and resetting the pod watcher.', e
+          )
+          sleep(Thread.current[:pod_watch_retry_backoff_interval])
+          Thread.current[:pod_watch_retry_count] += 1
+          Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
           pod_watcher = nil
-        rescue => e
-          @stats.bump(:pod_watch_failures)
-          if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
-            # Instead of raising exceptions and crashing Fluentd, swallow
-            # the exception and reset the watcher.
-            log.info(
-              "Exception encountered parsing pod watch event. The " \
-              "connection might have been closed. Sleeping for " \
-              "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
-              "seconds and resetting the pod watcher.", e)
-            sleep(Thread.current[:pod_watch_retry_backoff_interval])
-            Thread.current[:pod_watch_retry_count] += 1
-            Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
-            pod_watcher = nil
-          else
-            # Since retries failed for many times, log as errors instead
-            # of info and raise exceptions and trigger Fluentd to restart.
-            message =
-              "Exception encountered parsing pod watch event. The " \
-              "connection might have been closed. Retried " \
-              "#{@watch_retry_max_times} times yet still failing. Restarting."
-            log.error(message, e)
-            raise Fluent::UnrecoverableError.new(message)
-          end
+        else
+          # Since retries failed for many times, log as errors instead
+          # of info and raise exceptions and trigger Fluentd to restart.
+          message =
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Retried ' \
+            "#{@watch_retry_max_times} times yet still failing. Restarting."
+          log.error(message, e)
+          raise Fluent::UnrecoverableError, message
         end
       end
     end
 
     def start_pod_watch
       get_pods_and_start_watcher
-    rescue => e
-      message = "start_pod_watch: Exception encountered setting up pod watch " \
+    rescue StandardError => e
+      message = 'start_pod_watch: Exception encountered setting up pod watch ' \
                 "from Kubernetes API #{@apiVersion} endpoint " \
                 "#{@kubernetes_url}: #{e.message}"
       message += " (#{e.response})" if e.respond_to?(:response)
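
Note: this hunk swaps the `while true` / `begin` / `rescue` construct for `loop do ... rescue ... end`, using the begin-less block rescue available since Ruby 2.6, while keeping the exponential backoff between retries. A minimal sketch of the same retry shape, with hypothetical names (`watch_with_retries`, `max_retries`, `backoff`, `base`) standing in for the plugin's thread-local state and configuration:

```ruby
# Minimal retry loop with exponential backoff; names are illustrative,
# not the plugin's actual configuration keys.
def watch_with_retries(max_retries: 5, backoff: 1, base: 2)
  retries = 0
  loop do
    yield # e.g. process one batch of watch notices
  rescue StandardError => e
    raise "giving up after #{max_retries} retries: #{e.message}" if retries >= max_retries

    sleep(backoff)
    retries += 1
    backoff *= base # exponential backoff, as in the plugin
  end
end

attempts = 0
watch_with_retries(max_retries: 2) do
  attempts += 1
  raise 'connection closed' if attempts < 3

  puts "succeeded on attempt #{attempts}"
  break # a successful run ends the loop in this sketch
end
```

Where the sketch gives up with a plain `RuntimeError`, the plugin instead raises `Fluent::UnrecoverableError` so that Fluentd restarts once the retry budget is spent.
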
@@ -92,7 +91,7 @@ module KubernetesMetadata
     # from that resourceVersion.
     def get_pods_and_start_watcher
       options = {
-        resource_version: '0'  # Fetch from API server cache instead of etcd quorum read
+        resource_version: '0' # Fetch from API server cache instead of etcd quorum read
       }
       if ENV['K8S_NODE_NAME']
         options[:field_selector] = 'spec.nodeName=' + ENV['K8S_NODE_NAME']
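
Note: `resource_version: '0'` tells the API server it may answer the initial pod LIST from its watch cache instead of performing a quorum read against etcd, which is much cheaper at startup. A sketch of how the options end up looking, mirroring the hunk above (`K8S_NODE_NAME` is expected to be injected into the pod via the downward API):

```ruby
# Build LIST/WATCH options as in get_pods_and_start_watcher.
options = {
  resource_version: '0' # serve from API server cache, not an etcd quorum read
}
# Restrict the query to pods scheduled on this node when the env var is set.
options[:field_selector] = "spec.nodeName=#{ENV['K8S_NODE_NAME']}" if ENV['K8S_NODE_NAME']
```
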
@@ -133,39 +132,39 @@ module KubernetesMetadata
         @last_seen_resource_version = version if version
 
         case notice[:type]
-          when 'MODIFIED'
-            reset_pod_watch_retry_stats
-            cache_key = notice.dig(:object, :metadata, :uid)
-            cached = @cache[cache_key]
-            if cached
-              @cache[cache_key] = parse_pod_metadata(notice[:object])
-              @stats.bump(:pod_cache_watch_updates)
-            elsif ENV['K8S_NODE_NAME'] == notice[:object][:spec][:nodeName] then
-              @cache[cache_key] = parse_pod_metadata(notice[:object])
-              @stats.bump(:pod_cache_host_updates)
-            else
-              @stats.bump(:pod_cache_watch_misses)
-            end
-          when 'DELETED'
-            reset_pod_watch_retry_stats
-            # ignore and let age out for cases where pods
-            # deleted but still processing logs
-            @stats.bump(:pod_cache_watch_delete_ignored)
-          when 'ERROR'
-            if notice[:object] && notice[:object][:code] == 410
-              @last_seen_resource_version = nil # requested resourceVersion was too old, need to reset
-              @stats.bump(:pod_watch_gone_notices)
-              raise GoneError
-            else
-              @stats.bump(:pod_watch_error_type_notices)
-              message = notice[:object][:message] if notice[:object] && notice[:object][:message]
-              raise "Error while watching pods: #{message}"
-            end
+        when 'MODIFIED'
+          reset_pod_watch_retry_stats
+          cache_key = notice.dig(:object, :metadata, :uid)
+          cached = @cache[cache_key]
+          if cached
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_watch_updates)
+          elsif ENV['K8S_NODE_NAME'] == notice[:object][:spec][:nodeName]
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_host_updates)
+          else
+            @stats.bump(:pod_cache_watch_misses)
+          end
+        when 'DELETED'
+          reset_pod_watch_retry_stats
+          # ignore and let age out for cases where pods
+          # deleted but still processing logs
+          @stats.bump(:pod_cache_watch_delete_ignored)
+        when 'ERROR'
+          if notice[:object] && notice[:object][:code] == 410
+            @last_seen_resource_version = nil # requested resourceVersion was too old, need to reset
+            @stats.bump(:pod_watch_gone_notices)
+            raise GoneError
           else
-            reset_pod_watch_retry_stats
-            # Don't pay attention to creations, since the created pod may not
-            # end up on this node.
-            @stats.bump(:pod_cache_watch_ignored)
+          @stats.bump(:pod_watch_error_type_notices)
+          message = notice[:object][:message] if notice[:object] && notice[:object][:message]
+          raise "Error while watching pods: #{message}"
+          end
+        else
+          reset_pod_watch_retry_stats
+          # Don't pay attention to creations, since the created pod may not
+          # end up on this node.
+          @stats.bump(:pod_cache_watch_ignored)
         end
       end
     end
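
Note: the re-indentation above is a Rubocop layout fix (`when` aligned with `case`); behavior is unchanged. The interesting path is the `ERROR` notice: a 410 code means the requested `resourceVersion` has been compacted away, so the watcher clears `@last_seen_resource_version` and raises `GoneError` to restart the watch from scratch. A minimal sketch of that dispatch with a hypothetical notice hash (`GoneError` is redefined here only so the example runs outside the plugin):

```ruby
GoneError = Class.new(StandardError)

def handle(notice)
  case notice[:type]
  when 'ERROR'
    if notice.dig(:object, :code) == 410
      raise GoneError # resourceVersion too old; caller restarts the watch
    else
      raise "Error while watching pods: #{notice.dig(:object, :message)}"
    end
  else
    :ignored # e.g. ADDED; the created pod may not land on this node
  end
end

begin
  handle({ type: 'ERROR', object: { code: 410 } })
rescue GoneError
  puts 'restarting pod watch from the latest resource version'
end
```
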
data/test/helper.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -65,16 +67,16 @@ def ipv6_enabled?
   begin
     TCPServer.open('::1', 0)
     true
-  rescue
+  rescue StandardError
     false
   end
 end
 
 # TEST_NAME='foo' ruby test_file.rb to run a single test case
-if ENV["TEST_NAME"]
+if ENV['TEST_NAME']
   (class << Test::Unit::TestCase; self; end).prepend(Module.new do
     def test(name)
-      super if name == ENV["TEST_NAME"]
+      super if name == ENV['TEST_NAME']
     end
   end)
 end
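
Note: the helper's `TEST_NAME` filter works by prepending a module to the singleton class of `Test::Unit::TestCase`, so the class-level `test` DSL only defines a test when its name matches. The same pattern in isolation, with a hypothetical `Suite` class standing in for `Test::Unit::TestCase`:

```ruby
# Filter class-level DSL calls by prepending a module to the singleton class.
class Suite
  def self.test(name)
    puts "defining test: #{name}"
  end
end

filter = Module.new do
  def test(name)
    super if name == ENV['TEST_NAME'] # fall through to the original definition
  end
end

(class << Suite; self; end).prepend(filter)

ENV['TEST_NAME'] = 'kept'
Suite.test('kept')    # defined, prints
Suite.test('skipped') # silently filtered out by the prepended module
```
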
data/test/plugin/test_cache_stats.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -19,15 +21,13 @@
 require_relative '../helper'
 
 class KubernetesMetadataCacheStatsTest < Test::Unit::TestCase
+  test 'watch stats' do
+    require 'lru_redux'
+    stats = KubernetesMetadata::Stats.new
+    stats.bump(:missed)
+    stats.bump(:deleted)
+    stats.bump(:deleted)
 
-    test 'watch stats' do
-      require 'lru_redux'
-      stats = KubernetesMetadata::Stats.new
-      stats.bump(:missed)
-      stats.bump(:deleted)
-      stats.bump(:deleted)
-
-      assert_equal("stats - deleted: 2, missed: 1", stats.to_s)
-    end
-
+    assert_equal('stats - deleted: 2, missed: 1', stats.to_s)
+  end
 end
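
Note: the test pins the string format of `KubernetesMetadata::Stats#to_s`. A minimal stand-in with the same observable behavior, assuming nothing beyond what the assertion shows (the real class lives in the plugin and may differ internally):

```ruby
# Minimal counter matching the asserted "stats - deleted: 2, missed: 1" format.
class Stats
  def initialize
    @counters = Hash.new(0)
  end

  def bump(key)
    @counters[key] += 1
  end

  def to_s
    'stats - ' + @counters.sort.map { |k, v| "#{k}: #{v}" }.join(', ')
  end
end

stats = Stats.new
stats.bump(:missed)
stats.bump(:deleted)
stats.bump(:deleted)
puts stats # => stats - deleted: 2, missed: 1
```
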
data/test/plugin/test_cache_strategy.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -19,175 +21,174 @@
 require_relative '../helper'
 
 class TestCacheStrategy
-    include KubernetesMetadata::CacheStrategy
-
-    def initialize
-      @stats = KubernetesMetadata::Stats.new
-      @cache = LruRedux::TTL::ThreadSafeCache.new(100,3600)
-      @id_cache = LruRedux::TTL::ThreadSafeCache.new(100,3600)
-      @namespace_cache = LruRedux::TTL::ThreadSafeCache.new(100,3600)
-      @orphaned_namespace_name = '.orphaned'
-      @orphaned_namespace_id = 'orphaned'
-    end
-
-    attr_accessor :stats, :cache, :id_cache, :namespace_cache, :allow_orphans
-
-    def fetch_pod_metadata(namespace_name, pod_name)
-      {}
-    end
-
-    def fetch_namespace_metadata(namespace_name)
-      {}
+  include KubernetesMetadata::CacheStrategy
+
+  def initialize
+    @stats = KubernetesMetadata::Stats.new
+    @cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @id_cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @namespace_cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @orphaned_namespace_name = '.orphaned'
+    @orphaned_namespace_id = 'orphaned'
+  end
+
+  attr_accessor :stats, :cache, :id_cache, :namespace_cache, :allow_orphans
+
+  def fetch_pod_metadata(_namespace_name, _pod_name)
+    {}
+  end
+
+  def fetch_namespace_metadata(_namespace_name)
+    {}
+  end
+
+  def log
+    logger = {}
+    def logger.trace?
+      true
     end
 
-    def log
-      logger = {}
-      def logger.trace?
-        true
-      end
-      def logger.trace(message)
-      end
-      logger
+    def logger.trace(message)
     end
-
+    logger
+  end
 end
 
 class KubernetesMetadataCacheStrategyTest < Test::Unit::TestCase
-
-    def setup
-      @strategy = TestCacheStrategy.new
-      @cache_key = 'some_long_container_id'
-      @namespace_name = 'some_namespace_name'
-      @namespace_uuid = 'some_namespace_uuid'
-      @pod_name = 'some_pod_name'
-      @pod_uuid = 'some_pod_uuid'
-      @time = Time.now
-      @pod_meta = {'pod_id'=> @pod_uuid, 'labels'=> {'meta'=>'pod'}}
-      @namespace_meta = {'namespace_id'=> @namespace_uuid, 'creation_timestamp'=>@time.to_s}
+  def setup
+    @strategy = TestCacheStrategy.new
+    @cache_key = 'some_long_container_id'
+    @namespace_name = 'some_namespace_name'
+    @namespace_uuid = 'some_namespace_uuid'
+    @pod_name = 'some_pod_name'
+    @pod_uuid = 'some_pod_uuid'
+    @time = Time.now
+    @pod_meta = { 'pod_id' => @pod_uuid, 'labels' => { 'meta' => 'pod' } }
+    @namespace_meta = { 'namespace_id' => @namespace_uuid, 'creation_timestamp' => @time.to_s }
+  end
+
+  test 'when cached metadata is found' do
+    exp = @pod_meta.merge(@namespace_meta)
+    exp.delete('creation_timestamp')
+    @strategy.id_cache[@cache_key] = {
+      pod_id: @pod_uuid,
+      namespace_id: @namespace_uuid
+    }
+    @strategy.cache[@pod_uuid] = @pod_meta
+    @strategy.namespace_cache[@namespace_uuid] = @namespace_meta
+    assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+  end
+
+  test 'when previously processed record for pod but metadata is not cached and can not be fetched' do
+    exp = {
+      'pod_id' => @pod_uuid,
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.id_cache[@cache_key] = {
+      pod_id: @pod_uuid,
+      namespace_id: @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, nil do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+      end
     end
-
-    test 'when cached metadata is found' do
-      exp = @pod_meta.merge(@namespace_meta)
-      exp.delete('creation_timestamp')
-      @strategy.id_cache[@cache_key] = {
-        pod_id: @pod_uuid,
-        namespace_id: @namespace_uuid
-      }
-      @strategy.cache[@pod_uuid] = @pod_meta
-      @strategy.namespace_cache[@namespace_uuid] = @namespace_meta
-      assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
+  end
+
+  test 'when metadata is not cached and is fetched' do
+    exp = @pod_meta.merge(@namespace_meta)
+    exp.delete('creation_timestamp')
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
-
-    test 'when previously processed record for pod but metadata is not cached and can not be fetched' do
-      exp = {
-        'pod_id'=> @pod_uuid,
-        'namespace_id'=> @namespace_uuid
-      }
-      @strategy.id_cache[@cache_key] = {
-        pod_id: @pod_uuid,
-        namespace_id: @namespace_uuid
-      }
-      @strategy.stub :fetch_pod_metadata, {} do
-        @strategy.stub :fetch_namespace_metadata, nil do
-          assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
-        end
-      end
+  end
+
+  test 'when metadata is not cached and pod is deleted and namespace metadata is fetched' do
+    # this is the case for a record from a deleted pod where no other
+    # records were read. using the container hash since that is all
+    # we ever will have and should allow us to process all the deleted
+    # pod records
+    exp = {
+      'pod_id' => @cache_key,
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
-
-    test 'when metadata is not cached and is fetched' do
-      exp = @pod_meta.merge(@namespace_meta)
-      exp.delete('creation_timestamp')
-      @strategy.stub :fetch_pod_metadata, @pod_meta do
-        @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-          assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
-          assert_true(@strategy.id_cache.key?(@cache_key))
-        end
-      end
+  end
+
+  test 'when metadata is not cached and pod is deleted and namespace is for a different namespace with the same name' do
+    # this is the case for a record from a deleted pod from a deleted namespace
+    # where new namespace was created with the same name
+    exp = {
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
-
-    test 'when metadata is not cached and pod is deleted and namespace metadata is fetched' do
-      # this is the case for a record from a deleted pod where no other
-      # records were read. using the container hash since that is all
-      # we ever will have and should allow us to process all the deleted
-      # pod records
-      exp = {
-        'pod_id'=> @cache_key,
-        'namespace_id'=> @namespace_uuid
-      }
-      @strategy.stub :fetch_pod_metadata, {} do
-        @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-          assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
-          assert_true(@strategy.id_cache.key?(@cache_key))
-        end
-      end
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+      end
     end
-
-    test 'when metadata is not cached and pod is deleted and namespace is for a different namespace with the same name' do
-      # this is the case for a record from a deleted pod from a deleted namespace
-      # where new namespace was created with the same name
-      exp = {
-        'namespace_id'=> @namespace_uuid
-      }
-      @strategy.stub :fetch_pod_metadata, {} do
-        @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-          assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time - 1*86400, {}))
-          assert_true(@strategy.id_cache.key?(@cache_key))
-        end
-      end
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and allowing orphans' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.allow_orphans = true
+    exp = {
+      'orphaned_namespace' => 'namespace',
+      'namespace_name' => '.orphaned',
+      'namespace_id' => 'orphaned'
+    }
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+      end
     end
-
-    test 'when metadata is not cached and no metadata can be fetched and not allowing orphans' do
-      # we should never see this since pod meta should not be retrievable
-      # unless the namespace exists
-      @strategy.stub :fetch_pod_metadata, @pod_meta do
-        @strategy.stub :fetch_namespace_metadata, {} do
-          assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time - 1*86400, {}))
-        end
-      end
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans for multiple records' do
+    # processing a batch of records with no meta. ideally we only hit the api server once
+    batch_miss_cache = {}
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+      end
     end
-
-    test 'when metadata is not cached and no metadata can be fetched and allowing orphans' do
-      # we should never see this since pod meta should not be retrievable
-      # unless the namespace exists
-      @strategy.allow_orphans = true
-      exp = {
-        'orphaned_namespace' => 'namespace',
-        'namespace_name' => '.orphaned',
-        'namespace_id' => 'orphaned'
-      }
-      @strategy.stub :fetch_pod_metadata, @pod_meta do
-        @strategy.stub :fetch_namespace_metadata, {} do
-          assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time - 1*86400, {}))
-        end
-      end
-    end
-
-    test 'when metadata is not cached and no metadata can be fetched and not allowing orphans for multiple records' do
-      # processing a batch of records with no meta. ideally we only hit the api server once
-      batch_miss_cache = {}
-      @strategy.stub :fetch_pod_metadata, {} do
-        @strategy.stub :fetch_namespace_metadata, {} do
-          assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
-        end
-      end
-      assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
-    end
-
-    test 'when metadata is not cached and no metadata can be fetched and allowing orphans for multiple records' do
-      # we should never see this since pod meta should not be retrievable
-      # unless the namespace exists
-      @strategy.allow_orphans = true
-      exp = {
-        'orphaned_namespace' => 'namespace',
-        'namespace_name' => '.orphaned',
-        'namespace_id' => 'orphaned'
-      }
-      batch_miss_cache = {}
-      @strategy.stub :fetch_pod_metadata, {} do
-        @strategy.stub :fetch_namespace_metadata, {} do
-          assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
-        end
-      end
-      assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
+    assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and allowing orphans for multiple records' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.allow_orphans = true
+    exp = {
+      'orphaned_namespace' => 'namespace',
+      'namespace_name' => '.orphaned',
+      'namespace_id' => 'orphaned'
+    }
+    batch_miss_cache = {}
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+      end
     end
+    assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+  end
 end
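
Note: the paired `assert_equal` calls around `batch_miss_cache` in the last two tests encode the contract that, within one batch of records, a metadata miss is looked up against the API server only once and then answered from the per-batch cache. A sketch of that memoization shape, with hypothetical names (`lookup`, `fetch_from_api`) standing in for the strategy's fetch methods:

```ruby
# Per-batch miss cache: remember failed lookups so one batch of records
# triggers at most one API call per key. Names are illustrative.
def lookup(key, batch_miss_cache)
  return batch_miss_cache[key] if batch_miss_cache.key?(key)

  batch_miss_cache[key] = fetch_from_api(key) # {} when nothing is found
end

def fetch_from_api(key)
  puts "API call for #{key}" # in the plugin: fetch_pod_metadata / fetch_namespace_metadata
  {}
end

batch_miss_cache = {}
lookup('some_long_container_id', batch_miss_cache) # hits the API once
lookup('some_long_container_id', batch_miss_cache) # served from the miss cache
```
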