fluent-plugin-kubernetes_metadata_filter 2.5.3 → 2.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.circleci/config.yml +1 -1
- data/.rubocop.yml +57 -0
- data/Gemfile +4 -2
- data/Gemfile.lock +46 -42
- data/README.md +4 -2
- data/Rakefile +15 -11
- data/fluent-plugin-kubernetes_metadata_filter.gemspec +24 -23
- data/lib/fluent/plugin/filter_kubernetes_metadata.rb +82 -72
- data/lib/fluent/plugin/kubernetes_metadata_cache_strategy.rb +22 -18
- data/lib/fluent/plugin/kubernetes_metadata_common.rb +30 -29
- data/lib/fluent/plugin/kubernetes_metadata_stats.rb +6 -6
- data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb +68 -0
- data/lib/fluent/plugin/kubernetes_metadata_util.rb +53 -0
- data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb +65 -65
- data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb +69 -70
- data/test/helper.rb +5 -3
- data/test/plugin/test_cache_stats.rb +10 -10
- data/test/plugin/test_cache_strategy.rb +158 -157
- data/test/plugin/test_filter_kubernetes_metadata.rb +363 -344
- data/test/plugin/test_utils.rb +56 -0
- data/test/plugin/test_watch_namespaces.rb +191 -190
- data/test/plugin/test_watch_pods.rb +278 -267
- data/test/plugin/watch_test.rb +13 -7
- metadata +46 -42
data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -20,9 +22,7 @@
 require_relative 'kubernetes_metadata_common'
 
 module KubernetesMetadata
-
   module WatchPods
-
     include ::KubernetesMetadata::Common
 
     def set_up_pod_thread
@@ -38,48 +38,47 @@ module KubernetesMetadata
       # processing will be swallowed and retried. These failures /
       # exceptions could be caused by Kubernetes API being temporarily
       # down. We assume the configuration is correct at this point.
-… [old lines 41-49 not rendered in this diff view]
+      loop do
+        pod_watcher ||= get_pods_and_start_watcher
+        process_pod_watcher_notices(pod_watcher)
+      rescue GoneError => e
+        # Expected error. Quietly go back through the loop in order to
+        # start watching from the latest resource versions
+        @stats.bump(:pod_watch_gone_errors)
+        log.info('410 Gone encountered. Restarting pod watch to reset resource versions.', e)
+        pod_watcher = nil
+      rescue StandardError => e
+        @stats.bump(:pod_watch_failures)
+        if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
+          # Instead of raising exceptions and crashing Fluentd, swallow
+          # the exception and reset the watcher.
+          log.info(
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Sleeping for ' \
+            "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
+            'seconds and resetting the pod watcher.', e
+          )
+          sleep(Thread.current[:pod_watch_retry_backoff_interval])
+          Thread.current[:pod_watch_retry_count] += 1
+          Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
           pod_watcher = nil
-… [old lines 51-59 not rendered in this diff view]
-            "seconds and resetting the pod watcher.", e)
-          sleep(Thread.current[:pod_watch_retry_backoff_interval])
-          Thread.current[:pod_watch_retry_count] += 1
-          Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
-          pod_watcher = nil
-        else
-          # Since retries failed for many times, log as errors instead
-          # of info and raise exceptions and trigger Fluentd to restart.
-          message =
-            "Exception encountered parsing pod watch event. The " \
-            "connection might have been closed. Retried " \
-            "#{@watch_retry_max_times} times yet still failing. Restarting."
-          log.error(message, e)
-          raise Fluent::UnrecoverableError.new(message)
-        end
+        else
+          # Since retries failed for many times, log as errors instead
+          # of info and raise exceptions and trigger Fluentd to restart.
+          message =
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Retried ' \
+            "#{@watch_retry_max_times} times yet still failing. Restarting."
+          log.error(message, e)
+          raise Fluent::UnrecoverableError, message
         end
       end
     end
 
     def start_pod_watch
       get_pods_and_start_watcher
-    rescue => e
-      message =
+    rescue StandardError => e
+      message = 'start_pod_watch: Exception encountered setting up pod watch ' \
                 "from Kubernetes API #{@apiVersion} endpoint " \
                 "#{@kubernetes_url}: #{e.message}"
      message += " (#{e.response})" if e.respond_to?(:response)
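The refactor above swaps the old begin/rescue/retry structure for Ruby's inline `rescue` clauses on `loop do`, while keeping the exponential backoff intact. Read in isolation, the pattern looks like the following sketch; `connect` is a hypothetical stand-in for `get_pods_and_start_watcher`, and the constants only mirror (they are not) the plugin's `@watch_retry_*` settings:

```ruby
# A minimal, self-contained sketch of the retry-with-exponential-backoff
# pattern used above. `connect` is a hypothetical stand-in, not plugin API.
MAX_RETRIES  = 3
BACKOFF_BASE = 2

def connect
  raise IOError, 'connection closed' # simulate the watch dropping
end

retries  = 0
interval = 1
begin
  connect
rescue IOError => e
  # Give up and surface the error once the retry budget is exhausted,
  # mirroring the Fluent::UnrecoverableError branch above.
  raise "Retried #{MAX_RETRIES} times yet still failing" if retries >= MAX_RETRIES

  warn "#{e.message}; sleeping #{interval}s before retrying"
  sleep(interval)
  retries  += 1
  interval *= BACKOFF_BASE # 1s, 2s, 4s, ...
  retry
end
```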
@@ -92,7 +91,7 @@ module KubernetesMetadata
       # from that resourceVersion.
       def get_pods_and_start_watcher
         options = {
-          resource_version: '0'
+          resource_version: '0' # Fetch from API server cache instead of etcd quorum read
         }
         if ENV['K8S_NODE_NAME']
           options[:field_selector] = 'spec.nodeName=' + ENV['K8S_NODE_NAME']
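The new inline comment documents why the initial list is pinned to `resource_version: '0'`: the Kubernetes API server is allowed to answer such a list from its internal cache instead of performing a quorum read against etcd, which keeps a fleet of log collectors from hammering etcd at startup. As an illustration (the host below is a hypothetical in-cluster address; `resourceVersion` and `fieldSelector` are standard Kubernetes list parameters), the options hash translates to a request like:

```ruby
# Illustration of the list request implied by the options above.
require 'cgi'

node  = ENV['K8S_NODE_NAME'] || 'node-1'
query = {
  'resourceVersion' => '0',                    # may be served from apiserver cache
  'fieldSelector'   => "spec.nodeName=#{node}" # only pods scheduled on this node
}.map { |k, v| "#{k}=#{CGI.escape(v)}" }.join('&')

puts "GET https://kubernetes.default.svc/api/v1/pods?#{query}"
```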
@@ -133,39 +132,39 @@ module KubernetesMetadata
         @last_seen_resource_version = version if version
 
         case notice[:type]
-… [old lines 136-158 not rendered in this diff view]
-        else
-          @stats.bump(:pod_watch_error_type_notices)
-          message = notice[:object][:message] if notice[:object] && notice[:object][:message]
-          raise "Error while watching pods: #{message}"
-        end
+        when 'MODIFIED'
+          reset_pod_watch_retry_stats
+          cache_key = notice.dig(:object, :metadata, :uid)
+          cached = @cache[cache_key]
+          if cached
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_watch_updates)
+          elsif ENV['K8S_NODE_NAME'] == notice[:object][:spec][:nodeName]
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_host_updates)
+          else
+            @stats.bump(:pod_cache_watch_misses)
+          end
+        when 'DELETED'
+          reset_pod_watch_retry_stats
+          # ignore and let age out for cases where pods
+          # deleted but still processing logs
+          @stats.bump(:pod_cache_watch_delete_ignored)
+        when 'ERROR'
+          if notice[:object] && notice[:object][:code] == 410
+            @last_seen_resource_version = nil # requested resourceVersion was too old, need to reset
+            @stats.bump(:pod_watch_gone_notices)
+            raise GoneError
           else
-… [old lines 165-168 not rendered in this diff view]
+            @stats.bump(:pod_watch_error_type_notices)
+            message = notice[:object][:message] if notice[:object] && notice[:object][:message]
+            raise "Error while watching pods: #{message}"
+          end
+        else
+          reset_pod_watch_retry_stats
+          # Don't pay attention to creations, since the created pod may not
+          # end up on this node.
+          @stats.bump(:pod_cache_watch_ignored)
         end
       end
     end
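The rewritten `case` makes the 410 handling explicit. Under Kubernetes watch semantics, an expired `resourceVersion` arrives as an `ERROR` notice whose object is a `Status` with `code: 410`; the plugin converts that into `GoneError`, which the loop in `set_up_pod_thread` catches in order to re-list and restart the watch. A self-contained sketch of that dispatch (the notice hash is a constructed example, and `GoneError` here is a stand-in for the plugin's class):

```ruby
# Sketch of the ERROR-notice dispatch above, with a constructed notice.
class GoneError < StandardError; end # stand-in for the plugin's GoneError

notice = {
  type: 'ERROR',
  object: { kind: 'Status', reason: 'Expired', code: 410, message: 'too old resource version' }
}

begin
  case notice[:type]
  when 'MODIFIED', 'DELETED'
    # update or age out cache entries, as the plugin does
  when 'ERROR'
    raise GoneError if notice.dig(:object, :code) == 410

    raise "Error while watching pods: #{notice.dig(:object, :message)}"
  end
rescue GoneError
  puts 'resourceVersion expired; re-listing pods and restarting the watch'
end
```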
data/test/helper.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -65,16 +67,16 @@ def ipv6_enabled?
   begin
     TCPServer.open('::1', 0)
     true
-  rescue
+  rescue StandardError
     false
   end
 end
 
 # TEST_NAME='foo' ruby test_file.rb to run a single test case
-if ENV["TEST_NAME"]
+if ENV['TEST_NAME']
   (class << Test::Unit::TestCase; self; end).prepend(Module.new do
     def test(name)
-      super if name == ENV["TEST_NAME"]
+      super if name == ENV['TEST_NAME']
     end
   end)
 end
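The `TEST_NAME` guard works by prepending an anonymous module to `Test::Unit::TestCase`'s singleton class, so the class-level `test` DSL only defines cases whose name matches; `super` falls through to the original definition. The same trick in isolation, on a hypothetical class of our own:

```ruby
# Plain-Ruby sketch of the singleton-class prepend used in helper.rb.
class Suite
  def self.test(name)
    puts "defined test: #{name}"
  end
end

(class << Suite; self; end).prepend(Module.new do
  def test(name)
    super if name == ENV['TEST_NAME'] # only define the requested test
  end
end)

ENV['TEST_NAME'] = 'wanted'
Suite.test('wanted')   # => defined test: wanted
Suite.test('unwanted') # filtered out, nothing defined
```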
data/test/plugin/test_cache_stats.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -19,15 +21,13 @@
 require_relative '../helper'
 
 class KubernetesMetadataCacheStatsTest < Test::Unit::TestCase
+  test 'watch stats' do
+    require 'lru_redux'
+    stats = KubernetesMetadata::Stats.new
+    stats.bump(:missed)
+    stats.bump(:deleted)
+    stats.bump(:deleted)
 
-… [old lines 23-24 not rendered in this diff view]
-    stats = KubernetesMetadata::Stats.new
-    stats.bump(:missed)
-    stats.bump(:deleted)
-    stats.bump(:deleted)
-
-    assert_equal("stats - deleted: 2, missed: 1", stats.to_s)
-  end
-
+    assert_equal('stats - deleted: 2, missed: 1', stats.to_s)
+  end
 end
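The assertion pins down the small contract the stats helper must satisfy: `bump` increments a named counter and `to_s` renders the counters sorted by name. One way to satisfy exactly that contract (a sketch, not the gem's actual `KubernetesMetadata::Stats` implementation):

```ruby
# A sketch that satisfies the asserted contract; not the gem's code.
class SketchStats
  def initialize
    @counts = Hash.new(0) # default each counter to zero
  end

  def bump(name)
    @counts[name] += 1
  end

  def to_s
    'stats - ' + @counts.sort.map { |k, v| "#{k}: #{v}" }.join(', ')
  end
end

stats = SketchStats.new
stats.bump(:missed)
stats.bump(:deleted)
stats.bump(:deleted)
puts stats # => stats - deleted: 2, missed: 1
```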
data/test/plugin/test_cache_strategy.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -19,175 +21,174 @@
 require_relative '../helper'
 
 class TestCacheStrategy
-… [old lines 22-40 not rendered in this diff view]
+  include KubernetesMetadata::CacheStrategy
+
+  def initialize
+    @stats = KubernetesMetadata::Stats.new
+    @cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @id_cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @namespace_cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @orphaned_namespace_name = '.orphaned'
+    @orphaned_namespace_id = 'orphaned'
+  end
+
+  attr_accessor :stats, :cache, :id_cache, :namespace_cache, :allow_orphans
+
+  def fetch_pod_metadata(_namespace_name, _pod_name)
+    {}
+  end
+
+  def fetch_namespace_metadata(_namespace_name)
+    {}
+  end
+
+  def log
+    logger = {}
+    def logger.trace?
+      true
     end
 
-    def log
-      logger = {}
-      def logger.trace?
-        true
-      end
-      def logger.trace(message)
-      end
-      logger
+    def logger.trace(message)
     end
-
+    logger
+  end
 end
 
 class KubernetesMetadataCacheStrategyTest < Test::Unit::TestCase
-… [old lines 56-66 not rendered in this diff view]
+  def setup
+    @strategy = TestCacheStrategy.new
+    @cache_key = 'some_long_container_id'
+    @namespace_name = 'some_namespace_name'
+    @namespace_uuid = 'some_namespace_uuid'
+    @pod_name = 'some_pod_name'
+    @pod_uuid = 'some_pod_uuid'
+    @time = Time.now
+    @pod_meta = { 'pod_id' => @pod_uuid, 'labels' => { 'meta' => 'pod' } }
+    @namespace_meta = { 'namespace_id' => @namespace_uuid, 'creation_timestamp' => @time.to_s }
+  end
+
+  test 'when cached metadata is found' do
+    exp = @pod_meta.merge(@namespace_meta)
+    exp.delete('creation_timestamp')
+    @strategy.id_cache[@cache_key] = {
+      pod_id: @pod_uuid,
+      namespace_id: @namespace_uuid
+    }
+    @strategy.cache[@pod_uuid] = @pod_meta
+    @strategy.namespace_cache[@namespace_uuid] = @namespace_meta
+    assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+  end
+
+  test 'when previously processed record for pod but metadata is not cached and can not be fetched' do
+    exp = {
+      'pod_id' => @pod_uuid,
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.id_cache[@cache_key] = {
+      pod_id: @pod_uuid,
+      namespace_id: @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, nil do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+      end
     end
-… [old lines 68-74 not rendered in this diff view]
-    }
-    @strategy.…
-
-    assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
+  end
+
+  test 'when metadata is not cached and is fetched' do
+    exp = @pod_meta.merge(@namespace_meta)
+    exp.delete('creation_timestamp')
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
-… [old lines 80-94 not rendered in this diff view]
+  end
+
+  test 'when metadata is not cached and pod is deleted and namespace metadata is fetched' do
+    # this is the case for a record from a deleted pod where no other
+    # records were read. using the container hash since that is all
+    # we ever will have and should allow us to process all the deleted
+    # pod records
+    exp = {
+      'pod_id' => @cache_key,
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
    end
-… [old lines 96-105 not rendered in this diff view]
+  end
+
+  test 'when metadata is not cached and pod is deleted and namespace is for a different namespace with the same name' do
+    # this is the case for a record from a deleted pod from a deleted namespace
+    # where new namespace was created with the same name
+    exp = {
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
-… [old lines 107-115 not rendered in this diff view]
-    }
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
-        assert_true(@strategy.id_cache.key?(@cache_key))
-      end
-    end
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+      end
     end
-… [old lines 124-136 not rendered in this diff view]
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and allowing orphans' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.allow_orphans = true
+    exp = {
+      'orphaned_namespace' => 'namespace',
+      'namespace_name' => '.orphaned',
+      'namespace_id' => 'orphaned'
+    }
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+      end
     end
-… [old lines 138-146 not rendered in this diff view]
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans for multiple records' do
+    # processing a batch of records with no meta. ideally we only hit the api server once
+    batch_miss_cache = {}
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+      end
     end
-… [old lines 148-164 not rendered in this diff view]
-  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans for multiple records' do
-    # processing a batch of records with no meta. ideally we only hit the api server once
-    batch_miss_cache = {}
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, {} do
-        assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
-      end
-    end
-    assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
-  end
-
-  test 'when metadata is not cached and no metadata can be fetched and allowing orphans for multiple records' do
-    # we should never see this since pod meta should not be retrievable
-    # unless the namespace exists
-    @strategy.allow_orphans = true
-    exp = {
-      'orphaned_namespace' => 'namespace',
-      'namespace_name' => '.orphaned',
-      'namespace_id' => 'orphaned'
-    }
-    batch_miss_cache = {}
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, {} do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
-      end
-    end
-    assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
+    assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and allowing orphans for multiple records' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.allow_orphans = true
+    exp = {
+      'orphaned_namespace' => 'namespace',
+      'namespace_name' => '.orphaned',
+      'namespace_id' => 'orphaned'
+    }
+    batch_miss_cache = {}
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+      end
     end
+    assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+  end
 end
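The harness above builds its caches with `LruRedux::TTL::ThreadSafeCache` from the lru_redux gem, constructed with a maximum size and a TTL in seconds. Using only the calls that actually appear in the diff (`[]=`, `[]`, `key?`), a quick usage sketch:

```ruby
# Usage sketch for the TTL-bounded LRU cache the tests construct above.
require 'lru_redux'

cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600) # max 100 entries, 1h TTL

cache['some_long_container_id'] = { 'pod_id' => 'some_pod_uuid' }
cache.key?('some_long_container_id') # => true
cache['some_long_container_id']      # => {"pod_id"=>"some_pod_uuid"}
# Entries older than the TTL expire; beyond 100 entries the least
# recently used are evicted, bounding both staleness and memory.
```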