fluent-plugin-kubernetes_metadata_filter 2.5.0 → 2.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.circleci/config.yml +7 -7
- data/.rubocop.yml +57 -0
- data/Gemfile +4 -2
- data/Gemfile.lock +55 -47
- data/README.md +4 -2
- data/Rakefile +15 -11
- data/fluent-plugin-kubernetes_metadata_filter.gemspec +25 -27
- data/lib/fluent/plugin/filter_kubernetes_metadata.rb +110 -121
- data/lib/fluent/plugin/kubernetes_metadata_cache_strategy.rb +22 -18
- data/lib/fluent/plugin/kubernetes_metadata_common.rb +44 -63
- data/lib/fluent/plugin/kubernetes_metadata_stats.rb +6 -6
- data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb +68 -0
- data/lib/fluent/plugin/kubernetes_metadata_util.rb +53 -0
- data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb +75 -59
- data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb +92 -64
- data/test/helper.rb +20 -2
- data/test/plugin/test_cache_stats.rb +10 -13
- data/test/plugin/test_cache_strategy.rb +158 -160
- data/test/plugin/test_filter_kubernetes_metadata.rb +366 -346
- data/test/plugin/test_utils.rb +56 -0
- data/test/plugin/test_watch_namespaces.rb +188 -125
- data/test/plugin/test_watch_pods.rb +282 -202
- data/test/plugin/watch_test.rb +14 -15
- metadata +49 -66
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -16,11 +18,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+# TODO: this is mostly copy-paste from kubernetes_metadata_watch_namespaces.rb unify them
 require_relative 'kubernetes_metadata_common'
 
 module KubernetesMetadata
   module WatchPods
-
     include ::KubernetesMetadata::Common
 
     def set_up_pod_thread
@@ -28,6 +30,7 @@ module KubernetesMetadata
       # Fluent:ConfigError, so that users can inspect potential errors in
       # the configuration.
       pod_watcher = start_pod_watch
+
       Thread.current[:pod_watch_retry_backoff_interval] = @watch_retry_interval
       Thread.current[:pod_watch_retry_count] = 0
 
@@ -35,42 +38,47 @@ module KubernetesMetadata
       # processing will be swallowed and retried. These failures /
       # exceptions could be caused by Kubernetes API being temporarily
       # down. We assume the configuration is correct at this point.
  [old lines 38-65 removed - content not rendered in this diff view]
+      loop do
+        pod_watcher ||= get_pods_and_start_watcher
+        process_pod_watcher_notices(pod_watcher)
+      rescue GoneError => e
+        # Expected error. Quietly go back through the loop in order to
+        # start watching from the latest resource versions
+        @stats.bump(:pod_watch_gone_errors)
+        log.info('410 Gone encountered. Restarting pod watch to reset resource versions.', e)
+        pod_watcher = nil
+      rescue StandardError => e
+        @stats.bump(:pod_watch_failures)
+        if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
+          # Instead of raising exceptions and crashing Fluentd, swallow
+          # the exception and reset the watcher.
+          log.info(
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Sleeping for ' \
+            "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
+            'seconds and resetting the pod watcher.', e
+          )
+          sleep(Thread.current[:pod_watch_retry_backoff_interval])
+          Thread.current[:pod_watch_retry_count] += 1
+          Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
+          pod_watcher = nil
+        else
+          # Since retries failed for many times, log as errors instead
+          # of info and raise exceptions and trigger Fluentd to restart.
+          message =
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Retried ' \
+            "#{@watch_retry_max_times} times yet still failing. Restarting."
+          log.error(message, e)
+          raise Fluent::UnrecoverableError, message
         end
       end
     end
 
     def start_pod_watch
       get_pods_and_start_watcher
-    rescue
-      message =
+    rescue StandardError => e
+      message = 'start_pod_watch: Exception encountered setting up pod watch ' \
                 "from Kubernetes API #{@apiVersion} endpoint " \
                 "#{@kubernetes_url}: #{e.message}"
       message += " (#{e.response})" if e.respond_to?(:response)
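The rewritten watch loop above retries transient failures with exponential backoff: each failed attempt sleeps for the current interval, then multiplies it by `@watch_retry_exponential_backoff_base`, until `@watch_retry_max_times` is exceeded and a `Fluent::UnrecoverableError` forces a restart. A minimal sketch of how that sleep schedule grows, assuming for illustration a 1 second starting interval, base 2 and 10 retries (the real values come from the plugin's `watch_retry_*` configuration parameters):

# Illustration only: growth of the retry back-off used in the loop above.
# The 1s / 2 / 10 values are assumptions, not the plugin's documented defaults.
interval = 1.0
base = 2
max_retries = 10
(1..max_retries).each do |attempt|
  puts "retry #{attempt}: sleep #{interval}s"
  interval *= base # same update as in the rescue branch above
end
# => retry 1: sleep 1.0s ... retry 10: sleep 512.0s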
@@ -83,19 +91,27 @@
     # from that resourceVersion.
     def get_pods_and_start_watcher
       options = {
-        resource_version: '0'
+        resource_version: '0' # Fetch from API server cache instead of etcd quorum read
       }
       if ENV['K8S_NODE_NAME']
         options[:field_selector] = 'spec.nodeName=' + ENV['K8S_NODE_NAME']
       end
  [old lines 91-95 removed - content not rendered in this diff view]
+      if @last_seen_resource_version
+        options[:resource_version] = @last_seen_resource_version
+      else
+        pods = @client.get_pods(options)
+        pods[:items].each do |pod|
+          cache_key = pod[:metadata][:uid]
+          @cache[cache_key] = parse_pod_metadata(pod)
+          @stats.bump(:pod_cache_host_updates)
+        end
+
+        # continue watching from most recent resourceVersion
+        options[:resource_version] = pods[:metadata][:resourceVersion]
       end
-
+
       watcher = @client.watch_pods(options)
+      reset_pod_watch_retry_stats
       watcher
     end
 
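`get_pods_and_start_watcher` now follows the usual Kubernetes list-then-watch pattern: on the first pass it lists pods (with `resource_version: '0'` so the list is served from the API server cache), primes the local metadata cache, and starts the watch at the returned `resourceVersion`; on later passes it resumes directly from `@last_seen_resource_version`. A condensed sketch of the same flow, with `client`, `cache` and `parse` standing in for the plugin's `@client`, `@cache` and `parse_pod_metadata` (illustrative names, not part of the plugin's API):

# Sketch of the list-then-watch flow above (names are illustrative).
def start_watch(client, cache, last_seen_version, parse)
  options = { resource_version: '0' } # list served from the API server cache
  options[:field_selector] = "spec.nodeName=#{ENV['K8S_NODE_NAME']}" if ENV['K8S_NODE_NAME']

  if last_seen_version
    options[:resource_version] = last_seen_version # resume where the last watch stopped
  else
    pods = client.get_pods(options) # initial list primes the cache
    pods[:items].each { |pod| cache[pod[:metadata][:uid]] = parse.call(pod) }
    options[:resource_version] = pods[:metadata][:resourceVersion]
  end

  client.watch_pods(options) # the watch only delivers events newer than that version
end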
@@ -109,34 +125,46 @@
     # Process a watcher notice and potentially raise an exception.
     def process_pod_watcher_notices(watcher)
       watcher.each do |notice|
  [old lines 112-129 removed - content not rendered in this diff view]
-        @stats.bump(:
-
+        # store version we processed to not reprocess it ... do not unset when there is no version in response
+        version = ( # TODO: replace with &.dig once we are on ruby 2.5+
+          notice[:object] && notice[:object][:metadata] && notice[:object][:metadata][:resourceVersion]
+        )
+        @last_seen_resource_version = version if version
+
+        case notice[:type]
+        when 'MODIFIED'
+          reset_pod_watch_retry_stats
+          cache_key = notice.dig(:object, :metadata, :uid)
+          cached = @cache[cache_key]
+          if cached
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_watch_updates)
+          elsif ENV['K8S_NODE_NAME'] == notice[:object][:spec][:nodeName]
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_host_updates)
+          else
+            @stats.bump(:pod_cache_watch_misses)
+          end
+        when 'DELETED'
+          reset_pod_watch_retry_stats
+          # ignore and let age out for cases where pods
+          # deleted but still processing logs
+          @stats.bump(:pod_cache_watch_delete_ignored)
+        when 'ERROR'
+          if notice[:object] && notice[:object][:code] == 410
+            @last_seen_resource_version = nil # requested resourceVersion was too old, need to reset
+            @stats.bump(:pod_watch_gone_notices)
+            raise GoneError
+          else
             @stats.bump(:pod_watch_error_type_notices)
-            message = notice[
+            message = notice[:object][:message] if notice[:object] && notice[:object][:message]
             raise "Error while watching pods: #{message}"
  [old lines 135-139 removed - content not rendered in this diff view]
+          end
+        else
+          reset_pod_watch_retry_stats
+          # Don't pay attention to creations, since the created pod may not
+          # end up on this node.
+          @stats.bump(:pod_cache_watch_ignored)
         end
       end
     end
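The new `TODO: replace with &.dig` comment refers to collapsing the chained nil guards into a single `dig` call once the supported Ruby baseline allows it. Assuming `notice` is a plain symbol-keyed Hash, as the surrounding code already treats it, the two forms below are equivalent when intermediate keys are missing (`Hash#dig` simply returns nil in that case):

# Equivalent lookups, assuming notice is a symbol-keyed Hash:
version = notice[:object] && notice[:object][:metadata] && notice[:object][:metadata][:resourceVersion]
version = notice.dig(:object, :metadata, :resourceVersion) # Hash#dig, Ruby >= 2.3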
data/test/helper.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -16,6 +18,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+require 'bundler/setup'
 require 'codeclimate-test-reporter'
 SimpleCov.start do
   formatter SimpleCov::Formatter::MultiFormatter.new [
@@ -31,8 +34,14 @@ require 'fileutils'
 require 'fluent/log'
 require 'fluent/test'
 require 'minitest/autorun'
-require 'webmock/test_unit'
 require 'vcr'
+require 'ostruct'
+require 'fluent/plugin/filter_kubernetes_metadata'
+require 'fluent/test/driver/filter'
+require 'kubeclient'
+
+require 'webmock/test_unit'
+WebMock.disable_net_connect!
 
 VCR.configure do |config|
   config.cassette_library_dir = 'test/cassettes'
@@ -58,7 +67,16 @@ def ipv6_enabled?
   begin
     TCPServer.open('::1', 0)
     true
-  rescue
+  rescue StandardError
     false
   end
 end
+
+# TEST_NAME='foo' ruby test_file.rb to run a single test case
+if ENV['TEST_NAME']
+  (class << Test::Unit::TestCase; self; end).prepend(Module.new do
+    def test(name)
+      super if name == ENV['TEST_NAME']
+    end
+  end)
+end
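The new `TEST_NAME` hook works by prepending a module onto `Test::Unit::TestCase`'s singleton class, so the class-level `test` helper only defines the case whose name matches the environment variable. A self-contained sketch of the same prepend trick, using a dummy `FakeTestCase` instead of Test::Unit (the names here are illustrative):

# Minimal illustration of filtering class-level `test` definitions via prepend.
class FakeTestCase
  @tests = {}
  class << self
    attr_reader :tests
    def test(name, &block)
      tests[name] = block # normally this would define a test method
    end
  end
end

ENV['TEST_NAME'] = 'only me'

(class << FakeTestCase; self; end).prepend(Module.new do
  def test(name, &block)
    super if name == ENV['TEST_NAME'] # silently drop everything else
  end
end)

FakeTestCase.test('skipped') { :never_registered }
FakeTestCase.test('only me') { :registered }
puts FakeTestCase.tests.keys.inspect # => ["only me"]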
data/test/plugin/test_cache_stats.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,20 +19,15 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require 'fluent/plugin/kubernetes_metadata_stats'
-require 'webmock/test_unit'
-WebMock.disable_net_connect!
 
 class KubernetesMetadataCacheStatsTest < Test::Unit::TestCase
  [old lines 25-30 removed - content not rendered in this diff view]
-      stats.bump(:deleted)
+  test 'watch stats' do
+    require 'lru_redux'
+    stats = KubernetesMetadata::Stats.new
+    stats.bump(:missed)
+    stats.bump(:deleted)
+    stats.bump(:deleted)
 
  [old lines 33-35 removed - content not rendered in this diff view]
+    assert_equal('stats - deleted: 2, missed: 1', stats.to_s)
+  end
 end
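The rewritten test pins down the whole surface the stats collector needs: `bump(key)` increments a named counter and `to_s` renders the counters sorted by name. A minimal stand-in that satisfies the assertion above (a sketch only, not the plugin's actual `KubernetesMetadata::Stats` implementation):

# Minimal counter object that would satisfy the 'watch stats' test above.
class MiniStats
  def initialize
    @counters = Hash.new(0)
  end

  def bump(name)
    @counters[name] += 1
  end

  def to_s
    'stats - ' + @counters.sort.map { |key, count| "#{key}: #{count}" }.join(', ')
  end
end

stats = MiniStats.new
stats.bump(:missed)
stats.bump(:deleted)
stats.bump(:deleted)
puts stats.to_s # => "stats - deleted: 2, missed: 1"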
data/test/plugin/test_cache_strategy.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,180 +19,176 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require_relative '../../lib/fluent/plugin/kubernetes_metadata_cache_strategy'
-require_relative '../../lib/fluent/plugin/kubernetes_metadata_stats'
-require 'lru_redux'
-require 'webmock/test_unit'
-WebMock.disable_net_connect!
 
 class TestCacheStrategy
  [old lines 27-35 removed - content not rendered in this diff view]
+  include KubernetesMetadata::CacheStrategy
+
+  def initialize
+    @stats = KubernetesMetadata::Stats.new
+    @cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @id_cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @namespace_cache = LruRedux::TTL::ThreadSafeCache.new(100, 3600)
+    @orphaned_namespace_name = '.orphaned'
+    @orphaned_namespace_id = 'orphaned'
+  end
+
+  attr_accessor :stats, :cache, :id_cache, :namespace_cache, :allow_orphans
+
+  def fetch_pod_metadata(_namespace_name, _pod_name)
+    {}
+  end
+
+  def fetch_namespace_metadata(_namespace_name)
+    {}
+  end
+
+  def log
+    logger = {}
+    def logger.trace?
+      true
     end
 
  [old lines 38-39 removed - content not rendered in this diff view]
-  def fetch_pod_metadata(namespace_name, pod_name)
+    def logger.trace(message)
     end
  [old lines 42-43 removed - content not rendered in this diff view]
-  end
-
-  def log
-    logger = {}
-    def logger.trace?
-      true
-    end
-    def logger.trace(message)
-    end
-    logger
-  end
-
+    logger
+  end
 end
 
 class KubernetesMetadataCacheStrategyTest < Test::Unit::TestCase
  [old lines 59-69 removed - content not rendered in this diff view]
+  def setup
+    @strategy = TestCacheStrategy.new
+    @cache_key = 'some_long_container_id'
+    @namespace_name = 'some_namespace_name'
+    @namespace_uuid = 'some_namespace_uuid'
+    @pod_name = 'some_pod_name'
+    @pod_uuid = 'some_pod_uuid'
+    @time = Time.now
+    @pod_meta = { 'pod_id' => @pod_uuid, 'labels' => { 'meta' => 'pod' } }
+    @namespace_meta = { 'namespace_id' => @namespace_uuid, 'creation_timestamp' => @time.to_s }
+  end
+
+  test 'when cached metadata is found' do
+    exp = @pod_meta.merge(@namespace_meta)
+    exp.delete('creation_timestamp')
+    @strategy.id_cache[@cache_key] = {
+      pod_id: @pod_uuid,
+      namespace_id: @namespace_uuid
+    }
+    @strategy.cache[@pod_uuid] = @pod_meta
+    @strategy.namespace_cache[@namespace_uuid] = @namespace_meta
+    assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+  end
+
+  test 'when previously processed record for pod but metadata is not cached and can not be fetched' do
+    exp = {
+      'pod_id' => @pod_uuid,
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.id_cache[@cache_key] = {
+      pod_id: @pod_uuid,
+      namespace_id: @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, nil do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+      end
     end
  [old lines 71-77 removed - content not rendered in this diff view]
-    }
-    @strategy.
-
-    assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
+  end
+
+  test 'when metadata is not cached and is fetched' do
+    exp = @pod_meta.merge(@namespace_meta)
+    exp.delete('creation_timestamp')
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
  [old lines 83-97 removed - content not rendered in this diff view]
+  end
+
+  test 'when metadata is not cached and pod is deleted and namespace metadata is fetched' do
+    # this is the case for a record from a deleted pod where no other
+    # records were read. using the container hash since that is all
+    # we ever will have and should allow us to process all the deleted
+    # pod records
+    exp = {
+      'pod_id' => @cache_key,
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
     end
  [old lines 99-111 removed - content not rendered in this diff view]
-    # this is the case for a record from a deleted pod where no other
-    # records were read. using the container hash since that is all
-    # we ever will have and should allow us to process all the deleted
-    # pod records
-    exp = {
-      'pod_id'=> @cache_key,
-      'namespace_id'=> @namespace_uuid
-    }
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, {}))
-        assert_true(@strategy.id_cache.key?(@cache_key))
-      end
-    end
-  end
-
-  test 'when metadata is not cached and pod is deleted and namespace is for a different namespace with the same name' do
-    # this is the case for a record from a deleted pod from a deleted namespace
-    # where new namespace was created with the same name
-    exp = {
-      'namespace_id'=> @namespace_uuid
-    }
-    @strategy.stub :fetch_pod_metadata, {} do
-      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
-        assert_equal(exp, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time - 1*86400, {}))
-        assert_true(@strategy.id_cache.key?(@cache_key))
-      end
-    end
+  end
+
+  test 'when metadata is not cached and pod is deleted and namespace is for a different namespace with the same name' do
+    # this is the case for a record from a deleted pod from a deleted namespace
+    # where new namespace was created with the same name
+    exp = {
+      'namespace_id' => @namespace_uuid
+    }
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, @namespace_meta do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+        assert_true(@strategy.id_cache.key?(@cache_key))
+      end
    end
  [old lines 141-149 removed - content not rendered in this diff view]
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+      end
     end
  [old lines 151-165 removed - content not rendered in this diff view]
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and allowing orphans' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.allow_orphans = true
+    exp = {
+      'orphaned_namespace' => 'namespace',
+      'namespace_name' => '.orphaned',
+      'namespace_id' => 'orphaned'
+    }
+    @strategy.stub :fetch_pod_metadata, @pod_meta do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time - 1 * 86_400, {}))
+      end
     end
  [old lines 167-175 removed - content not rendered in this diff view]
-    assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and not allowing orphans for multiple records' do
+    # processing a batch of records with no meta. ideally we only hit the api server once
+    batch_miss_cache = {}
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+      end
     end
  [old lines 178-194 removed - content not rendered in this diff view]
+    assert_equal({}, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+  end
+
+  test 'when metadata is not cached and no metadata can be fetched and allowing orphans for multiple records' do
+    # we should never see this since pod meta should not be retrievable
+    # unless the namespace exists
+    @strategy.allow_orphans = true
+    exp = {
+      'orphaned_namespace' => 'namespace',
+      'namespace_name' => '.orphaned',
+      'namespace_id' => 'orphaned'
+    }
+    batch_miss_cache = {}
+    @strategy.stub :fetch_pod_metadata, {} do
+      @strategy.stub :fetch_namespace_metadata, {} do
+        assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+      end
     end
+    assert_equal(exp, @strategy.get_pod_metadata(@cache_key, 'namespace', 'pod', @time, batch_miss_cache))
+  end
 end
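All of the rewritten cache-strategy tests isolate the strategy from the Kubernetes API by stubbing its fetchers with Minitest's `Object#stub` (from `minitest/mock`): inside the block the named method returns the supplied value, and the original method is restored afterwards. A small standalone example of the same pattern (the `Fetcher` class and values are illustrative, not taken from the plugin):

require 'minitest/autorun'
require 'minitest/mock'

# Standalone illustration of the stub pattern used in the tests above.
class Fetcher
  def fetch_pod_metadata(_namespace, _pod)
    raise 'would hit the API server'
  end
end

class StubPatternTest < Minitest::Test
  def test_stub_replaces_method_only_inside_the_block
    fetcher = Fetcher.new
    fetcher.stub :fetch_pod_metadata, { 'pod_id' => 'abc' } do
      assert_equal({ 'pod_id' => 'abc' }, fetcher.fetch_pod_metadata('ns', 'pod'))
    end
    assert_raises(RuntimeError) { fetcher.fetch_pod_metadata('ns', 'pod') }
  end
end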