fluent-plugin-kubernetes_metadata_filter 2.5.0 → 2.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,56 @@
1
+ # frozen_string_literal: true
2
+
3
+ #
4
+ # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
5
+ # Kubernetes metadata
6
+ #
7
+ # Copyright 2015 Red Hat, Inc.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ #
21
+ class KubernetesMetadataCacheStatsTest < Test::Unit::TestCase
22
+ include KubernetesMetadata::Util
23
+
24
+ def setup
25
+ @time_fields = ['time']
26
+ @internal_time = Time.now
27
+ end
28
+
29
+ test '#create_time_from_record when time is empty' do
30
+ record = { 'time' => ' ' }
31
+ assert_equal(@internal_time.to_i, create_time_from_record(record, @internal_time).to_i)
32
+ end
33
+ test '#create_time_from_record when time is nil' do
34
+ record = {}
35
+ assert_equal(@internal_time.to_i, create_time_from_record(record, @internal_time).to_i)
36
+ end
37
+
38
+ test '#create_time_from_record when time is an integer' do
39
+ exp_time = Time.now
40
+ record = { 'time' => exp_time.to_i }
41
+ assert_equal(exp_time.to_i, create_time_from_record(record, @internal_time).to_i)
42
+ end
43
+
44
+ test '#create_time_from_record when time is a string' do
45
+ exp_time = Time.now
46
+ record = { 'time' => exp_time.to_s }
47
+ assert_equal(exp_time.to_i, create_time_from_record(record, @internal_time).to_i)
48
+ end
49
+
50
+ test '#create_time_from_record when timefields include journal time fields' do
51
+ @time_fields = ['_SOURCE_REALTIME_TIMESTAMP']
52
+ exp_time = Time.now
53
+ record = { '_SOURCE_REALTIME_TIMESTAMP' => exp_time.to_i.to_s }
54
+ assert_equal(Time.at(exp_time.to_i / 1_000_000, exp_time.to_i % 1_000_000).to_i, create_time_from_record(record, @internal_time).to_i)
55
+ end
56
+ end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  #
2
4
  # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
3
5
  # Kubernetes metadata
@@ -17,166 +19,227 @@
17
19
  # limitations under the License.
18
20
  #
19
21
  require_relative '../helper'
20
- require 'ostruct'
21
22
  require_relative 'watch_test'
22
23
 
23
24
  class WatchNamespacesTestTest < WatchTest
25
+ include KubernetesMetadata::WatchNamespaces
26
+
27
+ setup do
28
+ @initial = {
29
+ kind: 'NamespaceList',
30
+ metadata: { resourceVersion: '123' },
31
+ items: [
32
+ {
33
+ metadata: {
34
+ name: 'initial',
35
+ uid: 'initial_uid'
36
+ }
37
+ },
38
+ {
39
+ metadata: {
40
+ name: 'modified',
41
+ uid: 'modified_uid'
42
+ }
43
+ }
44
+ ]
45
+ }
46
+
47
+ @created = {
48
+ type: 'CREATED',
49
+ object: {
50
+ metadata: {
51
+ name: 'created',
52
+ uid: 'created_uid'
53
+ }
54
+ }
55
+ }
56
+ @modified = {
57
+ type: 'MODIFIED',
58
+ object: {
59
+ metadata: {
60
+ name: 'foo',
61
+ uid: 'modified_uid'
62
+ }
63
+ }
64
+ }
65
+ @deleted = {
66
+ type: 'DELETED',
67
+ object: {
68
+ metadata: {
69
+ name: 'deleteme',
70
+ uid: 'deleted_uid'
71
+ }
72
+ }
73
+ }
74
+ @error = {
75
+ type: 'ERROR',
76
+ object: {
77
+ message: 'some error message'
78
+ }
79
+ }
80
+ @gone = {
81
+ type: 'ERROR',
82
+ object: {
83
+ code: 410,
84
+ kind: 'Status',
85
+ message: 'too old resource version: 123 (391079)',
86
+ metadata: {
87
+ name: 'gone',
88
+ namespace: 'gone',
89
+ uid: 'gone_uid'
90
+ },
91
+ reason: 'Gone'
92
+ }
93
+ }
94
+ end
24
95
 
25
- include KubernetesMetadata::WatchNamespaces
26
-
27
- setup do
28
- @initial = Kubeclient::Common::EntityList.new(
29
- 'NamespaceList',
30
- '123',
31
- [
32
- Kubeclient::Resource.new({
33
- 'metadata' => {
34
- 'name' => 'initial',
35
- 'uid' => 'initial_uid'
36
- }
37
- }),
38
- Kubeclient::Resource.new({
39
- 'metadata' => {
40
- 'name' => 'modified',
41
- 'uid' => 'modified_uid'
42
- }
43
- })
44
- ])
45
-
46
- @created = OpenStruct.new(
47
- type: 'CREATED',
48
- object: {
49
- 'metadata' => {
50
- 'name' => 'created',
51
- 'uid' => 'created_uid'
52
- }
53
- }
54
- )
55
- @modified = OpenStruct.new(
56
- type: 'MODIFIED',
57
- object: {
58
- 'metadata' => {
59
- 'name' => 'foo',
60
- 'uid' => 'modified_uid'
61
- }
62
- }
63
- )
64
- @deleted = OpenStruct.new(
65
- type: 'DELETED',
66
- object: {
67
- 'metadata' => {
68
- 'name' => 'deleteme',
69
- 'uid' => 'deleted_uid'
70
- }
71
- }
72
- )
73
- @error = OpenStruct.new(
74
- type: 'ERROR',
75
- object: {
76
- 'message' => 'some error message'
77
- }
78
- )
79
- end
80
-
81
- test 'namespace list caches namespaces' do
82
- @client.stub :get_namespaces, @initial do
96
+ test 'namespace list caches namespaces' do
97
+ @client.stub :get_namespaces, @initial do
98
+ process_namespace_watcher_notices(start_namespace_watch)
99
+ assert_equal(true, @namespace_cache.key?('initial_uid'))
100
+ assert_equal(true, @namespace_cache.key?('modified_uid'))
101
+ assert_equal(2, @stats[:namespace_cache_host_updates])
102
+ end
103
+ end
104
+
105
+ test 'namespace list caches namespaces and watch updates' do
106
+ orig_env_val = ENV['K8S_NODE_NAME']
107
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
108
+ @client.stub :get_namespaces, @initial do
109
+ @client.stub :watch_namespaces, [@modified] do
83
110
  process_namespace_watcher_notices(start_namespace_watch)
84
- assert_equal(true, @namespace_cache.key?('initial_uid'))
85
- assert_equal(true, @namespace_cache.key?('modified_uid'))
86
111
  assert_equal(2, @stats[:namespace_cache_host_updates])
112
+ assert_equal(1, @stats[:namespace_cache_watch_updates])
87
113
  end
88
114
  end
115
+ ENV['K8S_NODE_NAME'] = orig_env_val
116
+ end
89
117
 
90
- test 'namespace list caches namespaces and watch updates' do
91
- orig_env_val = ENV['K8S_NODE_NAME']
92
- ENV['K8S_NODE_NAME'] = 'aNodeName'
93
- @client.stub :get_namespaces, @initial do
94
- @client.stub :watch_namespaces, [@modified] do
95
- process_namespace_watcher_notices(start_namespace_watch)
96
- assert_equal(2, @stats[:namespace_cache_host_updates])
97
- assert_equal(1, @stats[:namespace_cache_watch_updates])
98
- end
99
- end
100
- ENV['K8S_NODE_NAME'] = orig_env_val
118
+ test 'namespace watch ignores CREATED' do
119
+ @client.stub :watch_namespaces, [@created] do
120
+ process_namespace_watcher_notices(start_namespace_watch)
121
+ assert_equal(false, @namespace_cache.key?('created_uid'))
122
+ assert_equal(1, @stats[:namespace_cache_watch_ignored])
101
123
  end
124
+ end
102
125
 
103
- test 'namespace watch ignores CREATED' do
104
- @client.stub :watch_namespaces, [@created] do
105
- process_namespace_watcher_notices(start_namespace_watch)
106
- assert_equal(false, @namespace_cache.key?('created_uid'))
107
- assert_equal(1, @stats[:namespace_cache_watch_ignored])
108
- end
126
+ test 'namespace watch ignores MODIFIED when info not in cache' do
127
+ @client.stub :watch_namespaces, [@modified] do
128
+ process_namespace_watcher_notices(start_namespace_watch)
129
+ assert_equal(false, @namespace_cache.key?('modified_uid'))
130
+ assert_equal(1, @stats[:namespace_cache_watch_misses])
109
131
  end
132
+ end
110
133
 
111
- test 'namespace watch ignores MODIFIED when info not in cache' do
112
- @client.stub :watch_namespaces, [@modified] do
113
- process_namespace_watcher_notices(start_namespace_watch)
114
- assert_equal(false, @namespace_cache.key?('modified_uid'))
115
- assert_equal(1, @stats[:namespace_cache_watch_misses])
116
- end
134
+ test 'namespace watch updates cache when MODIFIED is received and info is cached' do
135
+ @namespace_cache['modified_uid'] = {}
136
+ @client.stub :watch_namespaces, [@modified] do
137
+ process_namespace_watcher_notices(start_namespace_watch)
138
+ assert_equal(true, @namespace_cache.key?('modified_uid'))
139
+ assert_equal(1, @stats[:namespace_cache_watch_updates])
117
140
  end
141
+ end
118
142
 
119
- test 'namespace watch updates cache when MODIFIED is received and info is cached' do
120
- @namespace_cache['modified_uid'] = {}
121
- @client.stub :watch_namespaces, [@modified] do
122
- process_namespace_watcher_notices(start_namespace_watch)
123
- assert_equal(true, @namespace_cache.key?('modified_uid'))
124
- assert_equal(1, @stats[:namespace_cache_watch_updates])
143
+ test 'namespace watch ignores DELETED' do
144
+ @namespace_cache['deleted_uid'] = {}
145
+ @client.stub :watch_namespaces, [@deleted] do
146
+ process_namespace_watcher_notices(start_namespace_watch)
147
+ assert_equal(true, @namespace_cache.key?('deleted_uid'))
148
+ assert_equal(1, @stats[:namespace_cache_watch_deletes_ignored])
149
+ end
150
+ end
151
+
152
+ test 'namespace watch raises Fluent::UnrecoverableError when cannot re-establish connection to k8s API server' do
153
+ # Stub start_namespace_watch to simulate initial successful connection to API server
154
+ stub(self).start_namespace_watch
155
+ # Stub watch_namespaces to simulate not being able to set up watch connection to API server
156
+ stub(@client).watch_namespaces { raise }
157
+ @client.stub :get_namespaces, @initial do
158
+ assert_raise Fluent::UnrecoverableError do
159
+ set_up_namespace_thread
125
160
  end
126
161
  end
162
+ assert_equal(3, @stats[:namespace_watch_failures])
163
+ assert_equal(2, Thread.current[:namespace_watch_retry_count])
164
+ assert_equal(4, Thread.current[:namespace_watch_retry_backoff_interval])
165
+ assert_nil(@stats[:namespace_watch_error_type_notices])
166
+ end
127
167
 
128
- test 'namespace watch ignores DELETED' do
129
- @namespace_cache['deleted_uid'] = {}
130
- @client.stub :watch_namespaces, [@deleted] do
131
- process_namespace_watcher_notices(start_namespace_watch)
132
- assert_equal(true, @namespace_cache.key?('deleted_uid'))
133
- assert_equal(1, @stats[:namespace_cache_watch_deletes_ignored])
168
+ test 'namespace watch resets watch retry count when exceptions are encountered and connection to k8s API server is re-established' do
169
+ @client.stub :get_namespaces, @initial do
170
+ @client.stub :watch_namespaces, [[@created, @exception_raised]] do
171
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
172
+ # no unrecoverable error was thrown during this period of time.
173
+ assert_raise Timeout::Error.new('execution expired') do
174
+ Timeout.timeout(3) do
175
+ set_up_namespace_thread
176
+ end
177
+ end
178
+ assert_operator(@stats[:namespace_watch_failures], :>=, 3)
179
+ assert_operator(Thread.current[:namespace_watch_retry_count], :<=, 1)
180
+ assert_operator(Thread.current[:namespace_watch_retry_backoff_interval], :<=, 1)
134
181
  end
135
182
  end
183
+ end
136
184
 
137
- test 'namespace watch retries when exceptions are encountered' do
138
- @client.stub :get_namespaces, @initial do
139
- @client.stub :watch_namespaces, [[@created, @exception_raised]] do
140
- assert_raise Fluent::UnrecoverableError do
185
+ test 'namespace watch resets watch retry count when error is received and connection to k8s API server is re-established' do
186
+ @client.stub :get_namespaces, @initial do
187
+ @client.stub :watch_namespaces, [@error] do
188
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
189
+ # no unrecoverable error was thrown during this period of time.
190
+ assert_raise Timeout::Error.new('execution expired') do
191
+ Timeout.timeout(3) do
141
192
  set_up_namespace_thread
142
193
  end
143
- assert_equal(3, @stats[:namespace_watch_failures])
144
- assert_equal(2, Thread.current[:namespace_watch_retry_count])
145
- assert_equal(4, Thread.current[:namespace_watch_retry_backoff_interval])
146
- assert_nil(@stats[:namespace_watch_error_type_notices])
147
194
  end
195
+ assert_operator(@stats[:namespace_watch_failures], :>=, 3)
196
+ assert_operator(Thread.current[:namespace_watch_retry_count], :<=, 1)
197
+ assert_operator(Thread.current[:namespace_watch_retry_backoff_interval], :<=, 1)
148
198
  end
149
199
  end
200
+ end
150
201
 
151
- test 'namespace watch retries when error is received' do
152
- @client.stub :get_namespaces, @initial do
153
- @client.stub :watch_namespaces, [@error] do
154
- assert_raise Fluent::UnrecoverableError do
202
+ test 'namespace watch continues after retries succeed' do
203
+ @client.stub :get_namespaces, @initial do
204
+ @client.stub :watch_namespaces, [@modified, @error, @modified] do
205
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
206
+ # no unrecoverable error was thrown during this period of time.
207
+ assert_raise Timeout::Error.new('execution expired') do
208
+ Timeout.timeout(3) do
155
209
  set_up_namespace_thread
156
210
  end
157
- assert_equal(3, @stats[:namespace_watch_failures])
158
- assert_equal(2, Thread.current[:namespace_watch_retry_count])
159
- assert_equal(4, Thread.current[:namespace_watch_retry_backoff_interval])
160
- assert_equal(3, @stats[:namespace_watch_error_type_notices])
161
211
  end
212
+ assert_operator(@stats[:namespace_watch_failures], :>=, 3)
213
+ assert_operator(Thread.current[:namespace_watch_retry_count], :<=, 1)
214
+ assert_operator(Thread.current[:namespace_watch_retry_backoff_interval], :<=, 1)
215
+ assert_operator(@stats[:namespace_watch_error_type_notices], :>=, 3)
162
216
  end
163
217
  end
218
+ end
164
219
 
165
- test 'namespace watch continues after retries succeed' do
166
- @client.stub :get_namespaces, @initial do
167
- @client.stub :watch_namespaces, [@modified, @error, @modified] do
168
- # Force the infinite watch loop to exit after 3 seconds. Verifies that
169
- # no unrecoverable error was thrown during this period of time.
170
- assert_raise Timeout::Error.new('execution expired') do
171
- Timeout.timeout(3) do
172
- set_up_namespace_thread
173
- end
220
+ test 'namespace watch raises a GoneError when a 410 Gone error is received' do
221
+ @cache['gone_uid'] = {}
222
+ @client.stub :watch_namespaces, [@gone] do
223
+ assert_raise KubernetesMetadata::Common::GoneError do
224
+ process_namespace_watcher_notices(start_namespace_watch)
225
+ end
226
+ assert_equal(1, @stats[:namespace_watch_gone_notices])
227
+ end
228
+ end
229
+
230
+ test 'namespace watch retries when 410 Gone errors are encountered' do
231
+ @client.stub :get_namespaces, @initial do
232
+ @client.stub :watch_namespaces, [@created, @gone, @modified] do
233
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
234
+ # no unrecoverable error was thrown during this period of time.
235
+ assert_raise Timeout::Error.new('execution expired') do
236
+ Timeout.timeout(3) do
237
+ set_up_namespace_thread
174
238
  end
175
- assert_operator(@stats[:namespace_watch_failures], :>=, 3)
176
- assert_operator(Thread.current[:namespace_watch_retry_count], :<=, 1)
177
- assert_operator(Thread.current[:namespace_watch_retry_backoff_interval], :<=, 1)
178
- assert_operator(@stats[:namespace_watch_error_type_notices], :>=, 3)
179
239
  end
240
+ assert_operator(@stats[:namespace_watch_gone_errors], :>=, 3)
241
+ assert_operator(@stats[:namespace_watch_gone_notices], :>=, 3)
180
242
  end
181
243
  end
244
+ end
182
245
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  #
2
4
  # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
3
5
  # Kubernetes metadata
@@ -17,248 +19,326 @@
17
19
  # limitations under the License.
18
20
  #
19
21
  require_relative '../helper'
20
- require 'ostruct'
21
22
  require_relative 'watch_test'
22
23
 
23
24
  class DefaultPodWatchStrategyTest < WatchTest
25
+ include KubernetesMetadata::WatchPods
24
26
 
25
- include KubernetesMetadata::WatchPods
26
-
27
- setup do
28
- @initial = Kubeclient::Common::EntityList.new(
29
- 'PodList',
30
- '123',
31
- [
32
- Kubeclient::Resource.new({
33
- 'metadata' => {
34
- 'name' => 'initial',
35
- 'namespace' => 'initial_ns',
36
- 'uid' => 'initial_uid',
37
- 'labels' => {},
38
- },
39
- 'spec' => {
40
- 'nodeName' => 'aNodeName',
41
- 'containers' => [{
42
- 'name' => 'foo',
43
- 'image' => 'bar',
44
- }, {
45
- 'name' => 'bar',
46
- 'image' => 'foo',
47
- }]
48
- }
49
- }),
50
- Kubeclient::Resource.new({
51
- 'metadata' => {
52
- 'name' => 'modified',
53
- 'namespace' => 'create',
54
- 'uid' => 'modified_uid',
55
- 'labels' => {},
56
- },
57
- 'spec' => {
58
- 'nodeName' => 'aNodeName',
59
- 'containers' => [{
60
- 'name' => 'foo',
61
- 'image' => 'bar',
62
- }, {
63
- 'name' => 'bar',
64
- 'image' => 'foo',
65
- }]
66
- }
67
- }),
68
- ])
69
- @created = OpenStruct.new(
70
- type: 'CREATED',
71
- object: {
72
- 'metadata' => {
73
- 'name' => 'created',
74
- 'namespace' => 'create',
75
- 'uid' => 'created_uid',
76
- 'labels' => {},
77
- },
78
- 'spec' => {
79
- 'nodeName' => 'aNodeName',
80
- 'containers' => [{
81
- 'name' => 'foo',
82
- 'image' => 'bar',
83
- }, {
84
- 'name' => 'bar',
85
- 'image' => 'foo',
86
- }]
27
+ setup do
28
+ @initial = {
29
+ kind: 'PodList',
30
+ metadata: { resourceVersion: '123' },
31
+ items: [
32
+ {
33
+ metadata: {
34
+ name: 'initial',
35
+ namespace: 'initial_ns',
36
+ uid: 'initial_uid',
37
+ labels: {}
38
+ },
39
+ spec: {
40
+ nodeName: 'aNodeName',
41
+ containers: [{
42
+ name: 'foo',
43
+ image: 'bar'
44
+ }, {
45
+ name: 'bar',
46
+ image: 'foo'
47
+ }]
48
+ },
49
+ status: {
50
+ podIP: '172.17.0.8'
51
+ }
52
+ },
53
+ {
54
+ metadata: {
55
+ name: 'modified',
56
+ namespace: 'create',
57
+ uid: 'modified_uid',
58
+ labels: {}
59
+ },
60
+ spec: {
61
+ nodeName: 'aNodeName',
62
+ containers: [{
63
+ name: 'foo',
64
+ image: 'bar'
65
+ }, {
66
+ name: 'bar',
67
+ image: 'foo'
68
+ }]
69
+ },
70
+ status: {
71
+ podIP: '172.17.0.8'
72
+ }
73
+ }
74
+ ]
75
+ }
76
+ @created = {
77
+ type: 'CREATED',
78
+ object: {
79
+ metadata: {
80
+ name: 'created',
81
+ namespace: 'create',
82
+ uid: 'created_uid',
83
+ resourceVersion: '122',
84
+ labels: {}
85
+ },
86
+ spec: {
87
+ nodeName: 'aNodeName',
88
+ containers: [{
89
+ name: 'foo',
90
+ image: 'bar'
91
+ }, {
92
+ name: 'bar',
93
+ image: 'foo'
94
+ }]
95
+ },
96
+ status: {
97
+ podIP: '172.17.0.8'
98
+ }
99
+ }
100
+ }
101
+ @modified = {
102
+ type: 'MODIFIED',
103
+ object: {
104
+ metadata: {
105
+ name: 'foo',
106
+ namespace: 'modified',
107
+ uid: 'modified_uid',
108
+ resourceVersion: '123',
109
+ labels: {}
110
+ },
111
+ spec: {
112
+ nodeName: 'aNodeName',
113
+ containers: [{
114
+ name: 'foo',
115
+ image: 'bar'
116
+ }, {
117
+ name: 'bar',
118
+ image: 'foo'
119
+ }]
120
+ },
121
+ status: {
122
+ podIP: '172.17.0.8',
123
+ containerStatuses: [
124
+ {
125
+ name: 'fabric8-console-container',
126
+ state: {
127
+ running: {
128
+ startedAt: '2015-05-08T09:22:44Z'
129
+ }
130
+ },
131
+ lastState: {},
132
+ ready: true,
133
+ restartCount: 0,
134
+ image: 'fabric8/hawtio-kubernetes:latest',
135
+ imageID: 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
136
+ containerID: 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
87
137
  }
88
- }
89
- )
90
- @modified = OpenStruct.new(
91
- type: 'MODIFIED',
92
- object: {
93
- 'metadata' => {
94
- 'name' => 'foo',
95
- 'namespace' => 'modified',
96
- 'uid' => 'modified_uid',
97
- 'labels' => {},
98
- },
99
- 'spec' => {
100
- 'nodeName' => 'aNodeName',
101
- 'containers' => [{
102
- 'name' => 'foo',
103
- 'image' => 'bar',
104
- }, {
105
- 'name' => 'bar',
106
- 'image' => 'foo',
107
- }]
108
- },
109
- 'status' => {
110
- 'containerStatuses' => [
111
- {
112
- 'name' => 'fabric8-console-container',
113
- 'state' => {
114
- 'running' => {
115
- 'startedAt' => '2015-05-08T09:22:44Z'
116
- }
117
- },
118
- 'lastState' => {},
119
- 'ready' => true,
120
- 'restartCount' => 0,
121
- 'image' => 'fabric8/hawtio-kubernetes:latest',
122
- 'imageID' => 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
123
- 'containerID' => 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
124
- }
125
- ]
126
- }
127
- }
128
- )
129
- @deleted = OpenStruct.new(
130
- type: 'DELETED',
131
- object: {
132
- 'metadata' => {
133
- 'name' => 'deleteme',
134
- 'namespace' => 'deleted',
135
- 'uid' => 'deleted_uid'
136
- }
137
- }
138
- )
139
- @error = OpenStruct.new(
140
- type: 'ERROR',
141
- object: {
142
- 'message' => 'some error message'
143
- }
144
- )
145
- end
138
+ ]
139
+ }
140
+ }
141
+ }
142
+ @deleted = {
143
+ type: 'DELETED',
144
+ object: {
145
+ metadata: {
146
+ name: 'deleteme',
147
+ namespace: 'deleted',
148
+ uid: 'deleted_uid',
149
+ resourceVersion: '124'
150
+ }
151
+ }
152
+ }
153
+ @error = {
154
+ type: 'ERROR',
155
+ object: {
156
+ message: 'some error message'
157
+ }
158
+ }
159
+ @gone = {
160
+ type: 'ERROR',
161
+ object: {
162
+ code: 410,
163
+ kind: 'Status',
164
+ message: 'too old resource version: 123 (391079)',
165
+ metadata: {
166
+ name: 'gone',
167
+ namespace: 'gone',
168
+ uid: 'gone_uid'
169
+ },
170
+ reason: 'Gone'
171
+ }
172
+ }
173
+ end
146
174
 
147
- test 'pod list caches pods' do
148
- orig_env_val = ENV['K8S_NODE_NAME']
149
- ENV['K8S_NODE_NAME'] = 'aNodeName'
150
- @client.stub :get_pods, @initial do
175
+ test 'pod list caches pods' do
176
+ orig_env_val = ENV['K8S_NODE_NAME']
177
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
178
+ @client.stub :get_pods, @initial do
179
+ process_pod_watcher_notices(start_pod_watch)
180
+ assert_equal(true, @cache.key?('initial_uid'))
181
+ assert_equal(true, @cache.key?('modified_uid'))
182
+ assert_equal(2, @stats[:pod_cache_host_updates])
183
+ end
184
+ ENV['K8S_NODE_NAME'] = orig_env_val
185
+ end
186
+
187
+ test 'pod list caches pods and watch updates' do
188
+ orig_env_val = ENV['K8S_NODE_NAME']
189
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
190
+ @client.stub :get_pods, @initial do
191
+ @client.stub :watch_pods, [@modified] do
151
192
  process_pod_watcher_notices(start_pod_watch)
152
- assert_equal(true, @cache.key?('initial_uid'))
153
- assert_equal(true, @cache.key?('modified_uid'))
154
193
  assert_equal(2, @stats[:pod_cache_host_updates])
194
+ assert_equal(1, @stats[:pod_cache_watch_updates])
155
195
  end
156
- ENV['K8S_NODE_NAME'] = orig_env_val
157
196
  end
197
+ ENV['K8S_NODE_NAME'] = orig_env_val
198
+ assert_equal('123', @last_seen_resource_version) # from @modified
199
+ end
158
200
 
159
- test 'pod list caches pods and watch updates' do
160
- orig_env_val = ENV['K8S_NODE_NAME']
161
- ENV['K8S_NODE_NAME'] = 'aNodeName'
162
- @client.stub :get_pods, @initial do
163
- @client.stub :watch_pods, [@modified] do
164
- process_pod_watcher_notices(start_pod_watch)
165
- assert_equal(2, @stats[:pod_cache_host_updates])
166
- assert_equal(1, @stats[:pod_cache_watch_updates])
167
- end
201
+ test 'pod watch notice ignores CREATED' do
202
+ @client.stub :get_pods, @initial do
203
+ @client.stub :watch_pods, [@created] do
204
+ process_pod_watcher_notices(start_pod_watch)
205
+ assert_equal(false, @cache.key?('created_uid'))
206
+ assert_equal(1, @stats[:pod_cache_watch_ignored])
168
207
  end
169
- ENV['K8S_NODE_NAME'] = orig_env_val
170
208
  end
209
+ end
171
210
 
172
- test 'pod watch notice ignores CREATED' do
173
- @client.stub :get_pods, @initial do
174
- @client.stub :watch_pods, [@created] do
175
- process_pod_watcher_notices(start_pod_watch)
176
- assert_equal(false, @cache.key?('created_uid'))
177
- assert_equal(1, @stats[:pod_cache_watch_ignored])
178
- end
179
- end
211
+ test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
212
+ @client.stub :watch_pods, [@modified] do
213
+ process_pod_watcher_notices(start_pod_watch)
214
+ assert_equal(false, @cache.key?('modified_uid'))
215
+ assert_equal(1, @stats[:pod_cache_watch_misses])
180
216
  end
217
+ end
181
218
 
182
- test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
183
- @client.stub :watch_pods, [@modified] do
184
- process_pod_watcher_notices(start_pod_watch)
185
- assert_equal(false, @cache.key?('modified_uid'))
186
- assert_equal(1, @stats[:pod_cache_watch_misses])
187
- end
219
+ test 'pod MODIFIED cached when hostname matches' do
220
+ orig_env_val = ENV['K8S_NODE_NAME']
221
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
222
+ @client.stub :watch_pods, [@modified] do
223
+ process_pod_watcher_notices(start_pod_watch)
224
+ assert_equal(true, @cache.key?('modified_uid'))
225
+ assert_equal(1, @stats[:pod_cache_host_updates])
188
226
  end
227
+ ENV['K8S_NODE_NAME'] = orig_env_val
228
+ end
189
229
 
190
- test 'pod MODIFIED cached when hostname matches' do
191
- orig_env_val = ENV['K8S_NODE_NAME']
192
- ENV['K8S_NODE_NAME'] = 'aNodeName'
193
- @client.stub :watch_pods, [@modified] do
194
- process_pod_watcher_notices(start_pod_watch)
195
- assert_equal(true, @cache.key?('modified_uid'))
196
- assert_equal(1, @stats[:pod_cache_host_updates])
197
- end
198
- ENV['K8S_NODE_NAME'] = orig_env_val
230
+ test 'pod watch notice is updated when MODIFIED is received' do
231
+ @cache['modified_uid'] = {}
232
+ @client.stub :watch_pods, [@modified] do
233
+ process_pod_watcher_notices(start_pod_watch)
234
+ assert_equal(true, @cache.key?('modified_uid'))
235
+ assert_equal(1, @stats[:pod_cache_watch_updates])
199
236
  end
237
+ end
200
238
 
201
- test 'pod watch notice is updated when MODIFIED is received' do
202
- @cache['modified_uid'] = {}
203
- @client.stub :watch_pods, [@modified] do
204
- process_pod_watcher_notices(start_pod_watch)
205
- assert_equal(true, @cache.key?('modified_uid'))
206
- assert_equal(1, @stats[:pod_cache_watch_updates])
239
+ test 'pod watch notice is ignored when delete is received' do
240
+ @cache['deleted_uid'] = {}
241
+ @client.stub :watch_pods, [@deleted] do
242
+ process_pod_watcher_notices(start_pod_watch)
243
+ assert_equal(true, @cache.key?('deleted_uid'))
244
+ assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
245
+ end
246
+ end
247
+
248
+ test 'pod watch raises Fluent::UnrecoverableError when cannot re-establish connection to k8s API server' do
249
+ # Stub start_pod_watch to simulate initial successful connection to API server
250
+ stub(self).start_pod_watch
251
+ # Stub watch_pods to simluate not being able to set up watch connection to API server
252
+ stub(@client).watch_pods { raise }
253
+ @client.stub :get_pods, @initial do
254
+ assert_raise Fluent::UnrecoverableError do
255
+ set_up_pod_thread
207
256
  end
208
257
  end
258
+ assert_equal(3, @stats[:pod_watch_failures])
259
+ assert_equal(2, Thread.current[:pod_watch_retry_count])
260
+ assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
261
+ assert_nil(@stats[:pod_watch_error_type_notices])
262
+ end
209
263
 
210
- test 'pod watch notice is ignored when delete is received' do
211
- @cache['deleted_uid'] = {}
212
- @client.stub :watch_pods, [@deleted] do
213
- process_pod_watcher_notices(start_pod_watch)
214
- assert_equal(true, @cache.key?('deleted_uid'))
215
- assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
264
+ test 'pod watch resets watch retry count when exceptions are encountered and connection to k8s API server is re-established' do
265
+ @client.stub :get_pods, @initial do
266
+ @client.stub :watch_pods, [[@created, @exception_raised]] do
267
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
268
+ # no unrecoverable error was thrown during this period of time.
269
+ assert_raise Timeout::Error.new('execution expired') do
270
+ Timeout.timeout(3) do
271
+ set_up_pod_thread
272
+ end
273
+ end
274
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
275
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
276
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
216
277
  end
217
278
  end
279
+ end
218
280
 
219
- test 'pod watch retries when exceptions are encountered' do
220
- @client.stub :get_pods, @initial do
221
- @client.stub :watch_pods, [[@created, @exception_raised]] do
222
- assert_raise Fluent::UnrecoverableError do
281
+ test 'pod watch resets watch retry count when error is received and connection to k8s API server is re-established' do
282
+ @client.stub :get_pods, @initial do
283
+ @client.stub :watch_pods, [@error] do
284
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
285
+ # no unrecoverable error was thrown during this period of time.
286
+ assert_raise Timeout::Error.new('execution expired') do
287
+ Timeout.timeout(3) do
223
288
  set_up_pod_thread
224
289
  end
225
- assert_equal(3, @stats[:pod_watch_failures])
226
- assert_equal(2, Thread.current[:pod_watch_retry_count])
227
- assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
228
- assert_nil(@stats[:pod_watch_error_type_notices])
229
290
  end
291
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
292
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
293
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
294
+ assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
230
295
  end
231
296
  end
297
+ end
232
298
 
233
- test 'pod watch retries when error is received' do
234
- @client.stub :get_pods, @initial do
235
- @client.stub :watch_pods, [@error] do
236
- assert_raise Fluent::UnrecoverableError do
299
+ test 'pod watch continues after retries succeed' do
300
+ @client.stub :get_pods, @initial do
301
+ @client.stub :watch_pods, [@modified, @error, @modified] do
302
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
303
+ # no unrecoverable error was thrown during this period of time.
304
+ assert_raise Timeout::Error.new('execution expired') do
305
+ Timeout.timeout(3) do
237
306
  set_up_pod_thread
238
307
  end
239
- assert_equal(3, @stats[:pod_watch_failures])
240
- assert_equal(2, Thread.current[:pod_watch_retry_count])
241
- assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
242
- assert_equal(3, @stats[:pod_watch_error_type_notices])
243
308
  end
309
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
310
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
311
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
312
+ assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
313
+ end
314
+ end
315
+ end
316
+
317
+ test 'pod watch raises a GoneError when a 410 Gone error is received' do
318
+ @cache['gone_uid'] = {}
319
+ @client.stub :watch_pods, [@gone] do
320
+ @last_seen_resource_version = '100'
321
+ assert_raise KubernetesMetadata::Common::GoneError do
322
+ process_pod_watcher_notices(start_pod_watch)
244
323
  end
324
+ assert_equal(1, @stats[:pod_watch_gone_notices])
325
+ assert_nil @last_seen_resource_version # forced restart
245
326
  end
327
+ end
246
328
 
247
- test 'pod watch continues after retries succeed' do
248
- @client.stub :get_pods, @initial do
249
- @client.stub :watch_pods, [@modified, @error, @modified] do
250
- # Force the infinite watch loop to exit after 3 seconds. Verifies that
251
- # no unrecoverable error was thrown during this period of time.
252
- assert_raise Timeout::Error.new('execution expired') do
253
- Timeout.timeout(3) do
254
- set_up_pod_thread
255
- end
329
+ test 'pod watch retries when 410 Gone errors are encountered' do
330
+ @client.stub :get_pods, @initial do
331
+ @client.stub :watch_pods, [@created, @gone, @modified] do
332
+ # Force the infinite watch loop to exit after 3 seconds because the code sleeps 3 times.
333
+ # Verifies that no unrecoverable error was thrown during this period of time.
334
+ assert_raise Timeout::Error.new('execution expired') do
335
+ Timeout.timeout(3) do
336
+ set_up_pod_thread
256
337
  end
257
- assert_operator(@stats[:pod_watch_failures], :>=, 3)
258
- assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
259
- assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
260
- assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
261
338
  end
339
+ assert_operator(@stats[:pod_watch_gone_errors], :>=, 3)
340
+ assert_operator(@stats[:pod_watch_gone_notices], :>=, 3)
262
341
  end
263
342
  end
343
+ end
264
344
  end