fluent-plugin-kubernetes_metadata_filter 2.6.0 → 2.8.0
This diff shows the changes between publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- checksums.yaml +4 -4
- data/.circleci/config.yml +2 -2
- data/.rubocop.yml +57 -0
- data/Gemfile +4 -2
- data/Gemfile.lock +19 -17
- data/README.md +4 -2
- data/Rakefile +15 -11
- data/fluent-plugin-kubernetes_metadata_filter.gemspec +24 -23
- data/lib/fluent/plugin/filter_kubernetes_metadata.rb +82 -72
- data/lib/fluent/plugin/kubernetes_metadata_cache_strategy.rb +22 -18
- data/lib/fluent/plugin/kubernetes_metadata_common.rb +30 -29
- data/lib/fluent/plugin/kubernetes_metadata_stats.rb +6 -6
- data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb +68 -0
- data/lib/fluent/plugin/kubernetes_metadata_util.rb +53 -0
- data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb +65 -65
- data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb +69 -70
- data/test/helper.rb +5 -3
- data/test/plugin/test_cache_stats.rb +10 -10
- data/test/plugin/test_cache_strategy.rb +158 -157
- data/test/plugin/test_filter_kubernetes_metadata.rb +363 -344
- data/test/plugin/test_utils.rb +56 -0
- data/test/plugin/test_watch_namespaces.rb +191 -190
- data/test/plugin/test_watch_pods.rb +278 -267
- data/test/plugin/watch_test.rb +13 -7
- metadata +47 -43
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
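The hunks in this section appear to come from data/test/plugin/test_watch_pods.rb. The first hunk only adds the # frozen_string_literal: true magic comment at the top of the file (and, judging by the file list, the same comment lands in the other Ruby sources as well). As a minimal illustration, not code from the gem, the comment makes every string literal in the file immutable:

# frozen_string_literal: true

greeting = 'hello'
greeting.frozen?       # => true
greeting + ', world'   # fine: String#+ returns a new, unfrozen string
greeting << ', world'  # raises FrozenError: the literal itself cannot be mutated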
@@ -20,314 +22,323 @@ require_relative '../helper'
 require_relative 'watch_test'

 class DefaultPodWatchStrategyTest < WatchTest
+  include KubernetesMetadata::WatchPods

-
+  setup do
+    @initial = {
+      kind: 'PodList',
+      metadata: { resourceVersion: '123' },
+      items: [
+        {
+          metadata: {
+            name: 'initial',
+            namespace: 'initial_ns',
+            uid: 'initial_uid',
+            labels: {}
+          },
+          spec: {
+            nodeName: 'aNodeName',
+            containers: [{
+              name: 'foo',
+              image: 'bar'
+            }, {
+              name: 'bar',
+              image: 'foo'
+            }]
+          },
+          status: {
+            podIP: '172.17.0.8'
+          }
+        },
+        {
+          metadata: {
+            name: 'modified',
+            namespace: 'create',
+            uid: 'modified_uid',
+            labels: {}
+          },
+          spec: {
+            nodeName: 'aNodeName',
+            containers: [{
+              name: 'foo',
+              image: 'bar'
+            }, {
+              name: 'bar',
+              image: 'foo'
+            }]
+          },
+          status: {
+            podIP: '172.17.0.8'
+          }
+        }
+      ]
+    }
+    @created = {
+      type: 'CREATED',
+      object: {
+        metadata: {
+          name: 'created',
+          namespace: 'create',
+          uid: 'created_uid',
+          resourceVersion: '122',
+          labels: {}
+        },
+        spec: {
+          nodeName: 'aNodeName',
+          containers: [{
+            name: 'foo',
+            image: 'bar'
+          }, {
+            name: 'bar',
+            image: 'foo'
+          }]
+        },
+        status: {
+          podIP: '172.17.0.8'
+        }
+      }
+    }
+    @modified = {
+      type: 'MODIFIED',
+      object: {
+        metadata: {
+          name: 'foo',
+          namespace: 'modified',
+          uid: 'modified_uid',
+          resourceVersion: '123',
+          labels: {}
+        },
+        spec: {
+          nodeName: 'aNodeName',
+          containers: [{
+            name: 'foo',
+            image: 'bar'
+          }, {
+            name: 'bar',
+            image: 'foo'
+          }]
+        },
+        status: {
+          podIP: '172.17.0.8',
+          containerStatuses: [
+            {
+              name: 'fabric8-console-container',
+              state: {
+                running: {
+                  startedAt: '2015-05-08T09:22:44Z'
+                }
+              },
+              lastState: {},
+              ready: true,
+              restartCount: 0,
+              image: 'fabric8/hawtio-kubernetes:latest',
+              imageID: 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
+              containerID: 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
+            }
+          ]
+        }
+      }
+    }
+    @deleted = {
+      type: 'DELETED',
+      object: {
+        metadata: {
+          name: 'deleteme',
+          namespace: 'deleted',
+          uid: 'deleted_uid',
+          resourceVersion: '124'
+        }
+      }
+    }
+    @error = {
+      type: 'ERROR',
+      object: {
+        message: 'some error message'
+      }
+    }
+    @gone = {
+      type: 'ERROR',
+      object: {
+        code: 410,
+        kind: 'Status',
+        message: 'too old resource version: 123 (391079)',
+        metadata: {
+          name: 'gone',
+          namespace: 'gone',
+          uid: 'gone_uid'
+        },
+        reason: 'Gone'
+      }
+    }
+  end

-
-
-
-
-
-
-
-
-
-
-
-          },
-          spec: {
-            nodeName: 'aNodeName',
-            containers: [{
-              name: 'foo',
-              image: 'bar',
-            }, {
-              name: 'bar',
-              image: 'foo',
-            }]
-          }
-        },
-        {
-          metadata: {
-            name: 'modified',
-            namespace: 'create',
-            uid: 'modified_uid',
-            labels: {},
-          },
-          spec: {
-            nodeName: 'aNodeName',
-            containers: [{
-              name: 'foo',
-              image: 'bar',
-            }, {
-              name: 'bar',
-              image: 'foo',
-            }]
-          }
-        }
-      ]
-    }
-    @created = {
-      type: 'CREATED',
-      object: {
-        metadata: {
-          name: 'created',
-          namespace: 'create',
-          uid: 'created_uid',
-          resourceVersion: '122',
-          labels: {},
-        },
-        spec: {
-          nodeName: 'aNodeName',
-          containers: [{
-            name: 'foo',
-            image: 'bar',
-          }, {
-            name: 'bar',
-            image: 'foo',
-          }]
-        }
-      }
-    }
-    @modified = {
-      type: 'MODIFIED',
-      object: {
-        metadata: {
-          name: 'foo',
-          namespace: 'modified',
-          uid: 'modified_uid',
-          resourceVersion: '123',
-          labels: {},
-        },
-        spec: {
-          nodeName: 'aNodeName',
-          containers: [{
-            name: 'foo',
-            image: 'bar',
-          }, {
-            name: 'bar',
-            image: 'foo',
-          }]
-        },
-        status: {
-          containerStatuses: [
-            {
-              name: 'fabric8-console-container',
-              state: {
-                running: {
-                  startedAt: '2015-05-08T09:22:44Z'
-                }
-              },
-              lastState: {},
-              ready: true,
-              restartCount: 0,
-              image: 'fabric8/hawtio-kubernetes:latest',
-              imageID: 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
-              containerID: 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
-            }
-          ]
-        }
-      }
-    }
-    @deleted = {
-      type: 'DELETED',
-      object: {
-        metadata: {
-          name: 'deleteme',
-          namespace: 'deleted',
-          uid: 'deleted_uid',
-          resourceVersion: '124'
-        }
-      }
-    }
-    @error = {
-      type: 'ERROR',
-      object: {
-        message: 'some error message'
-      }
-    }
-    @gone = {
-      type: 'ERROR',
-      object: {
-        code: 410,
-        kind: 'Status',
-        message: 'too old resource version: 123 (391079)',
-        metadata: {
-          name: 'gone',
-          namespace: 'gone',
-          uid: 'gone_uid'
-        },
-        reason: 'Gone'
-      }
-    }
-  end
+  test 'pod list caches pods' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :get_pods, @initial do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('initial_uid'))
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(2, @stats[:pod_cache_host_updates])
+    end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+  end

-
-
-
-
+  test 'pod list caches pods and watch updates' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@modified] do
         process_pod_watcher_notices(start_pod_watch)
-        assert_equal(true, @cache.key?('initial_uid'))
-        assert_equal(true, @cache.key?('modified_uid'))
         assert_equal(2, @stats[:pod_cache_host_updates])
+        assert_equal(1, @stats[:pod_cache_watch_updates])
       end
-    ENV['K8S_NODE_NAME'] = orig_env_val
-  end
-
-  test 'pod list caches pods and watch updates' do
-    orig_env_val = ENV['K8S_NODE_NAME']
-    ENV['K8S_NODE_NAME'] = 'aNodeName'
-    @client.stub :get_pods, @initial do
-      @client.stub :watch_pods, [@modified] do
-        process_pod_watcher_notices(start_pod_watch)
-        assert_equal(2, @stats[:pod_cache_host_updates])
-        assert_equal(1, @stats[:pod_cache_watch_updates])
-      end
-    end
-    ENV['K8S_NODE_NAME'] = orig_env_val
-    assert_equal('123', @last_seen_resource_version) # from @modified
     end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+    assert_equal('123', @last_seen_resource_version) # from @modified
+  end

-
-
-
-
-
-
-    end
+  test 'pod watch notice ignores CREATED' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@created] do
+        process_pod_watcher_notices(start_pod_watch)
+        assert_equal(false, @cache.key?('created_uid'))
+        assert_equal(1, @stats[:pod_cache_watch_ignored])
       end
     end
+  end

-
-
-
-
-
-  end
+  test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(false, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_misses])
     end
+  end

-
-
-
-
-
-
-
-    end
-    ENV['K8S_NODE_NAME'] = orig_env_val
+  test 'pod MODIFIED cached when hostname matches' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_host_updates])
     end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+  end

-
-
-
-
-
-
-
+  test 'pod watch notice is updated when MODIFIED is received' do
+    @cache['modified_uid'] = {}
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_updates])
     end
+  end

-
-
-
-
-
-
-
+  test 'pod watch notice is ignored when delete is received' do
+    @cache['deleted_uid'] = {}
+    @client.stub :watch_pods, [@deleted] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('deleted_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
     end
+  end

-
-
-
-
-
-
-
-
-
+  test 'pod watch raises Fluent::UnrecoverableError when cannot re-establish connection to k8s API server' do
+    # Stub start_pod_watch to simulate initial successful connection to API server
+    stub(self).start_pod_watch
+    # Stub watch_pods to simluate not being able to set up watch connection to API server
+    stub(@client).watch_pods { raise }
+    @client.stub :get_pods, @initial do
+      assert_raise Fluent::UnrecoverableError do
+        set_up_pod_thread
       end
-      assert_equal(3, @stats[:pod_watch_failures])
-      assert_equal(2, Thread.current[:pod_watch_retry_count])
-      assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
-      assert_nil(@stats[:pod_watch_error_type_notices])
     end
+    assert_equal(3, @stats[:pod_watch_failures])
+    assert_equal(2, Thread.current[:pod_watch_retry_count])
+    assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
+    assert_nil(@stats[:pod_watch_error_type_notices])
+  end

-
-
-
-
-
-
-
-
-
-
-        assert_operator(@stats[:pod_watch_failures], :>=, 3)
-        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
-        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+  test 'pod watch resets watch retry count when exceptions are encountered and connection to k8s API server is re-established' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [[@created, @exception_raised]] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
+          end
        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
       end
     end
+  end

-
-
-
-
-
-
-
-
-
+  test 'pod watch resets watch retry count when error is received and connection to k8s API server is re-established' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@error] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
          end
-        assert_operator(@stats[:pod_watch_failures], :>=, 3)
-        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
-        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
-        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
       end
     end
+  end

-
-
-
-
-
-
-
-
-
+  test 'pod watch continues after retries succeed' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@modified, @error, @modified] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
          end
-        assert_operator(@stats[:pod_watch_failures], :>=, 3)
-        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
-        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
-        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
       end
     end
+  end

-
-
-
-
-
-
-
-    end
-    assert_equal(1, @stats[:pod_watch_gone_notices])
-    assert_nil @last_seen_resource_version # forced restart
+  test 'pod watch raises a GoneError when a 410 Gone error is received' do
+    @cache['gone_uid'] = {}
+    @client.stub :watch_pods, [@gone] do
+      @last_seen_resource_version = '100'
+      assert_raise KubernetesMetadata::Common::GoneError do
+        process_pod_watcher_notices(start_pod_watch)
      end
+      assert_equal(1, @stats[:pod_watch_gone_notices])
+      assert_nil @last_seen_resource_version # forced restart
    end
+  end

-
-
-
-
-
-
-
-
-
+  test 'pod watch retries when 410 Gone errors are encountered' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@created, @gone, @modified] do
+        # Force the infinite watch loop to exit after 3 seconds because the code sleeps 3 times.
+        # Verifies that no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
          end
-        assert_operator(@stats[:pod_watch_gone_errors], :>=, 3)
-        assert_operator(@stats[:pod_watch_gone_notices], :>=, 3)
        end
+        assert_operator(@stats[:pod_watch_gone_errors], :>=, 3)
+        assert_operator(@stats[:pod_watch_gone_notices], :>=, 3)
      end
    end
+  end
 end
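The @gone fixture and the two 410-specific tests in this hunk pin down how the pod watcher is expected to react to a "410 Gone" watch notice: count it in @stats[:pod_watch_gone_notices], raise KubernetesMetadata::Common::GoneError, and clear @last_seen_resource_version so the next pass re-lists pods instead of resuming from a stale resource version. A minimal, self-contained sketch of that reaction follows; the loop and the notice fixture are invented for illustration, and only the error name, the stats keys, and the reset behaviour are taken from the tests above.

# Illustrative sketch only - not the plugin's implementation.
class GoneError < StandardError; end

stats = Hash.new(0)
last_seen_resource_version = '123'

notices = [
  { type: 'MODIFIED', object: { metadata: { uid: 'modified_uid', resourceVersion: '124' } } },
  { type: 'ERROR', object: { code: 410, reason: 'Gone' } }
]

begin
  notices.each do |notice|
    case notice[:type]
    when 'MODIFIED'
      last_seen_resource_version = notice[:object][:metadata][:resourceVersion]
    when 'ERROR'
      if notice[:object][:code] == 410
        stats[:pod_watch_gone_notices] += 1
        raise GoneError, notice[:object][:reason]
      end
      stats[:pod_watch_error_type_notices] += 1
    end
  end
rescue GoneError
  # A 410 means the cached resourceVersion is too old: drop it so the watcher
  # re-lists pods from scratch instead of resuming from a stale version.
  stats[:pod_watch_gone_errors] += 1
  last_seen_resource_version = nil
end

p stats                       # => {:pod_watch_gone_notices=>1, :pod_watch_gone_errors=>1}
p last_seen_resource_version  # => nil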