fluent-plugin-kubernetes_metadata_filter 2.5.0 → 3.2.0
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.circleci/config.yml +10 -14
- data/.gitignore +0 -1
- data/.rubocop.yml +57 -0
- data/Gemfile +4 -2
- data/Gemfile.lock +76 -67
- data/README.md +9 -83
- data/Rakefile +15 -11
- data/doc/benchmark/5m-1-2500lps-256b-baseline-01/cpu.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-baseline-01/latency.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-baseline-01/loss.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-baseline-01/mem.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-baseline-01/readme.md +88 -0
- data/doc/benchmark/5m-1-2500lps-256b-baseline-01/results.html +127 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/cpu.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/latency.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/loss.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/mem.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/readme.md +97 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/results.html +136 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/cpu.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/latency.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/loss.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/mem.png +0 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/readme.md +97 -0
- data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/results.html +136 -0
- data/fluent-plugin-kubernetes_metadata_filter.gemspec +25 -27
- data/lib/fluent/plugin/filter_kubernetes_metadata.rb +171 -192
- data/lib/fluent/plugin/kubernetes_metadata_cache_strategy.rb +25 -23
- data/lib/fluent/plugin/kubernetes_metadata_common.rb +44 -69
- data/lib/fluent/plugin/kubernetes_metadata_stats.rb +21 -5
- data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb +68 -0
- data/lib/fluent/plugin/kubernetes_metadata_util.rb +33 -0
- data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb +91 -42
- data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb +108 -47
- data/release_notes.md +42 -0
- data/test/cassettes/kubernetes_get_pod_container_init.yml +145 -0
- data/test/helper.rb +20 -2
- data/test/plugin/test_cache_stats.rb +10 -13
- data/test/plugin/test_cache_strategy.rb +158 -160
- data/test/plugin/test_filter_kubernetes_metadata.rb +340 -616
- data/test/plugin/test_watch_namespaces.rb +188 -125
- data/test/plugin/test_watch_pods.rb +282 -202
- data/test/plugin/watch_test.rb +16 -15
- metadata +77 -67
- /data/test/cassettes/{kubernetes_docker_metadata_dotted_labels.yml → kubernetes_docker_metadata_dotted_slashed_labels.yml} +0 -0
data/test/plugin/test_watch_pods.rb CHANGED

@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,248 +19,326 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require 'ostruct'
 require_relative 'watch_test'
 
 class DefaultPodWatchStrategyTest < WatchTest
+  include KubernetesMetadata::WatchPods
 
- … (old lines 25-86 not captured in this rendering)
+  setup do
+    @initial = {
+      kind: 'PodList',
+      metadata: { resourceVersion: '123' },
+      items: [
+        {
+          metadata: {
+            name: 'initial',
+            namespace: 'initial_ns',
+            uid: 'initial_uid',
+            labels: {}
+          },
+          spec: {
+            nodeName: 'aNodeName',
+            containers: [{
+              name: 'foo',
+              image: 'bar'
+            }, {
+              name: 'bar',
+              image: 'foo'
+            }]
+          },
+          status: {
+            podIP: '172.17.0.8'
+          }
+        },
+        {
+          metadata: {
+            name: 'modified',
+            namespace: 'create',
+            uid: 'modified_uid',
+            labels: {}
+          },
+          spec: {
+            nodeName: 'aNodeName',
+            containers: [{
+              name: 'foo',
+              image: 'bar'
+            }, {
+              name: 'bar',
+              image: 'foo'
+            }]
+          },
+          status: {
+            podIP: '172.17.0.8'
+          }
+        }
+      ]
+    }
+    @created = {
+      type: 'CREATED',
+      object: {
+        metadata: {
+          name: 'created',
+          namespace: 'create',
+          uid: 'created_uid',
+          resourceVersion: '122',
+          labels: {}
+        },
+        spec: {
+          nodeName: 'aNodeName',
+          containers: [{
+            name: 'foo',
+            image: 'bar'
+          }, {
+            name: 'bar',
+            image: 'foo'
+          }]
+        },
+        status: {
+          podIP: '172.17.0.8'
+        }
+      }
+    }
+    @modified = {
+      type: 'MODIFIED',
+      object: {
+        metadata: {
+          name: 'foo',
+          namespace: 'modified',
+          uid: 'modified_uid',
+          resourceVersion: '123',
+          labels: {}
+        },
+        spec: {
+          nodeName: 'aNodeName',
+          containers: [{
+            name: 'foo',
+            image: 'bar'
+          }, {
+            name: 'bar',
+            image: 'foo'
+          }]
+        },
+        status: {
+          podIP: '172.17.0.8',
+          containerStatuses: [
+            {
+              name: 'fabric8-console-container',
+              state: {
+                running: {
+                  startedAt: '2015-05-08T09:22:44Z'
+                }
+              },
+              lastState: {},
+              ready: true,
+              restartCount: 0,
+              image: 'fabric8/hawtio-kubernetes:latest',
+              imageID: 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
+              containerID: 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
             }
- … (old lines 88-123 not captured in this rendering)
-            }
-          ]
-        }
-      }
-    )
-    @deleted = OpenStruct.new(
-      type: 'DELETED',
-      object: {
-        'metadata' => {
-          'name' => 'deleteme',
-          'namespace' => 'deleted',
-          'uid' => 'deleted_uid'
-        }
-      }
-    )
-    @error = OpenStruct.new(
-      type: 'ERROR',
-      object: {
-        'message' => 'some error message'
-      }
-    )
-  end
+          ]
+        }
+      }
+    }
+    @deleted = {
+      type: 'DELETED',
+      object: {
+        metadata: {
+          name: 'deleteme',
+          namespace: 'deleted',
+          uid: 'deleted_uid',
+          resourceVersion: '124'
+        }
+      }
+    }
+    @error = {
+      type: 'ERROR',
+      object: {
+        message: 'some error message'
+      }
+    }
+    @gone = {
+      type: 'ERROR',
+      object: {
+        code: 410,
+        kind: 'Status',
+        message: 'too old resource version: 123 (391079)',
+        metadata: {
+          name: 'gone',
+          namespace: 'gone',
+          uid: 'gone_uid'
+        },
+        reason: 'Gone'
+      }
+    }
+  end
 
- … (old lines 147-150 not captured in this rendering)
+  test 'pod list caches pods' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :get_pods, @initial do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('initial_uid'))
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(2, @stats[:pod_cache_host_updates])
+    end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+  end
+
+  test 'pod list caches pods and watch updates' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@modified] do
         process_pod_watcher_notices(start_pod_watch)
-        assert_equal(true, @cache.key?('initial_uid'))
-        assert_equal(true, @cache.key?('modified_uid'))
         assert_equal(2, @stats[:pod_cache_host_updates])
+        assert_equal(1, @stats[:pod_cache_watch_updates])
       end
-    ENV['K8S_NODE_NAME'] = orig_env_val
     end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+    assert_equal('123', @last_seen_resource_version) # from @modified
+  end
 
- … (old lines 159-164 not captured in this rendering)
-      assert_equal(2, @stats[:pod_cache_host_updates])
-      assert_equal(1, @stats[:pod_cache_watch_updates])
-    end
+  test 'pod watch notice ignores CREATED' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@created] do
+        process_pod_watcher_notices(start_pod_watch)
+        assert_equal(false, @cache.key?('created_uid'))
+        assert_equal(1, @stats[:pod_cache_watch_ignored])
       end
-    ENV['K8S_NODE_NAME'] = orig_env_val
     end
+  end
 
- … (old lines 172-176 not captured in this rendering)
-      assert_equal(1, @stats[:pod_cache_watch_ignored])
-    end
-  end
+  test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(false, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_misses])
     end
+  end
 
- … (old lines 182-187 not captured in this rendering)
+  test 'pod MODIFIED cached when hostname matches' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_host_updates])
     end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+  end
 
- … (old lines 190-195 not captured in this rendering)
-      assert_equal(1, @stats[:pod_cache_host_updates])
-    end
-    ENV['K8S_NODE_NAME'] = orig_env_val
+  test 'pod watch notice is updated when MODIFIED is received' do
+    @cache['modified_uid'] = {}
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_updates])
     end
+  end
 
- … (old lines 201-206 not captured in this rendering)
+  test 'pod watch notice is ignored when delete is received' do
+    @cache['deleted_uid'] = {}
+    @client.stub :watch_pods, [@deleted] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('deleted_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
+    end
+  end
+
+  test 'pod watch raises Fluent::UnrecoverableError when cannot re-establish connection to k8s API server' do
+    # Stub start_pod_watch to simulate initial successful connection to API server
+    stub(self).start_pod_watch
+    # Stub watch_pods to simluate not being able to set up watch connection to API server
+    stub(@client).watch_pods { raise }
+    @client.stub :get_pods, @initial do
+      assert_raise Fluent::UnrecoverableError do
+        set_up_pod_thread
       end
     end
+    assert_equal(3, @stats[:pod_watch_failures])
+    assert_equal(2, Thread.current[:pod_watch_retry_count])
+    assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
+    assert_nil(@stats[:pod_watch_error_type_notices])
+  end
 
- … (old lines 210-211 not captured in this rendering)
-    @client.stub :watch_pods, [@
- … (old lines 213-215 not captured in this rendering)
+  test 'pod watch resets watch retry count when exceptions are encountered and connection to k8s API server is re-established' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [[@created, @exception_raised]] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
+          end
+        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
       end
     end
+  end
 
- … (old lines 219-222 not captured in this rendering)
+  test 'pod watch resets watch retry count when error is received and connection to k8s API server is re-established' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@error] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
            set_up_pod_thread
          end
-        assert_equal(3, @stats[:pod_watch_failures])
-        assert_equal(2, Thread.current[:pod_watch_retry_count])
-        assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
-        assert_nil(@stats[:pod_watch_error_type_notices])
        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
      end
    end
+  end
 
- … (old lines 233-236 not captured in this rendering)
+  test 'pod watch continues after retries succeed' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@modified, @error, @modified] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
            set_up_pod_thread
          end
-        assert_equal(3, @stats[:pod_watch_failures])
-        assert_equal(2, Thread.current[:pod_watch_retry_count])
-        assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
-        assert_equal(3, @stats[:pod_watch_error_type_notices])
        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
+      end
+    end
+  end
+
+  test 'pod watch raises a GoneError when a 410 Gone error is received' do
+    @cache['gone_uid'] = {}
+    @client.stub :watch_pods, [@gone] do
+      @last_seen_resource_version = '100'
+      assert_raise KubernetesMetadata::Common::GoneError do
+        process_pod_watcher_notices(start_pod_watch)
      end
+      assert_equal(1, @stats[:pod_watch_gone_notices])
+      assert_nil @last_seen_resource_version # forced restart
    end
+  end
 
- … (old lines 247-254 not captured in this rendering)
-end
+  test 'pod watch retries when 410 Gone errors are encountered' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@created, @gone, @modified] do
+        # Force the infinite watch loop to exit after 3 seconds because the code sleeps 3 times.
+        # Verifies that no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
          end
-        assert_operator(@stats[:pod_watch_failures], :>=, 3)
-        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
-        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
-        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
        end
+        assert_operator(@stats[:pod_watch_gone_errors], :>=, 3)
+        assert_operator(@stats[:pod_watch_gone_notices], :>=, 3)
      end
    end
+  end
 end
data/test/plugin/watch_test.rb CHANGED

@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,10 +19,8 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require 'ostruct'
 
 class WatchTest < Test::Unit::TestCase
-
   def thread_current_running?
     true
   end
@@ -37,26 +37,23 @@ class WatchTest < Test::Unit::TestCase
     Thread.current[:namespace_watch_retry_count] = 0
 
     @client = OpenStruct.new
-    def @client.
-      '12345'
-    end
-    def @client.watch_pods(options = {})
+    def @client.watch_pods(_options = {})
       []
     end
- … (old line 46 not captured in this rendering)
+
+    def @client.watch_namespaces(_options = {})
       []
     end
- … (old lines 49-51 not captured in this rendering)
-    def @client.get_pods(options = {})
-      self
+
+    def @client.get_namespaces(_options = {})
+      { items: [], metadata: { resourceVersion: '12345' } }
     end
 
-    @
- … (old line 57 not captured in this rendering)
-      raise Exception
+    def @client.get_pods(_options = {})
+      { items: [], metadata: { resourceVersion: '12345' } }
     end
+
+    @exception_raised = :blow_up_when_used
   end
 
   def watcher=(value)
@@ -66,10 +63,14 @@ class WatchTest < Test::Unit::TestCase
     logger = {}
     def logger.debug(message)
     end
+
     def logger.info(message, error)
     end
+
     def logger.error(message, error)
     end
+    def logger.warn(message)
+    end
     logger
   end
 end