fluent-plugin-kubernetes_metadata_filter 2.5.0 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +10 -14
  3. data/.gitignore +0 -1
  4. data/.rubocop.yml +57 -0
  5. data/Gemfile +4 -2
  6. data/Gemfile.lock +76 -67
  7. data/README.md +9 -83
  8. data/Rakefile +15 -11
  9. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/cpu.png +0 -0
  10. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/latency.png +0 -0
  11. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/loss.png +0 -0
  12. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/mem.png +0 -0
  13. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/readme.md +88 -0
  14. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/results.html +127 -0
  15. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/cpu.png +0 -0
  16. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/latency.png +0 -0
  17. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/loss.png +0 -0
  18. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/mem.png +0 -0
  19. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/readme.md +97 -0
  20. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/results.html +136 -0
  21. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/cpu.png +0 -0
  22. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/latency.png +0 -0
  23. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/loss.png +0 -0
  24. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/mem.png +0 -0
  25. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/readme.md +97 -0
  26. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/results.html +136 -0
  27. data/fluent-plugin-kubernetes_metadata_filter.gemspec +25 -27
  28. data/lib/fluent/plugin/filter_kubernetes_metadata.rb +171 -192
  29. data/lib/fluent/plugin/kubernetes_metadata_cache_strategy.rb +25 -23
  30. data/lib/fluent/plugin/kubernetes_metadata_common.rb +44 -69
  31. data/lib/fluent/plugin/kubernetes_metadata_stats.rb +21 -5
  32. data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb +68 -0
  33. data/lib/fluent/plugin/kubernetes_metadata_util.rb +33 -0
  34. data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb +91 -42
  35. data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb +108 -47
  36. data/release_notes.md +42 -0
  37. data/test/cassettes/kubernetes_get_pod_container_init.yml +145 -0
  38. data/test/helper.rb +20 -2
  39. data/test/plugin/test_cache_stats.rb +10 -13
  40. data/test/plugin/test_cache_strategy.rb +158 -160
  41. data/test/plugin/test_filter_kubernetes_metadata.rb +340 -616
  42. data/test/plugin/test_watch_namespaces.rb +188 -125
  43. data/test/plugin/test_watch_pods.rb +282 -202
  44. data/test/plugin/watch_test.rb +16 -15
  45. metadata +77 -67
  46. /data/test/cassettes/{kubernetes_docker_metadata_dotted_labels.yml → kubernetes_docker_metadata_dotted_slashed_labels.yml} +0 -0
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  #
2
4
  # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
3
5
  # Kubernetes metadata
@@ -17,248 +19,326 @@
17
19
  # limitations under the License.
18
20
  #
19
21
  require_relative '../helper'
20
- require 'ostruct'
21
22
  require_relative 'watch_test'
22
23
 
23
24
  class DefaultPodWatchStrategyTest < WatchTest
25
+ include KubernetesMetadata::WatchPods
24
26
 
25
- include KubernetesMetadata::WatchPods
26
-
27
- setup do
28
- @initial = Kubeclient::Common::EntityList.new(
29
- 'PodList',
30
- '123',
31
- [
32
- Kubeclient::Resource.new({
33
- 'metadata' => {
34
- 'name' => 'initial',
35
- 'namespace' => 'initial_ns',
36
- 'uid' => 'initial_uid',
37
- 'labels' => {},
38
- },
39
- 'spec' => {
40
- 'nodeName' => 'aNodeName',
41
- 'containers' => [{
42
- 'name' => 'foo',
43
- 'image' => 'bar',
44
- }, {
45
- 'name' => 'bar',
46
- 'image' => 'foo',
47
- }]
48
- }
49
- }),
50
- Kubeclient::Resource.new({
51
- 'metadata' => {
52
- 'name' => 'modified',
53
- 'namespace' => 'create',
54
- 'uid' => 'modified_uid',
55
- 'labels' => {},
56
- },
57
- 'spec' => {
58
- 'nodeName' => 'aNodeName',
59
- 'containers' => [{
60
- 'name' => 'foo',
61
- 'image' => 'bar',
62
- }, {
63
- 'name' => 'bar',
64
- 'image' => 'foo',
65
- }]
66
- }
67
- }),
68
- ])
69
- @created = OpenStruct.new(
70
- type: 'CREATED',
71
- object: {
72
- 'metadata' => {
73
- 'name' => 'created',
74
- 'namespace' => 'create',
75
- 'uid' => 'created_uid',
76
- 'labels' => {},
77
- },
78
- 'spec' => {
79
- 'nodeName' => 'aNodeName',
80
- 'containers' => [{
81
- 'name' => 'foo',
82
- 'image' => 'bar',
83
- }, {
84
- 'name' => 'bar',
85
- 'image' => 'foo',
86
- }]
27
+ setup do
28
+ @initial = {
29
+ kind: 'PodList',
30
+ metadata: { resourceVersion: '123' },
31
+ items: [
32
+ {
33
+ metadata: {
34
+ name: 'initial',
35
+ namespace: 'initial_ns',
36
+ uid: 'initial_uid',
37
+ labels: {}
38
+ },
39
+ spec: {
40
+ nodeName: 'aNodeName',
41
+ containers: [{
42
+ name: 'foo',
43
+ image: 'bar'
44
+ }, {
45
+ name: 'bar',
46
+ image: 'foo'
47
+ }]
48
+ },
49
+ status: {
50
+ podIP: '172.17.0.8'
51
+ }
52
+ },
53
+ {
54
+ metadata: {
55
+ name: 'modified',
56
+ namespace: 'create',
57
+ uid: 'modified_uid',
58
+ labels: {}
59
+ },
60
+ spec: {
61
+ nodeName: 'aNodeName',
62
+ containers: [{
63
+ name: 'foo',
64
+ image: 'bar'
65
+ }, {
66
+ name: 'bar',
67
+ image: 'foo'
68
+ }]
69
+ },
70
+ status: {
71
+ podIP: '172.17.0.8'
72
+ }
73
+ }
74
+ ]
75
+ }
76
+ @created = {
77
+ type: 'CREATED',
78
+ object: {
79
+ metadata: {
80
+ name: 'created',
81
+ namespace: 'create',
82
+ uid: 'created_uid',
83
+ resourceVersion: '122',
84
+ labels: {}
85
+ },
86
+ spec: {
87
+ nodeName: 'aNodeName',
88
+ containers: [{
89
+ name: 'foo',
90
+ image: 'bar'
91
+ }, {
92
+ name: 'bar',
93
+ image: 'foo'
94
+ }]
95
+ },
96
+ status: {
97
+ podIP: '172.17.0.8'
98
+ }
99
+ }
100
+ }
101
+ @modified = {
102
+ type: 'MODIFIED',
103
+ object: {
104
+ metadata: {
105
+ name: 'foo',
106
+ namespace: 'modified',
107
+ uid: 'modified_uid',
108
+ resourceVersion: '123',
109
+ labels: {}
110
+ },
111
+ spec: {
112
+ nodeName: 'aNodeName',
113
+ containers: [{
114
+ name: 'foo',
115
+ image: 'bar'
116
+ }, {
117
+ name: 'bar',
118
+ image: 'foo'
119
+ }]
120
+ },
121
+ status: {
122
+ podIP: '172.17.0.8',
123
+ containerStatuses: [
124
+ {
125
+ name: 'fabric8-console-container',
126
+ state: {
127
+ running: {
128
+ startedAt: '2015-05-08T09:22:44Z'
129
+ }
130
+ },
131
+ lastState: {},
132
+ ready: true,
133
+ restartCount: 0,
134
+ image: 'fabric8/hawtio-kubernetes:latest',
135
+ imageID: 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
136
+ containerID: 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
87
137
  }
88
- }
89
- )
90
- @modified = OpenStruct.new(
91
- type: 'MODIFIED',
92
- object: {
93
- 'metadata' => {
94
- 'name' => 'foo',
95
- 'namespace' => 'modified',
96
- 'uid' => 'modified_uid',
97
- 'labels' => {},
98
- },
99
- 'spec' => {
100
- 'nodeName' => 'aNodeName',
101
- 'containers' => [{
102
- 'name' => 'foo',
103
- 'image' => 'bar',
104
- }, {
105
- 'name' => 'bar',
106
- 'image' => 'foo',
107
- }]
108
- },
109
- 'status' => {
110
- 'containerStatuses' => [
111
- {
112
- 'name' => 'fabric8-console-container',
113
- 'state' => {
114
- 'running' => {
115
- 'startedAt' => '2015-05-08T09:22:44Z'
116
- }
117
- },
118
- 'lastState' => {},
119
- 'ready' => true,
120
- 'restartCount' => 0,
121
- 'image' => 'fabric8/hawtio-kubernetes:latest',
122
- 'imageID' => 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
123
- 'containerID' => 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
124
- }
125
- ]
126
- }
127
- }
128
- )
129
- @deleted = OpenStruct.new(
130
- type: 'DELETED',
131
- object: {
132
- 'metadata' => {
133
- 'name' => 'deleteme',
134
- 'namespace' => 'deleted',
135
- 'uid' => 'deleted_uid'
136
- }
137
- }
138
- )
139
- @error = OpenStruct.new(
140
- type: 'ERROR',
141
- object: {
142
- 'message' => 'some error message'
143
- }
144
- )
145
- end
138
+ ]
139
+ }
140
+ }
141
+ }
142
+ @deleted = {
143
+ type: 'DELETED',
144
+ object: {
145
+ metadata: {
146
+ name: 'deleteme',
147
+ namespace: 'deleted',
148
+ uid: 'deleted_uid',
149
+ resourceVersion: '124'
150
+ }
151
+ }
152
+ }
153
+ @error = {
154
+ type: 'ERROR',
155
+ object: {
156
+ message: 'some error message'
157
+ }
158
+ }
159
+ @gone = {
160
+ type: 'ERROR',
161
+ object: {
162
+ code: 410,
163
+ kind: 'Status',
164
+ message: 'too old resource version: 123 (391079)',
165
+ metadata: {
166
+ name: 'gone',
167
+ namespace: 'gone',
168
+ uid: 'gone_uid'
169
+ },
170
+ reason: 'Gone'
171
+ }
172
+ }
173
+ end
146
174
 
147
- test 'pod list caches pods' do
148
- orig_env_val = ENV['K8S_NODE_NAME']
149
- ENV['K8S_NODE_NAME'] = 'aNodeName'
150
- @client.stub :get_pods, @initial do
175
+ test 'pod list caches pods' do
176
+ orig_env_val = ENV['K8S_NODE_NAME']
177
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
178
+ @client.stub :get_pods, @initial do
179
+ process_pod_watcher_notices(start_pod_watch)
180
+ assert_equal(true, @cache.key?('initial_uid'))
181
+ assert_equal(true, @cache.key?('modified_uid'))
182
+ assert_equal(2, @stats[:pod_cache_host_updates])
183
+ end
184
+ ENV['K8S_NODE_NAME'] = orig_env_val
185
+ end
186
+
187
+ test 'pod list caches pods and watch updates' do
188
+ orig_env_val = ENV['K8S_NODE_NAME']
189
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
190
+ @client.stub :get_pods, @initial do
191
+ @client.stub :watch_pods, [@modified] do
151
192
  process_pod_watcher_notices(start_pod_watch)
152
- assert_equal(true, @cache.key?('initial_uid'))
153
- assert_equal(true, @cache.key?('modified_uid'))
154
193
  assert_equal(2, @stats[:pod_cache_host_updates])
194
+ assert_equal(1, @stats[:pod_cache_watch_updates])
155
195
  end
156
- ENV['K8S_NODE_NAME'] = orig_env_val
157
196
  end
197
+ ENV['K8S_NODE_NAME'] = orig_env_val
198
+ assert_equal('123', @last_seen_resource_version) # from @modified
199
+ end
158
200
 
159
- test 'pod list caches pods and watch updates' do
160
- orig_env_val = ENV['K8S_NODE_NAME']
161
- ENV['K8S_NODE_NAME'] = 'aNodeName'
162
- @client.stub :get_pods, @initial do
163
- @client.stub :watch_pods, [@modified] do
164
- process_pod_watcher_notices(start_pod_watch)
165
- assert_equal(2, @stats[:pod_cache_host_updates])
166
- assert_equal(1, @stats[:pod_cache_watch_updates])
167
- end
201
+ test 'pod watch notice ignores CREATED' do
202
+ @client.stub :get_pods, @initial do
203
+ @client.stub :watch_pods, [@created] do
204
+ process_pod_watcher_notices(start_pod_watch)
205
+ assert_equal(false, @cache.key?('created_uid'))
206
+ assert_equal(1, @stats[:pod_cache_watch_ignored])
168
207
  end
169
- ENV['K8S_NODE_NAME'] = orig_env_val
170
208
  end
209
+ end
171
210
 
172
- test 'pod watch notice ignores CREATED' do
173
- @client.stub :get_pods, @initial do
174
- @client.stub :watch_pods, [@created] do
175
- process_pod_watcher_notices(start_pod_watch)
176
- assert_equal(false, @cache.key?('created_uid'))
177
- assert_equal(1, @stats[:pod_cache_watch_ignored])
178
- end
179
- end
211
+ test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
212
+ @client.stub :watch_pods, [@modified] do
213
+ process_pod_watcher_notices(start_pod_watch)
214
+ assert_equal(false, @cache.key?('modified_uid'))
215
+ assert_equal(1, @stats[:pod_cache_watch_misses])
180
216
  end
217
+ end
181
218
 
182
- test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
183
- @client.stub :watch_pods, [@modified] do
184
- process_pod_watcher_notices(start_pod_watch)
185
- assert_equal(false, @cache.key?('modified_uid'))
186
- assert_equal(1, @stats[:pod_cache_watch_misses])
187
- end
219
+ test 'pod MODIFIED cached when hostname matches' do
220
+ orig_env_val = ENV['K8S_NODE_NAME']
221
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
222
+ @client.stub :watch_pods, [@modified] do
223
+ process_pod_watcher_notices(start_pod_watch)
224
+ assert_equal(true, @cache.key?('modified_uid'))
225
+ assert_equal(1, @stats[:pod_cache_host_updates])
188
226
  end
227
+ ENV['K8S_NODE_NAME'] = orig_env_val
228
+ end
189
229
 
190
- test 'pod MODIFIED cached when hostname matches' do
191
- orig_env_val = ENV['K8S_NODE_NAME']
192
- ENV['K8S_NODE_NAME'] = 'aNodeName'
193
- @client.stub :watch_pods, [@modified] do
194
- process_pod_watcher_notices(start_pod_watch)
195
- assert_equal(true, @cache.key?('modified_uid'))
196
- assert_equal(1, @stats[:pod_cache_host_updates])
197
- end
198
- ENV['K8S_NODE_NAME'] = orig_env_val
230
+ test 'pod watch notice is updated when MODIFIED is received' do
231
+ @cache['modified_uid'] = {}
232
+ @client.stub :watch_pods, [@modified] do
233
+ process_pod_watcher_notices(start_pod_watch)
234
+ assert_equal(true, @cache.key?('modified_uid'))
235
+ assert_equal(1, @stats[:pod_cache_watch_updates])
199
236
  end
237
+ end
200
238
 
201
- test 'pod watch notice is updated when MODIFIED is received' do
202
- @cache['modified_uid'] = {}
203
- @client.stub :watch_pods, [@modified] do
204
- process_pod_watcher_notices(start_pod_watch)
205
- assert_equal(true, @cache.key?('modified_uid'))
206
- assert_equal(1, @stats[:pod_cache_watch_updates])
239
+ test 'pod watch notice is ignored when delete is received' do
240
+ @cache['deleted_uid'] = {}
241
+ @client.stub :watch_pods, [@deleted] do
242
+ process_pod_watcher_notices(start_pod_watch)
243
+ assert_equal(true, @cache.key?('deleted_uid'))
244
+ assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
245
+ end
246
+ end
247
+
248
+ test 'pod watch raises Fluent::UnrecoverableError when cannot re-establish connection to k8s API server' do
249
+ # Stub start_pod_watch to simulate initial successful connection to API server
250
+ stub(self).start_pod_watch
251
+ # Stub watch_pods to simulate not being able to set up watch connection to API server
252
+ stub(@client).watch_pods { raise }
253
+ @client.stub :get_pods, @initial do
254
+ assert_raise Fluent::UnrecoverableError do
255
+ set_up_pod_thread
207
256
  end
208
257
  end
258
+ assert_equal(3, @stats[:pod_watch_failures])
259
+ assert_equal(2, Thread.current[:pod_watch_retry_count])
260
+ assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
261
+ assert_nil(@stats[:pod_watch_error_type_notices])
262
+ end
209
263
 
210
- test 'pod watch notice is ignored when delete is received' do
211
- @cache['deleted_uid'] = {}
212
- @client.stub :watch_pods, [@deleted] do
213
- process_pod_watcher_notices(start_pod_watch)
214
- assert_equal(true, @cache.key?('deleted_uid'))
215
- assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
264
+ test 'pod watch resets watch retry count when exceptions are encountered and connection to k8s API server is re-established' do
265
+ @client.stub :get_pods, @initial do
266
+ @client.stub :watch_pods, [[@created, @exception_raised]] do
267
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
268
+ # no unrecoverable error was thrown during this period of time.
269
+ assert_raise Timeout::Error.new('execution expired') do
270
+ Timeout.timeout(3) do
271
+ set_up_pod_thread
272
+ end
273
+ end
274
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
275
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
276
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
216
277
  end
217
278
  end
279
+ end
218
280
 
219
- test 'pod watch retries when exceptions are encountered' do
220
- @client.stub :get_pods, @initial do
221
- @client.stub :watch_pods, [[@created, @exception_raised]] do
222
- assert_raise Fluent::UnrecoverableError do
281
+ test 'pod watch resets watch retry count when error is received and connection to k8s API server is re-established' do
282
+ @client.stub :get_pods, @initial do
283
+ @client.stub :watch_pods, [@error] do
284
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
285
+ # no unrecoverable error was thrown during this period of time.
286
+ assert_raise Timeout::Error.new('execution expired') do
287
+ Timeout.timeout(3) do
223
288
  set_up_pod_thread
224
289
  end
225
- assert_equal(3, @stats[:pod_watch_failures])
226
- assert_equal(2, Thread.current[:pod_watch_retry_count])
227
- assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
228
- assert_nil(@stats[:pod_watch_error_type_notices])
229
290
  end
291
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
292
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
293
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
294
+ assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
230
295
  end
231
296
  end
297
+ end
232
298
 
233
- test 'pod watch retries when error is received' do
234
- @client.stub :get_pods, @initial do
235
- @client.stub :watch_pods, [@error] do
236
- assert_raise Fluent::UnrecoverableError do
299
+ test 'pod watch continues after retries succeed' do
300
+ @client.stub :get_pods, @initial do
301
+ @client.stub :watch_pods, [@modified, @error, @modified] do
302
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
303
+ # no unrecoverable error was thrown during this period of time.
304
+ assert_raise Timeout::Error.new('execution expired') do
305
+ Timeout.timeout(3) do
237
306
  set_up_pod_thread
238
307
  end
239
- assert_equal(3, @stats[:pod_watch_failures])
240
- assert_equal(2, Thread.current[:pod_watch_retry_count])
241
- assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
242
- assert_equal(3, @stats[:pod_watch_error_type_notices])
243
308
  end
309
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
310
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
311
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
312
+ assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
313
+ end
314
+ end
315
+ end
316
+
317
+ test 'pod watch raises a GoneError when a 410 Gone error is received' do
318
+ @cache['gone_uid'] = {}
319
+ @client.stub :watch_pods, [@gone] do
320
+ @last_seen_resource_version = '100'
321
+ assert_raise KubernetesMetadata::Common::GoneError do
322
+ process_pod_watcher_notices(start_pod_watch)
244
323
  end
324
+ assert_equal(1, @stats[:pod_watch_gone_notices])
325
+ assert_nil @last_seen_resource_version # forced restart
245
326
  end
327
+ end
246
328
 
247
- test 'pod watch continues after retries succeed' do
248
- @client.stub :get_pods, @initial do
249
- @client.stub :watch_pods, [@modified, @error, @modified] do
250
- # Force the infinite watch loop to exit after 3 seconds. Verifies that
251
- # no unrecoverable error was thrown during this period of time.
252
- assert_raise Timeout::Error.new('execution expired') do
253
- Timeout.timeout(3) do
254
- set_up_pod_thread
255
- end
329
+ test 'pod watch retries when 410 Gone errors are encountered' do
330
+ @client.stub :get_pods, @initial do
331
+ @client.stub :watch_pods, [@created, @gone, @modified] do
332
+ # Force the infinite watch loop to exit after 3 seconds because the code sleeps 3 times.
333
+ # Verifies that no unrecoverable error was thrown during this period of time.
334
+ assert_raise Timeout::Error.new('execution expired') do
335
+ Timeout.timeout(3) do
336
+ set_up_pod_thread
256
337
  end
257
- assert_operator(@stats[:pod_watch_failures], :>=, 3)
258
- assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
259
- assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
260
- assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
261
338
  end
339
+ assert_operator(@stats[:pod_watch_gone_errors], :>=, 3)
340
+ assert_operator(@stats[:pod_watch_gone_notices], :>=, 3)
262
341
  end
263
342
  end
343
+ end
264
344
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  #
2
4
  # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
3
5
  # Kubernetes metadata
@@ -17,10 +19,8 @@
17
19
  # limitations under the License.
18
20
  #
19
21
  require_relative '../helper'
20
- require 'ostruct'
21
22
 
22
23
  class WatchTest < Test::Unit::TestCase
23
-
24
24
  def thread_current_running?
25
25
  true
26
26
  end
@@ -37,26 +37,23 @@ class WatchTest < Test::Unit::TestCase
37
37
  Thread.current[:namespace_watch_retry_count] = 0
38
38
 
39
39
  @client = OpenStruct.new
40
- def @client.resourceVersion
41
- '12345'
42
- end
43
- def @client.watch_pods(options = {})
40
+ def @client.watch_pods(_options = {})
44
41
  []
45
42
  end
46
- def @client.watch_namespaces(options = {})
43
+
44
+ def @client.watch_namespaces(_options = {})
47
45
  []
48
46
  end
49
- def @client.get_namespaces(options = {})
50
- self
51
- end
52
- def @client.get_pods(options = {})
53
- self
47
+
48
+ def @client.get_namespaces(_options = {})
49
+ { items: [], metadata: { resourceVersion: '12345' } }
54
50
  end
55
51
 
56
- @exception_raised = OpenStruct.new
57
- def @exception_raised.each
58
- raise Exception
52
+ def @client.get_pods(_options = {})
53
+ { items: [], metadata: { resourceVersion: '12345' } }
59
54
  end
55
+
56
+ @exception_raised = :blow_up_when_used
60
57
  end
61
58
 
62
59
  def watcher=(value)
@@ -66,10 +63,14 @@ class WatchTest < Test::Unit::TestCase
66
63
  logger = {}
67
64
  def logger.debug(message)
68
65
  end
66
+
69
67
  def logger.info(message, error)
70
68
  end
69
+
71
70
  def logger.error(message, error)
72
71
  end
72
+ def logger.warn(message)
73
+ end
73
74
  logger
74
75
  end
75
76
  end