fluent-plugin-kubernetes_metadata_filter 2.1.4 → 2.9.4

This diff shows the changes between two publicly released versions of the package as they appear in the supported public registries; it is provided for informational purposes only.
Files changed (40)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +57 -0
  3. data/.gitignore +0 -1
  4. data/.rubocop.yml +57 -0
  5. data/Gemfile +4 -2
  6. data/Gemfile.lock +158 -0
  7. data/README.md +48 -28
  8. data/Rakefile +15 -11
  9. data/fluent-plugin-kubernetes_metadata_filter.gemspec +25 -28
  10. data/lib/fluent/plugin/filter_kubernetes_metadata.rb +185 -131
  11. data/lib/fluent/plugin/kubernetes_metadata_cache_strategy.rb +27 -20
  12. data/lib/fluent/plugin/kubernetes_metadata_common.rb +59 -33
  13. data/lib/fluent/plugin/kubernetes_metadata_stats.rb +6 -6
  14. data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb +68 -0
  15. data/lib/fluent/plugin/kubernetes_metadata_util.rb +53 -0
  16. data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb +121 -27
  17. data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb +138 -29
  18. data/release_notes.md +42 -0
  19. data/test/cassettes/kubernetes_docker_metadata_annotations.yml +0 -34
  20. data/test/cassettes/{kubernetes_docker_metadata_dotted_labels.yml → kubernetes_docker_metadata_dotted_slashed_labels.yml} +0 -34
  21. data/test/cassettes/kubernetes_get_api_v1.yml +193 -0
  22. data/test/cassettes/kubernetes_get_api_v1_using_token.yml +195 -0
  23. data/test/cassettes/kubernetes_get_namespace_default.yml +69 -0
  24. data/test/cassettes/kubernetes_get_namespace_default_using_token.yml +71 -0
  25. data/test/cassettes/{kubernetes_docker_metadata.yml → kubernetes_get_pod.yml} +0 -82
  26. data/test/cassettes/{metadata_with_namespace_id.yml → kubernetes_get_pod_container_init.yml} +3 -134
  27. data/test/cassettes/{kubernetes_docker_metadata_using_bearer_token.yml → kubernetes_get_pod_using_token.yml} +5 -105
  28. data/test/cassettes/metadata_from_tag_and_journald_fields.yml +0 -255
  29. data/test/cassettes/metadata_from_tag_journald_and_kubernetes_fields.yml +0 -255
  30. data/test/cassettes/{non_kubernetes_docker_metadata.yml → valid_kubernetes_api_server_using_token.yml} +4 -44
  31. data/test/helper.rb +20 -2
  32. data/test/plugin/test_cache_stats.rb +10 -13
  33. data/test/plugin/test_cache_strategy.rb +158 -160
  34. data/test/plugin/test_filter_kubernetes_metadata.rb +480 -320
  35. data/test/plugin/test_utils.rb +56 -0
  36. data/test/plugin/test_watch_namespaces.rb +209 -55
  37. data/test/plugin/test_watch_pods.rb +302 -103
  38. data/test/plugin/watch_test.rb +52 -33
  39. metadata +69 -72
  40. data/circle.yml +0 -17
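
Much of the new code in this release concerns the pod and namespace watchers (items 16, 17 and 36-38 above): dropped watch connections are now re-established with a bounded number of retries and exponential backoff, 410 Gone responses force a restart from a fresh resource version, and Fluent::UnrecoverableError is raised once the retry budget is exhausted. The two diffs reproduced below are the pod-watch tests and the shared watch test harness that exercise this behaviour. As a rough orientation only, here is a minimal Ruby sketch of that retry shape; it is not the gem's implementation, and the keyword arguments merely mirror the @watch_retry_* variables set up in watch_test.rb.

    # Illustrative sketch only, not code from the gem. Retry a watch-style
    # block a bounded number of times with exponential backoff, then give up.
    def with_watch_retries(max_times: 2, interval: 1, backoff_base: 2)
      retries = 0
      begin
        yield
      rescue StandardError
        # Once the retry budget is spent, surface an unrecoverable failure,
        # analogous to the Fluent::UnrecoverableError asserted in the tests.
        raise 'watch could not be re-established' if retries >= max_times

        sleep(interval * (backoff_base**retries)) # back off 1s, 2s, 4s, ...
        retries += 1
        retry
      end
    end

    # Example: the third attempt succeeds, so nothing is raised.
    attempts = 0
    with_watch_retries do
      attempts += 1
      raise 'watch connection dropped' if attempts < 3
    end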
data/test/plugin/test_watch_pods.rb

@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,129 +19,326 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require 'ostruct'
 require_relative 'watch_test'

 class DefaultPodWatchStrategyTest < WatchTest
+  include KubernetesMetadata::WatchPods

-  include KubernetesMetadata::WatchPods
-
-  setup do
-    @created = OpenStruct.new(
-      type: 'CREATED',
-      object: {
-        'metadata' => {
-          'name' => 'created',
-          'namespace' => 'create',
-          'uid' => 'created_uid',
-          'labels' => {},
-        },
-        'spec' => {
-          'nodeName' => 'aNodeName',
-          'containers' => [{
-            'name' => 'foo',
-            'image' => 'bar',
-          }, {
-            'name' => 'bar',
-            'image' => 'foo',
-          }]
-        }
-      }
-    )
-    @modified = OpenStruct.new(
-      type: 'MODIFIED',
-      object: {
-        'metadata' => {
-          'name' => 'foo',
-          'namespace' => 'modified',
-          'uid' => 'modified_uid',
-          'labels' => {},
-        },
-        'spec' => {
-          'nodeName' => 'aNodeName',
-          'containers' => [{
-            'name' => 'foo',
-            'image' => 'bar',
-          }, {
-            'name' => 'bar',
-            'image' => 'foo',
-          }]
-        },
-        'status' => {
-          'containerStatuses' => [
-            {
-              'name' => 'fabric8-console-container',
-              'state' => {
-                'running' => {
-                  'startedAt' => '2015-05-08T09:22:44Z'
-                }
-              },
-              'lastState' => {},
-              'ready' => true,
-              'restartCount' => 0,
-              'image' => 'fabric8/hawtio-kubernetes:latest',
-              'imageID' => 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
-              'containerID' => 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
-            }
-          ]
-        }
-      }
-    )
-    @deleted = OpenStruct.new(
-      type: 'DELETED',
-      object: {
-        'metadata' => {
-          'name' => 'deleteme',
-          'namespace' => 'deleted',
-          'uid' => 'deleted_uid'
+  setup do
+    @initial = {
+      kind: 'PodList',
+      metadata: { resourceVersion: '123' },
+      items: [
+        {
+          metadata: {
+            name: 'initial',
+            namespace: 'initial_ns',
+            uid: 'initial_uid',
+            labels: {}
+          },
+          spec: {
+            nodeName: 'aNodeName',
+            containers: [{
+              name: 'foo',
+              image: 'bar'
+            }, {
+              name: 'bar',
+              image: 'foo'
+            }]
+          },
+          status: {
+            podIP: '172.17.0.8'
+          }
+        },
+        {
+          metadata: {
+            name: 'modified',
+            namespace: 'create',
+            uid: 'modified_uid',
+            labels: {}
+          },
+          spec: {
+            nodeName: 'aNodeName',
+            containers: [{
+              name: 'foo',
+              image: 'bar'
+            }, {
+              name: 'bar',
+              image: 'foo'
+            }]
+          },
+          status: {
+            podIP: '172.17.0.8'
+          }
+        }
+      ]
+    }
+    @created = {
+      type: 'CREATED',
+      object: {
+        metadata: {
+          name: 'created',
+          namespace: 'create',
+          uid: 'created_uid',
+          resourceVersion: '122',
+          labels: {}
+        },
+        spec: {
+          nodeName: 'aNodeName',
+          containers: [{
+            name: 'foo',
+            image: 'bar'
+          }, {
+            name: 'bar',
+            image: 'foo'
+          }]
+        },
+        status: {
+          podIP: '172.17.0.8'
+        }
+      }
+    }
+    @modified = {
+      type: 'MODIFIED',
+      object: {
+        metadata: {
+          name: 'foo',
+          namespace: 'modified',
+          uid: 'modified_uid',
+          resourceVersion: '123',
+          labels: {}
+        },
+        spec: {
+          nodeName: 'aNodeName',
+          containers: [{
+            name: 'foo',
+            image: 'bar'
+          }, {
+            name: 'bar',
+            image: 'foo'
+          }]
+        },
+        status: {
+          podIP: '172.17.0.8',
+          containerStatuses: [
+            {
+              name: 'fabric8-console-container',
+              state: {
+                running: {
+                  startedAt: '2015-05-08T09:22:44Z'
+                }
+              },
+              lastState: {},
+              ready: true,
+              restartCount: 0,
+              image: 'fabric8/hawtio-kubernetes:latest',
+              imageID: 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
+              containerID: 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
             }
-      }
-    )
-  end
+          ]
+        }
+      }
+    }
+    @deleted = {
+      type: 'DELETED',
+      object: {
+        metadata: {
+          name: 'deleteme',
+          namespace: 'deleted',
+          uid: 'deleted_uid',
+          resourceVersion: '124'
+        }
+      }
+    }
+    @error = {
+      type: 'ERROR',
+      object: {
+        message: 'some error message'
+      }
+    }
+    @gone = {
+      type: 'ERROR',
+      object: {
+        code: 410,
+        kind: 'Status',
+        message: 'too old resource version: 123 (391079)',
+        metadata: {
+          name: 'gone',
+          namespace: 'gone',
+          uid: 'gone_uid'
+        },
+        reason: 'Gone'
+      }
+    }
+  end
+
+  test 'pod list caches pods' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :get_pods, @initial do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('initial_uid'))
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(2, @stats[:pod_cache_host_updates])
+    end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+  end
+
+  test 'pod list caches pods and watch updates' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@modified] do
+        process_pod_watcher_notices(start_pod_watch)
+        assert_equal(2, @stats[:pod_cache_host_updates])
+        assert_equal(1, @stats[:pod_cache_watch_updates])
+      end
+    end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+    assert_equal('123', @last_seen_resource_version) # from @modified
+  end

-  test 'pod watch notice ignores CREATED' do
+  test 'pod watch notice ignores CREATED' do
+    @client.stub :get_pods, @initial do
     @client.stub :watch_pods, [@created] do
-      start_pod_watch
-      assert_equal(false, @cache.key?('created_uid'))
-      assert_equal(1, @stats[:pod_cache_watch_ignored])
+        process_pod_watcher_notices(start_pod_watch)
+        assert_equal(false, @cache.key?('created_uid'))
+        assert_equal(1, @stats[:pod_cache_watch_ignored])
     end
   end
+  end

-  test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
-    @client.stub :watch_pods, [@modified] do
-      start_pod_watch
-      assert_equal(false, @cache.key?('modified_uid'))
-      assert_equal(1, @stats[:pod_cache_watch_misses])
+  test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(false, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_misses])
+    end
+  end
+
+  test 'pod MODIFIED cached when hostname matches' do
+    orig_env_val = ENV['K8S_NODE_NAME']
+    ENV['K8S_NODE_NAME'] = 'aNodeName'
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_host_updates])
+    end
+    ENV['K8S_NODE_NAME'] = orig_env_val
+  end
+
+  test 'pod watch notice is updated when MODIFIED is received' do
+    @cache['modified_uid'] = {}
+    @client.stub :watch_pods, [@modified] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('modified_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_updates])
+    end
+  end
+
+  test 'pod watch notice is ignored when delete is received' do
+    @cache['deleted_uid'] = {}
+    @client.stub :watch_pods, [@deleted] do
+      process_pod_watcher_notices(start_pod_watch)
+      assert_equal(true, @cache.key?('deleted_uid'))
+      assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
+    end
+  end
+
+  test 'pod watch raises Fluent::UnrecoverableError when cannot re-establish connection to k8s API server' do
+    # Stub start_pod_watch to simulate initial successful connection to API server
+    stub(self).start_pod_watch
+    # Stub watch_pods to simluate not being able to set up watch connection to API server
+    stub(@client).watch_pods { raise }
+    @client.stub :get_pods, @initial do
+      assert_raise Fluent::UnrecoverableError do
+        set_up_pod_thread
      end
    end
+    assert_equal(3, @stats[:pod_watch_failures])
+    assert_equal(2, Thread.current[:pod_watch_retry_count])
+    assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
+    assert_nil(@stats[:pod_watch_error_type_notices])
+  end

-  test 'pod MODIFIED cached when hostname matches' do
-    orig_env_val = ENV['K8S_NODE_NAME']
-    ENV['K8S_NODE_NAME'] = 'aNodeName'
-    @client.stub :watch_pods, [@modified] do
-      start_pod_watch
-      assert_equal(true, @cache.key?('modified_uid'))
-      assert_equal(1, @stats[:pod_cache_host_updates])
+  test 'pod watch resets watch retry count when exceptions are encountered and connection to k8s API server is re-established' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [[@created, @exception_raised]] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
+          end
+        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
      end
-    ENV['K8S_NODE_NAME'] = orig_env_val
    end
+  end

-  test 'pod watch notice is updated when MODIFIED is received' do
-    @cache['modified_uid'] = {}
-    @client.stub :watch_pods, [@modified] do
-      start_pod_watch
-      assert_equal(true, @cache.key?('modified_uid'))
-      assert_equal(1, @stats[:pod_cache_watch_updates])
+  test 'pod watch resets watch retry count when error is received and connection to k8s API server is re-established' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@error] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
+          end
+        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
+      end
+    end
+  end
+
+  test 'pod watch continues after retries succeed' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@modified, @error, @modified] do
+        # Force the infinite watch loop to exit after 3 seconds. Verifies that
+        # no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
+          end
+        end
+        assert_operator(@stats[:pod_watch_failures], :>=, 3)
+        assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+        assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+        assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
      end
    end
+  end

-  test 'pod watch notice is ignored when delete is received' do
-    @cache['deleted_uid'] = {}
-    @client.stub :watch_pods, [@deleted] do
-      start_pod_watch
-      assert_equal(true, @cache.key?('deleted_uid'))
-      assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
+  test 'pod watch raises a GoneError when a 410 Gone error is received' do
+    @cache['gone_uid'] = {}
+    @client.stub :watch_pods, [@gone] do
+      @last_seen_resource_version = '100'
+      assert_raise KubernetesMetadata::Common::GoneError do
+        process_pod_watcher_notices(start_pod_watch)
      end
+      assert_equal(1, @stats[:pod_watch_gone_notices])
+      assert_nil @last_seen_resource_version # forced restart
    end
+  end

+  test 'pod watch retries when 410 Gone errors are encountered' do
+    @client.stub :get_pods, @initial do
+      @client.stub :watch_pods, [@created, @gone, @modified] do
+        # Force the infinite watch loop to exit after 3 seconds because the code sleeps 3 times.
+        # Verifies that no unrecoverable error was thrown during this period of time.
+        assert_raise Timeout::Error.new('execution expired') do
+          Timeout.timeout(3) do
+            set_up_pod_thread
+          end
+        end
+        assert_operator(@stats[:pod_watch_gone_errors], :>=, 3)
+        assert_operator(@stats[:pod_watch_gone_notices], :>=, 3)
+      end
+    end
+  end
 end
data/test/plugin/watch_test.rb

@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,41 +19,58 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require 'ostruct'

 class WatchTest < Test::Unit::TestCase
-
-  setup do
-    @annotations_regexps = []
-    @namespace_cache = {}
-    @cache = {}
-    @stats = KubernetesMetadata::Stats.new
-    @client = OpenStruct.new
-    def @client.resourceVersion
-      '12345'
-    end
-    def @client.watch_pods(value)
-      []
-    end
-    def @client.watch_namespaces(value)
-      []
-    end
-    def @client.get_namespaces
-      self
-    end
-    def @client.get_pods
-      self
-    end
-  end
-
-  def watcher=(value)
-  end
-
-  def log
-    logger = {}
-    def logger.debug(message)
-    end
-    logger
+  def thread_current_running?
+    true
+  end
+
+  setup do
+    @annotations_regexps = []
+    @namespace_cache = {}
+    @watch_retry_max_times = 2
+    @watch_retry_interval = 1
+    @watch_retry_exponential_backoff_base = 2
+    @cache = {}
+    @stats = KubernetesMetadata::Stats.new
+    Thread.current[:pod_watch_retry_count] = 0
+    Thread.current[:namespace_watch_retry_count] = 0
+
+    @client = OpenStruct.new
+    def @client.watch_pods(_options = {})
+      []
+    end
+
+    def @client.watch_namespaces(_options = {})
+      []
+    end
+
+    def @client.get_namespaces(_options = {})
+      { items: [], metadata: { resourceVersion: '12345' } }
+    end
+
+    def @client.get_pods(_options = {})
+      { items: [], metadata: { resourceVersion: '12345' } }
     end

+    @exception_raised = :blow_up_when_used
+  end
+
+  def watcher=(value)
+  end
+
+  def log
+    logger = {}
+    def logger.debug(message)
+    end
+
+    def logger.info(message, error)
+    end
+
+    def logger.error(message, error)
+    end
+    def logger.warn(message)
+    end
+    logger
+  end
 end