fluent-plugin-kubernetes_metadata_filter 2.5.2 → 2.11.1

This diff shows the changes between two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
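Most of the churn in the diff below is a fixture-style migration: the watch tests stop wrapping their Kubernetes API fixtures in Kubeclient::Common::EntityList / Kubeclient::Resource / OpenStruct objects and build plain Ruby hashes with symbol keys instead (plus new resourceVersion / podIP fields and assertions around @last_seen_resource_version and retry behaviour). A minimal sketch of that shape change, condensed from the fixtures in the diff rather than copied verbatim:

require 'ostruct'
require 'kubeclient'

# 2.5.2 style: fixtures wrapped in Kubeclient / OpenStruct objects (trimmed excerpt)
@initial = Kubeclient::Common::EntityList.new(
  'PodList', '123',
  [Kubeclient::Resource.new({ 'metadata' => { 'name' => 'initial', 'namespace' => 'initial_ns' } })]
)
@created = OpenStruct.new(type: 'CREATED', object: { 'metadata' => { 'name' => 'created' } })

# 2.11.1 style: plain Ruby hashes with symbol keys
@initial = {
  kind: 'PodList',
  metadata: { resourceVersion: '123' },
  items: [{ metadata: { name: 'initial', namespace: 'initial_ns' } }]
}
@created = { type: 'CREATED', object: { metadata: { name: 'created' } } }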
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  #
  # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
  # Kubernetes metadata
@@ -17,288 +19,326 @@
  # limitations under the License.
  #
  require_relative '../helper'
- require 'ostruct'
  require_relative 'watch_test'

  class DefaultPodWatchStrategyTest < WatchTest
+ include KubernetesMetadata::WatchPods

- include KubernetesMetadata::WatchPods
-
- setup do
- @initial = Kubeclient::Common::EntityList.new(
- 'PodList',
- '123',
- [
- Kubeclient::Resource.new({
- 'metadata' => {
- 'name' => 'initial',
- 'namespace' => 'initial_ns',
- 'uid' => 'initial_uid',
- 'labels' => {},
- },
- 'spec' => {
- 'nodeName' => 'aNodeName',
- 'containers' => [{
- 'name' => 'foo',
- 'image' => 'bar',
- }, {
- 'name' => 'bar',
- 'image' => 'foo',
- }]
- }
- }),
- Kubeclient::Resource.new({
- 'metadata' => {
- 'name' => 'modified',
- 'namespace' => 'create',
- 'uid' => 'modified_uid',
- 'labels' => {},
- },
- 'spec' => {
- 'nodeName' => 'aNodeName',
- 'containers' => [{
- 'name' => 'foo',
- 'image' => 'bar',
- }, {
- 'name' => 'bar',
- 'image' => 'foo',
- }]
- }
- }),
- ])
- @created = OpenStruct.new(
- type: 'CREATED',
- object: {
- 'metadata' => {
- 'name' => 'created',
- 'namespace' => 'create',
- 'uid' => 'created_uid',
- 'labels' => {},
- },
- 'spec' => {
- 'nodeName' => 'aNodeName',
- 'containers' => [{
- 'name' => 'foo',
- 'image' => 'bar',
- }, {
- 'name' => 'bar',
- 'image' => 'foo',
- }]
- }
- }
- )
- @modified = OpenStruct.new(
- type: 'MODIFIED',
- object: {
- 'metadata' => {
- 'name' => 'foo',
- 'namespace' => 'modified',
- 'uid' => 'modified_uid',
- 'labels' => {},
- },
- 'spec' => {
- 'nodeName' => 'aNodeName',
- 'containers' => [{
- 'name' => 'foo',
- 'image' => 'bar',
- }, {
- 'name' => 'bar',
- 'image' => 'foo',
- }]
- },
- 'status' => {
- 'containerStatuses' => [
- {
- 'name' => 'fabric8-console-container',
- 'state' => {
- 'running' => {
- 'startedAt' => '2015-05-08T09:22:44Z'
- }
- },
- 'lastState' => {},
- 'ready' => true,
- 'restartCount' => 0,
- 'image' => 'fabric8/hawtio-kubernetes:latest',
- 'imageID' => 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
- 'containerID' => 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
- }
- ]
- }
- }
- )
- @deleted = OpenStruct.new(
- type: 'DELETED',
- object: {
- 'metadata' => {
- 'name' => 'deleteme',
- 'namespace' => 'deleted',
- 'uid' => 'deleted_uid'
+ setup do
+ @initial = {
+ kind: 'PodList',
+ metadata: { resourceVersion: '123' },
+ items: [
+ {
+ metadata: {
+ name: 'initial',
+ namespace: 'initial_ns',
+ uid: 'initial_uid',
+ labels: {}
+ },
+ spec: {
+ nodeName: 'aNodeName',
+ containers: [{
+ name: 'foo',
+ image: 'bar'
+ }, {
+ name: 'bar',
+ image: 'foo'
+ }]
+ },
+ status: {
+ podIP: '172.17.0.8'
+ }
+ },
+ {
+ metadata: {
+ name: 'modified',
+ namespace: 'create',
+ uid: 'modified_uid',
+ labels: {}
+ },
+ spec: {
+ nodeName: 'aNodeName',
+ containers: [{
+ name: 'foo',
+ image: 'bar'
+ }, {
+ name: 'bar',
+ image: 'foo'
+ }]
+ },
+ status: {
+ podIP: '172.17.0.8'
+ }
+ }
+ ]
+ }
+ @created = {
+ type: 'CREATED',
+ object: {
+ metadata: {
+ name: 'created',
+ namespace: 'create',
+ uid: 'created_uid',
+ resourceVersion: '122',
+ labels: {}
+ },
+ spec: {
+ nodeName: 'aNodeName',
+ containers: [{
+ name: 'foo',
+ image: 'bar'
+ }, {
+ name: 'bar',
+ image: 'foo'
+ }]
+ },
+ status: {
+ podIP: '172.17.0.8'
+ }
+ }
+ }
+ @modified = {
+ type: 'MODIFIED',
+ object: {
+ metadata: {
+ name: 'foo',
+ namespace: 'modified',
+ uid: 'modified_uid',
+ resourceVersion: '123',
+ labels: {}
+ },
+ spec: {
+ nodeName: 'aNodeName',
+ containers: [{
+ name: 'foo',
+ image: 'bar'
+ }, {
+ name: 'bar',
+ image: 'foo'
+ }]
+ },
+ status: {
+ podIP: '172.17.0.8',
+ containerStatuses: [
+ {
+ name: 'fabric8-console-container',
+ state: {
+ running: {
+ startedAt: '2015-05-08T09:22:44Z'
+ }
+ },
+ lastState: {},
+ ready: true,
+ restartCount: 0,
+ image: 'fabric8/hawtio-kubernetes:latest',
+ imageID: 'docker://b2bd1a24a68356b2f30128e6e28e672c1ef92df0d9ec01ec0c7faea5d77d2303',
+ containerID: 'docker://49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459'
  }
- }
- )
- @error = OpenStruct.new(
- type: 'ERROR',
- object: {
- 'message' => 'some error message'
- }
- )
- @gone = OpenStruct.new(
- type: 'ERROR',
- object: {
- 'code' => 410,
- 'kind' => 'Status',
- 'message' => 'too old resource version: 123 (391079)',
- 'metadata' => {
- 'name' => 'gone',
- 'namespace' => 'gone',
- 'uid' => 'gone_uid'
- },
- 'reason' => 'Gone'
- }
- )
- end
+ ]
+ }
+ }
+ }
+ @deleted = {
+ type: 'DELETED',
+ object: {
+ metadata: {
+ name: 'deleteme',
+ namespace: 'deleted',
+ uid: 'deleted_uid',
+ resourceVersion: '124'
+ }
+ }
+ }
+ @error = {
+ type: 'ERROR',
+ object: {
+ message: 'some error message'
+ }
+ }
+ @gone = {
+ type: 'ERROR',
+ object: {
+ code: 410,
+ kind: 'Status',
+ message: 'too old resource version: 123 (391079)',
+ metadata: {
+ name: 'gone',
+ namespace: 'gone',
+ uid: 'gone_uid'
+ },
+ reason: 'Gone'
+ }
+ }
+ end
+
+ test 'pod list caches pods' do
+ orig_env_val = ENV['K8S_NODE_NAME']
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
+ @client.stub :get_pods, @initial do
+ process_pod_watcher_notices(start_pod_watch)
+ assert_equal(true, @cache.key?('initial_uid'))
+ assert_equal(true, @cache.key?('modified_uid'))
+ assert_equal(2, @stats[:pod_cache_host_updates])
+ end
+ ENV['K8S_NODE_NAME'] = orig_env_val
+ end

- test 'pod list caches pods' do
- orig_env_val = ENV['K8S_NODE_NAME']
- ENV['K8S_NODE_NAME'] = 'aNodeName'
- @client.stub :get_pods, @initial do
+ test 'pod list caches pods and watch updates' do
+ orig_env_val = ENV['K8S_NODE_NAME']
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
+ @client.stub :get_pods, @initial do
+ @client.stub :watch_pods, [@modified] do
  process_pod_watcher_notices(start_pod_watch)
- assert_equal(true, @cache.key?('initial_uid'))
- assert_equal(true, @cache.key?('modified_uid'))
  assert_equal(2, @stats[:pod_cache_host_updates])
+ assert_equal(1, @stats[:pod_cache_watch_updates])
  end
- ENV['K8S_NODE_NAME'] = orig_env_val
  end
+ ENV['K8S_NODE_NAME'] = orig_env_val
+ assert_equal('123', @last_seen_resource_version) # from @modified
+ end

- test 'pod list caches pods and watch updates' do
- orig_env_val = ENV['K8S_NODE_NAME']
- ENV['K8S_NODE_NAME'] = 'aNodeName'
- @client.stub :get_pods, @initial do
- @client.stub :watch_pods, [@modified] do
- process_pod_watcher_notices(start_pod_watch)
- assert_equal(2, @stats[:pod_cache_host_updates])
- assert_equal(1, @stats[:pod_cache_watch_updates])
- end
+ test 'pod watch notice ignores CREATED' do
+ @client.stub :get_pods, @initial do
+ @client.stub :watch_pods, [@created] do
+ process_pod_watcher_notices(start_pod_watch)
+ assert_equal(false, @cache.key?('created_uid'))
+ assert_equal(1, @stats[:pod_cache_watch_ignored])
  end
- ENV['K8S_NODE_NAME'] = orig_env_val
  end
+ end

- test 'pod watch notice ignores CREATED' do
- @client.stub :get_pods, @initial do
- @client.stub :watch_pods, [@created] do
- process_pod_watcher_notices(start_pod_watch)
- assert_equal(false, @cache.key?('created_uid'))
- assert_equal(1, @stats[:pod_cache_watch_ignored])
- end
- end
+ test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
+ @client.stub :watch_pods, [@modified] do
+ process_pod_watcher_notices(start_pod_watch)
+ assert_equal(false, @cache.key?('modified_uid'))
+ assert_equal(1, @stats[:pod_cache_watch_misses])
  end
+ end

- test 'pod watch notice is ignored when info not cached and MODIFIED is received' do
- @client.stub :watch_pods, [@modified] do
- process_pod_watcher_notices(start_pod_watch)
- assert_equal(false, @cache.key?('modified_uid'))
- assert_equal(1, @stats[:pod_cache_watch_misses])
- end
+ test 'pod MODIFIED cached when hostname matches' do
+ orig_env_val = ENV['K8S_NODE_NAME']
+ ENV['K8S_NODE_NAME'] = 'aNodeName'
+ @client.stub :watch_pods, [@modified] do
+ process_pod_watcher_notices(start_pod_watch)
+ assert_equal(true, @cache.key?('modified_uid'))
+ assert_equal(1, @stats[:pod_cache_host_updates])
  end
+ ENV['K8S_NODE_NAME'] = orig_env_val
+ end

- test 'pod MODIFIED cached when hostname matches' do
- orig_env_val = ENV['K8S_NODE_NAME']
- ENV['K8S_NODE_NAME'] = 'aNodeName'
- @client.stub :watch_pods, [@modified] do
- process_pod_watcher_notices(start_pod_watch)
- assert_equal(true, @cache.key?('modified_uid'))
- assert_equal(1, @stats[:pod_cache_host_updates])
- end
- ENV['K8S_NODE_NAME'] = orig_env_val
+ test 'pod watch notice is updated when MODIFIED is received' do
+ @cache['modified_uid'] = {}
+ @client.stub :watch_pods, [@modified] do
+ process_pod_watcher_notices(start_pod_watch)
+ assert_equal(true, @cache.key?('modified_uid'))
+ assert_equal(1, @stats[:pod_cache_watch_updates])
  end
+ end

- test 'pod watch notice is updated when MODIFIED is received' do
- @cache['modified_uid'] = {}
- @client.stub :watch_pods, [@modified] do
- process_pod_watcher_notices(start_pod_watch)
- assert_equal(true, @cache.key?('modified_uid'))
- assert_equal(1, @stats[:pod_cache_watch_updates])
- end
+ test 'pod watch notice is ignored when delete is received' do
+ @cache['deleted_uid'] = {}
+ @client.stub :watch_pods, [@deleted] do
+ process_pod_watcher_notices(start_pod_watch)
+ assert_equal(true, @cache.key?('deleted_uid'))
+ assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
  end
+ end

- test 'pod watch notice is ignored when delete is received' do
- @cache['deleted_uid'] = {}
- @client.stub :watch_pods, [@deleted] do
- process_pod_watcher_notices(start_pod_watch)
- assert_equal(true, @cache.key?('deleted_uid'))
- assert_equal(1, @stats[:pod_cache_watch_delete_ignored])
+ test 'pod watch raises Fluent::UnrecoverableError when cannot re-establish connection to k8s API server' do
+ # Stub start_pod_watch to simulate initial successful connection to API server
+ stub(self).start_pod_watch
+ # Stub watch_pods to simluate not being able to set up watch connection to API server
+ stub(@client).watch_pods { raise }
+ @client.stub :get_pods, @initial do
+ assert_raise Fluent::UnrecoverableError do
+ set_up_pod_thread
  end
  end
+ assert_equal(3, @stats[:pod_watch_failures])
+ assert_equal(2, Thread.current[:pod_watch_retry_count])
+ assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
+ assert_nil(@stats[:pod_watch_error_type_notices])
+ end

- test 'pod watch retries when exceptions are encountered' do
- @client.stub :get_pods, @initial do
- @client.stub :watch_pods, [[@created, @exception_raised]] do
- assert_raise Fluent::UnrecoverableError do
+ test 'pod watch resets watch retry count when exceptions are encountered and connection to k8s API server is re-established' do
+ @client.stub :get_pods, @initial do
+ @client.stub :watch_pods, [[@created, @exception_raised]] do
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
+ # no unrecoverable error was thrown during this period of time.
+ assert_raise Timeout::Error.new('execution expired') do
+ Timeout.timeout(3) do
  set_up_pod_thread
  end
- assert_equal(3, @stats[:pod_watch_failures])
- assert_equal(2, Thread.current[:pod_watch_retry_count])
- assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
- assert_nil(@stats[:pod_watch_error_type_notices])
  end
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
  end
  end
+ end

- test 'pod watch retries when error is received' do
- @client.stub :get_pods, @initial do
- @client.stub :watch_pods, [@error] do
- assert_raise Fluent::UnrecoverableError do
+ test 'pod watch resets watch retry count when error is received and connection to k8s API server is re-established' do
+ @client.stub :get_pods, @initial do
+ @client.stub :watch_pods, [@error] do
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
+ # no unrecoverable error was thrown during this period of time.
+ assert_raise Timeout::Error.new('execution expired') do
+ Timeout.timeout(3) do
  set_up_pod_thread
  end
- assert_equal(3, @stats[:pod_watch_failures])
- assert_equal(2, Thread.current[:pod_watch_retry_count])
- assert_equal(4, Thread.current[:pod_watch_retry_backoff_interval])
- assert_equal(3, @stats[:pod_watch_error_type_notices])
  end
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+ assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
  end
  end
+ end

- test 'pod watch continues after retries succeed' do
- @client.stub :get_pods, @initial do
- @client.stub :watch_pods, [@modified, @error, @modified] do
- # Force the infinite watch loop to exit after 3 seconds. Verifies that
- # no unrecoverable error was thrown during this period of time.
- assert_raise Timeout::Error.new('execution expired') do
- Timeout.timeout(3) do
- set_up_pod_thread
- end
+ test 'pod watch continues after retries succeed' do
+ @client.stub :get_pods, @initial do
+ @client.stub :watch_pods, [@modified, @error, @modified] do
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
+ # no unrecoverable error was thrown during this period of time.
+ assert_raise Timeout::Error.new('execution expired') do
+ Timeout.timeout(3) do
+ set_up_pod_thread
  end
- assert_operator(@stats[:pod_watch_failures], :>=, 3)
- assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
- assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
- assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
  end
+ assert_operator(@stats[:pod_watch_failures], :>=, 3)
+ assert_operator(Thread.current[:pod_watch_retry_count], :<=, 1)
+ assert_operator(Thread.current[:pod_watch_retry_backoff_interval], :<=, 1)
+ assert_operator(@stats[:pod_watch_error_type_notices], :>=, 3)
  end
  end
+ end

- test 'pod watch raises a GoneError when a 410 Gone error is received' do
- @cache['gone_uid'] = {}
- @client.stub :watch_pods, [@gone] do
- assert_raise KubernetesMetadata::Common::GoneError do
- process_pod_watcher_notices(start_pod_watch)
- end
- assert_equal(1, @stats[:pod_watch_gone_notices])
+ test 'pod watch raises a GoneError when a 410 Gone error is received' do
+ @cache['gone_uid'] = {}
+ @client.stub :watch_pods, [@gone] do
+ @last_seen_resource_version = '100'
+ assert_raise KubernetesMetadata::Common::GoneError do
+ process_pod_watcher_notices(start_pod_watch)
  end
+ assert_equal(1, @stats[:pod_watch_gone_notices])
+ assert_nil @last_seen_resource_version # forced restart
  end
+ end

- test 'pod watch retries when 410 Gone errors are encountered' do
- @client.stub :get_pods, @initial do
- @client.stub :watch_pods, [@created, @gone, @modified] do
- # Force the infinite watch loop to exit after 3 seconds. Verifies that
- # no unrecoverable error was thrown during this period of time.
- assert_raise Timeout::Error.new('execution expired') do
- Timeout.timeout(3) do
- set_up_pod_thread
- end
+ test 'pod watch retries when 410 Gone errors are encountered' do
+ @client.stub :get_pods, @initial do
+ @client.stub :watch_pods, [@created, @gone, @modified] do
+ # Force the infinite watch loop to exit after 3 seconds because the code sleeps 3 times.
+ # Verifies that no unrecoverable error was thrown during this period of time.
+ assert_raise Timeout::Error.new('execution expired') do
+ Timeout.timeout(3) do
+ set_up_pod_thread
  end
- assert_operator(@stats[:pod_watch_gone_errors], :>=, 3)
- assert_operator(@stats[:pod_watch_gone_notices], :>=, 3)
  end
+ assert_operator(@stats[:pod_watch_gone_errors], :>=, 3)
+ assert_operator(@stats[:pod_watch_gone_notices], :>=, 3)
  end
  end
+ end
  end

@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  #
  # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
  # Kubernetes metadata
@@ -17,10 +19,8 @@
  # limitations under the License.
  #
  require_relative '../helper'
- require 'ostruct'

  class WatchTest < Test::Unit::TestCase
-
  def thread_current_running?
  true
  end
@@ -37,26 +37,23 @@ class WatchTest < Test::Unit::TestCase
  Thread.current[:namespace_watch_retry_count] = 0

  @client = OpenStruct.new
- def @client.resourceVersion
- '12345'
- end
- def @client.watch_pods(options = {})
+ def @client.watch_pods(_options = {})
  []
  end
- def @client.watch_namespaces(options = {})
+
+ def @client.watch_namespaces(_options = {})
  []
  end
- def @client.get_namespaces(options = {})
- self
- end
- def @client.get_pods(options = {})
- self
+
+ def @client.get_namespaces(_options = {})
+ { items: [], metadata: { resourceVersion: '12345' } }
  end

- @exception_raised = OpenStruct.new
- def @exception_raised.each
- raise Exception
+ def @client.get_pods(_options = {})
+ { items: [], metadata: { resourceVersion: '12345' } }
  end
+
+ @exception_raised = :blow_up_when_used
  end

  def watcher=(value)
@@ -66,10 +63,14 @@ class WatchTest < Test::Unit::TestCase
  logger = {}
  def logger.debug(message)
  end
+
  def logger.info(message, error)
  end
+
  def logger.error(message, error)
  end
+ def logger.warn(message)
+ end
  logger
  end
  end