fluent-plugin-kubernetes_metadata_filter 2.5.0 → 3.2.0

This diff represents the changes between two publicly available package versions as released to their public registry, and is provided for informational purposes only.
Files changed (46)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +10 -14
  3. data/.gitignore +0 -1
  4. data/.rubocop.yml +57 -0
  5. data/Gemfile +4 -2
  6. data/Gemfile.lock +76 -67
  7. data/README.md +9 -83
  8. data/Rakefile +15 -11
  9. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/cpu.png +0 -0
  10. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/latency.png +0 -0
  11. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/loss.png +0 -0
  12. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/mem.png +0 -0
  13. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/readme.md +88 -0
  14. data/doc/benchmark/5m-1-2500lps-256b-baseline-01/results.html +127 -0
  15. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/cpu.png +0 -0
  16. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/latency.png +0 -0
  17. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/loss.png +0 -0
  18. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/mem.png +0 -0
  19. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/readme.md +97 -0
  20. data/doc/benchmark/5m-1-2500lps-256b-kube-01-01/results.html +136 -0
  21. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/cpu.png +0 -0
  22. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/latency.png +0 -0
  23. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/loss.png +0 -0
  24. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/mem.png +0 -0
  25. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/readme.md +97 -0
  26. data/doc/benchmark/5m-1-2500lps-256b-kube-01-marshal-02/results.html +136 -0
  27. data/fluent-plugin-kubernetes_metadata_filter.gemspec +25 -27
  28. data/lib/fluent/plugin/filter_kubernetes_metadata.rb +171 -192
  29. data/lib/fluent/plugin/kubernetes_metadata_cache_strategy.rb +25 -23
  30. data/lib/fluent/plugin/kubernetes_metadata_common.rb +44 -69
  31. data/lib/fluent/plugin/kubernetes_metadata_stats.rb +21 -5
  32. data/lib/fluent/plugin/kubernetes_metadata_test_api_adapter.rb +68 -0
  33. data/lib/fluent/plugin/kubernetes_metadata_util.rb +33 -0
  34. data/lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb +91 -42
  35. data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb +108 -47
  36. data/release_notes.md +42 -0
  37. data/test/cassettes/kubernetes_get_pod_container_init.yml +145 -0
  38. data/test/helper.rb +20 -2
  39. data/test/plugin/test_cache_stats.rb +10 -13
  40. data/test/plugin/test_cache_strategy.rb +158 -160
  41. data/test/plugin/test_filter_kubernetes_metadata.rb +340 -616
  42. data/test/plugin/test_watch_namespaces.rb +188 -125
  43. data/test/plugin/test_watch_pods.rb +282 -202
  44. data/test/plugin/watch_test.rb +16 -15
  45. metadata +77 -67
  46. /data/test/cassettes/{kubernetes_docker_metadata_dotted_labels.yml → kubernetes_docker_metadata_dotted_slashed_labels.yml} +0 -0
data/lib/fluent/plugin/kubernetes_metadata_watch_pods.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -16,11 +18,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+# TODO: this is mostly copy-paste from kubernetes_metadata_watch_namespaces.rb unify them
 require_relative 'kubernetes_metadata_common'
 
 module KubernetesMetadata
   module WatchPods
-
     include ::KubernetesMetadata::Common
 
     def set_up_pod_thread
@@ -28,6 +30,7 @@ module KubernetesMetadata
       # Fluent:ConfigError, so that users can inspect potential errors in
      # the configuration.
       pod_watcher = start_pod_watch
+
       Thread.current[:pod_watch_retry_backoff_interval] = @watch_retry_interval
       Thread.current[:pod_watch_retry_count] = 0
 
@@ -35,20 +38,33 @@ module KubernetesMetadata
       # processing will be swallowed and retried. These failures /
       # exceptions could be caused by Kubernetes API being temporarily
       # down. We assume the configuration is correct at this point.
-      while true
-        begin
-          pod_watcher ||= get_pods_and_start_watcher
-          process_pod_watcher_notices(pod_watcher)
-        rescue Exception => e
+      loop do
+        pod_watcher ||= get_pods_and_start_watcher
+        process_pod_watcher_notices(pod_watcher)
+      rescue GoneError => e
+        # Expected error. Quietly go back through the loop in order to
+        # start watching from the latest resource versions
+        @stats.bump(:pod_watch_gone_errors)
+        log.info('410 Gone encountered. Restarting pod watch to reset resource versions.', e)
+        pod_watcher = nil
+      rescue KubeException => e
+        if e.error_code == 401
+          # recreate client to refresh token
+          log.info("Encountered '401 Unauthorized' exception in watch, recreating client to refresh token")
+          create_client()
+          pod_watcher = nil
+        else
+          # treat all other errors the same as StandardError, log, swallow and reset
           @stats.bump(:pod_watch_failures)
           if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
             # Instead of raising exceptions and crashing Fluentd, swallow
             # the exception and reset the watcher.
             log.info(
-              "Exception encountered parsing pod watch event. The " \
-              "connection might have been closed. Sleeping for " \
+              'Exception encountered parsing pod watch event. The ' \
+              'connection might have been closed. Sleeping for ' \
               "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
-              "seconds and resetting the pod watcher.", e)
+              'seconds and resetting the pod watcher.', e
+            )
             sleep(Thread.current[:pod_watch_retry_backoff_interval])
             Thread.current[:pod_watch_retry_count] += 1
             Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
@@ -57,20 +73,45 @@ module KubernetesMetadata
             # Since retries failed for many times, log as errors instead
             # of info and raise exceptions and trigger Fluentd to restart.
             message =
-              "Exception encountered parsing pod watch event. The " \
-              "connection might have been closed. Retried " \
+              'Exception encountered parsing pod watch event. The ' \
+              'connection might have been closed. Retried ' \
               "#{@watch_retry_max_times} times yet still failing. Restarting."
             log.error(message, e)
-            raise Fluent::UnrecoverableError.new(message)
+            raise Fluent::UnrecoverableError, message
           end
         end
+      rescue StandardError => e
+        @stats.bump(:pod_watch_failures)
+        if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
+          # Instead of raising exceptions and crashing Fluentd, swallow
+          # the exception and reset the watcher.
+          log.info(
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Sleeping for ' \
+            "#{Thread.current[:pod_watch_retry_backoff_interval]} " \
+            'seconds and resetting the pod watcher.', e
+          )
+          sleep(Thread.current[:pod_watch_retry_backoff_interval])
+          Thread.current[:pod_watch_retry_count] += 1
+          Thread.current[:pod_watch_retry_backoff_interval] *= @watch_retry_exponential_backoff_base
+          pod_watcher = nil
+        else
+          # Since retries failed for many times, log as errors instead
+          # of info and raise exceptions and trigger Fluentd to restart.
+          message =
+            'Exception encountered parsing pod watch event. The ' \
+            'connection might have been closed. Retried ' \
+            "#{@watch_retry_max_times} times yet still failing. Restarting."
+          log.error(message, e)
+          raise Fluent::UnrecoverableError, message
+        end
       end
     end
 
     def start_pod_watch
       get_pods_and_start_watcher
-    rescue Exception => e
-      message = "start_pod_watch: Exception encountered setting up pod watch " \
+    rescue StandardError => e
+      message = 'start_pod_watch: Exception encountered setting up pod watch ' \
                 "from Kubernetes API #{@apiVersion} endpoint " \
                 "#{@kubernetes_url}: #{e.message}"
       message += " (#{e.response})" if e.respond_to?(:response)
@@ -83,19 +124,27 @@ module KubernetesMetadata
     # from that resourceVersion.
     def get_pods_and_start_watcher
       options = {
-        resource_version: '0' # Fetch from API server.
+        resource_version: '0' # Fetch from API server cache instead of etcd quorum read
       }
       if ENV['K8S_NODE_NAME']
         options[:field_selector] = 'spec.nodeName=' + ENV['K8S_NODE_NAME']
       end
-      pods = @client.get_pods(options)
-      pods.each do |pod|
-        cache_key = pod.metadata['uid']
-        @cache[cache_key] = parse_pod_metadata(pod)
-        @stats.bump(:pod_cache_host_updates)
+      if @last_seen_resource_version
+        options[:resource_version] = @last_seen_resource_version
+      else
+        pods = @client.get_pods(options)
+        pods[:items].each do |pod|
+          cache_key = pod[:metadata][:uid]
+          @cache[cache_key] = parse_pod_metadata(pod)
+          @stats.bump(:pod_cache_host_updates)
+        end
+
+        # continue watching from most recent resourceVersion
+        options[:resource_version] = pods[:metadata][:resourceVersion]
       end
-      options[:resource_version] = pods.resourceVersion
+
       watcher = @client.watch_pods(options)
+      reset_pod_watch_retry_stats
       watcher
     end
 
@@ -109,34 +158,46 @@ module KubernetesMetadata
     # Process a watcher notice and potentially raise an exception.
     def process_pod_watcher_notices(watcher)
       watcher.each do |notice|
-        case notice.type
-        when 'MODIFIED'
-          reset_pod_watch_retry_stats
-          cache_key = notice.object['metadata']['uid']
-          cached = @cache[cache_key]
-          if cached
-            @cache[cache_key] = parse_pod_metadata(notice.object)
-            @stats.bump(:pod_cache_watch_updates)
-          elsif ENV['K8S_NODE_NAME'] == notice.object['spec']['nodeName'] then
-            @cache[cache_key] = parse_pod_metadata(notice.object)
-            @stats.bump(:pod_cache_host_updates)
-          else
-            @stats.bump(:pod_cache_watch_misses)
-          end
-        when 'DELETED'
-          reset_pod_watch_retry_stats
-          # ignore and let age out for cases where pods
-          # deleted but still processing logs
-          @stats.bump(:pod_cache_watch_delete_ignored)
-        when 'ERROR'
+        # store version we processed to not reprocess it ... do not unset when there is no version in response
+        version = ( # TODO: replace with &.dig once we are on ruby 2.5+
+          notice[:object] && notice[:object][:metadata] && notice[:object][:metadata][:resourceVersion]
+        )
+        @last_seen_resource_version = version if version
+
+        case notice[:type]
+        when 'MODIFIED'
+          reset_pod_watch_retry_stats
+          cache_key = notice.dig(:object, :metadata, :uid)
+          cached = @cache[cache_key]
+          if cached
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_watch_updates)
+          elsif ENV['K8S_NODE_NAME'] == notice[:object][:spec][:nodeName]
+            @cache[cache_key] = parse_pod_metadata(notice[:object])
+            @stats.bump(:pod_cache_host_updates)
+          else
+            @stats.bump(:pod_cache_watch_misses)
+          end
+        when 'DELETED'
+          reset_pod_watch_retry_stats
+          # ignore and let age out for cases where pods
+          # deleted but still processing logs
+          @stats.bump(:pod_cache_watch_delete_ignored)
+        when 'ERROR'
+          if notice[:object] && notice[:object][:code] == 410
+            @last_seen_resource_version = nil # requested resourceVersion was too old, need to reset
+            @stats.bump(:pod_watch_gone_notices)
+            raise GoneError
+          else
             @stats.bump(:pod_watch_error_type_notices)
-          message = notice['object']['message'] if notice['object'] && notice['object']['message']
+            message = notice[:object][:message] if notice[:object] && notice[:object][:message]
             raise "Error while watching pods: #{message}"
-        else
-          reset_pod_watch_retry_stats
-          # Don't pay attention to creations, since the created pod may not
-          # end up on this node.
-          @stats.bump(:pod_cache_watch_ignored)
+          end
+        else
+          reset_pod_watch_retry_stats
+          # Don't pay attention to creations, since the created pod may not
+          # end up on this node.
+          @stats.bump(:pod_cache_watch_ignored)
         end
       end
     end
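The new watch loop is the heart of this change: it remembers the last processed resourceVersion, treats a 410 Gone as an expected signal to re-list and re-watch, and recreates the client on a 401. A minimal sketch of that pattern, with the retry/backoff and 401 handling omitted, assuming a kubeclient-style client configured to return symbol-keyed hashes (e.g. kubeclient's `as: :parsed_symbolized` mode):

```ruby
# Minimal sketch of the restart-on-410 watch pattern above; this is a
# simplification, not the plugin's exact code.
require 'kubeclient'

GoneError = Class.new(StandardError) # stands in for the plugin's marker error

def watch_pods_forever(client)
  last_seen = nil # resourceVersion of the last notice we processed
  loop do
    # '0' means "serve from the API server cache"; otherwise resume the
    # watch from the version we already processed.
    watcher = client.watch_pods(resource_version: last_seen || '0')
    watcher.each do |notice|
      version = notice[:object] && notice[:object][:metadata] &&
                notice[:object][:metadata][:resourceVersion]
      last_seen = version if version # never unset when a notice has no version
      if notice[:type] == 'ERROR' && notice[:object] && notice[:object][:code] == 410
        raise GoneError # our resourceVersion aged out of the server's window
      end
      # ... update the metadata cache from notice[:object] here ...
    end
  rescue GoneError
    last_seen = nil # expected: quietly restart the watch from a fresh list
  end
end
```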
data/release_notes.md ADDED
@@ -0,0 +1,42 @@
+# Release Notes
+
+## 2.9.4
+As of this release, the 'de_dot' functionality is deprecated and will be removed in future releases.
+Ref: https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter/issues/320
+
+## v2.1.4
+The use of `use_journal` is **DEPRECATED**. If this setting is not present, the plugin will
+attempt to determine the source of the metadata fields from the following:
+- If `lookup_from_k8s_field true` (the default) and the following fields are present in the record:
+  `docker.container_id`, `kubernetes.namespace_name`, `kubernetes.pod_name`, `kubernetes.container_name`,
+  then the plugin will use those values as the source for the metadata lookup
+- If `use_journal true`, or `use_journal` is unset, and the fields `CONTAINER_NAME` and `CONTAINER_ID_FULL` are present in the record,
+  then the plugin will parse those values using `container_name_to_kubernetes_regexp` and use them as the source for the metadata lookup
+- Otherwise, if the tag matches `tag_to_kubernetes_name_regexp`, the plugin will parse the tag and use those values to
+  look up the metadata
+
+## v2.1.x
+
+As of release 2.1.x of this plugin, it no longer supports parsing the source message into JSON and attaching it to the
+payload. The following configuration options are removed:
+
+* `merge_json_log`
+* `preserve_json_log`
+
+One way of preserving JSON logs is through the [parser plugin](https://docs.fluentd.org/filter/parser).
+They can be parsed like this:
+
+```
+<filter kubernetes.**>
+  @type parser
+  key_name log
+  <parse>
+    @type json
+    json_parser json
+  </parse>
+  replace_invalid_sequence true
+  reserve_data true # this preserves unparsable log lines
+  emit_invalid_record_to_error false # In case of unparsable log lines keep the error log clean
+  reserve_time # the time was already parsed in the source, we don't want to overwrite it with current time.
+</filter>
```
data/test/cassettes/kubernetes_get_pod_container_init.yml ADDED
@@ -0,0 +1,145 @@
+#
+# Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
+# Kubernetes metadata
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+---
+http_interactions:
+- request:
+    method: get
+    uri: https://localhost:8443/api/v1/namespaces/default/pods/fabric8-console-controller-98rqc
+    body:
+      encoding: US-ASCII
+      string: ''
+    headers:
+      Accept:
+      - "*/*; q=0.5, application/xml"
+      Accept-Encoding:
+      - gzip, deflate
+      User-Agent:
+      - Ruby
+  response:
+    status:
+      code: 200
+      message: OK
+    headers:
+      Content-Type:
+      - application/json
+      Date:
+      - Fri, 08 May 2015 10:35:37 GMT
+      Transfer-Encoding:
+      - chunked
+    body:
+      encoding: UTF-8
+      string: |-
+        {
+          "kind": "Pod",
+          "apiVersion": "v1",
+          "metadata": {
+            "name": "fabric8-console-controller-98rqc",
+            "generateName": "fabric8-console-controller-",
+            "namespace": "default",
+            "selfLink": "/api/v1/namespaces/default/pods/fabric8-console-controller-98rqc",
+            "uid": "c76927af-f563-11e4-b32d-54ee7527188d",
+            "resourceVersion": "122",
+            "creationTimestamp": "2015-05-08T09:22:42Z",
+            "labels": {
+              "component": "fabric8Console"
+            }
+          },
+          "spec": {
+            "volumes": [
+              {
+                "name": "openshift-cert-secrets",
+                "hostPath": null,
+                "emptyDir": null,
+                "gcePersistentDisk": null,
+                "gitRepo": null,
+                "secret": {
+                  "secretName": "openshift-cert-secrets"
+                },
+                "nfs": null,
+                "iscsi": null,
+                "glusterfs": null
+              }
+            ],
+            "containers": [
+              {
+                "name": "fabric8-console-container",
+                "image": "fabric8/hawtio-kubernetes:latest",
+                "ports": [
+                  {
+                    "containerPort": 9090,
+                    "protocol": "TCP"
+                  }
+                ],
+                "env": [
+                  {
+                    "name": "OAUTH_CLIENT_ID",
+                    "value": "fabric8-console"
+                  },
+                  {
+                    "name": "OAUTH_AUTHORIZE_URI",
+                    "value": "https://localhost:8443/oauth/authorize"
+                  }
+                ],
+                "resources": {},
+                "volumeMounts": [
+                  {
+                    "name": "openshift-cert-secrets",
+                    "readOnly": true,
+                    "mountPath": "/etc/secret-volume"
+                  }
+                ],
+                "terminationMessagePath": "/dev/termination-log",
+                "imagePullPolicy": "IfNotPresent",
+                "capabilities": {}
+              }
+            ],
+            "restartPolicy": "Always",
+            "dnsPolicy": "ClusterFirst",
+            "nodeName": "jimmi-redhat.localnet"
+          },
+          "status": {
+            "phase": "Running",
+            "Condition": [
+              {
+                "type": "Ready",
+                "status": "True"
+              }
+            ],
+            "hostIP": "172.17.42.1",
+            "podIP": "172.17.0.8",
+            "containerStatuses": [
+              {
+                "name": "fabric8-console-container",
+                "state": {
+                  "waiting": {
+                    "reason": "ContainerCreating"
+                  }
+                },
+                "lastState": {},
+                "ready": true,
+                "restartCount": 0,
+                "image": "fabric8/hawtio-kubernetes:latest",
+                "imageID": ""
+              }
+            ]
+          }
+        }
+  http_version:
+  recorded_at: Fri, 08 May 2015 10:35:37 GMT
+recorded_with: VCR 2.9.3
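Cassettes like this one are replayed by VCR during the tests, so no live API server is needed. A hedged sketch of how such a fixture is typically consumed; names are taken from the cassette, and the client construction is simplified relative to the plugin's test drivers:

```ruby
# Sketch: replaying the recorded interaction above instead of calling a
# live cluster. VCR intercepts the HTTP request via WebMock and serves the
# recorded response.
require 'vcr'
require 'kubeclient'

VCR.configure do |config|
  config.cassette_library_dir = 'test/cassettes' # as set in test/helper.rb
  config.hook_into :webmock
end

VCR.use_cassette('kubernetes_get_pod_container_init') do
  client = Kubeclient::Client.new('https://localhost:8443/api', 'v1')
  pod = client.get_pod('fabric8-console-controller-98rqc', 'default')
  puts pod.metadata.uid # => "c76927af-f563-11e4-b32d-54ee7527188d"
end
```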
data/test/helper.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -16,6 +18,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+require 'bundler/setup'
 require 'codeclimate-test-reporter'
 SimpleCov.start do
   formatter SimpleCov::Formatter::MultiFormatter.new [
@@ -31,8 +34,14 @@ require 'fileutils'
 require 'fluent/log'
 require 'fluent/test'
 require 'minitest/autorun'
-require 'webmock/test_unit'
 require 'vcr'
+require 'ostruct'
+require 'fluent/plugin/filter_kubernetes_metadata'
+require 'fluent/test/driver/filter'
+require 'kubeclient'
+
+require 'webmock/test_unit'
+WebMock.disable_net_connect!
 
 VCR.configure do |config|
   config.cassette_library_dir = 'test/cassettes'
@@ -58,7 +67,16 @@ def ipv6_enabled?
   begin
     TCPServer.open('::1', 0)
     true
-  rescue
+  rescue StandardError
     false
   end
 end
+
+# TEST_NAME='foo' ruby test_file.rb to run a single test case
+if ENV['TEST_NAME']
+  (class << Test::Unit::TestCase; self; end).prepend(Module.new do
+    def test(name)
+      super if name == ENV['TEST_NAME']
+    end
+  end)
+end
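The `TEST_NAME` guard added above works by prepending a module to `Test::Unit::TestCase`'s singleton class, so the `test` class macro only defines a case when its name matches. A small hypothetical example of the effect:

```ruby
# Hypothetical illustration of the TEST_NAME filter above. Running
#   TEST_NAME='runs' ruby test/plugin/example_test.rb
# defines and executes only the first case; the second never registers.
require_relative '../helper'

class ExampleTest < Test::Unit::TestCase
  test 'runs' do
    assert true
  end

  test 'skipped' do # `test` returns without calling super, so no method is defined
    flunk 'never reached when TEST_NAME=runs'
  end
end
```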
data/test/plugin/test_cache_stats.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 #
 # Fluentd Kubernetes Metadata Filter Plugin - Enrich Fluentd events with
 # Kubernetes metadata
@@ -17,20 +19,15 @@
 # limitations under the License.
 #
 require_relative '../helper'
-require 'fluent/plugin/kubernetes_metadata_stats'
-require 'webmock/test_unit'
-WebMock.disable_net_connect!
 
 class KubernetesMetadataCacheStatsTest < Test::Unit::TestCase
-
-  test 'watch stats' do
-    require 'lru_redux'
-    stats = KubernetesMetadata::Stats.new
-    stats.bump(:missed)
-    stats.bump(:deleted)
-    stats.bump(:deleted)
+  test 'watch stats' do
+    require 'lru_redux'
+    stats = KubernetesMetadata::Stats.new
+    stats.bump(:missed)
+    stats.bump(:deleted)
+    stats.bump(:deleted)
 
-    assert_equal("stats - deleted: 2, missed: 1", stats.to_s)
-  end
-
+    assert_equal('stats - deleted: 2, missed: 1', stats.to_s)
+  end
 end