fluent-plugin-kubernetes_metadata_filter 2.5.2 → 2.5.3

lib/fluent/plugin/kubernetes_metadata_watch_namespaces.rb

@@ -16,6 +16,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #
+ # TODO: this is mostly copy-paste from kubernetes_metadata_watch_pods.rb unify them
  require_relative 'kubernetes_metadata_common'

  module KubernetesMetadata
@@ -45,7 +46,7 @@ module KubernetesMetadata
  @stats.bump(:namespace_watch_gone_errors)
  log.info("410 Gone encountered. Restarting namespace watch to reset resource versions.", e)
  namespace_watcher = nil
- rescue Exception => e
+ rescue => e
  @stats.bump(:namespace_watch_failures)
  if Thread.current[:namespace_watch_retry_count] < @watch_retry_max_times
  # Instead of raising exceptions and crashing Fluentd, swallow
@@ -74,8 +75,8 @@ module KubernetesMetadata
  end

  def start_namespace_watch
- return get_namespaces_and_start_watcher
- rescue Exception => e
+ get_namespaces_and_start_watcher
+ rescue => e
  message = "start_namespace_watch: Exception encountered setting up " \
  "namespace watch from Kubernetes API #{@apiVersion} endpoint " \
  "#{@kubernetes_url}: #{e.message}"
@@ -89,16 +90,20 @@ module KubernetesMetadata
  # starting from that resourceVersion.
  def get_namespaces_and_start_watcher
  options = {
- resource_version: '0' # Fetch from API server.
+ resource_version: '0' # Fetch from API server cache instead of etcd quorum read
  }
  namespaces = @client.get_namespaces(options)
- namespaces.each do |namespace|
- cache_key = namespace.metadata['uid']
+ namespaces[:items].each do |namespace|
+ cache_key = namespace[:metadata][:uid]
  @namespace_cache[cache_key] = parse_namespace_metadata(namespace)
  @stats.bump(:namespace_cache_host_updates)
  end
- options[:resource_version] = namespaces.resourceVersion
+
+ # continue watching from most recent resourceVersion
+ options[:resource_version] = namespaces[:metadata][:resourceVersion]
+
  watcher = @client.watch_namespaces(options)
+ reset_namespace_watch_retry_stats
  watcher
  end

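Note: the rewritten get_namespaces_and_start_watcher does two things. Listing with resource_version: '0' lets the API server answer from its watch cache rather than a quorum read against etcd, and the watch then resumes from the resourceVersion returned with that list, so no events between list and watch are missed. The switch from namespace.metadata['uid'] to namespace[:metadata][:uid] indicates the kubeclient client now returns plain symbol-keyed hashes, presumably built with `as: :parsed_symbolized`. A minimal sketch of that list-then-watch handshake under those assumptions (`update_cache` is a hypothetical stand-in for the plugin's cache write):

    require 'kubeclient'

    # hypothetical client; the plugin builds its own with auth options
    client = Kubeclient::Client.new('https://localhost:8443/api', 'v1',
                                    as: :parsed_symbolized)

    list = client.get_namespaces(resource_version: '0')  # cheap, cache-served list
    list[:items].each { |ns| update_cache(ns[:metadata][:uid], ns) }

    # watch from exactly where the list left off
    watcher = client.watch_namespaces(resource_version: list[:metadata][:resourceVersion])
    watcher.each { |notice| puts "#{notice[:type]} #{notice[:object][:metadata][:name]}" }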
@@ -112,13 +117,13 @@ module KubernetesMetadata
  # Process a watcher notice and potentially raise an exception.
  def process_namespace_watcher_notices(watcher)
  watcher.each do |notice|
- case notice.type
+ case notice[:type]
  when 'MODIFIED'
  reset_namespace_watch_retry_stats
- cache_key = notice.object['metadata']['uid']
+ cache_key = notice[:object][:metadata][:uid]
  cached = @namespace_cache[cache_key]
  if cached
- @namespace_cache[cache_key] = parse_namespace_metadata(notice.object)
+ @namespace_cache[cache_key] = parse_namespace_metadata(notice[:object])
  @stats.bump(:namespace_cache_watch_updates)
  else
  @stats.bump(:namespace_cache_watch_misses)
@@ -129,12 +134,12 @@ module KubernetesMetadata
  # deleted but still processing logs
  @stats.bump(:namespace_cache_watch_deletes_ignored)
  when 'ERROR'
- if notice.object && notice.object['code'] == 410
+ if notice[:object] && notice[:object][:code] == 410
  @stats.bump(:namespace_watch_gone_notices)
  raise GoneError
  else
  @stats.bump(:namespace_watch_error_type_notices)
- message = notice['object']['message'] if notice['object'] && notice['object']['message']
+ message = notice[:object][:message] if notice[:object] && notice[:object][:message]
  raise "Error while watching namespaces: #{message}"
  end
  else
lib/fluent/plugin/kubernetes_metadata_watch_pods.rb

@@ -16,6 +16,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #
+ # TODO: this is mostly copy-paste from kubernetes_metadata_watch_namespaces.rb unify them
  require_relative 'kubernetes_metadata_common'

  module KubernetesMetadata
@@ -29,6 +30,7 @@ module KubernetesMetadata
  # Fluent:ConfigError, so that users can inspect potential errors in
  # the configuration.
  pod_watcher = start_pod_watch
+
  Thread.current[:pod_watch_retry_backoff_interval] = @watch_retry_interval
  Thread.current[:pod_watch_retry_count] = 0

@@ -46,7 +48,7 @@ module KubernetesMetadata
  @stats.bump(:pod_watch_gone_errors)
  log.info("410 Gone encountered. Restarting pod watch to reset resource versions.", e)
  pod_watcher = nil
- rescue Exception => e
+ rescue => e
  @stats.bump(:pod_watch_failures)
  if Thread.current[:pod_watch_retry_count] < @watch_retry_max_times
  # Instead of raising exceptions and crashing Fluentd, swallow
@@ -76,7 +78,7 @@ module KubernetesMetadata

  def start_pod_watch
  get_pods_and_start_watcher
- rescue Exception => e
+ rescue => e
  message = "start_pod_watch: Exception encountered setting up pod watch " \
  "from Kubernetes API #{@apiVersion} endpoint " \
  "#{@kubernetes_url}: #{e.message}"
@@ -90,19 +92,27 @@ module KubernetesMetadata
  # from that resourceVersion.
  def get_pods_and_start_watcher
  options = {
- resource_version: '0' # Fetch from API server.
+ resource_version: '0' # Fetch from API server cache instead of etcd quorum read
  }
  if ENV['K8S_NODE_NAME']
  options[:field_selector] = 'spec.nodeName=' + ENV['K8S_NODE_NAME']
  end
- pods = @client.get_pods(options)
- pods.each do |pod|
- cache_key = pod.metadata['uid']
- @cache[cache_key] = parse_pod_metadata(pod)
- @stats.bump(:pod_cache_host_updates)
+ if @last_seen_resource_version
+ options[:resource_version] = @last_seen_resource_version
+ else
+ pods = @client.get_pods(options)
+ pods[:items].each do |pod|
+ cache_key = pod[:metadata][:uid]
+ @cache[cache_key] = parse_pod_metadata(pod)
+ @stats.bump(:pod_cache_host_updates)
+ end
+
+ # continue watching from most recent resourceVersion
+ options[:resource_version] = pods[:metadata][:resourceVersion]
  end
- options[:resource_version] = pods.resourceVersion
+
  watcher = @client.watch_pods(options)
+ reset_pod_watch_retry_stats
  watcher
  end

@@ -116,16 +126,22 @@ module KubernetesMetadata
  # Process a watcher notice and potentially raise an exception.
  def process_pod_watcher_notices(watcher)
  watcher.each do |notice|
- case notice.type
+ # store version we processed to not reprocess it ... do not unset when there is no version in response
+ version = ( # TODO: replace with &.dig once we are on ruby 2.5+
+ notice[:object] && notice[:object][:metadata] && notice[:object][:metadata][:resourceVersion]
+ )
+ @last_seen_resource_version = version if version
+
+ case notice[:type]
  when 'MODIFIED'
  reset_pod_watch_retry_stats
- cache_key = notice.object['metadata']['uid']
+ cache_key = notice.dig(:object, :metadata, :uid)
  cached = @cache[cache_key]
  if cached
- @cache[cache_key] = parse_pod_metadata(notice.object)
+ @cache[cache_key] = parse_pod_metadata(notice[:object])
  @stats.bump(:pod_cache_watch_updates)
- elsif ENV['K8S_NODE_NAME'] == notice.object['spec']['nodeName'] then
- @cache[cache_key] = parse_pod_metadata(notice.object)
+ elsif ENV['K8S_NODE_NAME'] == notice[:object][:spec][:nodeName] then
+ @cache[cache_key] = parse_pod_metadata(notice[:object])
  @stats.bump(:pod_cache_host_updates)
  else
  @stats.bump(:pod_cache_watch_misses)
@@ -136,12 +152,13 @@ module KubernetesMetadata
  # deleted but still processing logs
  @stats.bump(:pod_cache_watch_delete_ignored)
  when 'ERROR'
- if notice.object && notice.object['code'] == 410
+ if notice[:object] && notice[:object][:code] == 410
+ @last_seen_resource_version = nil # requested resourceVersion was too old, need to reset
  @stats.bump(:pod_watch_gone_notices)
  raise GoneError
  else
  @stats.bump(:pod_watch_error_type_notices)
- message = notice['object']['message'] if notice['object'] && notice['object']['message']
+ message = notice[:object][:message] if notice[:object] && notice[:object][:message]
  raise "Error while watching pods: #{message}"
  end
  else
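Note: the pod watcher now records the resourceVersion of every processed notice in @last_seen_resource_version, so after a transient failure the watch can resume from the last event seen instead of re-listing and re-parsing every pod on the node. An ERROR notice with code 410 (Gone) clears the bookmark, because the API server has compacted history past that version and only a fresh list can recover. The control flow, condensed into a sketch (names simplified from the diff):

    # illustrative condensation of get_pods_and_start_watcher plus the 410 handling
    def start_watch
      options = { resource_version: '0' }    # cache-served list by default
      if @last_seen_resource_version
        options[:resource_version] = @last_seen_resource_version   # resume the stream
      else
        pods = @client.get_pods(options)                           # full re-list
        pods[:items].each { |pod| @cache[pod[:metadata][:uid]] = pod }
        options[:resource_version] = pods[:metadata][:resourceVersion]
      end
      @client.watch_pods(options)
    end

    # on an ERROR notice with code 410, the bookmark is dropped:
    @last_seen_resource_version = nil   # history compacted; next restart re-lists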
test/helper.rb

@@ -16,6 +16,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #
+ require 'bundler/setup'
  require 'codeclimate-test-reporter'
  SimpleCov.start do
  formatter SimpleCov::Formatter::MultiFormatter.new [
@@ -31,8 +32,14 @@ require 'fileutils'
  require 'fluent/log'
  require 'fluent/test'
  require 'minitest/autorun'
- require 'webmock/test_unit'
  require 'vcr'
+ require 'ostruct'
+ require 'fluent/plugin/filter_kubernetes_metadata'
+ require 'fluent/test/driver/filter'
+ require 'kubeclient'
+
+ require 'webmock/test_unit'
+ WebMock.disable_net_connect!

  VCR.configure do |config|
  config.cassette_library_dir = 'test/cassettes'
@@ -62,3 +69,12 @@ def ipv6_enabled?
  false
  end
  end
+
+ # TEST_NAME='foo' ruby test_file.rb to run a single test case
+ if ENV["TEST_NAME"]
+ (class << Test::Unit::TestCase; self; end).prepend(Module.new do
+ def test(name)
+ super if name == ENV["TEST_NAME"]
+ end
+ end)
+ end
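Note: the TEST_NAME hook works by prepending a module onto Test::Unit::TestCase's singleton class, so the `test` class macro is intercepted before it defines anything and only falls through to `super` when the name matches ENV["TEST_NAME"]. The same prepend-on-singleton interception pattern in isolation (Macro and its names are made up for illustration):

    class Macro
      def self.register(name)
        puts "defined #{name}"
      end
    end

    # intercept the class-level method without reopening or aliasing it
    (class << Macro; self; end).prepend(Module.new do
      def register(name)
        super if name.include?('keep')   # filter before the original method runs
      end
    end)

    Macro.register('keep this')   # prints "defined keep this"
    Macro.register('skip this')   # silently filtered out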
test/plugin/test_cache_stats.rb

@@ -17,12 +17,9 @@
  # limitations under the License.
  #
  require_relative '../helper'
- require 'fluent/plugin/kubernetes_metadata_stats'
- require 'webmock/test_unit'
- WebMock.disable_net_connect!

  class KubernetesMetadataCacheStatsTest < Test::Unit::TestCase
-
+
  test 'watch stats' do
  require 'lru_redux'
  stats = KubernetesMetadata::Stats.new
@@ -32,5 +29,5 @@ class KubernetesMetadataCacheStatsTest < Test::Unit::TestCase

  assert_equal("stats - deleted: 2, missed: 1", stats.to_s)
  end
-
+
  end
test/plugin/test_cache_strategy.rb

@@ -17,11 +17,6 @@
  # limitations under the License.
  #
  require_relative '../helper'
- require_relative '../../lib/fluent/plugin/kubernetes_metadata_cache_strategy'
- require_relative '../../lib/fluent/plugin/kubernetes_metadata_stats'
- require 'lru_redux'
- require 'webmock/test_unit'
- WebMock.disable_net_connect!

  class TestCacheStrategy
  include KubernetesMetadata::CacheStrategy
@@ -38,9 +33,11 @@ class TestCacheStrategy
  attr_accessor :stats, :cache, :id_cache, :namespace_cache, :allow_orphans

  def fetch_pod_metadata(namespace_name, pod_name)
+ {}
  end

  def fetch_namespace_metadata(namespace_name)
+ {}
  end

  def log
@@ -56,7 +53,7 @@
  end

  class KubernetesMetadataCacheStrategyTest < Test::Unit::TestCase
-
+
  def setup
  @strategy = TestCacheStrategy.new
  @cache_key = 'some_long_container_id'
@@ -114,7 +111,7 @@ class KubernetesMetadataCacheStrategyTest < Test::Unit::TestCase
  # we ever will have and should allow us to process all the deleted
  # pod records
  exp = {
- 'pod_id'=> @cache_key,
+ 'pod_id'=> @cache_key,
  'namespace_id'=> @namespace_uuid
  }
  @strategy.stub :fetch_pod_metadata, {} do
@@ -175,7 +172,7 @@ class KubernetesMetadataCacheStrategyTest < Test::Unit::TestCase
  end
  assert_equal({}, @strategy.get_pod_metadata(@cache_key,'namespace', 'pod', @time, batch_miss_cache))
  end
-
+
  test 'when metadata is not cached and no metadata can be fetched and allowing orphans for multiple records' do
  # we should never see this since pod meta should not be retrievable
  # unless the namespace exists
test/plugin/test_filter_kubernetes_metadata.rb

@@ -17,11 +17,6 @@
  # limitations under the License.
  #
  require_relative '../helper'
- require 'fluent/test/driver/filter'
- require 'fluent/plugin/filter_kubernetes_metadata'
-
- require 'webmock/test_unit'
- WebMock.disable_net_connect!

  class KubernetesMetadataFilterTest < Test::Unit::TestCase
  include Fluent
@@ -122,8 +117,8 @@ class KubernetesMetadataFilterTest < Test::Unit::TestCase
  secret_dir #{dir}
  ")
  assert_equal(d.instance.kubernetes_url, "https://localhost:8443/api")
- assert_false(d.instance.ca_file.present?)
- assert_false(d.instance.bearer_token_file.present?)
+ assert_nil(d.instance.ca_file, nil)
+ assert_nil(d.instance.bearer_token_file)
  }
  ensure
  ENV['KUBERNETES_SERVICE_HOST'] = nil
@@ -769,9 +764,13 @@ class KubernetesMetadataFilterTest < Test::Unit::TestCase
  'CONTAINER_ID_FULL' => '49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed459',
  'randomfield' => 'randomvalue'
  }
- VCR.use_cassettes([{name: 'valid_kubernetes_api_server'}, {name: 'kubernetes_get_api_v1'}, {name: 'kubernetes_get_pod'},
- {name: 'kubernetes_get_namespace_default'},
- {name: 'metadata_from_tag_and_journald_fields'}]) do
+ VCR.use_cassettes([
+ {name: 'valid_kubernetes_api_server'},
+ {name: 'kubernetes_get_api_v1'},
+ {name: 'kubernetes_get_pod'},
+ {name: 'kubernetes_get_namespace_default'},
+ {name: 'metadata_from_tag_and_journald_fields'}
+ ]) do
  filtered = emit_with_tag(tag, msg, '
  kubernetes_url https://localhost:8443
  watch false
@@ -873,6 +872,7 @@ class KubernetesMetadataFilterTest < Test::Unit::TestCase
  assert_equal(expected_kube_metadata, filtered[0])
  end
  end
+
  test 'with CONTAINER_NAME that does not match' do
  tag = 'var.log.containers.junk4_junk5_junk6-49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed450.log'
  msg = {
@@ -897,6 +897,7 @@ class KubernetesMetadataFilterTest < Test::Unit::TestCase
  assert_equal(expected_kube_metadata, filtered[0])
  end
  end
+
  test 'with CONTAINER_NAME starts with k8s_ that does not match' do
  tag = 'var.log.containers.junk4_junk5_junk6-49095a2894da899d3b327c5fde1e056a81376cc9a8f8b09a195f2a92bceed450.log'
  msg = {
test/plugin/test_watch_namespaces.rb

@@ -17,7 +17,6 @@
  # limitations under the License.
  #
  require_relative '../helper'
- require 'ostruct'
  require_relative 'watch_test'

  class WatchNamespacesTestTest < WatchTest
@@ -25,71 +24,72 @@ class WatchNamespacesTestTest < WatchTest
  include KubernetesMetadata::WatchNamespaces

  setup do
- @initial = Kubeclient::Common::EntityList.new(
- 'NamespaceList',
- '123',
- [
- Kubeclient::Resource.new({
- 'metadata' => {
- 'name' => 'initial',
- 'uid' => 'initial_uid'
- }
- }),
- Kubeclient::Resource.new({
- 'metadata' => {
- 'name' => 'modified',
- 'uid' => 'modified_uid'
- }
- })
- ])
-
- @created = OpenStruct.new(
+ @initial = {
+ kind: 'NamespaceList',
+ metadata: {resourceVersion: '123'},
+ items: [
+ {
+ metadata: {
+ name: 'initial',
+ uid: 'initial_uid'
+ }
+ },
+ {
+ metadata: {
+ name: 'modified',
+ uid: 'modified_uid'
+ }
+ }
+ ]
+ }
+
+ @created = {
  type: 'CREATED',
  object: {
- 'metadata' => {
- 'name' => 'created',
- 'uid' => 'created_uid'
- }
+ metadata: {
+ name: 'created',
+ uid: 'created_uid'
+ }
  }
- )
- @modified = OpenStruct.new(
+ }
+ @modified = {
  type: 'MODIFIED',
  object: {
- 'metadata' => {
- 'name' => 'foo',
- 'uid' => 'modified_uid'
- }
+ metadata: {
+ name: 'foo',
+ uid: 'modified_uid'
+ }
  }
- )
- @deleted = OpenStruct.new(
+ }
+ @deleted = {
  type: 'DELETED',
  object: {
- 'metadata' => {
- 'name' => 'deleteme',
- 'uid' => 'deleted_uid'
- }
+ metadata: {
+ name: 'deleteme',
+ uid: 'deleted_uid'
+ }
  }
- )
- @error = OpenStruct.new(
+ }
+ @error = {
  type: 'ERROR',
  object: {
- 'message' => 'some error message'
+ message: 'some error message'
  }
- )
- @gone = OpenStruct.new(
- type: 'ERROR',
- object: {
- 'code' => 410,
- 'kind' => 'Status',
- 'message' => 'too old resource version: 123 (391079)',
- 'metadata' => {
- 'name' => 'gone',
- 'namespace' => 'gone',
- 'uid' => 'gone_uid'
- },
- 'reason' => 'Gone'
- }
- )
+ }
+ @gone = {
+ type: 'ERROR',
+ object: {
+ code: 410,
+ kind: 'Status',
+ message: 'too old resource version: 123 (391079)',
+ metadata: {
+ name: 'gone',
+ namespace: 'gone',
+ uid: 'gone_uid'
+ },
+ reason: 'Gone'
+ }
+ }
  end

  test 'namespace list caches namespaces' do
@@ -148,30 +148,52 @@ class WatchNamespacesTestTest < WatchTest
  end
  end

- test 'namespace watch retries when exceptions are encountered' do
+ test 'namespace watch raises Fluent::UnrecoverableError when cannot re-establish connection to k8s API server' do
+ # Stub start_namespace_watch to simulate initial successful connection to API server
+ stub(self).start_namespace_watch
+ # Stub watch_namespaces to simulate not being able to set up watch connection to API server
+ stub(@client).watch_namespaces { raise }
+ @client.stub :get_namespaces, @initial do
+ assert_raise Fluent::UnrecoverableError do
+ set_up_namespace_thread
+ end
+ end
+ assert_equal(3, @stats[:namespace_watch_failures])
+ assert_equal(2, Thread.current[:namespace_watch_retry_count])
+ assert_equal(4, Thread.current[:namespace_watch_retry_backoff_interval])
+ assert_nil(@stats[:namespace_watch_error_type_notices])
+ end
+
+ test 'namespace watch resets watch retry count when exceptions are encountered and connection to k8s API server is re-established' do
  @client.stub :get_namespaces, @initial do
  @client.stub :watch_namespaces, [[@created, @exception_raised]] do
- assert_raise Fluent::UnrecoverableError do
- set_up_namespace_thread
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
+ # no unrecoverable error was thrown during this period of time.
+ assert_raise Timeout::Error.new('execution expired') do
+ Timeout.timeout(3) do
+ set_up_namespace_thread
+ end
  end
- assert_equal(3, @stats[:namespace_watch_failures])
- assert_equal(2, Thread.current[:namespace_watch_retry_count])
- assert_equal(4, Thread.current[:namespace_watch_retry_backoff_interval])
- assert_nil(@stats[:namespace_watch_error_type_notices])
+ assert_operator(@stats[:namespace_watch_failures], :>=, 3)
+ assert_operator(Thread.current[:namespace_watch_retry_count], :<=, 1)
+ assert_operator(Thread.current[:namespace_watch_retry_backoff_interval], :<=, 1)
  end
  end
  end

- test 'namespace watch retries when error is received' do
+ test 'namespace watch resets watch retry count when error is received and connection to k8s API server is re-established' do
  @client.stub :get_namespaces, @initial do
  @client.stub :watch_namespaces, [@error] do
- assert_raise Fluent::UnrecoverableError do
- set_up_namespace_thread
+ # Force the infinite watch loop to exit after 3 seconds. Verifies that
+ # no unrecoverable error was thrown during this period of time.
+ assert_raise Timeout::Error.new('execution expired') do
+ Timeout.timeout(3) do
+ set_up_namespace_thread
+ end
  end
- assert_equal(3, @stats[:namespace_watch_failures])
- assert_equal(2, Thread.current[:namespace_watch_retry_count])
- assert_equal(4, Thread.current[:namespace_watch_retry_backoff_interval])
- assert_equal(3, @stats[:namespace_watch_error_type_notices])
+ assert_operator(@stats[:namespace_watch_failures], :>=, 3)
+ assert_operator(Thread.current[:namespace_watch_retry_count], :<=, 1)
+ assert_operator(Thread.current[:namespace_watch_retry_backoff_interval], :<=, 1)
  end
  end
  end
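Note: the rewritten retry tests assert liveness rather than a crash. With the stubs simulating a recovered connection, the watch loop is expected to spin indefinitely, so each test bounds it with Timeout.timeout(3), treats the resulting Timeout::Error as success, and then checks that the retry counters were reset instead of accumulating toward Fluent::UnrecoverableError. A stripped-down version of that assertion pattern (the loop body is hypothetical):

    require 'timeout'
    require 'test/unit'

    class WatchLoopTest < Test::Unit::TestCase
      test 'recovered watch loop keeps running instead of raising' do
        retry_count = 0
        assert_raise Timeout::Error do
          Timeout.timeout(1) do
            loop { retry_count = 0; sleep 0.01 }  # a healthy loop keeps resetting its counter
          end
        end
        assert_equal(0, retry_count)
      end
    end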