fluent-plugin-cloudwatch-logs 0.9.2 → 0.10.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: a468cfc9693ee888e05050e47bf6fbd44cda3b9331072cbf0fe28cbb60865cb6
-   data.tar.gz: 7cf780a9d6110ce92279377aa8ecc2f2c311081ab4074a6259a46ebbf2e855ed
+   metadata.gz: 3445e1c11e8856669ee57b3557865f0f4944699dd49a93c61e518c3cbe88de9b
+   data.tar.gz: 43453d4dd9eefed4805541de0e69f0715e2caf9d451c705c5ab403a77f85473b
  SHA512:
-   metadata.gz: d093177804546cb66a2595999d5ef493d4250f0f6916cd5f7b4567e433b322f26351f629e2e2a4c333b21094f9507286771a194e24a232106666fce1383deb31
-   data.tar.gz: 75aab7c98fa0c930de114e1dc122b94713df67cb3287a4edb5c0730dbfb8cc8a2540593bbfd3f8110c4f54581d4942ea4baacbe9f1507dc3b24e54ddf372e768
+   metadata.gz: 8fa3558069bba0d7d9ffe98d61357eef71fd05648c5eaedc614fe0b5a0ced11d8a8fb2bb34a43b4dcab7990bcfd07f4540406334a46293b75be84b2c5986af54
+   data.tar.gz: 7e1a3196f98975d0f9a24d0f14b3a49f14e603eefc76533384fe4bc5bec4547ef9e78d8690ea99c282e04ba861c8ca243684da3ee5ffaa12645f6aa59fe1c670
.github/workflows/issue-auto-closer.yml ADDED
@@ -0,0 +1,12 @@
+ name: Autocloser
+ on: [issues]
+ jobs:
+   autoclose:
+     runs-on: ubuntu-latest
+     steps:
+     - name: Autoclose issues that did not follow issue template
+       uses: roots/issue-closer-action@v1.1
+       with:
+         repo-token: ${{ secrets.GITHUB_TOKEN }}
+         issue-close-message: "@${issue.user.login} this issue was automatically closed because it did not follow the issue template."
+         issue-pattern: "(.*Problem.*)|(.*Expected Behavior or What you need to ask.*)|(.*Using Fluentd and CloudWatchLogs plugin versions.*)"
data/README.md CHANGED
@@ -43,6 +43,46 @@ Create IAM user with a policy like the following:
  }
  ```
 
+ A more restricted IAM policy for `out_cloudwatch_logs` is:
+
+ ```json
+ {
+   "Version": "2012-10-17",
+   "Statement": [
+     {
+       "Action": [
+         "logs:PutLogEvents",
+         "logs:CreateLogGroup",
+         "logs:PutRetentionPolicy",
+         "logs:CreateLogStream",
+         "logs:DescribeLogGroups",
+         "logs:DescribeLogStreams"
+       ],
+       "Effect": "Allow",
+       "Resource": "*"
+     }
+   ]
+ }
+ ```
+
+ Also, a more restricted IAM policy for `in_cloudwatch_logs` is:
+
+ ```json
+ {
+   "Version": "2012-10-17",
+   "Statement": [
+     {
+       "Action": [
+         "logs:GetLogEvents",
+         "logs:DescribeLogStreams"
+       ],
+       "Effect": "Allow",
+       "Resource": "*"
+     }
+   ]
+ }
+ ```
+
  ## Authentication
 
  There are several methods to provide authentication credentials. Be aware that there are various tradeoffs for these methods,
@@ -149,7 +189,7 @@ Fetch sample log from CloudWatch Logs:
  * `remove_log_group_aws_tags_key`: remove field specified by `log_group_aws_tags_key`
  * `remove_log_group_name_key`: remove field specified by `log_group_name_key`
  * `remove_log_stream_name_key`: remove field specified by `log_stream_name_key`
- * `remove_retention_in_days`: remove field specified by `retention_in_days`
+ * `remove_retention_in_days_key`: remove field specified by `retention_in_days_key`
  * `retention_in_days`: use to set the expiry time for log group when created with `auto_create_stream`. (default to no expiry)
  * `retention_in_days_key`: use specified field of records as retention period
  * `use_tag_as_group`: to use tag as a group name
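
For illustration, a minimal `out_cloudwatch_logs` match block combining the renamed option with its companion key might look like this (the tag pattern, group/stream names, and the `retention_in_days` record field are placeholders, not part of the diff):

```
<match app.**>
  @type cloudwatch_logs
  log_group_name log-group-name
  log_stream_name log-stream-name
  auto_create_stream true
  # read the per-group retention period from this record field...
  retention_in_days_key retention_in_days
  # ...and drop that field before the record is sent
  remove_retention_in_days_key true
</match>
```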
@@ -178,6 +218,9 @@ Please refer to [the PutRetentionPolicy column in documentation](https://docs.aw
  #<parse>
  # @type none # or csv, tsv, regexp etc.
  #</parse>
+ #<storage>
+ # @type local # or redis, memcached, etc.
+ #</storage>
  </source>
  ```
 
@@ -193,7 +236,9 @@ Please refer to [the PutRetentionPolicy column in documentation](https://docs.aw
  * `log_group_name`: name of log group to fetch logs
  * `log_stream_name`: name of log stream to fetch logs
  * `region`: AWS Region. See [Authentication](#authentication) for more information.
- * `state_file`: file to store current state (e.g. next\_forward\_token)
+ * `throttling_retry_seconds`: time period in seconds to retry a request when the AWS CloudWatch rate limit is exceeded (default: nil)
+ * `include_metadata`: include metadata such as `log_group_name` and `log_stream_name`. (default: false)
+ * `state_file`: file to store current state (e.g. next\_forward\_token). This parameter is deprecated. Use `<storage>` instead.
  * `tag`: fluentd tag
  * `use_log_stream_name_prefix`: to use `log_stream_name` as log stream name prefix (default false)
  * `use_todays_log_stream`: use todays and yesterdays date as log stream name prefix (formatted YYYY/MM/DD). (default: `false`)
@@ -203,6 +248,7 @@ Please refer to [the PutRetentionPolicy column in documentation](https://docs.aw
  * `time_range_format`: specify time format for time range. (default: `%Y-%m-%d %H:%M:%S`)
  * `format`: specify CloudWatchLogs' log format. (default `nil`)
  * `<parse>`: specify parser plugin configuration. see also: https://docs.fluentd.org/v/1.0/parser#how-to-use
+ * `<storage>`: specify storage plugin configuration. see also: https://docs.fluentd.org/v/1.0/storage#how-to-use
 
  ## Test
 
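As a sketch of how the new `in_cloudwatch_logs` options fit together, a `<source>` block replacing the deprecated `state_file` with `<storage>` might look like this (tag, names, and the storage path are illustrative):

```
<source>
  @type cloudwatch_logs
  tag cloudwatch.in
  log_group_name log-group-name
  log_stream_name log-stream-name
  # wait 30 seconds and retry when throttled by the CloudWatch Logs API
  throttling_retry_seconds 30
  # add log_group_name/log_stream_name to each record
  include_metadata true
  <storage>
    @type local
    path /var/lib/fluentd/cloudwatch_state.json
    persistent true
  </storage>
</source>
```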
lib/fluent/plugin/cloudwatch/logs/version.rb CHANGED
@@ -2,7 +2,7 @@ module Fluent
  module Plugin
    module Cloudwatch
      module Logs
-       VERSION = "0.9.2"
+       VERSION = "0.10.1"
      end
    end
  end
lib/fluent/plugin/in_cloudwatch_logs.rb CHANGED
@@ -8,7 +8,9 @@ module Fluent::Plugin
  class CloudwatchLogsInput < Input
    Fluent::Plugin.register_input('cloudwatch_logs', self)
 
-   helpers :parser, :thread, :compat_parameters
+   helpers :parser, :thread, :compat_parameters, :storage
+
+   DEFAULT_STORAGE_TYPE = 'local'
 
    config_param :aws_key_id, :string, default: nil, secret: true
    config_param :aws_sec_key, :string, default: nil, secret: true
@@ -21,7 +23,8 @@ module Fluent::Plugin
    config_param :log_group_name, :string
    config_param :log_stream_name, :string, default: nil
    config_param :use_log_stream_name_prefix, :bool, default: false
-   config_param :state_file, :string
+   config_param :state_file, :string, default: nil,
+                deprecated: "Use <storage> instead."
    config_param :fetch_interval, :time, default: 60
    config_param :http_proxy, :string, default: nil
    config_param :json_handler, :enum, list: [:yajl, :json], default: :yajl
@@ -30,11 +33,19 @@ module Fluent::Plugin
    config_param :start_time, :string, default: nil
    config_param :end_time, :string, default: nil
    config_param :time_range_format, :string, default: "%Y-%m-%d %H:%M:%S"
+   config_param :throttling_retry_seconds, :time, default: nil
+   config_param :include_metadata, :bool, default: false
 
    config_section :parse do
      config_set_default :@type, 'none'
    end
 
+   config_section :storage do
+     config_set_default :usage, 'store_next_tokens'
+     config_set_default :@type, DEFAULT_STORAGE_TYPE
+     config_set_default :persistent, false
+   end
+
    def initialize
      super
 
@@ -52,6 +63,7 @@ module Fluent::Plugin
      if @start_time && @end_time && (@end_time < @start_time)
        raise Fluent::ConfigError, "end_time(#{@end_time}) should be greater than start_time(#{@start_time})."
      end
+     @next_token_storage = storage_create(usage: 'store_next_tokens', conf: config, default_type: DEFAULT_STORAGE_TYPE)
    end
 
    def start
@@ -98,20 +110,28 @@ module Fluent::Plugin
      end
    end
 
-   def state_file_for(log_stream_name)
-     return "#{@state_file}_#{log_stream_name.gsub(File::SEPARATOR, '-')}" if log_stream_name
-     return @state_file
+   def state_key_for(log_stream_name)
+     if log_stream_name
+       "#{@state_file}_#{log_stream_name.gsub(File::SEPARATOR, '-')}"
+     else
+       @state_file
+     end
+   end
+
+   def migrate_state_file_to_storage(log_stream_name)
+     @next_token_storage.put(:"#{state_key_for(log_stream_name)}", File.read(state_key_for(log_stream_name)).chomp)
+     File.delete(state_key_for(log_stream_name))
    end
 
    def next_token(log_stream_name)
-     return nil unless File.exist?(state_file_for(log_stream_name))
-     File.read(state_file_for(log_stream_name)).chomp
+     if @next_token_storage.persistent && File.exist?(state_key_for(log_stream_name))
+       migrate_state_file_to_storage(log_stream_name)
+     end
+     @next_token_storage.get(:"#{state_key_for(log_stream_name)}")
    end
 
    def store_next_token(token, log_stream_name = nil)
-     File.open(state_file_for(log_stream_name), 'w') do |f|
-       f.write token
-     end
+     @next_token_storage.put(:"#{state_key_for(log_stream_name)}", token)
    end
 
    def run
@@ -129,8 +149,16 @@ module Fluent::Plugin
        log_streams.each do |log_stream|
          log_stream_name = log_stream.log_stream_name
          events = get_events(log_stream_name)
+         metadata = if @include_metadata
+                      {
+                        "log_stream_name" => log_stream_name,
+                        "log_group_name" => @log_group_name
+                      }
+                    else
+                      {}
+                    end
          events.each do |event|
-           emit(log_stream_name, event)
+           emit(log_stream_name, event, metadata)
          end
        end
      rescue Aws::CloudWatchLogs::Errors::ResourceNotFoundException
@@ -139,8 +167,16 @@ module Fluent::Plugin
        end
      else
        events = get_events(@log_stream_name)
+       metadata = if @include_metadata
+                    {
+                      "log_stream_name" => @log_stream_name,
+                      "log_group_name" => @log_group_name
+                    }
+                  else
+                    {}
+                  end
        events.each do |event|
-         emit(log_stream_name, event)
+         emit(log_stream_name, event, metadata)
        end
      end
@@ -148,18 +184,24 @@ module Fluent::Plugin
      end
    end
 
-   def emit(stream, event)
+   def emit(stream, event, metadata)
      if @parser
        @parser.parse(event.message) {|time,record|
          if @use_aws_timestamp
            time = (event.timestamp / 1000).floor
          end
+         unless metadata.empty?
+           record.merge!("metadata" => metadata)
+         end
          router.emit(@tag, time, record)
        }
      else
        time = (event.timestamp / 1000).floor
        begin
          record = @json_handler.load(event.message)
+         unless metadata.empty?
+           record.merge!("metadata" => metadata)
+         end
          router.emit(@tag, time, record)
        rescue JSON::ParserError, Yajl::ParseError => error # Catch parser errors
          log.error "Invalid JSON encountered while parsing event.message"
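
With `include_metadata true`, the code above merges a `metadata` field into every record before it is emitted, so a record that originally parsed as `{"cloudwatch":"logs1"}` would come out roughly like this (field values illustrative):

```json
{
  "cloudwatch": "logs1",
  "metadata": {
    "log_stream_name": "log-stream-name",
    "log_group_name": "log-group-name"
  }
}
```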
@@ -169,38 +211,55 @@ module Fluent::Plugin
    end
 
    def get_events(log_stream_name)
-     request = {
-       log_group_name: @log_group_name,
-       log_stream_name: log_stream_name
-     }
-     request.merge!(start_time: @start_time) if @start_time
-     request.merge!(end_time: @end_time) if @end_time
-     log_next_token = next_token(log_stream_name)
-     request[:next_token] = log_next_token if !log_next_token.nil? && !log_next_token.empty?
-     response = @logs.get_log_events(request)
-     if valid_next_token(log_next_token, response.next_forward_token)
-       store_next_token(response.next_forward_token, log_stream_name)
-     end
+     throttling_handler('get_log_events') do
+       request = {
+         log_group_name: @log_group_name,
+         log_stream_name: log_stream_name
+       }
+       request.merge!(start_time: @start_time) if @start_time
+       request.merge!(end_time: @end_time) if @end_time
+       log_next_token = next_token(log_stream_name)
+       request[:next_token] = log_next_token if !log_next_token.nil? && !log_next_token.empty?
+       response = @logs.get_log_events(request)
+       if valid_next_token(log_next_token, response.next_forward_token)
+         store_next_token(response.next_forward_token, log_stream_name)
+       end
 
-     response.events
+       response.events
+     end
    end
 
    def describe_log_streams(log_stream_name_prefix, log_streams = nil, next_token = nil)
-     request = {
-       log_group_name: @log_group_name
-     }
-     request[:next_token] = next_token if next_token
-     request[:log_stream_name_prefix] = log_stream_name_prefix if log_stream_name_prefix
-     response = @logs.describe_log_streams(request)
-     if log_streams
-       log_streams.concat(response.log_streams)
-     else
-       log_streams = response.log_streams
+     throttling_handler('describe_log_streams') do
+       request = {
+         log_group_name: @log_group_name
+       }
+       request[:next_token] = next_token if next_token
+       request[:log_stream_name_prefix] = log_stream_name_prefix if log_stream_name_prefix
+       response = @logs.describe_log_streams(request)
+       if log_streams
+         log_streams.concat(response.log_streams)
+       else
+         log_streams = response.log_streams
+       end
+       if response.next_token
+         log_streams = describe_log_streams(log_stream_name_prefix, log_streams, response.next_token)
+       end
+       log_streams
      end
-     if response.next_token
-       log_streams = describe_log_streams(log_stream_name_prefix, log_streams, response.next_token)
+   end
+
+   def throttling_handler(method_name)
+     yield
+   rescue Aws::CloudWatchLogs::Errors::ThrottlingException => err
+     if throttling_retry_seconds
+       log.warn "ThrottlingException #{method_name}. Waiting #{throttling_retry_seconds} seconds to retry."
+       sleep throttling_retry_seconds
+
+       throttling_handler(method_name) { yield }
+     else
+       raise err
      end
-     log_streams
    end
 
    def valid_next_token(prev_token, next_token)
lib/fluent/plugin/out_cloudwatch_logs.rb CHANGED
@@ -7,6 +7,8 @@ module Fluent::Plugin
  class CloudwatchLogsOutput < Output
    Fluent::Plugin.register_output('cloudwatch_logs', self)
 
+   class TooLargeEventError < Fluent::UnrecoverableError; end
+
    helpers :compat_parameters, :inject
 
    DEFAULT_BUFFER_TYPE = "memory"
@@ -41,7 +43,7 @@ module Fluent::Plugin
    config_param :remove_log_group_aws_tags_key, :bool, default: false
    config_param :retention_in_days, :integer, default: nil
    config_param :retention_in_days_key, :string, default: nil
-   config_param :remove_retention_in_days, :bool, default: false
+   config_param :remove_retention_in_days_key, :bool, default: false
    config_param :json_handler, :enum, list: [:yajl, :json], :default => :yajl
    config_param :log_rejected_request, :bool, :default => false
 
@@ -219,6 +221,14 @@ module Fluent::Plugin
 
    events = []
    rs.each do |t, time, record|
+     if @log_group_aws_tags_key && @remove_log_group_aws_tags_key
+       record.delete(@log_group_aws_tags_key)
+     end
+
+     if @retention_in_days_key && @remove_retention_in_days_key
+       record.delete(@retention_in_days_key)
+     end
+
      record = drop_empty_record(record)
 
      time_ms = (time.to_f * 1000).floor
@@ -311,8 +321,7 @@ module Fluent::Plugin
    while event = events.shift
      event_bytesize = event[:message].bytesize + EVENT_HEADER_SIZE
      if MAX_EVENT_SIZE < event_bytesize
-       log.warn "Log event in #{group_name} is discarded because it is too large: #{event_bytesize} bytes exceeds limit of #{MAX_EVENT_SIZE}"
-       break
+       raise TooLargeEventError, "Log event in #{group_name} is discarded because it is too large: #{event_bytesize} bytes exceeds limit of #{MAX_EVENT_SIZE}"
      end
 
      new_chunk = chunk + [event]
@@ -369,8 +378,7 @@ module Fluent::Plugin
    end
  rescue Aws::CloudWatchLogs::Errors::InvalidSequenceTokenException, Aws::CloudWatchLogs::Errors::DataAlreadyAcceptedException => err
    sleep 1 # to avoid too many API calls
-   log_stream = find_log_stream(group_name, stream_name)
-   store_next_sequence_token(group_name, stream_name, log_stream.upload_sequence_token)
+   store_next_sequence_token(group_name, stream_name, err.expected_sequence_token)
    log.warn "updating upload sequence token forcefully because unrecoverable error occurred", {
      "error" => err,
      "log_group" => group_name,
@@ -392,7 +400,13 @@ module Fluent::Plugin
      raise err
    end
  rescue Aws::CloudWatchLogs::Errors::ThrottlingException => err
-   if !@put_log_events_disable_retry_limit && @put_log_events_retry_limit < retry_count
+   if @put_log_events_retry_limit < 1
+     log.warn "failed to PutLogEvents and discard logs because put_log_events_retry_limit is less than 1", {
+       "error_class" => err.class.to_s,
+       "error" => err.message,
+     }
+     return
+   elsif !@put_log_events_disable_retry_limit && @put_log_events_retry_limit < retry_count
      log.error "failed to PutLogEvents and discard logs because retry count exceeded put_log_events_retry_limit", {
        "error_class" => err.class.to_s,
        "error" => err.message,
test/plugin/test_in_cloudwatch_logs.rb CHANGED
@@ -28,6 +28,7 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
      start_time "2019-06-18 00:00:00Z"
      end_time "2020-01-18 00:00:00Z"
      time_range_format "%Y-%m-%d %H:%M:%S%z"
+     throttling_retry_seconds 30
    EOC
 
    assert_equal('test_id', d.instance.aws_key_id)
@@ -43,6 +44,7 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
    assert_equal(1560816000000, d.instance.start_time)
    assert_equal(1579305600000, d.instance.end_time)
    assert_equal("%Y-%m-%d %H:%M:%S%z", d.instance.time_range_format)
+   assert_equal(30, d.instance.throttling_retry_seconds)
  end
 
  test 'invalid time range' do
@@ -97,6 +99,34 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
    assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs2'}], emits[1])
  end
 
+ def test_emit_with_metadata
+   create_log_stream
+
+   time_ms = (Time.now.to_f * 1000).floor
+   put_log_events([
+     {timestamp: time_ms, message: '{"cloudwatch":"logs1"}'},
+     {timestamp: time_ms, message: '{"cloudwatch":"logs2"}'},
+   ])
+
+   sleep 5
+
+   d = create_driver(default_config + %[include_metadata true])
+   d.run(expect_emits: 2, timeout: 5)
+
+   emits = d.events
+   assert_true(emits[0][2].has_key?("metadata"))
+   assert_true(emits[1][2].has_key?("metadata"))
+   emits[0][2].delete_if {|k, v|
+     k == "metadata"
+   }
+   emits[1][2].delete_if {|k, v|
+     k == "metadata"
+   }
+   assert_equal(2, emits.size)
+   assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs1'}], emits[0])
+   assert_equal(['test', (time_ms / 1000).floor, {'cloudwatch' => 'logs2'}], emits[1])
+ end
+
  def test_emit_with_aws_timestamp
    create_log_stream
 
@@ -171,7 +201,6 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
    '@type' => 'cloudwatch_logs',
    'log_group_name' => "#{log_group_name}",
    'log_stream_name' => "#{log_stream_name}",
-   'state_file' => '/tmp/state',
  }
  cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_key_id)) if ENV['aws_key_id']
  cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_sec_key)) if ENV['aws_sec_key']
@@ -181,7 +210,49 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
  csv_format_config = config_element('ROOT', '', cloudwatch_config, [
    config_element('parse', '', {'@type' => 'csv',
                                 'keys' => 'time,message',
-                                'time_key' => 'time'})
+                                'time_key' => 'time'}),
+   config_element('storage', '', {'@type' => 'local',
+                                  'path' => '/tmp/state'})
+ ])
+ create_log_stream
+
+ time_ms = (Time.now.to_f * 1000).floor
+ log_time_ms = time_ms - 10000
+ put_log_events([
+   {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs1"},
+   {timestamp: time_ms, message: Time.at(log_time_ms/1000.floor).to_s + ",Cloudwatch non json logs2"},
+ ])
+
+ sleep 5
+
+ d = create_driver(csv_format_config)
+ d.run(expect_emits: 2, timeout: 5)
+
+ emits = d.events
+ assert_equal(2, emits.size)
+ assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs1"}], emits[0])
+ assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs2"}], emits[1])
+ end
+
+ test "emit with <parse> csv with metadata" do
+   cloudwatch_config = {'tag' => "test",
+                        '@type' => 'cloudwatch_logs',
+                        'log_group_name' => "#{log_group_name}",
+                        'log_stream_name' => "#{log_stream_name}",
+                        'include_metadata' => true,
+                       }
+   cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_key_id)) if ENV['aws_key_id']
+   cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_sec_key)) if ENV['aws_sec_key']
+   cloudwatch_config = cloudwatch_config.merge!(config_elementify(region)) if ENV['region']
+   cloudwatch_config = cloudwatch_config.merge!(config_elementify(endpoint)) if ENV['endpoint']
+
+   csv_format_config = config_element('ROOT', '', cloudwatch_config, [
+     config_element('parse', '', {'@type' => 'csv',
                                   'keys' => 'time,message',
+                                  'time_key' => 'time'}),
+     config_element('storage', '', {'@type' => 'local',
+                                    'path' => '/tmp/state'})
+
  ])
  create_log_stream
 
@@ -198,6 +269,14 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
  d.run(expect_emits: 2, timeout: 5)
 
  emits = d.events
+ assert_true(emits[0][2].has_key?("metadata"))
+ assert_true(emits[1][2].has_key?("metadata"))
+ emits[0][2].delete_if {|k, v|
+   k == "metadata"
+ }
+ emits[1][2].delete_if {|k, v|
+   k == "metadata"
+ }
  assert_equal(2, emits.size)
  assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs1"}], emits[0])
  assert_equal(['test', (log_time_ms / 1000).floor, {"message"=>"Cloudwatch non json logs2"}], emits[1])
@@ -244,7 +323,6 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
    '@type' => 'cloudwatch_logs',
    'log_group_name' => "#{log_group_name}",
    'log_stream_name' => "#{log_stream_name}",
-   'state_file' => '/tmp/state',
  }
  cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_key_id)) if ENV['aws_key_id']
  cloudwatch_config = cloudwatch_config.merge!(config_elementify(aws_sec_key)) if ENV['aws_sec_key']
@@ -254,7 +332,9 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
  regex_format_config = config_element('ROOT', '', cloudwatch_config, [
    config_element('parse', '', {'@type' => 'regexp',
                                 'expression' => "/^(?<cloudwatch>[^ ]*)?/",
-                               })
+                               }),
+   config_element('storage', '', {'@type' => 'local',
+                                  'path' => '/tmp/state'})
  ])
  create_log_stream
 
@@ -608,6 +688,59 @@ class CloudwatchLogsInputTest < Test::Unit::TestCase
    assert_equal(["test", ((time_ms + 7000) / 1000), { "cloudwatch" => "logs7" }], events[6])
    assert_equal(["test", ((time_ms + 8000) / 1000), { "cloudwatch" => "logs8" }], events[7])
  end
+
+ test "retry on Aws::CloudWatchLogs::Errors::ThrottlingException in get_log_events" do
+   config = <<-CONFIG
+     tag test
+     @type cloudwatch_logs
+     log_group_name #{log_group_name}
+     state_file /tmp/state
+     fetch_interval 0.1
+     throttling_retry_seconds 0.2
+   CONFIG
+
+   # it raises the error 2 times
+   counter = 0
+   times = 2
+   stub(@client).get_log_events(anything) {
+     counter += 1
+     counter <= times ? raise(Aws::CloudWatchLogs::Errors::ThrottlingException.new(nil, "error")) : OpenStruct.new(events: [], next_forward_token: nil)
+   }
+
+   d = create_driver(config)
+
+   # so it is expected to call valid_next_token once
+   mock(d.instance).valid_next_token(nil, nil).once
+
+   d.run
+   assert_equal(2, d.logs.select {|l| l =~ /ThrottlingException get_log_events. Waiting 0.2 seconds to retry/ }.size)
+ end
+
+ test "retry on Aws::CloudWatchLogs::Errors::ThrottlingException in describe_log_streams" do
+   config = <<-CONFIG
+     tag test
+     @type cloudwatch_logs
+     log_group_name #{log_group_name}
+     use_log_stream_name_prefix true
+     state_file /tmp/state
+     fetch_interval 0.1
+     throttling_retry_seconds 0.2
+   CONFIG
+
+   # it raises the error 2 times
+   log_stream = Aws::CloudWatchLogs::Types::LogStream.new(log_stream_name: "stream_name")
+   counter = 0
+   times = 2
+   stub(@client).describe_log_streams(anything) {
+     counter += 1
+     counter <= times ? raise(Aws::CloudWatchLogs::Errors::ThrottlingException.new(nil, "error")) : OpenStruct.new(log_streams: [log_stream], next_token: nil)
+   }
+
+   d = create_driver(config)
+
+   d.run
+   assert_equal(2, d.logs.select {|l| l =~ /ThrottlingException describe_log_streams. Waiting 0.2 seconds to retry/ }.size)
+ end
  end
 
  private
test/plugin/test_out_cloudwatch_logs.rb CHANGED
@@ -547,6 +547,37 @@ class CloudwatchLogsOutputTest < Test::Unit::TestCase
    assert(d.logs.any?{|log| log.include?("failed to set retention policy for Log group")})
  end
 
+ def test_remove_retention_in_days_key
+   new_log_stream
+
+   d = create_driver(<<-EOC)
+     #{default_config}
+     log_group_name #{log_group_name}
+     log_stream_name #{log_stream_name}
+     retention_in_days_key retention_in_days
+     remove_retention_in_days_key true
+   EOC
+
+   records = [
+     {'cloudwatch' => 'logs1', 'message' => 'message1', 'retention_in_days' => '7'},
+     {'cloudwatch' => 'logs2', 'message' => 'message2', 'retention_in_days' => '7'},
+   ]
+
+   time = Time.now
+   d.run(default_tag: fluentd_tag) do
+     records.each_with_index do |record, i|
+       d.feed(time.to_i + i, record)
+     end
+   end
+
+   sleep 10
+
+   events = get_log_events
+   assert_equal(2, events.size)
+   assert_equal({'cloudwatch' => 'logs1', 'message' => 'message1'}, JSON.parse(events[0].message))
+   assert_equal({'cloudwatch' => 'logs2', 'message' => 'message2'}, JSON.parse(events[1].message))
+ end
+
  def test_log_group_aws_tags_key
    clear_log_group
 
@@ -576,6 +607,37 @@ class CloudwatchLogsOutputTest < Test::Unit::TestCase
    assert_equal("value2", awstags.fetch("tag2"))
  end
 
+ def test_remove_log_group_aws_tags_key
+   new_log_stream
+
+   d = create_driver(<<-EOC)
+     #{default_config}
+     log_group_name #{log_group_name}
+     log_stream_name #{log_stream_name}
+     log_group_aws_tags_key log_group_tags
+     remove_log_group_aws_tags_key true
+   EOC
+
+   records = [
+     {'cloudwatch' => 'logs1', 'message' => 'message1', 'log_group_tags' => {"tag1" => "value1", "tag2" => "value2"}},
+     {'cloudwatch' => 'logs2', 'message' => 'message2', 'log_group_tags' => {"tag1" => "value1", "tag2" => "value2"}},
+   ]
+
+   time = Time.now
+   d.run(default_tag: fluentd_tag) do
+     records.each_with_index do |record, i|
+       d.feed(time.to_i + i, record)
+     end
+   end
+
+   sleep 10
+
+   events = get_log_events
+   assert_equal(2, events.size)
+   assert_equal({'cloudwatch' => 'logs1', 'message' => 'message1'}, JSON.parse(events[0].message))
+   assert_equal({'cloudwatch' => 'logs2', 'message' => 'message2'}, JSON.parse(events[1].message))
+ end
+
  def test_log_group_aws_tags_key_same_group_diff_tags
    clear_log_group
 
@@ -651,6 +713,32 @@ class CloudwatchLogsOutputTest < Test::Unit::TestCase
    assert_equal({'cloudwatch' => 'logs2', 'message' => 'message2'}, JSON.parse(events[1].message))
  end
 
+ def test_retrying_on_throttling_exception_with_put_log_events_retry_limit_as_zero
+   client = Aws::CloudWatchLogs::Client.new
+   @called = false
+   stub(client).put_log_events(anything) {
+     raise(Aws::CloudWatchLogs::Errors::ThrottlingException.new(nil, "error"))
+   }.once.ordered
+
+   d = create_driver(<<-EOC)
+     #{default_config}
+     log_group_name #{log_group_name}
+     log_stream_name #{log_stream_name}
+     @log_level debug
+     put_log_events_retry_limit 0
+   EOC
+   time = event_time
+   d.instance.instance_variable_set(:@logs, client)
+   d.run(default_tag: fluentd_tag) do
+     d.feed(time, {'message' => 'message1'})
+   end
+
+   logs = d.logs
+   assert_equal(0, logs.select {|l| l =~ /Called PutLogEvents API/ }.size)
+   assert_equal(1, logs.select {|l| l =~ /failed to PutLogEvents/ }.size)
+   assert_equal(0, logs.select {|l| l =~ /retry succeeded/ }.size)
+ end
+
  def test_retrying_on_throttling_exception
    resp = Object.new
    mock(resp).rejected_log_events_info {}
test/test_helper.rb CHANGED
@@ -42,10 +42,7 @@ module CloudwatchLogsTestHelper
  end
 
  def log_stream_name(log_stream_name_prefix = nil)
-   if !@log_stream_name
-     new_log_stream(log_stream_name_prefix)
-   end
-   @log_stream_name
+   @log_stream_name ||= new_log_stream(log_stream_name_prefix)
  end
 
  def new_log_stream(log_stream_name_prefix = nil)
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-cloudwatch-logs
  version: !ruby/object:Gem::Version
-   version: 0.9.2
+   version: 0.10.1
  platform: ruby
  authors:
  - Ryota Arai
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-04-02 00:00:00.000000000 Z
+ date: 2020-07-15 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: fluentd
@@ -115,6 +115,7 @@ executables: []
  extensions: []
  extra_rdoc_files: []
  files:
+ - ".github/workflows/issue-auto-closer.yml"
  - ".gitignore"
  - ".travis.yml"
  - Gemfile