fluent-plugin-cloudwatch-ingest 0.1.9 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: fb407b315654aac2dbac07280a22beb45dca1358
- data.tar.gz: 15798196f5d8653346d63e037e3585992d486ae6
+ metadata.gz: 867b3c94f91c292c250bb5b2071a6f0115f56051
+ data.tar.gz: 2844dc5a9d0443f15f5b240ca1c91b84b35b512c
  SHA512:
- metadata.gz: 68a1ada252e002cd072928bbb22f7a0caaa39b70b40a6b742d555d306fb3efffbd6887c439b10d47efaa0675315eb2a7cf3993468c1b44b402757aaf0f3712b4
- data.tar.gz: a5a87902b86858f5bc88c151fabbde3af46591f3ba3251363c1dd824bc072775cf77681416a3b4c61dc47bdc40e51bc5dcec6a2f5915d36dbba79f6bc8a8a814
+ metadata.gz: 7b8a46269c09a36d7b1918f9e12ff01fec05a57366aad780a36589cec423df5d5d77602edba436e771e507164506ea8b54b9f1b6fe20c89d4eb9b19e91e88120
+ data.tar.gz: d9c71cf85ca8cafad29ac04ab5a9ef35eab1a47f245983b33e05a301019a39cb1db4f78fa4c3954fb0b4b2351dd1aefc1e7eed015e78dac79190a0a4fe8335cc
@@ -12,3 +12,5 @@ Metrics/AbcSize:
  Enabled: false
  Metrics/CyclomaticComplexity:
  Max: 10
+ Metrics/ClassLength:
+ Enabled: false
@@ -2,7 +2,7 @@ module Fluent
  module Plugin
  module Cloudwatch
  module Ingest
- VERSION = '0.1.9'.freeze
+ VERSION = '0.1.10'.freeze
  end
  end
  end
@@ -28,206 +28,206 @@ module Fluent::Plugin
  config_param :interval, :time, default: 60
  desc 'Time to pause between API call failures'
  config_param :api_interval, :time, default: 120
- end
-
- def initialize
- super
- log.info('Starting fluentd-plugin-cloudwatch-ingest')
- end
 
- def configure(conf)
- super
- log.info('Configured fluentd-plugin-cloudwatch-ingest')
- end
-
- def start
- super
- log.info('Started fluentd-plugin-cloudwatch-ingest')
-
- # Get a handle to Cloudwatch
- aws_options = {}
- Aws.config[:region] = @region
- log.info("Working in region #{@region}")
-
- if @sts_enabled
- aws_options[:credentials] = Aws::AssumeRoleCredentials.new(
- role_arn: @sts_arn,
- role_session_name: @sts_session_name
- )
-
- log.info("Using STS for authentication with source account ARN:
- #{@sts_arn}, session name: #{@sts_session_name}")
- else
- log.info('Using local instance IAM role for authentication')
+ def initialize
+ super
+ log.info('Starting fluentd-plugin-cloudwatch-ingest')
  end
- @aws = Aws::CloudWatchLogs::Client.new(aws_options)
- @finished = false
- @thread = Thread.new(&method(:run))
- end
 
- def shutdown
- @finished = true
- @thread.join
- end
-
- private
+ def configure(conf)
+ super
+ log.info('Configured fluentd-plugin-cloudwatch-ingest')
+ end
 
- def emit(log_event)
- # TODO: I need to do something useful
- end
+ def start
+ super
+ log.info('Started fluentd-plugin-cloudwatch-ingest')
 
- def log_groups(log_group_prefix)
- log_groups = []
+ # Get a handle to Cloudwatch
+ aws_options = {}
+ Aws.config[:region] = @region
+ log.info("Working in region #{@region}")
 
- # Fetch all log group names
- next_token = nil
- loop do
- begin
- response = @aws.describe_log_groups(
- log_group_name_prefix: log_group_prefix,
- next_token: next_token
+ if @sts_enabled
+ aws_options[:credentials] = Aws::AssumeRoleCredentials.new(
+ role_arn: @sts_arn,
+ role_session_name: @sts_session_name
  )
 
- response.log_groups.each { |g| log_groups << g.log_group_name }
- break unless response.next_token
- next_token = response.next_token
- rescue => boom
- log.error("Unable to retrieve log groups: #{boom}")
- next_token = nil
- sleep @api_interval
- retry
+ log.info("Using STS for authentication with source account ARN:
+ #{@sts_arn}, session name: #{@sts_session_name}")
+ else
+ log.info('Using local instance IAM role for authentication')
  end
+ @aws = Aws::CloudWatchLogs::Client.new(aws_options)
+ @finished = false
+ @thread = Thread.new(&method(:run))
  end
- log.info("Found #{log_groups.size} log groups")
 
- return log_groups
- end
+ def shutdown
+ @finished = true
+ @thread.join
+ end
 
- def log_streams(log_group_name, log_stream_name_prefix)
- log_streams = []
- next_token = nil
- loop do
- begin
- response = @aws.describe_log_streams(
- log_group_name: group,
- log_stream_name_prefix: log_stream_name_prefix,
- next_token: next_token
- )
+ private
 
- response.log_streams.each { |s| log_streams << s.log_stream_name }
- break unless reponse.next_token
- next_token = reponse.next_token
- rescue => boom
- log.error("Unable to retrieve log streams for group #{group}
- with stream prefix #{log_stream_name_prefix}: #{boom}")
- log_streams = []
- next_token = nil
- sleep @api_interval
- retry
- end
+ def emit(log_event)
+ # TODO: I need to do something useful
  end
- log.info("Found #{log_streams.size} streams for #{log_group_name}")
 
- return log_streams
- end
+ def log_groups(log_group_prefix)
+ log_groups = []
 
- def run
- until @finished
- begin
- state = State.new(@state_file_name)
- rescue => boom
- log.info("Failed to get state lock. Sleeping for #{@interval}: #{boom}")
- sleep @interval
- retry
+ # Fetch all log group names
+ next_token = nil
+ loop do
+ begin
+ response = @aws.describe_log_groups(
+ log_group_name_prefix: log_group_prefix,
+ next_token: next_token
+ )
+
+ response.log_groups.each { |g| log_groups << g.log_group_name }
+ break unless response.next_token
+ next_token = response.next_token
+ rescue => boom
+ log.error("Unable to retrieve log groups: #{boom}")
+ next_token = nil
+ sleep @api_interval
+ retry
+ end
  end
+ log.info("Found #{log_groups.size} log groups")
 
- # Fetch the streams for each log group
- log_groups(@log_group_name_prefix).each do |group|
- # For each log stream get and emit the events
- log_streams(group, @log_stream_name_prefix).each do |stream|
- # See if we have some stored state for this group and stream.
- # If we have then use the stored forward_token to pick up
- # from that point. Otherwise start from the start.
- stream_token = (state[group][stream] if state[group][stream])
-
- begin
- loop do
- response = @aws.get_log_events(
- log_group_name: group,
- log_stream_name: stream,
- next_token: stream_token
- )
-
- emit(response.events)
- break unless response.next_token
- stream_token = response.next_token
- end
+ return log_groups
+ end
 
- # Once all events for this stream have been processed,
- # store the forward token
- state[group][stream] = response.next_forward_token
- rescue => boom
- log.error("Unable to retrieve events for stream
- #{stream} in group #{group}: #{boom}")
- sleep @api_interval
- retry
- end
+ def log_streams(log_group_name, log_stream_name_prefix)
+ log_streams = []
+ next_token = nil
+ loop do
+ begin
+ response = @aws.describe_log_streams(
+ log_group_name: group,
+ log_stream_name_prefix: log_stream_name_prefix,
+ next_token: next_token
+ )
+
+ response.log_streams.each { |s| log_streams << s.log_stream_name }
+ break unless reponse.next_token
+ next_token = reponse.next_token
+ rescue => boom
+ log.error("Unable to retrieve log streams for group #{group}
+ with stream prefix #{log_stream_name_prefix}: #{boom}")
+ log_streams = []
+ next_token = nil
+ sleep @api_interval
+ retry
  end
  end
+ log.info("Found #{log_streams.size} streams for #{log_group_name}")
 
- log.info('Pruning and saving state')
- state.prune(log_groups) # Remove dead streams
- begin
- state.save
- state.close
- rescue
- log.error("Unable to save state file: #{boom}")
- end
- log.info("Pausing for #{@interval}")
- sleep @interval
+ return log_streams
  end
- end
-
- class CloudwatchIngestInput::State < Hash
- class LockFailed < RuntimeError; end
- attr_accessor :statefile
 
- def initialize(filepath)
- self.statefile = Pathname.new(filepath).open('w')
- unless statefile.exists?
- log.warn("State file #{statefile} does not exist. Creating a new one.")
+ def run
+ until @finished
  begin
- save
+ state = State.new(@state_file_name)
  rescue => boom
- log.error("Unable to create new state file #{statefile}: #{boom}")
+ log.info("Failed lock state. Sleeping for #{@interval}: #{boom}")
+ sleep @interval
+ retry
  end
- end
 
- # Attempt to obtain an exclusive flock on the file and raise and
- # exception if we can't
- log.info("Obtaining exclusive lock on state file #{statefile}")
- lockstatus = statefile.flock(File::LOCK_EX | File::LOCK_NB)
- raise CloudwatchIngestInput::State::LockFailed if lockstatus == false
+ # Fetch the streams for each log group
+ log_groups(@log_group_name_prefix).each do |group|
+ # For each log stream get and emit the events
+ log_streams(group, @log_stream_name_prefix).each do |stream|
+ # See if we have some stored state for this group and stream.
+ # If we have then use the stored forward_token to pick up
+ # from that point. Otherwise start from the start.
+ stream_token = (state[group][stream] if state[group][stream])
+
+ begin
+ loop do
+ response = @aws.get_log_events(
+ log_group_name: group,
+ log_stream_name: stream,
+ next_token: stream_token
+ )
+
+ emit(response.events)
+ break unless response.next_token
+ stream_token = response.next_token
+ end
+
+ # Once all events for this stream have been processed,
+ # store the forward token
+ state[group][stream] = response.next_forward_token
+ rescue => boom
+ log.error("Unable to retrieve events for stream
+ #{stream} in group #{group}: #{boom}")
+ sleep @api_interval
+ retry
+ end
+ end
+ end
 
- merge!(YAML.safe_load(statefile.read))
- log.info("Loaded state for #{keys.size} log groups from #{statefile}")
+ log.info('Pruning and saving state')
+ state.prune(log_groups) # Remove dead streams
+ begin
+ state.save
+ state.close
+ rescue
+ log.error("Unable to save state file: #{boom}")
+ end
+ log.info("Pausing for #{@interval}")
+ sleep @interval
+ end
  end
 
- def save
- statefile.write(YAML.dump(self))
- log.info("Saved state to #{statefile}")
- end
+ class CloudwatchIngestInput::State < Hash
+ class LockFailed < RuntimeError; end
+ attr_accessor :statefile
 
- def close
- statefile.close
- end
+ def initialize(filepath)
+ self.statefile = Pathname.new(filepath).open('w')
+ unless statefile.exists?
+ log.warn("No state file #{statefile} Creating a new one.")
+ begin
+ save
+ rescue => boom
+ log.error("Unable to create new state file #{statefile}: #{boom}")
+ end
+ end
+
+ # Attempt to obtain an exclusive flock on the file and raise and
+ # exception if we can't
+ log.info("Obtaining exclusive lock on state file #{statefile}")
+ lockstatus = statefile.flock(File::LOCK_EX | File::LOCK_NB)
+ raise CloudwatchIngestInput::State::LockFailed if lockstatus == false
 
- def prune(log_groups)
- groups_before = keys.size
- delete_if { |k, _v| true unless log_groups.key?(k) }
- log.info("Pruned #{groups_before - keys.size} keys from state file")
+ merge!(YAML.safe_load(statefile.read))
+ log.info("Loaded state for #{keys.size} log groups from #{statefile}")
+ end
 
- # TODO: also prune streams as these are most likely to be transient
+ def save
+ statefile.write(YAML.dump(self))
+ log.info("Saved state to #{statefile}")
+ end
+
+ def close
+ statefile.close
+ end
+
+ def prune(log_groups)
+ groups_before = keys.size
+ delete_if { |k, _v| true unless log_groups.key?(k) }
+ log.info("Pruned #{groups_before - keys.size} keys from state file")
+
+ # TODO: also prune streams as these are most likely to be transient
+ end
  end
  end
  end
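Note on the API usage in the hunk above: the input plugin walks CloudWatch Logs by repeatedly calling the describe/get APIs, following next_token until it is exhausted, and pausing for api_interval before retrying a failed call. The snippet below is a minimal standalone sketch of that pagination-and-retry pattern using the aws-sdk-cloudwatchlogs gem; the region, prefix, and sleep value are illustrative placeholders, not values taken from the gem.

require 'aws-sdk-cloudwatchlogs'

# Sketch of the next_token pagination with retry-on-failure used by the
# plugin's log_groups method. Region, prefix and back-off are placeholders.
client = Aws::CloudWatchLogs::Client.new(region: 'eu-west-1')

log_groups = []
next_token = nil
loop do
  begin
    response = client.describe_log_groups(
      log_group_name_prefix: '/example/prefix', # hypothetical prefix
      next_token: next_token
    )
    response.log_groups.each { |g| log_groups << g.log_group_name }
    break unless response.next_token
    next_token = response.next_token
  rescue StandardError => e
    # On any API failure, pause and retry the same page
    warn "describe_log_groups failed: #{e}"
    sleep 120
    retry
  end
end

puts "Found #{log_groups.size} log groups"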
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-cloudwatch-ingest
  version: !ruby/object:Gem::Version
- version: 0.1.9
+ version: 0.1.10
  platform: ruby
  authors:
  - Sam Pointer