logstash-output-newrelic 1.4.0 → 1.5.1

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 9e6c4d247631f976e7a7fc6153a02cc764f583491c749654a827c1fd6846c6c2
4
- data.tar.gz: 42389a3c04b5f1ef4804f9bf7efa820d7037c25e2df73c127354d180c624ff02
3
+ metadata.gz: b9339803f78539fb9240937234cc69240b289f8050c9ddfd37210f423570d828
4
+ data.tar.gz: 14746a11f4c482bebe359504f9a66987bb6d500fb6e4b38f2d839a508aa4d62b
5
5
  SHA512:
6
- metadata.gz: deebd2f6b0c7f5c07ee4fc347f577a9011a481be03f8e170a9e9c368594009cbc33b853e05c025a1938b038febc8cc6d6db8c906aa3eacbeacfabab3483d7755
7
- data.tar.gz: becf3aef35efc785f84ea956cabf2004d8d1ddbde4dac11f11bec3a25b65ab9a3277a97dcb35add2839581fb0b19ddf59114a83c9e5afd7a54da14ddd9b181d1
6
+ metadata.gz: 7cf438c882e3017df2597038d8812110d2fd8037c57c3d5e0440e1f11396143fa4aa52b1ad639d617cc32fb0c7ed05ac148bf70088d1e9dfab8b4170bf136f43
7
+ data.tar.gz: b6c5a7c7acf89da4da50a1e675a9339ccc8f17cd5bce2b4e0662f7be1ee4211ecce1ad0b120036084938bdf388aee3224ab6fdf5b83625856cc0a8caa8dbc38a
@@ -11,9 +11,8 @@ require_relative './config/bigdecimal_patch'
11
11
  require_relative './exception/error'
12
12
 
13
13
  class LogStash::Outputs::NewRelic < LogStash::Outputs::Base
14
- java_import java.util.concurrent.Executors;
15
14
 
16
- NON_RETRYABLE_CODES = Set[401, 403]
15
+ RETRIABLE_CODES = Set[408, 429, 500, 502, 503, 504, 599]
17
16
 
18
17
  MAX_PAYLOAD_SIZE_BYTES = 1_000_000
19
18
 
@@ -43,7 +42,14 @@ class LogStash::Outputs::NewRelic < LogStash::Outputs::Base
43
42
  'Content-Encoding' => 'gzip',
44
43
  'Content-Type' => 'application/json'
45
44
  }.merge(auth).freeze
45
+
46
+ # We use a semaphore to ensure that at most there are @concurrent_requests inflight Logstash requests being processed
47
+ # by our plugin at the same time. Without this semaphore, given that @executor.submit() is an asynchronous method, it
48
+ # would cause that an unbounded amount of inflight requests may be processed by our plugin. Logstash then believes
49
+ # that our plugin has processed the request, and keeps reading more inflight requests in memory. This causes a memory
50
+ # leak and results in an OutOfMemoryError.
46
51
  @executor = java.util.concurrent.Executors.newFixedThreadPool(@concurrent_requests)
52
+ @semaphore = java.util.concurrent.Semaphore.new(@concurrent_requests)
47
53
  end
48
54
 
49
55
  # Used by tests so that the test run can complete (background threads prevent JVM exit)
@@ -97,7 +103,19 @@ class LogStash::Outputs::NewRelic < LogStash::Outputs::Base
97
103
 
98
104
  nr_logs = to_nr_logs(logstash_events)
99
105
 
100
- package_and_send_recursively(nr_logs)
106
+ submit_logs_to_be_sent(nr_logs)
107
+ end
108
+
109
+ def submit_logs_to_be_sent(nr_logs)
110
+ @semaphore.acquire()
111
+ execute = @executor.java_method :submit, [java.lang.Runnable]
112
+ execute.call do
113
+ begin
114
+ package_and_send_recursively(nr_logs)
115
+ ensure
116
+ @semaphore.release()
117
+ end
118
+ end
101
119
  end
102
120
 
103
121
  def package_and_send_recursively(nr_logs)
@@ -113,27 +131,24 @@ class LogStash::Outputs::NewRelic < LogStash::Outputs::Base
113
131
  :logs => nr_logs
114
132
  }
115
133
 
116
- execute = @executor.java_method :submit, [java.lang.Runnable]
117
- execute.call do
118
- compressed_payload = StringIO.new
119
- gzip = Zlib::GzipWriter.new(compressed_payload)
120
- gzip << [payload].to_json
121
- gzip.close
122
-
123
- compressed_size = compressed_payload.string.bytesize
124
- log_record_count = nr_logs.length
125
-
126
- if compressed_size >= MAX_PAYLOAD_SIZE_BYTES && log_record_count == 1
127
- @logger.error("Can't compress record below required maximum packet size and it will be discarded.")
128
- elsif compressed_size >= MAX_PAYLOAD_SIZE_BYTES && log_record_count > 1
129
- @logger.debug("Compressed payload size (#{compressed_size}) exceededs maximum packet size (1MB) and will be split in two.")
130
- split_index = log_record_count / 2
131
- package_and_send_recursively(nr_logs[0...split_index])
132
- package_and_send_recursively(nr_logs[split_index..-1])
133
- else
134
- @logger.debug("Payload compressed size: #{compressed_size}")
135
- nr_send(compressed_payload.string)
136
- end
134
+ compressed_payload = StringIO.new
135
+ gzip = Zlib::GzipWriter.new(compressed_payload)
136
+ gzip << [payload].to_json
137
+ gzip.close
138
+
139
+ compressed_size = compressed_payload.string.bytesize
140
+ log_record_count = nr_logs.length
141
+
142
+ if compressed_size >= MAX_PAYLOAD_SIZE_BYTES && log_record_count == 1
143
+ @logger.error("Can't compress record below required maximum packet size and it will be discarded.")
144
+ elsif compressed_size >= MAX_PAYLOAD_SIZE_BYTES && log_record_count > 1
145
+ @logger.debug("Compressed payload size (#{compressed_size}) exceeds maximum packet size (1MB) and will be split in two.")
146
+ split_index = log_record_count / 2
147
+ package_and_send_recursively(nr_logs[0...split_index])
148
+ package_and_send_recursively(nr_logs[split_index..-1])
149
+ else
150
+ @logger.debug("Payload compressed size: #{compressed_size}")
151
+ nr_send(compressed_payload.string)
137
152
  end
138
153
  end
139
154
 
@@ -145,6 +160,8 @@ class LogStash::Outputs::NewRelic < LogStash::Outputs::Base
145
160
 
146
161
  def nr_send(payload)
147
162
  retries = 0
163
+ retry_duration = 1
164
+
148
165
  begin
149
166
  http = Net::HTTP.new(@end_point.host, 443)
150
167
  request = Net::HTTP::Post.new(@end_point.request_uri)
@@ -159,11 +176,15 @@ class LogStash::Outputs::NewRelic < LogStash::Outputs::Base
159
176
  @header.each { |k, v| request[k] = v }
160
177
  request.body = payload
161
178
  handle_response(http.request(request))
179
+ if (retries > 0)
180
+ @logger.warn("Successfully sent logs at retry #{retries}")
181
+ end
162
182
  rescue Error::BadResponseCodeError => e
163
183
  @logger.error(e.message)
164
184
  if (should_retry(retries) && is_retryable_code(e))
165
185
  retries += 1
166
- sleep(1)
186
+ sleep(retry_duration)
187
+ retry_duration *= 2
167
188
  retry
168
189
  end
169
190
  rescue => e
@@ -178,7 +199,8 @@ class LogStash::Outputs::NewRelic < LogStash::Outputs::Base
178
199
  :error_class => e.class.name,
179
200
  :backtrace => e.backtrace
180
201
  )
181
- sleep(1)
202
+ sleep(retry_duration)
203
+ retry_duration *= 2
182
204
  retry
183
205
  else
184
206
  @logger.error(
@@ -197,6 +219,6 @@ class LogStash::Outputs::NewRelic < LogStash::Outputs::Base
197
219
 
198
220
  def is_retryable_code(response_error)
199
221
  error_code = response_error.response_code
200
- !NON_RETRYABLE_CODES.include?(error_code)
222
+ RETRIABLE_CODES.include?(error_code)
201
223
  end
202
224
  end # class LogStash::Outputs::NewRelic
@@ -1,7 +1,7 @@
1
1
  module LogStash
2
2
  module Outputs
3
3
  module NewRelicVersion
4
- VERSION = "1.4.0"
4
+ VERSION = "1.5.1"
5
5
  end
6
6
  end
7
7
  end
@@ -265,7 +265,7 @@ describe LogStash::Outputs::NewRelic do
265
265
  end
266
266
  end
267
267
 
268
- context "error handling" do
268
+ context "error handling and retry logic" do
269
269
  it "continues through errors, future calls should still succeed" do
270
270
  stub_request(:any, base_uri)
271
271
  .to_raise(StandardError.new("from test"))
@@ -281,33 +281,41 @@ describe LogStash::Outputs::NewRelic do
281
281
  .to have_been_made
282
282
  end
283
283
 
284
- it "retry when receive retryable http error code" do
285
- stub_request(:any, base_uri)
286
- .to_return(status: 500)
287
- .to_return(status: 200)
288
-
289
- event1 = LogStash::Event.new({ "message" => "Test message 1" })
290
- @newrelic_output.multi_receive([event1])
291
-
292
- wait_for(a_request(:post, base_uri)
293
- .with { |request| single_gzipped_message(request.body)['message'] == 'Test message 1' })
294
- .to have_been_made.times(2)
295
- end
296
-
297
- it "not retry when receive a non retryable http error code" do
298
- stub_request(:any, base_uri)
299
- .to_return(status: 401)
300
-
301
- event1 = LogStash::Event.new({ "message" => "Test message 1" })
302
- @newrelic_output.multi_receive([event1])
303
- # Due the async behavior we need to wait to be sure that the method was not called more than 1 time
304
- sleep(2)
305
- wait_for(a_request(:post, base_uri)
306
- .with { |request| single_gzipped_message(request.body)['message'] == 'Test message 1' })
307
- .to have_been_made.times(1)
284
+ [
285
+ { "returned_status_code" => 200, "expected_to_retry" => false },
286
+ { "returned_status_code" => 202, "expected_to_retry" => false },
287
+ { "returned_status_code" => 400, "expected_to_retry" => false },
288
+ { "returned_status_code" => 404, "expected_to_retry" => false },
289
+ { "returned_status_code" => 408, "expected_to_retry" => true },
290
+ { "returned_status_code" => 429, "expected_to_retry" => true },
291
+ { "returned_status_code" => 500, "expected_to_retry" => true },
292
+ { "returned_status_code" => 502, "expected_to_retry" => true },
293
+ { "returned_status_code" => 503, "expected_to_retry" => true },
294
+ { "returned_status_code" => 504, "expected_to_retry" => true },
295
+ { "returned_status_code" => 599, "expected_to_retry" => true }
296
+ ].each do |test_case|
297
+ returned_status_code = test_case["returned_status_code"]
298
+ expected_to_retry = test_case["expected_to_retry"]
299
+
300
+ it "should #{expected_to_retry ? "" : "not "}retry on status code #{returned_status_code}" do
301
+ stub_request(:any, base_uri)
302
+ .to_return(status: returned_status_code)
303
+ .to_return(status: 200)
304
+
305
+ logstash_event = LogStash::Event.new({ "message" => "Test message" })
306
+ @newrelic_output.multi_receive([logstash_event])
307
+
308
+ expected_retries = expected_to_retry ? 2 : 1
309
+ wait_for(a_request(:post, base_uri)
310
+ .with { |request| single_gzipped_message(request.body)['message'] == 'Test message' })
311
+ .to have_been_made.at_least_times(expected_retries)
312
+ wait_for(a_request(:post, base_uri)
313
+ .with { |request| single_gzipped_message(request.body)['message'] == 'Test message' })
314
+ .to have_been_made.at_most_times(expected_retries)
315
+ end
308
316
  end
309
317
 
310
- it "not retries when retry is disabled" do
318
+ it "does not retry when max_retries is set to 0" do
311
319
  @newrelic_output = LogStash::Plugin.lookup("output", "newrelic").new(
312
320
  { "base_uri" => base_uri, "license_key" => api_key, "max_retries" => '0' }
313
321
  )
@@ -324,7 +332,7 @@ describe LogStash::Outputs::NewRelic do
324
332
  .to have_been_made.times(1)
325
333
  end
326
334
 
327
- it "retry when receive a not expected exception" do
335
+ it "retries when receiving an unexpected exception" do
328
336
  stub_request(:any, base_uri)
329
337
  .to_raise(StandardError.new("from test"))
330
338
  .to_return(status: 200)
@@ -335,6 +343,28 @@ describe LogStash::Outputs::NewRelic do
335
343
  .with { |request| single_gzipped_message(request.body)['message'] == 'Test message 1' })
336
344
  .to have_been_made.times(2)
337
345
  end
346
+
347
+ it "performs the configured amount of retries, no more, no less" do
348
+ @newrelic_output = LogStash::Plugin.lookup("output", "newrelic").new(
349
+ { "base_uri" => base_uri, "license_key" => api_key, "max_retries" => '3' }
350
+ )
351
+ @newrelic_output.register
352
+ stub_request(:any, base_uri)
353
+ .to_return(status: 500)
354
+ .to_return(status: 500)
355
+ .to_return(status: 500)
356
+ .to_return(status: 200)
357
+
358
+ event1 = LogStash::Event.new({ "message" => "Test message" })
359
+ @newrelic_output.multi_receive([event1])
360
+
361
+ wait_for(a_request(:post, base_uri)
362
+ .with { |request| single_gzipped_message(request.body)['message'] == 'Test message' })
363
+ .to have_been_made.at_least_times(3)
364
+ wait_for(a_request(:post, base_uri)
365
+ .with { |request| single_gzipped_message(request.body)['message'] == 'Test message' })
366
+ .to have_been_made.at_most_times(3)
367
+ end
338
368
  end
339
369
 
340
370
  context "JSON serialization" do
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logstash-output-newrelic
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.4.0
4
+ version: 1.5.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - New Relic Logging Team
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2023-08-14 00:00:00.000000000 Z
11
+ date: 2023-08-16 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  requirement: !ruby/object:Gem::Requirement