logstash-output-scalyr 0.1.12 → 0.1.17.beta

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 8e13b3be77498d8ff0613a34733b477a5c648d48af1070c4ed083d3cd178b69c
- data.tar.gz: 2873fd90e70e8ac42bfdda09041fc2863eda1a0760efca7203bdc8e63bcbaa5d
+ metadata.gz: 96fd1b3adba21150a05f43d1e8d6510711197da866d3cd6ec8b57bae14e53391
+ data.tar.gz: 221aac42711955c49c878c324e2570ebb55050cad13f2d4911b99c4b3f1eb5a1
  SHA512:
- metadata.gz: 192c043d86b0b2b7e12532e16e0c94654789d8a6e959994b317b699b02f318b36a220f1935c5a214174d67f46a5d3dea77b01a3299d290246c59d9ed652de150
- data.tar.gz: 7b9b18c86a9abe7141c6b0a74ec99a37ee2cc301dcaabf4e2c25bff02673a2388912a3f155af2534a22658165c5998278ba194851a8b96317b02ed3ff127b94f
+ metadata.gz: c8a678e7caf8778dbe9b44e8d8e48c809b016ac6b89424c4c29c29b63578fea61f236ae80a528dab7707c8ee534cfab0f20a252d688f44df53faccdd1ce907b0
+ data.tar.gz: c263ae0ecffacb454f98f112d384c544cfd68fb0e0e6a0d7339a6b84add2c94b4bb99afd20a815b587f5722e0125d80fc25f9285574187c9f2a0ff2af7f219ec
data/CHANGELOG.md CHANGED
@@ -1,5 +1,26 @@
  # Beta

+ ## 0.1.17.beta
+ - Catch errors relating to Bignum conversions present in the ``json`` library and manually convert to string as
+ a workaround.
+
+ ## 0.1.16.beta
+ - Fix race condition in ``register()`` method.
+
+ ## 0.1.15.beta
+ - Only call ``send_status`` method at the end of ``multi_receive()`` if there is at least one
+ record in the batch when ``report_status_for_empty_batches`` config option is set to ``false``.
+ - Update ``register()`` method to use a separate short-lived client session for sending initial
+ client status.
+
+ ## 0.1.14.beta
+ - Add configurable max retries for requests when running into errors.
+ - Add ability to send messages to the dead letter queue if we exhaust all retries and if it is configured.
+ - Log truncated error body for all errors to help with debugging.
+
+ ## 0.1.13
+ - Fix synchronization of status message sending code to avoid duplicate logs.
+
  ## 0.1.12
  - Add logging of successful request retries after an error for additional clarity.
  - Add debug level logging of request body on error.
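The 0.1.17.beta entry above refers to the `ClassCastException` that the bundled `json` gem can raise on older JRuby releases when serializing Bignum values (flori/json#336). A minimal sketch of the workaround, assuming the pre-2.4 `Bignum` class that Logstash's bundled JRuby still exposes; `stringify_bignums` is an illustrative name, while the plugin's actual implementation is `Scalyr::Common::Util.convert_bignums`, shown later in this diff:

```ruby
require 'json'

# Recursively replace Bignum values with strings so to_json cannot trip over
# them. Illustrative helper only; it mirrors the convert_bignums util added in
# this release.
def stringify_bignums(obj)
  if obj.is_a?(Hash)
    obj.each { |k, v| obj[k] = stringify_bignums(v) }
  elsif obj.is_a?(Array)
    obj.each_with_index { |v, i| obj[i] = stringify_bignums(v) }
  elsif obj.is_a?(Bignum)  # merged into Integer on Ruby >= 2.4
    obj.to_s
  else
    obj
  end
end

record = { 'bignumber' => 2000023030042002050202030320240 }
begin
  body = record.to_json
rescue Java::JavaLang::ClassCastException  # only raised (and resolvable) on JRuby
  stringify_bignums(record)
  body = record.to_json
end
```

The plugin applies the same fallback in `build_multi_event_request_array` and `create_multi_event_request` below, incrementing the new `total_java_class_cast_errors` counter each time the fallback path is taken.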
data/README.md CHANGED
@@ -10,7 +10,7 @@ You can view documentation for this plugin [on the Scalyr website](https://app.s
  # Quick start

  1. Build the gem, run `gem build logstash-output-scalyr.gemspec`
- 2. Install the gem into a Logstash installation, run `/usr/share/logstash/bin/logstash-plugin install logstash-output-scalyr-0.1.12.gem` or follow the latest official instructions on working with plugins from Logstash.
+ 2. Install the gem into a Logstash installation, run `/usr/share/logstash/bin/logstash-plugin install logstash-output-scalyr-0.1.17.beta.gem` or follow the latest official instructions on working with plugins from Logstash.
  3. Configure the output plugin (e.g. add it to a pipeline .conf)
  4. Restart Logstash

@@ -78,6 +78,10 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base

  # Initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`
  config :retry_initial_interval, :validate => :number, :default => 1
+ # How many times to retry sending an event before giving up on it
+ config :max_retries, :validate => :number, :default => 5
+ # Whether or not to send messages that failed to send a max_retries amount of times to the DLQ or just drop them
+ config :send_to_dlq, :validate => :boolean, :default => true

  # Set max interval in seconds between bulk retries.
  config :retry_max_interval, :validate => :number, :default => 64
@@ -105,6 +109,11 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  # minutes.
  config :status_report_interval, :validate => :number, :default => 300

+ # True to also call send_status when multi_receive() is called with no events.
+ # In some situations (e.g. when logstash is configured with multiple scalyr
+ # plugins conditionally where most are idle) you may want to set this to false
+ config :report_status_for_empty_batches, :validate => :boolean, :default => true
+
  # Set to true to also log status messages with various metrics to stdout in addition to sending
  # this data to Scalyr
  config :log_status_messages_to_stdout, :validate => :boolean, :default => false
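Tying the quick start and the options above together, a hypothetical pipeline snippet that exercises the settings added in this release (the token is a placeholder; `send_to_dlq` only has an effect when Logstash's dead letter queue is enabled, e.g. `dead_letter_queue.enable: true` in `logstash.yml`):

```
output {
  scalyr {
    api_write_token => "YOUR_WRITE_TOKEN"
    max_retries => 5                          # give up on a batch after 5 attempts (0.1.14.beta)
    send_to_dlq => true                       # route exhausted events to the DLQ instead of dropping them
    report_status_for_empty_batches => false  # skip status posts from idle plugin instances (0.1.15.beta)
  }
}
```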
@@ -231,7 +240,8 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  # Plugin level (either per batch or event level metrics). Other request
  # level metrics are handled by the HTTP Client class.
  @multi_receive_statistics = {
- :total_multi_receive_secs => 0
+ :total_multi_receive_secs => 0,
+ :total_java_class_cast_errors => 0
  }
  @plugin_metrics = get_new_metrics

@@ -247,7 +257,25 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  @logger.info(sprintf("Started Scalyr output plugin (%s)." % [PLUGIN_VERSION]), :class => self.class.name)

  # Finally, send a status line to Scalyr
- send_status
+ # We use a special separate short-lived client session for sending the initial client status.
+ # This is done to avoid the overhead in case a single logstash instance has many scalyr output
+ # plugins configured with conditionals and the majority of them are inactive (aka receive no data).
+ # This way we don't need to keep an idle long-running connection open.
+ initial_send_status_client_session = Scalyr::Common::Client::ClientSession.new(
+ @logger, @add_events_uri,
+ @compression_type, @compression_level, @ssl_verify_peer, @ssl_ca_bundle_path, @append_builtin_cert,
+ @record_stats_for_status, @flush_quantile_estimates_on_status_send,
+ @http_connect_timeout, @http_socket_timeout, @http_request_timeout, @http_pool_max, @http_pool_max_per_route
+ )
+ send_status(initial_send_status_client_session)
+ initial_send_status_client_session.close
+
+ # We also "prime" the main HTTP client here, the one which is used for sending subsequent requests.
+ # Here priming just means setting up the client parameters without opening any connections.
+ # Since the client writes certs to a temporary file there could be a race if we don't do that
+ # here, because multi_receive() is multi threaded. An alternative would be to put a lock around
+ # the client init method (aka client_config())
+ @client_session.client

  end # def register

@@ -319,6 +347,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  exc_retries += 1
  message = "Error uploading to Scalyr (will backoff-retry)"
  exc_data = {
+ :error_class => e.e_class,
  :url => e.url.to_s,
  :message => e.message,
  :batch_num => batch_num,
@@ -330,7 +359,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  exc_data[:code] = e.code if e.code
  if @logger.debug? and e.body
  exc_data[:body] = e.body
- elsif e.message == "Invalid JSON response from server" and e.body
+ elsif e.body
  exc_data[:body] = Scalyr::Common::Util.truncate(e.body, 512)
  end
  exc_data[:payload] = "\tSample payload: #{request[:body][0,1024]}..." if @logger.debug?
@@ -343,7 +372,9 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  @logger.error(message, exc_data)
  exc_commonly_retried = false
  end
- retry if @running
+ retry if @running and exc_retries < @max_retries
+ log_retry_failure(multi_event_request, exc_data, exc_retries, exc_sleep)
+ next

  rescue => e
  # Any unexpected errors should be fully logged
@@ -363,7 +394,9 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  }
  exc_sleep += sleep_interval
  exc_retries += 1
- retry if @running
+ retry if @running and exc_retries < @max_retries
+ log_retry_failure(multi_event_request, exc_data, exc_retries, exc_sleep)
+ next
  end

  if !exc_data.nil?
@@ -385,7 +418,10 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  end
  end

- send_status
+ if @report_status_for_empty_batches or records_count > 0
+ send_status
+ end
+
  return result

  rescue => e
@@ -400,6 +436,23 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  end # def multi_receive


+ def log_retry_failure(multi_event_request, exc_data, exc_retries, exc_sleep)
+ message = "Failed to send #{multi_event_request[:logstash_events].length} events after #{exc_retries} tries."
+ sample_events = Array.new
+ multi_event_request[:logstash_events][0,5].each {|l_event|
+ sample_events << Scalyr::Common::Util.truncate(l_event.to_hash.to_json, 256)
+ }
+ @logger.error(message, :error_data => exc_data, :sample_events => sample_events, :retries => exc_retries, :sleep_time => exc_sleep)
+ if @dlq_writer
+ multi_event_request[:logstash_events].each {|l_event|
+ @dlq_writer.write(l_event, "#{exc_data[:message]}")
+ }
+ else
+ @logger.warn("Dead letter queue not configured, dropping #{multi_event_request[:logstash_events].length} events after #{exc_retries} tries.", :sample_events => sample_events)
+ end
+ end
+
+
  # Builds an array of multi-event requests from LogStash events
  # Each array element is a request that groups multiple events (to be posted to Scalyr's addEvents endpoint)
  #
@@ -428,6 +481,8 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  current_threads = Hash.new
  # Create a Scalyr event object for each record in the chunk
  scalyr_events = Array.new
+ # Track the logstash events in each chunk to send them to the dlq in case of an error
+ l_events = Array.new

  thread_ids = Hash.new
  next_id = 1 #incrementing thread id for the session
@@ -607,6 +662,21 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  ).force_encoding('UTF-8')
  end
  event_json = scalyr_event.to_json
+ rescue Java::JavaLang::ClassCastException => e
+ # Most likely we ran into the issue described here: https://github.com/flori/json/issues/336
+ # Because of the version of jruby logstash works with we don't have the option to just update this away,
+ # so if we run into it we convert bignums into strings so we can get the data in at least.
+ # This is fixed in JRuby 9.2.7, which includes json 2.2.0
+ @logger.warn("Error serializing events to JSON, likely due to the presence of Bignum values. Converting Bignum values to strings.")
+ @stats_lock.synchronize do
+ @multi_receive_statistics[:total_java_class_cast_errors] += 1
+ end
+ Scalyr::Common::Util.convert_bignums(scalyr_event)
+ event_json = scalyr_event.to_json
+ log_json = nil
+ if add_log
+ log_json = logs[log_identifier].to_json
+ end
  end

  # generate new request if json size of events in the array exceed maximum request buffer size
@@ -619,9 +689,10 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  # make sure we always have at least one event
  if scalyr_events.size == 0
  scalyr_events << scalyr_event
+ l_events << l_event
  append_event = false
  end
- multi_event_request = self.create_multi_event_request(scalyr_events, current_threads, logs)
+ multi_event_request = self.create_multi_event_request(scalyr_events, l_events, current_threads, logs)
  multi_event_request_array << multi_event_request

  total_bytes = 0
@@ -629,19 +700,21 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  logs = Hash.new
  logs_ids = Hash.new
  scalyr_events = Array.new
+ l_events = Array.new
  end

  # if we haven't consumed the current event already
  # add it to the end of our array and keep track of the json bytesize
  if append_event
  scalyr_events << scalyr_event
+ l_events << l_event
  total_bytes += add_bytes
  end

  }

  # create a final request with any left over events
- multi_event_request = self.create_multi_event_request(scalyr_events, current_threads, logs)
+ multi_event_request = self.create_multi_event_request(scalyr_events, l_events, current_threads, logs)
  multi_event_request_array << multi_event_request
  multi_event_request_array
  end
@@ -659,7 +732,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  # A request comprises multiple Scalyr Events. This function creates a request hash for
  # final upload to Scalyr (from an array of events, and an optional hash of current threads)
  # Note: The request body field will be json-encoded.
- def create_multi_event_request(scalyr_events, current_threads, current_logs)
+ def create_multi_event_request(scalyr_events, logstash_events, current_threads, current_logs)

  body = {
  :session => @session_id + Thread.current.object_id.to_s,
@@ -692,10 +765,22 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base

  # We time serialization to get some insight on how long it takes to serialize the request body
  start_time = Time.now.to_f
- serialized_body = body.to_json
+ begin
+ serialized_body = body.to_json
+ rescue Java::JavaLang::ClassCastException => e
+ @logger.warn("Error serializing events to JSON, likely due to the presence of Bignum values. Converting Bignum values to strings.")
+ @stats_lock.synchronize do
+ @multi_receive_statistics[:total_java_class_cast_errors] += 1
+ end
+ Scalyr::Common::Util.convert_bignums(body)
+ serialized_body = body.to_json
+ end
  end_time = Time.now.to_f
  serialization_duration = end_time - start_time
- { :body => serialized_body, :record_count => scalyr_events.size, :serialization_duration => serialization_duration }
+ {
+ :body => serialized_body, :record_count => scalyr_events.size, :serialization_duration => serialization_duration,
+ :logstash_events => logstash_events
+ }

  end # def create_multi_event_request

@@ -744,7 +829,8 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  # Finally, note that there could be multiple instances of this plugin (one per worker), in which case each worker
  # thread sends their own status updates. This is intentional so that we know how much data each worker thread is
  # uploading to Scalyr over time.
- def send_status
+ def send_status(client_session = nil)
+ client_session = @client_session if client_session.nil?

  status_event = {
  :ts => (Time.now.to_f * (10**9)).round,
@@ -763,7 +849,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  # echee TODO: get instance stats from session and create a status log line
  msg = 'plugin_status: '
  cnt = 0
- @client_session.get_stats.each do |k, v|
+ client_session.get_stats.each do |k, v|
  val = v.instance_of?(Float) ? sprintf("%.4f", v) : v
  val = val.nil? ? 0 : val
  msg << ' ' if cnt > 0
@@ -781,19 +867,28 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  status_event[:attrs]['serverHost'] = @node_hostname
  status_event[:attrs]['parser'] = @status_parser
  end
+ multi_event_request = create_multi_event_request([status_event], nil, nil, nil)
+ begin
+ client_session.post_add_events(multi_event_request[:body], true, 0)
+ rescue => e
+ if e.body
+ @logger.warn(
+ "Unexpected error occurred while uploading status to Scalyr",
+ :error_message => e.message,
+ :error_class => e.class.name,
+ :body => Scalyr::Common::Util.truncate(e.body, 512)
+ )
+ else
+ @logger.warn(
+ "Unexpected error occurred while uploading status to Scalyr",
+ :error_message => e.message,
+ :error_class => e.class.name
+ )
+ end
+ return
+ end
+ @last_status_transmit_time = Time.now()
  end
- multi_event_request = create_multi_event_request([status_event], nil, nil)
- begin
- @client_session.post_add_events(multi_event_request[:body], true, 0)
- rescue => e
- @logger.warn(
- "Unexpected error occurred while uploading status to Scalyr",
- :error_message => e.message,
- :error_class => e.class.name
- )
- return
- end
- @last_status_transmit_time = Time.now()

  if @log_status_messages_to_stdout
  @logger.info msg
@@ -7,13 +7,14 @@ module Scalyr; module Common; module Client
  #---------------------------------------------------------------------------------------------------------------------
  class ServerError < StandardError

- attr_reader :code, :url, :body
+ attr_reader :code, :url, :body, :e_class

- def initialize(msg=nil, code=nil, url=nil, body=nil)
+ def initialize(msg=nil, code=nil, url=nil, body=nil, e_class="Scalyr::Common::Client::ServerError")
  super(msg)
  @code = code.to_i
  @url = url
  @body = body
+ @e_class = e_class
  end

  def is_commonly_retried?
@@ -33,13 +34,14 @@ end
  #---------------------------------------------------------------------------------------------------------------------
  class ClientError < StandardError

- attr_reader :code, :url, :body
+ attr_reader :code, :url, :body, :e_class

- def initialize(msg=nil, url=nil)
+ def initialize(msg=nil, url=nil, e_class="Scalyr::Common::Client::ClientError")
  super(msg)
  @code = nil # currently no way to get this from Net::HTTP::Persistent::Error
  @url = url
  @body = nil
+ @e_class = e_class
  end

  def is_commonly_retried?
@@ -236,15 +238,10 @@ class ClientSession
  bytes_received = response.body.bytesize # echee: double check
  # echee TODO add more statistics

- # TODO: Manticore doesn't raise SSL errors as this but as "UnknownExceptions", need to dig in and see if there is a
- # way to detect that it is from SSL.
- rescue OpenSSL::SSL::SSLError => e
- raise e
-
  rescue Manticore::ManticoreException => e
  # The underlying persistent-connection library automatically retries when there are network-related errors.
  # Eventually, it will give up and raise this generic error, at which time, we convert it to a ClientError
- raise ClientError.new(e.message, @add_events_uri)
+ raise ClientError.new(e.message, @add_events_uri, e.class.name)

  ensure
  if @record_stats_for_status or !is_status
@@ -270,6 +267,7 @@ class ClientSession


  def close
+ @client.close if @client
  end # def close


@@ -52,5 +52,26 @@ def self.truncate(content, max)
  return content
  end

+ def self.convert_bignums(obj)
+ if obj.respond_to?(:has_key?) and obj.respond_to?(:each)
+ # input object is a hash
+ obj.each do |key, value|
+ obj[key] = convert_bignums(value)
+ end
+
+ elsif obj.respond_to?(:each)
+ # input object is an array or set
+ obj.each_with_index do |value, index|
+ obj[index] = convert_bignums(value)
+ end
+
+ elsif obj.is_a? Bignum
+ return obj.to_s
+
+ else
+ return obj
+ end
+ end
+
  end; end; end;

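For a quick sense of how the new helper behaves, a hypothetical usage sketch (the require_relative path assumes you are at the repository root; the values are made up, and like the helper itself this relies on the `Bignum` constant present on the JRuby versions Logstash bundles):

```ruby
require_relative 'lib/scalyr/common/util'

record = {
  'status'    => 'ok',
  'bignumber' => 2000023030042002050202030320240,
  'nested'    => { 'ids' => [1, 200004000020304050300] }
}

# Walks the hash in place and replaces Bignum values with their string form,
# so a later to_json call cannot raise on the affected JRuby versions.
Scalyr::Common::Util.convert_bignums(record)
record['bignumber'].class  # => String
```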
@@ -1,2 +1,2 @@
  # encoding: utf-8
- PLUGIN_VERSION = "v0.1.12"
+ PLUGIN_VERSION = "v0.1.17.beta"
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-output-scalyr'
- s.version = '0.1.12'
+ s.version = '0.1.17.beta'
  s.licenses = ['Apache-2.0']
  s.summary = "Scalyr output plugin for Logstash"
  s.description = "Sends log data collected by Logstash to Scalyr (https://www.scalyr.com)"
@@ -0,0 +1,90 @@
+ require 'benchmark'
+ require 'quantile'
+
+ require_relative '../../lib/scalyr/common/util'
+
+ # Micro benchmark which measures how long it takes to find all the Bignums in a record and convert them to strings
+
+ ITERATIONS = 500
+
+ def rand_str(len)
+ return (0...len).map { (65 + rand(26)).chr }.join
+ end
+
+ def rand_bignum()
+ return 200004000020304050300 + rand(999999)
+ end
+
+ def generate_hash(widths)
+ result = {}
+ if widths.empty?
+ return rand_bignum()
+ else
+ widths[0].times do
+ result[rand_str(9)] = generate_hash(widths[1..widths.length])
+ end
+ return result
+ end
+ end
+
+ def generate_data_array_for_spec(spec)
+ data = []
+ ITERATIONS.times do
+ data << generate_hash(spec)
+ end
+
+ data
+ end
+
+ def run_benchmark_and_print_results(data, run_benchmark_func)
+ puts ""
+ puts "Using %s total keys in a hash" % [Scalyr::Common::Util.flatten(data[0]).count]
+ puts ""
+
+ result = []
+ ITERATIONS.times do |i|
+ result << Benchmark.measure { run_benchmark_func.(data[0]) }
+ end
+
+ sum = result.inject(nil) { |sum, t| sum.nil? ? sum = t : sum += t }
+ avg = sum / result.size
+
+ Benchmark.bm(7, "sum:", "avg:") do |b|
+ [sum, avg]
+ end
+ puts ""
+ end
+
+
+ puts "Using %s iterations" % [ITERATIONS]
+ puts ""
+
+ @value = Quantile::Estimator.new
+ @prng = Random.new
+
+ def convert_bignums(record)
+ Scalyr::Common::Util.convert_bignums(record)
+ end
+
+ puts "Util.convert_bignums()"
+ puts "==============================="
+
+ # Around ~200 keys in a hash
+ data = generate_data_array_for_spec([4, 4, 3, 4])
+ run_benchmark_and_print_results(data, method(:convert_bignums))
+
+ # Around ~200 keys in a hash (single level)
+ data = generate_data_array_for_spec([200])
+ run_benchmark_and_print_results(data, method(:convert_bignums))
+
+ # Around ~512 keys in a hash
+ data = generate_data_array_for_spec([8, 4, 4, 4])
+ run_benchmark_and_print_results(data, method(:convert_bignums))
+
+ # Around ~960 keys in a hash
+ data = generate_data_array_for_spec([12, 5, 4, 4])
+ run_benchmark_and_print_results(data, method(:convert_bignums))
+
+ # Around ~2700 keys in a hash
+ data = generate_data_array_for_spec([14, 8, 6, 4])
+ run_benchmark_and_print_results(data, method(:convert_bignums))
@@ -28,8 +28,11 @@ describe LogStash::Outputs::Scalyr do
  plugin = LogStash::Outputs::Scalyr.new({'api_write_token' => '1234'})
  plugin.register
  plugin.instance_variable_set(:@running, false)
- expect(plugin.instance_variable_get(:@logger)).to receive(:error).with("Error uploading to Scalyr (will backoff-retry)",
+ allow(plugin.instance_variable_get(:@logger)).to receive(:error)
+ plugin.multi_receive(sample_events)
+ expect(plugin.instance_variable_get(:@logger)).to have_received(:error).with("Error uploading to Scalyr (will backoff-retry)",
  {
+ :error_class=>"Scalyr::Common::Client::ServerError",
  :batch_num=>1,
  :code=>401,
  :message=>"error/client/badParam",
@@ -37,10 +40,10 @@ describe LogStash::Outputs::Scalyr do
  :record_count=>3,
  :total_batches=>1,
  :url=>"https://agent.scalyr.com/addEvents",
- :will_retry_in_seconds=>2
+ :will_retry_in_seconds=>2,
+ :body=>"{\n \"message\": \"Couldn't decode API token ...234.\",\n \"status\": \"error/client/badParam\"\n}"
  }
  )
- plugin.multi_receive(sample_events)
  end
  end

@@ -49,8 +52,11 @@ describe LogStash::Outputs::Scalyr do
  plugin = LogStash::Outputs::Scalyr.new({'api_write_token' => '1234', 'ssl_ca_bundle_path' => '/fakepath/nocerts', 'append_builtin_cert' => false})
  plugin.register
  plugin.instance_variable_set(:@running, false)
- expect(plugin.instance_variable_get(:@logger)).to receive(:error).with("Error uploading to Scalyr (will backoff-retry)",
+ allow(plugin.instance_variable_get(:@logger)).to receive(:error)
+ plugin.multi_receive(sample_events)
+ expect(plugin.instance_variable_get(:@logger)).to have_received(:error).with("Error uploading to Scalyr (will backoff-retry)",
  {
+ :error_class=>"Manticore::UnknownException",
  :batch_num=>1,
  :message=>"Unexpected error: java.security.InvalidAlgorithmParameterException: the trustAnchors parameter must be non-empty",
  :payload_size=>781,
@@ -60,7 +66,6 @@ describe LogStash::Outputs::Scalyr do
  :will_retry_in_seconds=>2
  }
  )
- plugin.multi_receive(sample_events)
  end
  end

@@ -73,8 +78,11 @@ describe LogStash::Outputs::Scalyr do
  plugin = LogStash::Outputs::Scalyr.new({'api_write_token' => '1234', 'append_builtin_cert' => false})
  plugin.register
  plugin.instance_variable_set(:@running, false)
- expect(plugin.instance_variable_get(:@logger)).to receive(:error).with("Error uploading to Scalyr (will backoff-retry)",
+ allow(plugin.instance_variable_get(:@logger)).to receive(:error)
+ plugin.multi_receive(sample_events)
+ expect(plugin.instance_variable_get(:@logger)).to have_received(:error).with("Error uploading to Scalyr (will backoff-retry)",
  {
+ :error_class=>"Manticore::UnknownException",
  :batch_num=>1,
  :message=>"Unexpected error: java.security.InvalidAlgorithmParameterException: the trustAnchors parameter must be non-empty",
  :payload_size=>781,
@@ -84,7 +92,6 @@ describe LogStash::Outputs::Scalyr do
  :will_retry_in_seconds=>2
  }
  )
- plugin.multi_receive(sample_events)
  end
  ensure
  `sudo mv /tmp/system_certs #{OpenSSL::X509::DEFAULT_CERT_DIR}`
@@ -110,8 +117,11 @@ describe LogStash::Outputs::Scalyr do
  plugin = LogStash::Outputs::Scalyr.new({'api_write_token' => '1234', 'scalyr_server' => 'https://invalid.mitm.should.fail.test.agent.scalyr.com:443'})
  plugin.register
  plugin.instance_variable_set(:@running, false)
- expect(plugin.instance_variable_get(:@logger)).to receive(:error).with("Error uploading to Scalyr (will backoff-retry)",
+ allow(plugin.instance_variable_get(:@logger)).to receive(:error)
+ plugin.multi_receive(sample_events)
+ expect(plugin.instance_variable_get(:@logger)).to have_received(:error).with("Error uploading to Scalyr (will backoff-retry)",
  {
+ :error_class=>"Manticore::UnknownException",
  :batch_num=>1,
  :message=>"Host name 'invalid.mitm.should.fail.test.agent.scalyr.com' does not match the certificate subject provided by the peer (CN=*.scalyr.com)",
  :payload_size=>781,
@@ -121,7 +131,6 @@ describe LogStash::Outputs::Scalyr do
  :will_retry_in_seconds=>2
  }
  )
- plugin.multi_receive(sample_events)
  ensure
  # Clean up the hosts file
  `sudo truncate -s 0 /etc/hosts`
@@ -129,6 +138,17 @@ describe LogStash::Outputs::Scalyr do
  end
  end
  end
+
+ context "when an error occurs with retries at 5" do
+ it "exits after 5 retries and emits a log" do
+ plugin = LogStash::Outputs::Scalyr.new({'retry_initial_interval' => 0.1, 'api_write_token' => '1234', 'ssl_ca_bundle_path' => '/fakepath/nocerts', 'append_builtin_cert' => false})
+ plugin.register
+ allow(plugin.instance_variable_get(:@logger)).to receive(:error)
+ plugin.multi_receive(sample_events)
+ expect(plugin.instance_variable_get(:@logger)).to have_received(:error).with("Failed to send 3 events after 5 tries.", anything
+ )
+ end
+ end
  end

  describe "response_handling_tests" do
@@ -145,6 +165,7 @@ describe LogStash::Outputs::Scalyr do
  plugin.multi_receive(sample_events)
  expect(plugin.instance_variable_get(:@logger)).to have_received(:debug).with("Error uploading to Scalyr (will backoff-retry)",
  {
+ :error_class=>"Scalyr::Common::Client::ServerError",
  :batch_num=>1,
  :code=>503,
  :message=>"Invalid JSON response from server",
@@ -172,6 +193,7 @@ describe LogStash::Outputs::Scalyr do
  plugin.multi_receive(sample_events)
  expect(plugin.instance_variable_get(:@logger)).to have_received(:error).with("Error uploading to Scalyr (will backoff-retry)",
  {
+ :error_class=>"Scalyr::Common::Client::ServerError",
  :batch_num=>1,
  :code=>500,
  :message=>"Invalid JSON response from server",
@@ -199,6 +221,7 @@ describe LogStash::Outputs::Scalyr do
  plugin.multi_receive(sample_events)
  expect(plugin.instance_variable_get(:@logger)).to have_received(:error).with("Error uploading to Scalyr (will backoff-retry)",
  {
+ :error_class=>"Scalyr::Common::Client::ServerError",
  :batch_num=>1,
  :code=>500,
  :message=>"Invalid JSON response from server",
@@ -212,6 +235,22 @@ describe LogStash::Outputs::Scalyr do
  )
  end
  end
+
+ context 'when DLQ is enabled' do
+ let(:dlq_writer) { double('DLQ writer') }
+ it 'should send the event to the DLQ' do
+ stub_request(:post, "https://agent.scalyr.com/addEvents").
+ to_return(status: 500, body: "stubbed response", headers: {})
+
+ plugin = LogStash::Outputs::Scalyr.new({'api_write_token' => '1234', 'ssl_ca_bundle_path' => '/fakepath/nocerts', 'append_builtin_cert' => false})
+ plugin.register
+ plugin.instance_variable_set(:@running, false)
+ plugin.instance_variable_set('@dlq_writer', dlq_writer)
+
+ expect(dlq_writer).to receive(:write).exactly(3).times.with(anything, anything)
+ plugin.multi_receive(sample_events)
+ end
+ end
  end

  end
@@ -69,7 +69,7 @@ describe LogStash::Outputs::Scalyr do
  end

  it "it doesnt include flatten metrics if flattening is disabled" do
- plugin1 = LogStash::Outputs::Scalyr.new({ 
+ plugin1 = LogStash::Outputs::Scalyr.new({
  'api_write_token' => '1234',
  'serverhost_field' => 'source_host',
  'log_constants' => ['tags'],
@@ -122,7 +122,7 @@ describe LogStash::Outputs::Scalyr do
  expect(status_event[:attrs]["message"]).to eq("plugin_status: total_requests_sent=20 total_requests_failed=10 total_request_bytes_sent=100 total_compressed_request_bytes_sent=50 total_response_bytes_received=100 total_request_latency_secs=100 total_serialization_duration_secs=100.5000 total_compression_duration_secs=10.2000 compression_type=deflate compression_level=9 total_multi_receive_secs=0 multi_receive_duration_p50=10 multi_receive_duration_p90=18 multi_receive_duration_p99=19 multi_receive_event_count_p50=0 multi_receive_event_count_p90=0 multi_receive_event_count_p99=0 event_attributes_count_p50=0 event_attributes_count_p90=0 event_attributes_count_p99=0 batches_per_multi_receive_p50=0 batches_per_multi_receive_p90=0 batches_per_multi_receive_p99=0 flatten_values_duration_secs_p50=0 flatten_values_duration_secs_p90=0 flatten_values_duration_secs_p99=0")
  end

- it "send_stats is called when events list is empty, but otherwise noop" do
+ it "send_stats is called when events list is empty, but otherwise is noop" do
  quantile_estimator = Quantile::Estimator.new
  plugin.instance_variable_set(:@plugin_metrics, {
  :multi_receive_duration_secs => Quantile::Estimator.new,
@@ -137,6 +137,30 @@ describe LogStash::Outputs::Scalyr do
  plugin.multi_receive([])
  end

+ it "send_stats is not called when events list is empty and report_status_for_empty_batches is false" do
+ plugin2 = LogStash::Outputs::Scalyr.new({
+ 'api_write_token' => '1234',
+ 'serverhost_field' => 'source_host',
+ 'log_constants' => ['tags'],
+ 'flatten_nested_values' => false,
+ 'report_status_for_empty_batches' => false,
+ })
+
+ mock_client_session = MockClientSession.new
+ quantile_estimator = Quantile::Estimator.new
+ plugin2.instance_variable_set(:@plugin_metrics, {
+ :multi_receive_duration_secs => Quantile::Estimator.new,
+ :multi_receive_event_count => Quantile::Estimator.new,
+ :event_attributes_count => Quantile::Estimator.new,
+ :flatten_values_duration_secs => Quantile::Estimator.new
+ })
+ plugin2.instance_variable_set(:@client_session, mock_client_session)
+ expect(plugin2).not_to receive(:send_status)
+ expect(quantile_estimator).not_to receive(:observe)
+ expect(mock_client_session).not_to receive(:post_add_events)
+ plugin2.multi_receive([])
+ end
+
  # Kind of a weak test but I don't see a decent way to write a stronger one without a live client session
  it "send_status only sends posts with is_status = true" do
  # 1. Initial send
@@ -318,5 +342,23 @@ describe LogStash::Outputs::Scalyr do
  })
  end
  end
+
+ context "when receiving an event with Bignums" do
+ config = {
+ 'api_write_token' => '1234',
+ }
+ plugin = LogStash::Outputs::Scalyr.new(config)
+ it "doesn't throw an error" do
+ allow(plugin).to receive(:send_status).and_return(nil)
+ plugin.register
+ e = LogStash::Event.new
+ e.set('bignumber', 2000023030042002050202030320240)
+ allow(plugin.instance_variable_get(:@logger)).to receive(:error)
+ result = plugin.build_multi_event_request_array([e])
+ body = JSON.parse(result[0][:body])
+ expect(body['events'].size).to eq(1)
+ expect(plugin.instance_variable_get(:@logger)).to_not receive(:error)
+ end
+ end
  end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-scalyr
  version: !ruby/object:Gem::Version
- version: 0.1.12
+ version: 0.1.17.beta
  platform: ruby
  authors:
  - Edward Chee
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-06-15 00:00:00.000000000 Z
+ date: 2021-07-06 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
@@ -132,6 +132,7 @@ files:
  - lib/scalyr/common/util.rb
  - lib/scalyr/constants.rb
  - logstash-output-scalyr.gemspec
+ - spec/benchmarks/bignum_fixing.rb
  - spec/benchmarks/flattening_and_serialization.rb
  - spec/benchmarks/metrics_overhead.rb
  - spec/logstash/outputs/scalyr_integration_spec.rb
@@ -4052,9 +4053,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: '0'
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - ">"
  - !ruby/object:Gem::Version
- version: '0'
+ version: 1.3.1
  requirements: []
  rubyforge_project:
  rubygems_version: 2.7.10
@@ -4062,6 +4063,7 @@ signing_key:
  specification_version: 4
  summary: Scalyr output plugin for Logstash
  test_files:
+ - spec/benchmarks/bignum_fixing.rb
  - spec/benchmarks/flattening_and_serialization.rb
  - spec/benchmarks/metrics_overhead.rb
  - spec/logstash/outputs/scalyr_integration_spec.rb