logstash-output-scalyr 0.2.8.beta → 0.2.9.beta

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: da91d80d418081307dc1a6b756f17024712c0eab9a6d527a140288976a58b76f
- data.tar.gz: 2ac792b42939edb371614c73c4dbb50fcc835f199d03685ff619d9d355c6465a
+ metadata.gz: a0d3f1dbfffae370fea3b6f119ce439c66358c5ba8f3bec03fec386a48f034c5
+ data.tar.gz: 66c5224a53503e11e3c48ce8bf7768eb8d3284a56c205f6505254c74f4655bdc
  SHA512:
- metadata.gz: 0a87ca7115a2eb3f03282046510bd51869e575eb29b8bec6ee273cbc9f68fea0167890835ffdda8bd0d571ac4299aff689642542d8dd5d88eaa2a89fe760a3f0
- data.tar.gz: 7457fff5003d10b7288a9a958f3ec3f4a490c19da51a3ac3ade0402a37f74fafffc2501185857b4b3a7263753cb6af3276471724870efa0dca9e0773671b2f3f
+ metadata.gz: fc609b65672aaa0d32888d81c2f7e811b73a91bf6419ef44225c05e6945f3e6a6bc76eccc5b831db95c119fbe11fe2ccc318876b06cbf0893ebc10f286a60f9e
+ data.tar.gz: 4b4130bd8cdeb85ae5122f4272a8d708ab4442dfb88a87e010bfc7f1cf428b687ea96b153fbc4458eb4a5ae9bddb3454daea1f6171368a71dca84930a71494ac
data/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # Beta
 
+ ## 0.2.9.beta
+
+ * Introduce new plugin config options which allow failed HTTP request retry behavior to be
+ configured differently for different sets of errors (see the option sketch just after this diff).
+
+ Those options should be left as-is unless instructed otherwise by the DataSet support team.
+
+ * Use longer retry delays where we don't want to retry as soon as possible (e.g. deploy related
+ errors or the client being throttled by the server).
+
+ * Update the context which is logged with errors for retried HTTP requests to also include the
+ ``total_retries_so_far`` and ``total_sleep_time_so_far`` attributes.
+
+ * Add a new ``retry_backoff_factor`` config option which lets the user change the retry backoff
+ factor (exponential delay) from its default value of 2.
+
  ## 0.2.8.beta
 
  * Update ``.gemspec`` gem metadata to not include ``spec/`` directory with the tests and tests
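For orientation, the options introduced by this entry and their defaults (taken from the `config` declarations in the plugin diff below) can be written out as the kind of option hash the new `RetryStateTracker` consumes. This is a sketch only; in practice these are set in the `scalyr` output block of a Logstash pipeline config:

```ruby
# Hypothetical option hash; every key and default below comes from the
# `config` declarations added in this release.
plugin_config = {
  # general (non-deploy, non-throttling) errors
  "retry_initial_interval" => 1,
  "max_retries" => 15,
  "retry_max_interval" => 64,
  "retry_backoff_factor" => 2,
  # deploy related errors
  "retry_initial_interval_deploy_errors" => 30,
  "max_retries_deploy_errors" => 5,
  "retry_max_interval_deploy_errors" => 64,
  "retry_backoff_factor_deploy_errors" => 1.5,
  # throttling related errors
  "retry_initial_interval_throttling_errors" => 20,
  "max_retries_throttling_errors" => 6,
  "retry_max_interval_throttling_errors" => 64,
  "retry_backoff_factor_throttling_errors" => 1.5,
}
```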
data/Gemfile CHANGED
@@ -11,10 +11,13 @@ if Dir.exist?(logstash_path) && use_logstash_source
  end
 
  group :test do
- gem "webmock"
+ gem "webmock", "~> 3.18.1"
+
+ # Newer versions depend on Ruby >= 2.6
+ gem "rubocop", "~> 1.28.2"
 
  # Require the specific version of `json` used in logstash while testing
- gem 'json', '1.8.6'
+ gem 'json', '2.6.2'
  end
 
  gem 'pry'
@@ -4,7 +4,9 @@ require "logstash/namespace"
  require "concurrent"
  require "stud/buffer"
  require "socket" # for Socket.gethostname
+ # rubocop:disable Lint/RedundantRequireStatement
  require "thread" # for safe queueing
+ # rubocop:enable Lint/RedundantRequireStatement
  require "uri" # for escaping user input
  require 'json' # for converting event object to JSON for upload
 
@@ -118,18 +120,50 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  config :flat_tag_prefix, :validate => :string, :default => 'tag_'
  config :flat_tag_value, :default => 1
 
- # Initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`
+ #####
+ ## Retry settings for non-deploy and non-throttling related errors
+ #####
+
+ # Initial interval in seconds between bulk retries. Doubled (by default; can be overridden using
+ # the retry_backoff_factor config option) on each retry up to `retry_max_interval`
  config :retry_initial_interval, :validate => :number, :default => 1
+
  # How many times to retry sending an event before giving up on it
  # This will result in a total of around 12 minutes of retrying / sleeping with a default value
  # for retry_max_interval
  config :max_retries, :validate => :number, :default => 15
- # Whether or not to send messages that failed to send a max_retries amount of times to the DLQ or just drop them
- config :send_to_dlq, :validate => :boolean, :default => true
 
  # Set max interval in seconds between bulk retries.
  config :retry_max_interval, :validate => :number, :default => 64
 
+ # Backoff factor for retries. We default to 2 (exponential retry delay).
+ config :retry_backoff_factor, :validate => :number, :default => 2
+
+ #####
+ ## Retry settings for deploy related errors
+ #####
+
+ config :retry_initial_interval_deploy_errors, :validate => :number, :default => 30
+ config :max_retries_deploy_errors, :validate => :number, :default => 5
+ config :retry_max_interval_deploy_errors, :validate => :number, :default => 64
+ config :retry_backoff_factor_deploy_errors, :validate => :number, :default => 1.5
+
+ #####
+ ## Retry settings for throttling related errors
+ #####
+
+ config :retry_initial_interval_throttling_errors, :validate => :number, :default => 20
+ config :max_retries_throttling_errors, :validate => :number, :default => 6
+ config :retry_max_interval_throttling_errors, :validate => :number, :default => 64
+ config :retry_backoff_factor_throttling_errors, :validate => :number, :default => 1.5
+
+ #####
+ ## Common retry related settings
+ #####
+
+ # Whether or not to send messages that failed to send a max_retries amount of times to the DLQ or just drop them
+ config :send_to_dlq, :validate => :boolean, :default => true
+
  # Whether or not to verify the connection to Scalyr, only set to false for debugging.
  config :ssl_verify_peer, :validate => :boolean, :default => true
 
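The "around 12 minutes" comment above can be sanity-checked from the defaults (initial interval 1 s, backoff factor 2, cap 64 s, 15 retries). A throwaway sketch, not plugin code:

```ruby
# Total sleep across 15 retries: the delay starts at retry_initial_interval,
# is multiplied by retry_backoff_factor after each attempt, and is capped
# at retry_max_interval.
interval = 1
total = 0
15.times do
  total += interval
  interval = [interval * 2, 64].min
end
puts total  # => 639 seconds of sleep; with request time on top, roughly
            #    the "around 12 minutes" the comment describes
```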
@@ -224,7 +258,6 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  @client_session.close if @client_session
  end
 
- public
  def register
  # This prng is used exclusively to determine when to sample statistics and no security related purpose, for this
  # reason we do not ensure thread safety for it.
@@ -349,6 +382,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  :successful_events_processed => 0,
  :failed_events_processed => 0,
  :total_retry_count => 0,
+ :total_retry_duration_secs => 0,
  :total_java_class_cast_errors => 0
  }
  @plugin_metrics = get_new_metrics
@@ -455,7 +489,6 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  # Also note that event uploads are broken up into batches such that each batch is less than max_request_buffer.
  # Increasing max_request_buffer beyond 3MB will lead to failed requests.
  #
- public
  def multi_receive(events)
  # Just return and pretend we did something if running in noop mode
  return events if @noop_mode
@@ -471,7 +504,6 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  build_multi_duration_secs = Time.now.to_f - start_time
 
  # Loop over the array of multi-event requests, sending each multi-event to Scalyr
- sleep_interval = @retry_initial_interval
  batch_num = 1
  total_batches = multi_event_request_array.length unless multi_event_request_array.nil?
 
@@ -485,17 +517,20 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  exc_data = nil
  # Whether the exception is commonly retried or not, for determining log level
  exc_commonly_retried = false
- # Count of retries attempted for this request
- exc_retries = 0
- # Total time spent sleeping while retrying this request due to backoff
- exc_sleep = 0
+
+ # We use a new, clean retry state object for each request.
+ # Since @running is only available directly on the output plugin instance and we don't
+ # want to create a cyclic reference between the output and the state tracker instance,
+ # we pass this lambda to the state tracker
+ is_plugin_running = lambda { @running }
+
+ retry_state = RetryStateTracker.new(@config, is_plugin_running)
+
  begin
  # For some reason a retry on the multi_receive may result in the request array containing `nil` elements; we
  # ignore these.
  if !multi_event_request.nil?
  @client_session.post_add_events(multi_event_request[:body], false, multi_event_request[:serialization_duration])
-
- sleep_interval = @retry_initial_interval
  batch_num += 1
  result.push(multi_event_request)
  end
@@ -519,12 +554,14 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  log_retry_failure(multi_event_request, exc_data, 0, 0)
  next
  rescue Scalyr::Common::Client::ServerError, Scalyr::Common::Client::ClientError => e
- sleep_interval = sleep_for(sleep_interval)
- exc_sleep += sleep_interval
- exc_retries += 1
+ previous_state = retry_state.get_state_for_error(e)
+ updated_state = retry_state.sleep_for_error_and_update_state(e)
+
  @stats_lock.synchronize do
  @multi_receive_statistics[:total_retry_count] += 1
+ @multi_receive_statistics[:total_retry_duration_secs] += updated_state[:sleep_interval]
  end
+
  message = "Error uploading to Scalyr (will backoff-retry)"
  exc_data = {
  :error_class => e.e_class,
@@ -534,7 +571,15 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  :total_batches => total_batches,
  :record_count => multi_event_request[:record_count],
  :payload_size => multi_event_request[:body].bytesize,
- :will_retry_in_seconds => sleep_interval,
+ # retry related values
+ :max_retries => updated_state[:options][:max_retries],
+ :retry_backoff_factor => updated_state[:options][:retry_backoff_factor],
+ :retry_max_interval => updated_state[:options][:retry_max_interval],
+ :will_retry_in_seconds => updated_state[:sleep_interval],
+ # to get values which include this next retry, you need to add +1
+ # to :total_retries_so_far and +:sleep_interval to :total_sleep_time_so_far
+ :total_retries_so_far => previous_state[:retries],
+ :total_sleep_time_so_far => previous_state[:sleep],
  }
  exc_data[:code] = e.code if e.code
  if @logger.debug? and defined?(e.body) and e.body
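As the inline comment notes, `:total_retries_so_far` and `:total_sleep_time_so_far` exclude the retry that is about to happen. A hypothetical consumer of this log context can derive the inclusive totals from the same fields:

```ruby
# Sketch; exc_data is the hash built above.
retries_including_next = exc_data[:total_retries_so_far] + 1
sleep_including_next = exc_data[:total_sleep_time_so_far] + exc_data[:will_retry_in_seconds]
```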
@@ -552,8 +597,9 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  @logger.warn(message, exc_data)
  exc_commonly_retried = false
  end
- retry if @running and exc_retries < @max_retries
- log_retry_failure(multi_event_request, exc_data, exc_retries, exc_sleep)
+
+ retry if @running and updated_state[:retries] < updated_state[:options][:max_retries]
+ log_retry_failure(multi_event_request, exc_data, updated_state[:retries], updated_state[:sleep])
  next
 
  rescue => e
@@ -565,20 +611,22 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  :backtrace => e.backtrace
  )
  @logger.debug("Failed multi_event_request", :multi_event_request => multi_event_request)
- sleep_interval = sleep_for(sleep_interval)
+
+ updated_state = retry_state.sleep_for_error_and_update_state(e)
  exc_data = {
  :error_message => e.message,
  :error_class => e.class.name,
  :backtrace => e.backtrace,
  :multi_event_request => multi_event_request
  }
- exc_sleep += sleep_interval
- exc_retries += 1
+
  @stats_lock.synchronize do
  @multi_receive_statistics[:total_retry_count] += 1
+ @multi_receive_statistics[:total_retry_duration_secs] += updated_state[:sleep_interval]
  end
- retry if @running and exc_retries < @max_retries
- log_retry_failure(multi_event_request, exc_data, exc_retries, exc_sleep)
+
+ retry if @running and updated_state[:retries] < updated_state[:options][:max_retries]
+ log_retry_failure(multi_event_request, exc_data, updated_state[:retries], updated_state[:sleep])
  next
  end
 
@@ -590,9 +638,9 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  if !exc_data.nil?
  message = "Retry successful after error."
  if exc_commonly_retried
- @logger.debug(message, :error_data => exc_data, :retries => exc_retries, :sleep_time => exc_sleep)
+ @logger.debug(message, :error_data => exc_data, :retries => updated_state[:retries], :sleep_time => updated_state[:sleep_interval])
  else
- @logger.info(message, :error_data => exc_data, :retries => exc_retries, :sleep_time => exc_sleep)
+ @logger.info(message, :error_data => exc_data, :retries => updated_state[:retries], :sleep_time => updated_state[:sleep_interval])
  end
  end
  end
@@ -864,7 +912,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  if @flatten_nested_values
  start_time = Time.now.to_f
  begin
- record = Scalyr::Common::Util.flatten(record, delimiter=@flatten_nested_values_delimiter, flatten_arrays=@flatten_nested_arrays, fix_deep_flattening_delimiters=@fix_deep_flattening_delimiters, max_key_count=@flattening_max_key_count)
+ record = Scalyr::Common::Util.flatten(record, @flatten_nested_values_delimiter, @flatten_nested_arrays, @fix_deep_flattening_delimiters, @flattening_max_key_count)
  rescue Scalyr::Common::Util::MaxKeyCountError => e
  @logger.warn("Error while flattening record", :error_message => e.message, :sample_keys => e.sample_keys)
  end
@@ -938,7 +986,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  ).force_encoding('UTF-8')
  end
  event_json = self.json_encode(scalyr_event)
- rescue Java::JavaLang::ClassCastException => e
+ rescue Java::JavaLang::ClassCastException
  # Most likely we ran into the issue described here: https://github.com/flori/json/issues/336
  # Because of the version of jruby logstash works with we don't have the option to just update this away,
  # so if we run into it we convert bignums into strings so we can get the data in at least.
@@ -1042,7 +1090,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  # build the scalyr thread logs object
  if current_logs
  logs = Array.new
- current_logs.each do |identifier, log|
+ current_logs.each do |_identifier, log|
  logs << log
  end
  body[:logs] = logs
@@ -1061,7 +1109,7 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  start_time = Time.now.to_f
  begin
  serialized_body = self.json_encode(body)
- rescue Java::JavaLang::ClassCastException => e
+ rescue Java::JavaLang::ClassCastException
  @logger.warn("Error serializing events to JSON, likely due to the presence of Bignum values. Converting Bignum values to strings.")
  @stats_lock.synchronize do
  @multi_receive_statistics[:total_java_class_cast_errors] += 1
@@ -1161,14 +1209,14 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  val = v.instance_of?(Float) ? sprintf("%.4f", v) : v
  val = val.nil? ? 0 : val
  msg << ' ' if cnt > 0
- msg << "#{k.to_s}=#{val}"
+ msg << "#{k}=#{val}"
  cnt += 1
  end
  get_stats.each do |k, v|
  val = v.instance_of?(Float) ? sprintf("%.4f", v) : v
  val = val.nil? ? 0 : val
  msg << ' ' if cnt > 0
- msg << "#{k.to_s}=#{val}"
+ msg << "#{k}=#{val}"
  cnt += 1
  end
  status_event[:attrs]['message'] = msg
@@ -1223,26 +1271,97 @@ class LogStash::Outputs::Scalyr < LogStash::Outputs::Base
  end
  end
 
+ # Helper method to check if the dead-letter queue is enabled
+ def dlq_enabled?
+ # echee TODO submit to DLQ
+ respond_to?(:execution_context) && execution_context.respond_to?(:dlq_writer) &&
+ !execution_context.dlq_writer.inner_writer.is_a?(::LogStash::Util::DummyDeadLetterQueueWriter)
+ end
+ end
 
- # Helper method that performs synchronous sleep for a certain time interval
- def sleep_for(sleep_interval)
- Stud.stoppable_sleep(sleep_interval) { !@running }
- get_sleep_sec(sleep_interval)
+ # Class which allows us to track retry related settings and state for different types of errors for
+ # which we use different retry settings (e.g. general errors vs errors during deploy windows vs
+ # client throttled errors).
+ class RetryStateTracker
+
+ def initialize(plugin_config, is_plugin_running_method)
+ # :retries - stores the number of times we have retried so far
+ # :sleep - stores the total duration (in seconds) we have slept / waited so far
+ # :sleep_interval - stores the sleep interval / delay (in seconds) for the next retry
+ @STATE = {
+ :deploy_errors => {
+ :retries => 0,
+ :sleep => 0,
+ :sleep_interval => plugin_config["retry_initial_interval_deploy_errors"],
+ :options => {
+ :retry_initial_interval => plugin_config["retry_initial_interval_deploy_errors"],
+ :max_retries => plugin_config["max_retries_deploy_errors"],
+ :retry_max_interval => plugin_config["retry_max_interval_deploy_errors"],
+ :retry_backoff_factor => plugin_config["retry_backoff_factor_deploy_errors"],
+ }
+ },
+ :throttling_errors => {
+ :retries => 0,
+ :sleep => 0,
+ :sleep_interval => plugin_config["retry_initial_interval_throttling_errors"],
+ :options => {
+ :retry_initial_interval => plugin_config["retry_initial_interval_throttling_errors"],
+ :max_retries => plugin_config["max_retries_throttling_errors"],
+ :retry_max_interval => plugin_config["retry_max_interval_throttling_errors"],
+ :retry_backoff_factor => plugin_config["retry_backoff_factor_throttling_errors"],
+ }
+ },
+ :other_errors => {
+ :retries => 0,
+ :sleep => 0,
+ :sleep_interval => plugin_config["retry_initial_interval"],
+ :options => {
+ :retry_initial_interval => plugin_config["retry_initial_interval"],
+ :max_retries => plugin_config["max_retries"],
+ :retry_max_interval => plugin_config["retry_max_interval"],
+ :retry_backoff_factor => plugin_config["retry_backoff_factor"],
+ }
+ },
+ }
+
+ @is_plugin_running_method = is_plugin_running_method
  end
 
+ # Return state hash for a specific error
+ def get_state_for_error(error)
+ if error.instance_of?(Scalyr::Common::Client::ClientThrottledError)
+ return @STATE[:throttling_errors]
+ elsif error.instance_of?(Scalyr::Common::Client::DeployWindowError)
+ return @STATE[:deploy_errors]
+ else
+ return @STATE[:other_errors]
+ end
+ end
 
- # Helper method that gets the next sleep time for exponential backoff, capped at a defined maximum
- def get_sleep_sec(current_interval)
- doubled = current_interval * 2
- doubled > @retry_max_interval ? @retry_max_interval : doubled
+ def get_state()
+ @STATE
  end
 
+ # Helper method that performs a synchronous sleep for a certain time interval for a specific
+ # error and updates internal error specific state. It also returns the updated internal state
+ # specific to that error.
+ def sleep_for_error_and_update_state(error)
+ # Sleep for a specific duration
+ state = get_state_for_error(error)
 
- # Helper method to check if the dead-letter queue is enabled
- def dlq_enabled?
- # echee TODO submit to DLQ
- respond_to?(:execution_context) && execution_context.respond_to?(:dlq_writer) &&
- !execution_context.dlq_writer.inner_writer.is_a?(::LogStash::Util::DummyDeadLetterQueueWriter)
+ current_interval = state[:sleep_interval]
+
+ Stud.stoppable_sleep(current_interval) { !@is_plugin_running_method.call }
+
+ # Update internal state + sleep interval for the next retry
+ updated_interval = current_interval * state[:options][:retry_backoff_factor]
+ updated_interval = updated_interval > state[:options][:retry_max_interval] ? state[:options][:retry_max_interval] : updated_interval
+
+ state[:retries] += 1
+ state[:sleep] += current_interval
+ state[:sleep_interval] = updated_interval
+
+ state
  end
  end
 
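A usage sketch for the tracker, with hypothetical wiring that mirrors what `multi_receive` does above (`plugin_config` is an option hash like the one sketched after the changelog; the values shown are the throttling defaults):

```ruby
is_plugin_running = lambda { true }  # the plugin passes a lambda that reads @running
tracker = RetryStateTracker.new(plugin_config, is_plugin_running)

error = Scalyr::Common::Client::ClientThrottledError.new("throttled", 429)
state = tracker.sleep_for_error_and_update_state(error)  # sleeps 20 s with defaults

state[:retries]        # => 1
state[:sleep]          # => 20    total seconds slept so far
state[:sleep_interval] # => 30.0  next delay: 20 * 1.5 backoff factor, capped at 64
```

Note that `get_state_for_error` returns the live state hash rather than a copy, so a caller that wants the pre-sleep counters (as the `previous_state` lookup in `multi_receive` appears to intend) has to read them out before calling `sleep_for_error_and_update_state`.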
@@ -41,6 +41,24 @@ class PayloadTooLargeError < ServerError;
  end
  end
 
+ #---------------------------------------------------------------------------------------------------------------------
+ # An exception that signifies an error which occurred during a Scalyr deploy window
+ #---------------------------------------------------------------------------------------------------------------------
+ class DeployWindowError < ServerError;
+ def initialize(msg=nil, code=nil, url=nil, body=nil, e_class="Scalyr::Common::Client::DeployWindowError")
+ super(msg, code, url, body, e_class)
+ end
+ end
+
+ #---------------------------------------------------------------------------------------------------------------------
+ # An exception that signifies that the client has been throttled by the server
+ #---------------------------------------------------------------------------------------------------------------------
+ class ClientThrottledError < ServerError;
+ def initialize(msg=nil, code=nil, url=nil, body=nil, e_class="Scalyr::Common::Client::ClientThrottledError")
+ super(msg, code, url, body, e_class)
+ end
+ end
+
  #---------------------------------------------------------------------------------------------------------------------
  # An exception representing failure of the http client to upload data to Scalyr (in contrast to server-side errors
  # where the POST api succeeds, but the Scalyr server then responds with an error)
@@ -187,7 +205,7 @@ class ClientSession
  # Send "ping" request to the API. This is mostly used to test the connection with the Scalyr API
  # and verify that the API key is valid.
  def send_ping(body)
- post_body, post_headers, compression_duration = prepare_post_object @add_events_uri.path, body
+ post_body, post_headers, _ = prepare_post_object @add_events_uri.path, body
  response = client.send(:post, @add_events_uri, body: post_body, headers: post_headers)
  handle_response(response)
 
@@ -248,7 +266,7 @@ class ClientSession
 
  # Prepare a post object to be sent, compressing it if necessary
  private
- def prepare_post_object(uri_path, body)
+ def prepare_post_object(_uri_path, body)
  # use compression if enabled
  encoding = nil
  compression_duration = 0
@@ -325,6 +343,10 @@ class ClientSession
  if status != "success"
  if code == 413
  raise PayloadTooLargeError.new(status, response.code, @add_events_uri, response.body)
+ elsif [530, 500].include?(code)
+ raise DeployWindowError.new(status, response.code, @add_events_uri, response.body)
+ elsif code == 429
+ raise ClientThrottledError.new(status, response.code, @add_events_uri, response.body)
  elsif status =~ /discardBuffer/
  raise RequestDroppedError.new(status, response.code, @add_events_uri, response.body)
  else
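Condensed, the non-success handling above now maps status codes onto the new exception types, which is what routes a failure to the matching retry bucket in `RetryStateTracker`. A sketch of just those branches (the surrounding method provides `code`, `status`, and `response`):

```ruby
case code
when 413       # payload exceeded the request size limit
  raise PayloadTooLargeError.new(status, response.code, @add_events_uri, response.body)
when 530, 500  # treated as a deploy window; retried with the *_deploy_errors settings
  raise DeployWindowError.new(status, response.code, @add_events_uri, response.body)
when 429       # client throttled; retried with the *_throttling_errors settings
  raise ClientThrottledError.new(status, response.code, @add_events_uri, response.body)
end
```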
@@ -4,6 +4,7 @@ class MaxKeyCountError < StandardError
  attr_reader :message, :sample_keys
 
  def initialize(message, sample_keys)
+ super(message)
  @message = message
  @sample_keys = sample_keys
  end
@@ -32,7 +33,6 @@ def self.flatten(hash_obj, delimiter='_', flatten_arrays=true, fix_deep_flatteni
  key_list = []
  key_list_width = []
  result = Hash.new
- test_key = 0
  #Debugging
  #require 'pry'
  #binding.pry
@@ -81,10 +81,10 @@ def self.flatten(hash_obj, delimiter='_', flatten_arrays=true, fix_deep_flatteni
  )
  end
 
- throw_away = key_list.pop
+ key_list.pop
  until key_list_width.empty? or key_list_width[-1] > 1
- throw_away = key_list_width.pop
- throw_away = key_list.pop
+ key_list_width.pop
+ key_list.pop
  end
  if not key_list_width.empty?
  key_list_width[-1] -= 1
@@ -116,7 +116,7 @@ def self.convert_bignums(obj)
  obj[index] = convert_bignums(value)
  end
 
- elsif obj.is_a? Bignum
+ elsif obj.is_a? Integer
  return obj.to_s
 
  else
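The `Bignum` to `Integer` change tracks Ruby itself: 2.4 merged `Fixnum` and `Bignum` into `Integer`, and the old constants were later removed entirely (Ruby 3.2). A side effect worth noting is that on this fallback path any integer, not only a former Bignum, is now stringified. A quick illustration (hypothetical call, using the array branch visible above):

```ruby
Scalyr::Common::Util.convert_bignums([2**100, 1])
# => ["1267650600228229401496703205376", "1"]
```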
@@ -1,6 +1,6 @@
  # encoding: utf-8
 
- PLUGIN_VERSION = "v0.2.8.beta"
+ PLUGIN_VERSION = "v0.2.9.beta"
 
  # Special event level attribute name which can be used for setting event level serverHost attribute
  EVENT_LEVEL_SERVER_HOST_ATTRIBUTE_NAME = '__origServerHost'
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-output-scalyr'
- s.version = '0.2.8.beta'
+ s.version = '0.2.9.beta'
  s.licenses = ['Apache-2.0']
  s.summary = "Scalyr output plugin for Logstash"
  s.description = "Sends log data collected by Logstash to Scalyr (https://www.scalyr.com)"
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-scalyr
  version: !ruby/object:Gem::Version
- version: 0.2.8.beta
+ version: 0.2.9.beta
  platform: ruby
  authors:
  - Edward Chee
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-10-20 00:00:00.000000000 Z
+ date: 2022-11-08 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement