logdna 1.4.2 → 1.5.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 61fb8e8caddc3258145cc8d0d6f608822b3afca192012c6798e3cf6924b12cd6
4
- data.tar.gz: 5c60cab47160976899feef07a87c0e79fc196c6931175db12e9becaaf3fa7ac4
3
+ metadata.gz: 32488a458ed8004dcb65531121c286ae1df1b5b3586cf146bb5e973e24e0f559
4
+ data.tar.gz: 03f487fbff81de61177296ec51849659c66391d7f16a00a2d170e7d03971eaf9
5
5
  SHA512:
6
- metadata.gz: 0f4eb111244e049c6ae038a98509bad947eb9fd25ebd5dd51786af64881eca54a08e9f8aaa88e58533e09faaafffd7374b3db64ddfadb17cbe14e88c6a0a7429
7
- data.tar.gz: 6da73271847ed79db1304730e6749e9d19bde782fa7924420d669e934039fb3f78b1f71a48df712abb64c1e6bf140c8beb7b3018c625370cb3eec12e9e3c4df0
6
+ metadata.gz: 5a525a02bc844af91ecc52e324ebc9c1323c26ff0fada9cfb32092b9d57f84b7c8dacd7239e8ffb4d36caca3b89533b0fac577a17e3264643c2678368b7abc10
7
+ data.tar.gz: fa9721cf9605ab44e08d6222a3d8a222c0c0b3eca7802db673ab7a2c3c8bc840ddb060197a39ddb7b5aa149b93fc26d197c09c7c10a54e84f4579d00f7b8ed61
data/README.md CHANGED
@@ -123,8 +123,12 @@ Instantiates a new instance of the class it is called on. ingestion_key is requi
123
123
  |{ :env => STAGING, PRODUCTION .. etc} | Nil |
124
124
  |{ :meta => metadata} | Nil |
125
125
  |{ :endpoint => LogDNA Ingestion URI } | 'https://logs.logdna.com/logs/ingest' |
126
- |{ :flushtime => Log flush interval in seconds } | 0.25 seconds |
127
- |{ :flushbyte => Log flush upper limit in bytes } | 500000 bytes ~= 0.5 megabytes |
126
+ |{ :flush_interval => Limit to trigger a flush in seconds } | 0.25 seconds |
127
+ |{ :flush_size => Limit to trigger a flush in bytes } | 2097152 bytes = 2 MiB |
128
+ |{ :request_size => Upper limit of request in bytes } | 2097152 bytes = 2 MiB |
129
+ |{ :retry_timeout => Base timeout for retries in seconds } | 0.25 seconds |
130
+ |{ :retry_max_attempts => Maximum number of retries per request } | 3 attempts |
131
+ |{ :retry_max_jitter => Maximum amount of jitter to add to each retry request in seconds } | 0.25 seconds |
128
132
 
129
133
  Different log level displays log messages in different colors as well.
130
134
  - ![TRACE DEBUG INFO Colors](https://placehold.it/15/515151/000000?text=+) "Trace" "Debug" "Info"
@@ -3,12 +3,13 @@
3
3
  require "logger"
4
4
  require "socket"
5
5
  require "uri"
6
- require_relative "logdna/client.rb"
7
- require_relative "logdna/resources.rb"
8
- require_relative "logdna/version.rb"
6
+ require_relative "logdna/client"
7
+ require_relative "logdna/resources"
8
+ require_relative "logdna/version"
9
9
 
10
10
  module Logdna
11
11
  class ValidURLRequired < ArgumentError; end
12
+
12
13
  class MaxLengthExceeded < ArgumentError; end
13
14
 
14
15
  class Ruby < ::Logger
@@ -18,11 +19,12 @@ module Logdna
18
19
  attr_accessor :app, :env, :meta
19
20
 
20
21
  def initialize(key, opts = {})
22
+ super(nil, nil, nil)
21
23
  @app = opts[:app] || "default"
22
24
  @log_level = opts[:level] || "INFO"
23
25
  @env = opts[:env]
24
26
  @meta = opts[:meta]
25
- @internal_logger = Logger.new(STDOUT)
27
+ @internal_logger = Logger.new($stdout)
26
28
  @internal_logger.level = Logger::DEBUG
27
29
  endpoint = opts[:endpoint] || Resources::ENDPOINT
28
30
  hostname = opts[:hostname] || Socket.gethostname
@@ -127,9 +129,5 @@ module Logdna
127
129
  def close
128
130
  @client&.exitout
129
131
  end
130
-
131
- at_exit do
132
- @client&.exitout
133
- end
134
132
  end
135
133
  end
@@ -1,34 +1,57 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require "etc"
3
4
  require "net/http"
4
5
  require "socket"
5
6
  require "json"
6
7
  require "concurrent"
7
8
  require "date"
9
+ require "securerandom"
8
10
 
9
11
  module Logdna
12
+ Message = Struct.new(:source, :running_size)
13
+
10
14
  class Client
11
15
  def initialize(request, uri, opts)
12
16
  @uri = uri
13
17
 
14
18
  # NOTE: buffer is in memory
15
19
  @buffer = []
16
- @buffer_byte_size = 0
17
-
18
- @side_messages = []
19
20
 
20
21
  @lock = Mutex.new
21
- @side_message_lock = Mutex.new
22
- @flush_limit = opts[:flush_size] || Resources::FLUSH_BYTE_LIMIT
22
+
23
23
  @flush_interval = opts[:flush_interval] || Resources::FLUSH_INTERVAL
24
- @flush_scheduled = false
25
- @exception_flag = false
24
+ @flush_size = opts[:flush_size] || Resources::FLUSH_SIZE
26
25
 
27
26
  @request = request
27
+ @request_size = opts[:request_size] || Resources::REQUEST_SIZE
28
+
28
29
  @retry_timeout = opts[:retry_timeout] || Resources::RETRY_TIMEOUT
30
+ @retry_max_jitter = opts[:retry_max_jitter] || Resources::RETRY_MAX_JITTER
31
+ @retry_max_attempts = opts[:retry_max_attempts] || Resources::RETRY_MAX_ATTEMPTS
29
32
 
30
- @internal_logger = Logger.new(STDOUT)
33
+ @internal_logger = Logger.new($stdout)
31
34
  @internal_logger.level = Logger::DEBUG
35
+
36
+ @work_thread_pool = Concurrent::FixedThreadPool.new(Etc.nprocessors)
37
+ # TODO: Expose an option to configure the maximum concurrent requests
38
+ # Requires the instance-global request to be resolved first
39
+ @request_thread_pool = Concurrent::FixedThreadPool.new(Resources::MAX_CONCURRENT_REQUESTS)
40
+
41
+ @scheduled_flush = nil
42
+ end
43
+
44
+ def schedule_flush
45
+ if @scheduled_flush.nil? || @scheduled_flush.complete?
46
+ @scheduled_flush = Concurrent::ScheduledTask.execute(@flush_interval) { flush }
47
+ end
48
+ end
49
+
50
+ def unschedule_flush
51
+ if !@scheduled_flush.nil?
52
+ @scheduled_flush.cancel
53
+ @scheduled_flush = nil
54
+ end
32
55
  end
33
56
 
34
57
  def process_message(msg, opts = {})
@@ -44,95 +67,145 @@ module Logdna
44
67
  processed_message
45
68
  end
46
69
 
47
- def schedule_flush
48
- start_timer = lambda {
49
- sleep(@exception_flag ? @retry_timeout : @flush_interval)
50
- flush if @flush_scheduled
51
- }
52
- Thread.new { start_timer.call }
70
+ def write_to_buffer(msg, opts)
71
+ Concurrent::Future.execute({ executor: @work_thread_pool }) { write_to_buffer_sync(msg, opts) }
53
72
  end
54
73
 
55
- def write_to_buffer(msg, opts)
56
- if @lock.try_lock
57
- processed_message = process_message(msg, opts)
58
- new_message_size = processed_message.to_s.bytesize
59
- @buffer.push(processed_message)
60
- @buffer_byte_size += new_message_size
61
- @flush_scheduled = true
62
- @lock.unlock
63
-
64
- if @flush_limit <= @buffer_byte_size
65
- flush
66
- else
67
- schedule_flush
74
+ def write_to_buffer_sync(msg, opts)
75
+ processed_message = process_message(msg, opts)
76
+ message_size = processed_message.to_s.bytesize
77
+
78
+ running_size = @lock.synchronize do
79
+ running_size = message_size
80
+ if @buffer.any?
81
+ running_size += @buffer[-1].running_size
68
82
  end
83
+ @buffer.push(Message.new(processed_message, running_size))
84
+
85
+ running_size
86
+ end
87
+
88
+ if running_size >= @flush_size
89
+ unschedule_flush
90
+ flush_sync
69
91
  else
70
- @side_message_lock.synchronize do
71
- @side_messages.push(process_message(msg, opts))
72
- end
92
+ schedule_flush
73
93
  end
74
94
  end
75
95
 
76
- # This method has to be called with @lock
77
- def send_request
78
- @side_message_lock.synchronize do
79
- @buffer.concat(@side_messages)
80
- @side_messages.clear
96
+ ##
97
+ # Flushes all logs to LogDNA asynchronously
98
+ def flush(options = {})
99
+ Concurrent::Future.execute({ executor: @work_thread_pool }) { flush_sync(options) }
100
+ end
101
+
102
+ ##
103
+ # Flushes all logs to LogDNA synchronously
104
+ def flush_sync(options = {})
105
+ slices = @lock.synchronize do
106
+ # Slice the buffer into chunks that try to be no larger than @request_size. Slice points are found with
107
+ # a binary search thanks to the structure of @buffer. We are working backwards because it's cheaper to
108
+ # remove from the tail of an array instead of the head
109
+ slices = []
110
+ until @buffer.empty?
111
+ search_size = @buffer[-1].running_size - @request_size
112
+ if search_size.negative?
113
+ search_size = 0
114
+ end
115
+
116
+ slice_index = @buffer.bsearch_index { |message| message.running_size >= search_size }
117
+ slices.push(@buffer.pop(@buffer.length - slice_index).map(&:source))
118
+ end
119
+ slices
120
+ end
121
+
122
+ # Remember the chunks are in reverse order, this un-reverses them
123
+ slices.reverse_each do |slice|
124
+ if options[:block_on_requests]
125
+ try_request(slice)
126
+ else
127
+ Concurrent::Future.execute({ executor: @request_thread_pool }) { try_request(slice) }
128
+ end
81
129
  end
130
+ end
82
131
 
83
- @request.body = {
132
+ def try_request(slice)
133
+ body = {
84
134
  e: "ls",
85
- ls: @buffer
135
+ ls: slice
86
136
  }.to_json
87
137
 
88
- handle_exception = lambda do |message|
89
- @internal_logger.debug(message)
90
- @exception_flag = true
91
- @side_message_lock.synchronize do
92
- @side_messages.concat(@buffer)
138
+ flush_id = "#{SecureRandom.uuid} [#{slice.length} lines]"
139
+ error_header = "Flush {#{flush_id}} failed."
140
+ tries = 0
141
+ loop do
142
+ tries += 1
143
+
144
+ if tries > @retry_max_attempts
145
+ @internal_logger.debug("Flush {#{flush_id}} exceeded 3 tries. Discarding flush buffer")
146
+ break
93
147
  end
148
+
149
+ if send_request(body, error_header)
150
+ break
151
+ end
152
+
153
+ sleep(@retry_timeout * (1 << (tries - 1)) + rand(@retry_max_jitter))
94
154
  end
155
+ end
95
156
 
157
+ def send_request(body, error_header)
158
+ # TODO: Remove instance-global request object
159
+ @request.body = body
96
160
  begin
97
- @response = Net::HTTP.start(
161
+ response = Net::HTTP.start(
98
162
  @uri.hostname,
99
163
  @uri.port,
100
164
  use_ssl: @uri.scheme == "https"
101
165
  ) do |http|
102
166
  http.request(@request)
103
167
  end
104
- if @response.is_a?(Net::HTTPForbidden)
105
- @internal_logger.debug("Please provide a valid ingestion key")
106
- elsif !@response.is_a?(Net::HTTPSuccess)
107
- handle_exception.call("The response is not successful #{@response}")
168
+
169
+ code = response.code.to_i
170
+ if [401, 403].include?(code)
171
+ @internal_logger.debug("#{error_header} Please provide a valid ingestion key. Discarding flush buffer")
172
+ return true
173
+ elsif [408, 500, 504].include?(code)
174
+ # These codes might indicate a temporary ingester issue
175
+ @internal_logger.debug("#{error_header} The request failed #{response}. Retrying")
176
+ elsif code == 200
177
+ return true
178
+ else
179
+ @internal_logger.debug("#{error_header} The request failed #{response}. Discarding flush buffer")
180
+ return true
108
181
  end
109
- @exception_flag = false
110
182
  rescue SocketError
111
- handle_exception.call("Network connectivity issue")
183
+ @internal_logger.debug("#{error_header} Network connectivity issue. Retrying")
112
184
  rescue Errno::ECONNREFUSED => e
113
- handle_exception.call("The server is down. #{e.message}")
185
+ @internal_logger.debug("#{error_header} The server is down. #{e.message}. Retrying")
114
186
  rescue Timeout::Error => e
115
- handle_exception.call("Timeout error occurred. #{e.message}")
116
- ensure
117
- @buffer.clear
187
+ @internal_logger.debug("#{error_header} Timeout error occurred. #{e.message}. Retrying")
118
188
  end
119
- end
120
189
 
121
- def flush
122
- if @lock.try_lock
123
- @flush_scheduled = false
124
- if @buffer.any? || @side_messages.any?
125
- send_request
126
- end
127
- @lock.unlock
128
- else
129
- schedule_flush
130
- end
190
+ false
131
191
  end
132
192
 
133
193
  def exitout
134
- flush
135
- @internal_logger.debug("Exiting LogDNA logger: Logging remaining messages")
194
+ unschedule_flush
195
+ @work_thread_pool.shutdown
196
+ if !@work_thread_pool.wait_for_termination(1)
197
+ @internal_logger.warn("Work thread pool unable to shutdown gracefully. Logs potentially dropped")
198
+ end
199
+ @request_thread_pool.shutdown
200
+ if !@request_thread_pool.wait_for_termination(5)
201
+ @internal_logger.warn("Request thread pool unable to shutdown gracefully. Logs potentially dropped")
202
+ end
203
+
204
+ if @buffer.any?
205
+ @internal_logger.debug("Exiting LogDNA logger: Logging remaining messages")
206
+ flush_sync({ block_on_requests: true })
207
+ @internal_logger.debug("Finished flushing logs to LogDNA")
208
+ end
136
209
  end
137
210
  end
138
211
  end
@@ -8,10 +8,14 @@ module Resources
8
8
  MAX_REQUEST_TIMEOUT = 300_000
9
9
  MAX_LINE_LENGTH = 32_000
10
10
  MAX_INPUT_LENGTH = 80
11
- RETRY_TIMEOUT = 60
11
+ RETRY_TIMEOUT = 0.25
12
+ RETRY_MAX_ATTEMPTS = 3
13
+ RETRY_MAX_JITTER = 0.5
12
14
  FLUSH_INTERVAL = 0.25
13
- FLUSH_BYTE_LIMIT = 500_000
15
+ FLUSH_SIZE = 2 * 1_024 * 1_024
16
+ REQUEST_SIZE = 2 * 1_024 * 1_024
14
17
  ENDPOINT = "https://logs.logdna.com/logs/ingest"
15
18
  MAC_ADDR_CHECK = /^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$/.freeze
16
19
  IP_ADDR_CHECK = /^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.freeze
20
+ MAX_CONCURRENT_REQUESTS = 1
17
21
  end
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LogDNA
4
- VERSION = "1.4.2"
4
+ VERSION = "1.5.0"
5
5
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logdna
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.4.2
4
+ version: 1.5.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Gun Woo Choi, Derek Zhou, Vilya Levitskiy, Muaz Siddiqui
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2020-03-19 00:00:00.000000000 Z
11
+ date: 2021-01-29 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: concurrent-ruby
@@ -90,7 +90,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
90
90
  requirements:
91
91
  - - ">="
92
92
  - !ruby/object:Gem::Version
93
- version: '0'
93
+ version: 2.5.0
94
94
  required_rubygems_version: !ruby/object:Gem::Requirement
95
95
  requirements:
96
96
  - - ">="