lpxc 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lpxc.rb +181 -0
- data/readme.md +17 -0
- metadata +45 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA1:
+  metadata.gz: bbb9515fbe1b5b1aaae292b1b01b064b716449d9
+  data.tar.gz: a859b4147d2f032283662c2a6066ba73b4f58eaf
+SHA512:
+  metadata.gz: ae76140042c3a5ae27431d1b3b187cbb4094af5bd7579f0350dbc1c047da4dc039ecd94abb6a6d9a2de3e20f2f8f19849e640a1a8195026c2b7eb06e1fd93d73
+  data.tar.gz: ce06660a291346c8fb42ddcfc0d274b75809a9f9c70b79f66b82d397892854d9aab4491577a024dbcc533f07357b4444b0a8de56b77de81117abe2bdc14be0dd
data/lpxc.rb
ADDED
@@ -0,0 +1,181 @@
+require 'time'
+require 'net/http'
+require 'uri'
+require 'thread'
+require 'timeout'
+
+class Lpxc
+
+  #After parsing opts and initializing defaults, the initializer
+  #will start 2 threads. One thread for sending HTTP requests and another
+  #thread for flushing log messages to the outlet thread periodically.
+  #:hash => {}:: A data structure for grouping log messages by token.
+  #:request_queue => SizedQueue.new:: Contains HTTP requests ready for outlet thread to deliver to logplex.
+  #:default_token => nil:: You can specify a token that will be used for any call to Lpxc#puts that doesn't include a token.
+  #:structured_data => '-':: Structured-data field for syslog headers. Ignored by logplex.
+  #:msgid => '-':: Msg ID field for syslog headers. Ignored by logplex.
+  #:procid => 'lpxc':: Proc ID field for syslog headers. This will show up in the Heroku logs tail command as: app [lpxc].
+  #:hostname => 'myhost':: Hostname field for syslog headers. Ignored by logplex.
+  #:max_reqs_per_conn => 1_000:: Number of requests before we re-establish our keep-alive connection to logplex.
+  #:conn_timeout => 2:: Number of seconds before timing out a single request to logplex.
+  #:batch_size => 300:: Max number of log messages inside single HTTP request.
+  #:flush_interval => 0.5:: Fractional number of seconds before flushing all log messages in buffer to logplex.
+  #:logplex_url => 'https://east.logplex.io/logs':: HTTP server that will accept our log messages.
+  def initialize(opts={})
+    @hash_lock = Mutex.new
+    @hash = opts[:hash] || Hash.new
+    @request_queue = opts[:request_queue] || SizedQueue.new(1)
+    @default_token = opts[:default_token]
+    @structured_data = opts[:structured_data] || "-"
+    @msgid = opts[:msgid] || "-"
+    @procid = opts[:procid] || "lpxc"
+    @hostname = opts[:hostname] || "myhost"
+    @max_reqs_per_conn = opts[:max_reqs_per_conn] || 1_000
+    @conn_timeout = opts[:conn_timeout] || 2
+    @batch_size = opts[:batch_size] || 300
+    @flush_interval = opts[:flush_interval] || 0.5
+    @logplex_url = URI(opts[:logplex_url] || ENV["LOGPLEX_URL"] ||
+      raise("Must set logplex url."))
+
+    #Keep track of the number of requests that the outlet
+    #is processing. This value is used by the wait function.
+    @req_in_flight = 0
+
+    #Initialize the last_flush to an arbitrary time.
+    @last_flush = Time.now + @flush_interval
+
+    #Start the processing threads.
+    Thread.new {outlet}
+    Thread.new {delay_flush}
+  end
+
+  #The interface to publish logs into the stream.
+  #This function will set the log message to the current time in UTC.
+  #If the buffer for this token's log messages is full, it will flush the buffer.
+  def puts(msg, tok=@default_token)
+    @hash_lock.synchronize do
+      #Messages are grouped by their token since 1 http request
+      #to logplex must only contain log messages belonging to a single token.
+      q = @hash[tok] ||= SizedQueue.new(@batch_size)
+      #This call will block if the queue is full.
+      #However this should never happen since the next command will flush
+      #the queue if we add the last item.
+      q.enq({:t => Time.now.utc, :token => tok, :msg => msg})
+      flush if q.size == q.max
+    end
+  end
+
+  #Wait until all of the data has been cleared from memory.
+  #This is useful if you don't want your program to exit before
+  #we are able to deliver log messages to logplex.
+  def wait
+    sleep(0.1) until
+      @hash.length.zero? &&
+      @request_queue.empty? &&
+      @req_in_flight.zero?
+  end
+
+  private
+
+  #Take a lock to read all of the buffered messages.
+  #Once we have read the messages, we make 1 http request for the batch.
+  #We pass the request off into the request queue so that the request
+  #can be sent to LOGPLEX_URL.
+  def flush
+    @hash.each do |tok, msgs|
+      #Copy the messages from the queue into the payload array.
+      payloads = []
+      msgs.size.times {payloads << msgs.deq}
+      return if payloads.nil? || payloads.empty?
+
+      #Use the payloads array to build a string that will be
+      #used as the http body for the logplex request.
+      body = ""
+      payloads.flatten.each do |payload|
+        body += "#{fmt(payload)}"
+      end
+
+      #Build a new HTTP request and place it into the queue
+      #to be processed by the HTTP connection.
+      req = Net::HTTP::Post.new(@logplex_url.path)
+      req.basic_auth("token", tok)
+      req.add_field('Content-Type', 'application/logplex-1')
+      req.body = body
+      @request_queue.enq(req)
+      @hash.delete(tok)
+      @last_flush = Time.now
+    end
+  end
+
+
+  #This method must be called in order for the messages to be sent to Logplex.
+  #This method also spawns a thread that allows the messages to be batched.
+  #Messages are flushed from memory every 500ms or when we have 300 messages,
+  #whichever comes first.
+  def delay_flush
+    loop do
+      begin
+        if interval_ready?
+          @hash_lock.synchronize {flush}
+        end
+        sleep(0.01)
+      rescue => e
+        $stderr.puts("at=start-error error=#{e.message}") if ENV['DEBUG']
+      end
+    end
+  end
+
+  def interval_ready?
+    (Time.now.to_f - @last_flush.to_f).abs >= @flush_interval
+  end
+
+  #Format the user message into RFC5424 format.
+  #This method also prepends the length to the message.
+  def fmt(data)
+    pkt = "<190>1 "
+    pkt += "#{data[:t].strftime("%Y-%m-%dT%H:%M:%S+00:00")} "
+    pkt += "#{@hostname} "
+    pkt += "#{data[:token]} "
+    pkt += "#{@procid} "
+    pkt += "#{@msgid} "
+    pkt += "#{@structured_data} "
+    pkt += data[:msg]
+    "#{pkt.size} #{pkt}"
+  end
+
+  #We use a keep-alive connection to send data to LOGPLEX_URL.
+  #Each request will contain one or more log messages.
+  def outlet
+    loop do
+      http = Net::HTTP.new(@logplex_url.host, @logplex_url.port)
+      http.set_debug_output($stdout) if ENV['DEBUG']
+      http.use_ssl = true if @logplex_url.scheme == 'https'
+      begin
+        http.start do |conn|
+          num_reqs = 0
+          while num_reqs < @max_reqs_per_conn
+            #Blocks waiting for a request.
+            req = @request_queue.deq
+            @req_in_flight += 1
+            resp = nil
+            begin
+              Timeout::timeout(@conn_timeout) {resp = conn.request(req)}
+            rescue => e
+              $stdout.puts("at=req-error msg=#{e.message}") if ENV['DEBUG']
+              next
+            ensure
+              @req_in_flight -= 1
+            end
+            num_reqs += 1
+            $stdout.puts("at=req-sent status=#{resp.code}") if ENV['DEBUG']
+          end
+        end
+      rescue => e
+        $stdout.puts("at=req-error msg=#{e.message}") if ENV['DEBUG']
+      ensure
+        http.finish
+      end
+    end
+  end
+
+end
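For orientation, a minimal usage sketch of the client defined above. The token and endpoint below are placeholders rather than real credentials; as the initializer shows, the logplex endpoint can also come from the LOGPLEX_URL environment variable.

    require 'lpxc'

    # Placeholder token and endpoint -- substitute real logplex credentials.
    client = Lpxc.new(
      :default_token => 't.placeholder-token',
      :logplex_url   => 'https://east.logplex.io/logs'
    )

    # Messages are buffered per token, batched (up to :batch_size), and flushed
    # every :flush_interval seconds by the threads started in #initialize.
    client.puts('hello from lpxc')

    # Block until buffers, the request queue, and in-flight requests have drained.
    client.wait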
data/readme.md
ADDED
metadata
ADDED
@@ -0,0 +1,45 @@
+--- !ruby/object:Gem::Specification
+name: lpxc
+version: !ruby/object:Gem::Version
+  version: 0.0.1
+platform: ruby
+authors:
+- Ryan Smith (♠ ace hacker)
+autorequire:
+bindir: bin
+cert_chain: []
+date: 2013-06-29 00:00:00.000000000 Z
+dependencies: []
+description: Ruby client for sending data to Heroku's logplex.
+email: r@32k.io
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- readme.md
+- lpxc.rb
+homepage: http://github.com/ryandotsmith/lpxc
+licenses:
+- MIT
+metadata: {}
+post_install_message:
+rdoc_options: []
+require_paths:
+- .
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - '>='
+    - !ruby/object:Gem::Version
+      version: '0'
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - '>='
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubyforge_project:
+rubygems_version: 2.0.2
+signing_key:
+specification_version: 4
+summary: Ruby client for logplex.
+test_files: []
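Assuming the usual RubyGems workflow (nothing beyond the gem name and version declared in the spec above), pulling this release into a project looks like:

    # Gemfile
    gem 'lpxc', '0.0.1'

    # or install directly:
    #   gem install lpxc -v 0.0.1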