afstatsd 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/example/example.rb +69 -0
- data/lib/afstatsd/statsd_aggregator.rb +132 -0
- data/lib/afstatsd/statsd_metrics.rb +106 -0
- data/lib/afstatsd.rb +281 -0
- metadata +65 -0
data/example/example.rb
ADDED
@@ -0,0 +1,69 @@
require 'afstatsd'

#$statsd = Statsd.new 'statsd_server.my_company.com', 8125, 20

$statsd = Statsd.new              # use defaults
$statsd.namespace = 'test.ruby'


$statsd.increment 'counter1'
$statsd.increment 'counter1'
$statsd.decrement 'counter1'      # counters accumulate

$statsd.gauge 'gauge1', 1024
$statsd.gauge 'gauge1', 1025
$statsd.gauge 'gauge1', 1026
$statsd.gauge 'gauge1', 1027
$statsd.gauge 'gauge1', 1028      # gauges get averaged when aggregated

$statsd.time('timing1') { sleep 0.01 }
$statsd.time('timing1') { sleep 0.02 }
$statsd.time('timing1') { sleep 0.03 }
$statsd.time('timing1') { sleep 0.04 }  # timings get averaged when aggregated


=begin

100.times do
  #$statsd.increment 'sampled', 0.1, 'sampled'
  $statsd.increment 'sampled'
end

$statsd.set 'set1', 1099, "ez"

for i in 10..19 do
  $statsd.increment "counter#{i}"  # create a group of counters
end

1000.times do
  $statsd.increment 'fast'         # don't do this if aggregation is off
end

# In this test program, this will give the aggregator time to run.
15.times do
  sleep 2
  $statsd.increment 'slow'
end

=end

=begin
# test for thread safety
threads = []
start = Time.now
for i in 0..9 do
  threads << Thread.new(i) do |j|
    start = Time.now
    1000000.times do
      $statsd.increment 'inthethread'
      # sleep(0.01)
    end
    puts "thread #{j} says: I took #{((Time.now - start)*1000).round} ms"
  end
end
threads.each { |t| t.join }

puts "total time: #{((Time.now - start)*1000).round} ms"
=end

puts "#{$statsd.dropped} messages dropped"
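The example above relies on the defaults: POSIX MQ transport to the AppFirst collector and a 20-second aggregation interval. Below is a minimal sketch of the other configuration the library supports, with aggregation disabled and plain UDP; the host, port, and metric names are illustrative:

    require 'afstatsd'

    # Passing interval = 0 means the aggregator is never started,
    # so every call below is sent immediately.
    statsd = Statsd.new '127.0.0.1', 8125, 0
    statsd.set_transport :udp_transport    # bypass the AppFirst collector MQ
    statsd.namespace = 'test.ruby.direct'

    statsd.increment 'requests'            # one UDP datagram per call
    statsd.time('db.query') { sleep 0.01 }

    puts "#{statsd.dropped} messages dropped"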
data/lib/afstatsd/statsd_aggregator.rb
ADDED
@@ -0,0 +1,132 @@
# Statsd Aggregator
#
# Used to aggregate metrics in a threaded environment. Only one of these
# should be created, in the main thread.
# For each thread, we create 2 buffers. The thread will be writing to
# one, while the aggregator reads from the other. The aggregator will
# control which set is which.


class StatsdAggregator
  attr_accessor :transport

  def initialize(interval=20)
    @interval = interval
    @timer = nil
    @mutex = Mutex.new
    @running = false
    @left_buffers = {}              # 2 buffer groups
    @right_buffers = {}             # each buffer group is a hash
    @rbufs = @left_buffers          # buffer group currently being read from
    @wbufs = @right_buffers         # buffer group currently being written to
    at_exit do
      if @running
        flush_buffers
        swap_buffers
        flush_buffers
      end
    end
  end

  def start(transport)
    @transport = transport
    return if @running              # already started
    # Spin up a thread to periodically send the aggregated stats.
    # Divide the interval in half to allow other threads to finish
    # their writes after we swap, and before we start reading.
    @timer = Thread.new do
      loop do
        sleep @interval/2
        swap_buffers
        sleep @interval/2
        flush_buffers
      end
    end
    @running = true
    #puts "aggregation started. Interval=#{@interval}"
  end

  def stop
    return if not @running          # already stopped
    flush_buffers
    @timer.kill if @timer
    @timer = nil
    @running = false
    #puts "aggregation stopped"
  end

  def set_interval(interval)
    @interval = interval
  end

  # the following methods are thread safe

  def running
    @running
  end

  # this is the only method that should be used by child threads.
  def add(metric)
    # We should have a write buffer assigned to our thread.
    # Create one if not.
    unless write_buffer = @wbufs[Thread.current]
      #puts "Thread #{Thread.current}: creating write_buffer"
      write_buffer = {}
      # get a lock before we mess with the global hash
      @mutex.synchronize do
        @wbufs[Thread.current] = write_buffer
      end
    end
    if m = write_buffer[metric.name]
      # if we are already collecting this metric, just aggregate the new value
      m.aggregate metric.value
    else
      # otherwise, add this metric to the aggregation buffer
      #puts "Thread #{Thread.current}: creating metric"
      write_buffer[metric.name] = metric
    end
    #puts "Thread #{Thread.current}: Added metric: #{metric}"
  end

  private

  # Next two methods are called at different times during the interval,
  # so any writes in progress after the swap will have time to complete.

  def swap_buffers
    if @rbufs == @left_buffers
      @rbufs = @right_buffers
      @wbufs = @left_buffers
    else
      @rbufs = @left_buffers
      @wbufs = @right_buffers
    end
  end

  def flush_buffers
    # Each thread has its own read buffer. If it's empty, the
    # thread might be dead. We'll delete its read buffer.
    @rbufs.delete_if { |k, rb| rb.empty? }

    # If not empty, aggregate all the data across all the threads,
    # then send.
    send_buffer = {}
    @rbufs.each_value do |rb|
      rb.each_value do |metric|
        if m = send_buffer[metric.name]
          m.aggregate metric.value
        else
          send_buffer[metric.name] = metric
        end
      end
      # once we've aggregated all the metrics from this
      # thread, clear out the buffer, but don't remove it.
      rb.clear
    end
    #puts "nothing to send" if send_buffer.empty?
    send_buffer.each_value do |metric|
      @transport.call(metric)
    end
  end

end # class StatsdAggregator
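The double-buffer scheme described in the header comment can be exercised on its own: each writer thread gets a private hash, the timer thread swaps the read/write buffer groups mid-interval, and entries with the same metric name are merged before being sent. A small sketch, assuming the gem is installed; the lambda transport and the metric name are illustrative stand-ins for Statsd's real transport methods:

    require 'afstatsd/statsd_metrics'
    require 'afstatsd/statsd_aggregator'

    # Any object that responds to #call(metric) works as a transport.
    print_transport = lambda { |metric| puts "flush: #{metric}" }

    agg = StatsdAggregator.new(2)     # short 2-second interval for the demo
    agg.start(print_transport)

    100.times do
      agg.add StatsdMetrics::CMetric.new('demo.counter', 1)
    end

    sleep 3      # let the timer thread swap and then flush the buffers
    agg.stop     # expected output: a single "flush: demo.counter:100|c" line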
data/lib/afstatsd/statsd_metrics.rb
ADDED
@@ -0,0 +1,106 @@
# Classes used to store and manipulate each type of metric.
# Each type must implement initialize, aggregate, and to_s.

module StatsdMetrics

  class Metric
    # all metrics share these
    attr_accessor :name
    attr_accessor :value
    attr_accessor :message
  end

  class CMetric < Metric
    # Counter
    def initialize(name, value, rate=1, msg="")
      @name = name
      @value = value
      @message = msg
      @sample_rate = rate
    end

    def aggregate(delta)
      @value += delta     # accumulate
    end

    def to_s
      if @sample_rate == 1 then r = "" else r = "|@#{@sample_rate}" end
      if @message == ""
        m = ""
      else
        if r == ""
          m = "||#{@message}"
        else
          m = "|#{@message}"
        end
      end
      "#{name}:#{@value}|c#{r}#{m}"
    end
  end

  class GMetric < Metric
    # Gauge
    def initialize(name, value, msg="")
      @name = name
      @value = value
      @message = msg
      @count = 1
    end

    def aggregate(value)
      @value += value     # average
      @count += 1
    end

    def to_s
      avg = @value / @count
      if @message == "" then m = "" else m = "|#{@message}" end
      "#{name}:#{avg}|g#{m}"
    end
  end

  class TMetric < Metric
    # Timing
    def initialize(name, value, rate=1, msg="")
      @name = name
      @value = value
      @sample_rate = rate
      @message = msg
      @count = 1
    end

    def aggregate(value)
      @value += value     # average
      @count += 1
    end

    def to_s
      avg = @value / @count
      if @message == "" then m = "" else m = "|#{@message}" end
      "#{name}:#{avg}|ms#{m}"
    end
  end

  class SMetric < Metric
    # Set (per the etsy standard)
    def initialize(name, value, msg="")
      @name = name
      @value = value
      @message = msg
    end

    def aggregate(value)
      @value = value      # overwrite
    end

    def to_s
      if @message == "" then m = "" else m = "|#{@message}" end
      "#{name}:#{@value}|s#{m}"
    end
  end

end # module StatsdMetrics
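Each class serializes to the StatsD wire format; the optional note rides after an extra delimiter (the AppFirst extension), and gauges and timings report the running average of the values they aggregated. A short sketch of the strings produced, assuming the gem is installed; the names and values are illustrative:

    require 'afstatsd/statsd_metrics'
    include StatsdMetrics

    puts CMetric.new('app.requests', 3).to_s             # => app.requests:3|c
    puts CMetric.new('app.requests', 3, 0.5).to_s        # => app.requests:3|c|@0.5
    puts CMetric.new('app.requests', 3, 1, 'note').to_s  # => app.requests:3|c||note

    g = GMetric.new('app.mem', 100)
    g.aggregate(200)                                     # gauges average on aggregation
    puts g.to_s                                          # => app.mem:150|g

    t = TMetric.new('app.latency', 10)
    t.aggregate(30)                                      # timings average as well
    puts t.to_s                                          # => app.latency:20|ms

    puts SMetric.new('app.deploys', 42).to_s             # => app.deploys:42|s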
data/lib/afstatsd.rb
ADDED
@@ -0,0 +1,281 @@
require 'socket'
require 'forwardable'
require 'rubygems'
require 'posix_mq'
require 'afstatsd/statsd_metrics'
require 'afstatsd/statsd_aggregator'
require 'monitor'
require 'fcntl'

# = Statsd: A Statsd client (https://github.com/etsy/statsd)
#
# @example Set up a global Statsd client for a server on localhost:8125,
#   aggregating 20 seconds worth of metrics
#   $statsd = Statsd.new 'localhost', 8125, 20
# @example Send some stats
#   $statsd.increment 'garets'
#   $statsd.timing 'glork', 320
#   $statsd.gauge 'bork', 100
# @example Use {#time} to time the execution of a block
#   $statsd.time('account.activate') { @account.activate! }
# @example Create a namespaced statsd client and increment 'account.activate'
#   statsd = Statsd.new('localhost').tap{|sd| sd.namespace = 'account'}
#   statsd.increment 'activate'
#
# Statsd instances are thread safe for general usage, by using a thread-local
# UDPSocket and carrying no state. The attributes are stateful and are not
# mutexed; it is expected that users will not change them at runtime in
# threaded environments. If such use cases are required, it is recommended
# to either mutex around the Statsd object or create separate objects for
# each namespace / host+port combination.
class Statsd

  # A namespace to prepend to all statsd calls.
  attr_reader :namespace

  # StatsD host. Defaults to 127.0.0.1. Only used with UDP transport.
  attr_reader :host

  # StatsD port. Defaults to 8125. Only used with UDP transport.
  attr_reader :port

  # StatsD namespace prefix, generated from #namespace
  attr_reader :prefix

  # a postfix to append to all metrics
  attr_reader :postfix

  # count of messages that were dropped due to transmit error
  attr_reader :dropped

  class << self
    # Set to a standard logger instance to enable debug logging.
    attr_accessor :logger
  end

  # @param [String] host your statsd host
  # @param [Integer] port your statsd port
  # @param [Integer] interval aggregation interval in seconds (0 disables aggregation)
  def initialize(host = '127.0.0.1', port = 8125, interval = 20)
    self.host, self.port = host, port
    @prefix = nil
    @postfix = nil
    @aggregator = StatsdAggregator.new(interval)
    set_transport :mq_transport
    self.aggregating = true unless interval == 0
    @dropped = 0
  end

  # @param [Symbol] transport the Ruby symbol for the method that gets called
  #   to send one metric to the server, e.g. set_transport :udp_transport
  def set_transport(transport)
    @transport = method(transport)
    @aggregator.transport = @transport  # aggregator needs to know
  end

  # @param [Boolean] should_aggregate turn aggregation on or off
  def aggregating=(should_aggregate)
    if should_aggregate
      @aggregator.start(@transport)
    else
      @aggregator.stop
    end
  end

  # is the aggregator running?
  def aggregating
    @aggregator.running
  end

  # @attribute [w] namespace
  #   Writes are not thread safe.
  def namespace=(namespace)
    @namespace = namespace
    @prefix = "#{namespace}."
  end

  # @attribute [w] postfix
  #   A value to be appended to the stat name after a '.'. If the value is
  #   blank then the postfix will be reset to nil (rather than to '.').
  def postfix=(pf)
    case pf
    when nil, false, '' then @postfix = nil
    else @postfix = ".#{pf}"
    end
  end

  # @attribute [w] host
  #   Writes are not thread safe.
  def host=(host)
    @host = host || '127.0.0.1'
  end

  # @attribute [w] port
  #   Writes are not thread safe.
  def port=(port)
    @port = port || 8125
  end

  # Sends an increment (count = 1) for the given stat to the statsd server.
  #
  # @param [String] stat stat name
  # @param [Numeric] sample_rate sample rate, 1 for always
  # @param [String] note optional note (AppFirst extension to StatsD)
  # @see #count
  def increment(stat, sample_rate=1, note="")
    count stat, 1, sample_rate, note
  end

  # Sends a decrement (count = -1) for the given stat to the statsd server.
  #
  # @param [String] stat stat name
  # @param [Numeric] sample_rate sample rate, 1 for always
  # @param [String] note optional note (AppFirst extension to StatsD)
  # @see #count
  def decrement(stat, sample_rate=1, note="")
    count stat, -1, sample_rate, note
  end

  # Sends an arbitrary count for the given stat to the statsd server.
  #
  # @param [String] stat stat name
  # @param [Integer] count count
  # @param [Numeric] sample_rate sample rate, 1 for always
  # @param [String] note optional note (AppFirst extension to StatsD)
  def count(stat, count, sample_rate=1, note="")
    if sample_rate == 1 or rand < sample_rate
      send_metric StatsdMetrics::CMetric.new(expand_name(stat), count, sample_rate, note)
    end
  end

  # Sends an arbitrary gauge value for the given stat to the statsd server.
  #
  # This is useful for recording things like available disk space,
  # memory usage, and the like, which have different semantics than
  # counters.
  #
  # @param [String] stat stat name.
  # @param [Numeric] value gauge value.
  # @param [String] note optional note (AppFirst extension to StatsD)
  # @example Report the current user count:
  #   $statsd.gauge('user.count', User.count)
  def gauge(stat, value, note="")
    send_metric StatsdMetrics::GMetric.new(expand_name(stat), value, note)
  end

  # Sends an arbitrary set value for the given stat to the statsd server.
  #
  # This is for recording counts of unique events, which are useful to
  # see on graphs to correlate to other values. For example, a deployment
  # might get recorded as a set, and be drawn as annotations on a CPU history
  # graph.
  #
  # @param [String] stat stat name.
  # @param [Numeric] value event value.
  # @param [String] note optional note (AppFirst extension to StatsD)
  # @example Report a deployment happening:
  #   $statsd.set('deployment', DEPLOYMENT_EVENT_CODE)
  def set(stat, value, note="")
    send_metric StatsdMetrics::SMetric.new(expand_name(stat), value, note)
  end

  # Sends a timing (in ms) for the given stat to the statsd server. The
  # sample_rate determines what percentage of the time this report is sent.
  # The statsd server then uses the sample_rate to correctly track the
  # average timing for the stat.
  #
  # @param [String] stat stat name
  # @param [Integer] ms timing in milliseconds
  # @param [Numeric] sample_rate sample rate, 1 for always
  # @param [String] note optional note (AppFirst extension to StatsD)
  def timing(stat, ms, sample_rate=1, note="")
    if sample_rate == 1 or rand < sample_rate
      send_metric StatsdMetrics::TMetric.new(expand_name(stat), ms, sample_rate, note)
    end
  end

  # Reports execution time of the provided block using {#timing}.
  #
  # @param [String] stat stat name
  # @param [Numeric] sample_rate sample rate, 1 for always
  # @param [String] note optional note (AppFirst extension to StatsD)
  # @yield The operation to be timed
  # @see #timing
  # @example Report the time (in ms) taken to activate an account
  #   $statsd.time('account.activate') { @account.activate! }
  def time(stat, sample_rate=1, note="")
    start = Time.now
    result = yield
    timing(stat, ((Time.now - start) * 1000).round, sample_rate, note)
    result
  end

  protected

  def send_metric(metric)
    # All the metric types above funnel to here. We will send or aggregate.
    if aggregating
      @aggregator.add metric
    else
      @transport.call(metric)
    end
  end

  def expand_name(name)
    # Replace Ruby module scoping with '.' and reserved chars (: | @) with underscores.
    name = name.to_s.gsub('::', '.').tr(':|@', '_')
    "#{prefix}#{name}#{postfix}"
  end

  def udp_transport(metric)
    #puts "socket < #{metric}\n"
    self.class.logger.debug { "Statsd: #{metric}" } if self.class.logger
    socket.send(metric.to_s, 0, @host, @port)
  rescue => boom
    #puts "socket send error"
    @dropped += 1
    self.class.logger.error { "Statsd: #{boom.class} #{boom}" } if self.class.logger
    nil
  end

  STATSD_SEVERITY = 3

  def mq_transport(metric)
    #puts "MQ < #{metric}\n"  # debug
    self.class.logger.debug { "Statsd: #{metric}" } if self.class.logger
    if not @mq
      begin
        @mq = POSIX_MQ.new("/afcollectorapi", Fcntl::O_WRONLY | Fcntl::O_NONBLOCK)
      rescue => boom
        self.class.logger.debug { "Statsd: MQ open error #{boom.class} #{boom}" } if self.class.logger
        # Failed to open the MQ. Fall back to UDP transport. Note: the current message will be lost.
        @dropped += 1
        # puts "fallback to udp"
        set_transport :udp_transport
        return nil
      end
    end
    begin
      @mq.send(metric.to_s, STATSD_SEVERITY)
    rescue => boom
      # just drop it on the floor
      @dropped += 1
      #puts "MQ send error: #{boom.class} #{boom}"
      self.class.logger.error { "Statsd: MQ send error #{boom.class} #{boom}" } if self.class.logger
      nil
    end
  end

  def both_transport(metric)
    mq_transport(metric)
    udp_transport(metric)
  end

  private

  def socket
    Thread.current[:statsd_socket] ||= UDPSocket.new
  end

end # class Statsd
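Putting the client together: by default metrics go to the AppFirst collector over the /afcollectorapi POSIX message queue, falling back to UDP if the queue cannot be opened, and are aggregated for the configured interval before being flushed. A minimal sketch of a configured client, assuming the gem and its posix_mq dependency are installed; the namespace, postfix, and metric names are illustrative:

    require 'logger'
    require 'afstatsd'

    # Class-level logger: transports log each metric at debug level and errors at error level.
    Statsd.logger = Logger.new($stderr)

    statsd = Statsd.new('127.0.0.1', 8125, 10)   # 10-second aggregation window
    statsd.namespace = 'myapp.web'
    statsd.postfix   = 'host01'                  # appended to every stat as ".host01"

    statsd.increment 'requests'
    statsd.gauge 'queue.depth', 42
    statsd.time('render') { sleep 0.005 }

    statsd.aggregating = false     # stop the aggregator; later calls send immediately
    puts statsd.dropped            # messages lost to transport errors so far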
metadata
ADDED
@@ -0,0 +1,65 @@
--- !ruby/object:Gem::Specification
name: afstatsd
version: !ruby/object:Gem::Version
  version: 0.0.1
prerelease:
platform: ruby
authors:
- Clark Bremer
autorequire:
bindir: bin
cert_chain: []
date: 2013-02-18 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: posix_mq
  requirement: !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ~>
      - !ruby/object:Gem::Version
        version: 2.0.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ~>
      - !ruby/object:Gem::Version
        version: 2.0.0
description: A StatsD library with AppFirst Extensions
email: clark@appfirst.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- lib/afstatsd.rb
- lib/afstatsd/statsd_aggregator.rb
- lib/afstatsd/statsd_metrics.rb
- example/example.rb
homepage: http://appfirst.com
licenses: []
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ! '>='
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ! '>='
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project:
rubygems_version: 1.8.24
signing_key:
specification_version: 3
summary: AppFirst StatsD Library
test_files: []
has_rdoc:
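The gemspec above declares a single runtime dependency, posix_mq ~> 2.0.0. A minimal Gemfile sketch for pulling the package in; the source URL is the standard RubyGems endpoint:

    # Gemfile
    source 'https://rubygems.org'

    gem 'afstatsd', '0.0.1'   # pulls in posix_mq ~> 2.0.0 as a runtime dependency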