statsd-instrument 3.7.0 → 3.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/pull_request_template.md +14 -0
- data/.github/workflows/benchmark.yml +2 -3
- data/.github/workflows/lint.yml +1 -2
- data/.github/workflows/tests.yml +2 -2
- data/.rubocop.yml +0 -1
- data/.ruby-version +1 -0
- data/CHANGELOG.md +19 -0
- data/Gemfile +7 -0
- data/README.md +56 -0
- data/Rakefile +11 -0
- data/benchmark/local-udp-throughput +178 -13
- data/benchmark/send-metrics-to-local-udp-receiver +6 -4
- data/lib/statsd/instrument/aggregator.rb +259 -0
- data/lib/statsd/instrument/{batched_udp_sink.rb → batched_sink.rb} +112 -19
- data/lib/statsd/instrument/client.rb +65 -7
- data/lib/statsd/instrument/datagram.rb +6 -2
- data/lib/statsd/instrument/datagram_builder.rb +21 -0
- data/lib/statsd/instrument/environment.rb +42 -6
- data/lib/statsd/instrument/{udp_sink.rb → sink.rb} +34 -25
- data/lib/statsd/instrument/udp_connection.rb +39 -0
- data/lib/statsd/instrument/uds_connection.rb +52 -0
- data/lib/statsd/instrument/version.rb +1 -1
- data/lib/statsd/instrument.rb +9 -3
- data/statsd-instrument.gemspec +2 -0
- data/test/aggregator_test.rb +142 -0
- data/test/client_test.rb +36 -1
- data/test/datagram_builder_test.rb +5 -0
- data/test/dispatcher_stats_test.rb +69 -0
- data/test/environment_test.rb +4 -4
- data/test/integration_test.rb +51 -0
- data/test/test_helper.rb +6 -1
- data/test/udp_sink_test.rb +28 -6
- data/test/uds_sink_test.rb +187 -0
- metadata +20 -9
data/lib/statsd/instrument/environment.rb
CHANGED
@@ -82,20 +82,47 @@ module StatsD
         env.fetch("STATSD_ADDR", "localhost:8125")
       end
 
+      def statsd_socket_path
+        env.fetch("STATSD_SOCKET_PATH", "")
+      end
+
       def statsd_default_tags
         env.key?("STATSD_DEFAULT_TAGS") ? env.fetch("STATSD_DEFAULT_TAGS").split(",") : nil
       end
 
       def statsd_buffer_capacity
-        Integer(env.fetch("STATSD_BUFFER_CAPACITY", StatsD::Instrument::BatchedUDPSink::DEFAULT_BUFFER_CAPACITY))
+        Integer(env.fetch("STATSD_BUFFER_CAPACITY", StatsD::Instrument::BatchedSink::DEFAULT_BUFFER_CAPACITY))
       end
 
       def statsd_batching?
        statsd_buffer_capacity > 0 && Float(env.fetch("STATSD_FLUSH_INTERVAL", 1.0)) > 0.0
       end
 
+      def statsd_uds_send?
+        !statsd_socket_path.empty?
+      end
+
       def statsd_max_packet_size
-        Float(env.fetch("STATSD_MAX_PACKET_SIZE", StatsD::Instrument::BatchedUDPSink::DEFAULT_MAX_PACKET_SIZE))
+        if statsd_uds_send?
+          return Float(env.fetch("STATSD_MAX_PACKET_SIZE", StatsD::Instrument::UdsConnection::DEFAULT_MAX_PACKET_SIZE))
+        end
+
+        Float(env.fetch("STATSD_MAX_PACKET_SIZE", StatsD::Instrument::UdpConnection::DEFAULT_MAX_PACKET_SIZE))
+      end
+
+      def statsd_batch_statistics_interval
+        Integer(env.fetch(
+          "STATSD_BATCH_STATISTICS_INTERVAL",
+          StatsD::Instrument::BatchedSink::DEFAULT_STATISTICS_INTERVAL,
+        ))
+      end
+
+      def experimental_aggregation_enabled?
+        env.key?("STATSD_ENABLE_AGGREGATION")
+      end
+
+      def aggregation_interval
+        Float(env.fetch("STATSD_AGGREGATION_INTERVAL", 2.0))
       end
 
       def client
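Taken together, these readers let the whole transport be configured from the environment: a non-empty `STATSD_SOCKET_PATH` switches the client from UDP to a Unix domain socket, and `STATSD_ENABLE_AGGREGATION` is presence-based. A minimal sketch of the assumed flow; the socket path and numeric values are illustrative, not library defaults:

```ruby
# Illustrative values only.
env = StatsD::Instrument::Environment.new(
  "STATSD_ENV" => "production",
  "STATSD_SOCKET_PATH" => "/var/run/datadog/dsd.socket", # hypothetical agent socket
  "STATSD_BUFFER_CAPACITY" => "5000",                    # "0" would disable batching
  "STATSD_ENABLE_AGGREGATION" => "true",                 # only the key's presence matters
  "STATSD_AGGREGATION_INTERVAL" => "2.0",
)

env.statsd_uds_send?       # => true, because STATSD_SOCKET_PATH is non-empty
env.statsd_batching?       # => true
env.statsd_max_packet_size # => UdsConnection::DEFAULT_MAX_PACKET_SIZE unless overridden
```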
@@ -105,15 +132,24 @@ module StatsD
       def default_sink_for_environment
         case environment
         when "production", "staging"
+          connection = if statsd_uds_send?
+            StatsD::Instrument::UdsConnection.new(statsd_socket_path)
+          else
+            host, port = statsd_addr.split(":")
+            StatsD::Instrument::UdpConnection.new(host, port.to_i)
+          end
+
+          sink = StatsD::Instrument::Sink.new(connection)
           if statsd_batching?
-            StatsD::Instrument::BatchedUDPSink.for_addr(
-              statsd_addr,
+            # if we are batching, wrap the sink in a batched sink
+            return StatsD::Instrument::BatchedSink.new(
+              sink,
               buffer_capacity: statsd_buffer_capacity,
               max_packet_size: statsd_max_packet_size,
+              statistics_interval: statsd_batch_statistics_interval,
             )
-          else
-            StatsD::Instrument::UDPSink.for_addr(statsd_addr)
           end
+          sink
         when "test"
           StatsD::Instrument::NullSink.new
         else
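The production branch above can be reproduced by hand when a client is wired up explicitly; a rough equivalent for a UDS setup, with placeholder path and capacity values:

```ruby
require "statsd/instrument"

connection = StatsD::Instrument::UdsConnection.new("/var/run/datadog/dsd.socket")
sink = StatsD::Instrument::Sink.new(connection)

# Wrap in a batched sink, mirroring the statsd_batching? branch.
batched_sink = StatsD::Instrument::BatchedSink.new(
  sink,
  buffer_capacity: 5_000,
  max_packet_size: StatsD::Instrument::UdsConnection::DEFAULT_MAX_PACKET_SIZE,
  statistics_interval: 10, # seconds between internal dispatcher statistics flushes
)

client = StatsD::Instrument::Client.new(sink: batched_sink, prefix: "my_app")
client.increment("example.counter")
```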
data/lib/statsd/instrument/{udp_sink.rb → sink.rb}
RENAMED
@@ -2,18 +2,21 @@
 
 module StatsD
   module Instrument
-    # @note This class is part of the new Client implementation that is intended
-    # to become the new default in the next major release of this library.
-    class UDPSink
+    class Sink
       class << self
         def for_addr(addr)
-          host, port_as_string = addr.split(":", 2)
-          new(host, Integer(port_as_string))
+          # if addr is host:port
+          if addr.include?(":")
+            host, port_as_string = addr.split(":", 2)
+            connection = UdpConnection.new(host, Integer(port_as_string))
+            new(connection)
+          else
+            connection = UdsConnection.new(addr)
+            new(connection)
+          end
         end
       end
 
-      attr_reader :host, :port
-
       FINALIZER = ->(object_id) do
         Thread.list.each do |thread|
           if (store = thread["StatsD::UDPSink"])
@@ -22,10 +25,9 @@ module StatsD
         end
       end
 
-      def initialize(host, port)
+      def initialize(connection = nil)
         ObjectSpace.define_finalizer(self, FINALIZER)
-        @host = host
-        @port = port
+        @connection = connection
       end
 
       def sample?(sample_rate)
@@ -35,12 +37,12 @@ module StatsD
       def <<(datagram)
         retried = false
         begin
-          socket.send(datagram, 0)
+          connection.send_datagram(datagram)
         rescue SocketError, IOError, SystemCallError => error
           StatsD.logger.debug do
-            "[
+            "[#{self.class.name}] Resetting connection because of #{error.class}: #{error.message}"
           end
-          invalidate_socket
+          invalidate_connection
           if retried
             StatsD.logger.warn do
               "[#{self.class.name}] Events were dropped because of #{error.class}: #{error.message}"
@@ -53,27 +55,34 @@ module StatsD
         self
       end
 
-      def flush(blocking:)
+      def flush(blocking: false)
         # noop
       end
 
-      private
+      def connection_type
+        connection.class.name
+      end
 
-      def invalidate_socket
-        socket = thread_store.delete(object_id)
-        socket&.close
+      def connection
+        thread_store[object_id] ||= @connection
       end
 
-      def socket
-        thread_store[object_id] ||= begin
-          socket = UDPSocket.new
-          socket.connect(@host, @port)
-          socket
-        end
+      def host
+        connection.host
+      end
+
+      def port
+        connection.port
+      end
+
+      private
+
+      def invalidate_connection
+        connection&.close
       end
 
       def thread_store
-        Thread.current["StatsD::UDPSink"] ||= {}
+        Thread.current["StatsD::Sink"] ||= {}
       end
     end
   end
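Because `for_addr` now branches on whether the address contains a colon, a single entry point covers both transports. A short sketch of the expected behaviour (addresses are examples):

```ruby
udp_sink = StatsD::Instrument::Sink.for_addr("127.0.0.1:8125")
udp_sink.connection_type # => "StatsD::Instrument::UdpConnection"
udp_sink.host            # => "127.0.0.1"
udp_sink.port            # => 8125

uds_sink = StatsD::Instrument::Sink.for_addr("/tmp/statsd.sock") # no colon, so treated as a socket path
uds_sink.connection_type # => "StatsD::Instrument::UdsConnection"
uds_sink.host            # => nil (UDS connections expose no host or port)
```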
data/lib/statsd/instrument/udp_connection.rb
ADDED
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+module StatsD
+  module Instrument
+    class UdpConnection
+      DEFAULT_MAX_PACKET_SIZE = 1_472
+
+      attr_reader :host, :port
+
+      def initialize(host, port)
+        @host = host
+        @port = port
+      end
+
+      def send_datagram(message)
+        socket.send(message, 0)
+      end
+
+      def close
+        @socket&.close
+        @socket = nil
+      end
+
+      def type
+        :udp
+      end
+
+      private
+
+      def socket
+        @socket ||= begin
+          socket = UDPSocket.new
+          socket.connect(@host, @port)
+          socket
+        end
+      end
+    end
+  end
+end
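The 1,472-byte default is the usual safe UDP payload on Ethernet: a 1,500-byte MTU minus the 20-byte IPv4 header and the 8-byte UDP header (1500 − 20 − 8 = 1472); larger datagrams risk IP-level fragmentation. A minimal usage sketch, with an example host and port:

```ruby
conn = StatsD::Instrument::UdpConnection.new("127.0.0.1", 8125)
conn.send_datagram("example.counter:1|c") # socket is created lazily on first send
conn.type  # => :udp
conn.close # closes and clears the socket; the next send re-opens it
```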
data/lib/statsd/instrument/uds_connection.rb
ADDED
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+module StatsD
+  module Instrument
+    class UdsConnection
+      DEFAULT_MAX_PACKET_SIZE = 8_192
+
+      def initialize(socket_path, max_packet_size: DEFAULT_MAX_PACKET_SIZE)
+        if max_packet_size <= 0
+          StatsD.logger.warn do
+            "[StatsD::Instrument::UdsConnection] max_packet_size must be greater than 0, " \
+              "using default: #{DEFAULT_MAX_PACKET_SIZE}"
+          end
+        end
+        @socket_path = socket_path
+        @max_packet_size = max_packet_size
+      end
+
+      def send_datagram(message)
+        socket.sendmsg(message, 0)
+      end
+
+      def close
+        @socket&.close
+        @socket = nil
+      end
+
+      def host
+        nil
+      end
+
+      def port
+        nil
+      end
+
+      def type
+        :uds
+      end
+
+      private
+
+      def socket
+        @socket ||= begin
+          socket = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM)
+          socket.setsockopt(Socket::SOL_SOCKET, Socket::SO_SNDBUF, @max_packet_size.to_i)
+          socket.connect(Socket.pack_sockaddr_un(@socket_path))
+          socket
+        end
+      end
+    end
+  end
+end
data/lib/statsd/instrument.rb
CHANGED
@@ -201,7 +201,10 @@ module StatsD
   #
   # @param method (see #statsd_measure)
   # @param name (see #statsd_measure)
-  # @param
+  # @param sample_rate
+  # @param tags
+  # @param no_prefix
+  # @param client
   # @return [void]
   def statsd_count(method, name, sample_rate: nil, tags: nil, no_prefix: false, client: nil)
     add_to_method(method, name, :count) do
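For reference, the macro these docs describe is used roughly like this; the class, method, and metric names are made up for the example:

```ruby
class OrderProcessor
  extend StatsD::Instrument

  def process
    # business logic
  end

  # Emits a counter datagram every time #process is called.
  statsd_count :process, "orders.process", sample_rate: 0.5, tags: ["channel:web"]
end
```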
@@ -386,19 +389,22 @@ end
 require "statsd/instrument/version"
 require "statsd/instrument/client"
 require "statsd/instrument/datagram"
+require "statsd/instrument/aggregator"
 require "statsd/instrument/dogstatsd_datagram"
 require "statsd/instrument/datagram_builder"
 require "statsd/instrument/statsd_datagram_builder"
 require "statsd/instrument/dogstatsd_datagram_builder"
 require "statsd/instrument/null_sink"
-require "statsd/instrument/udp_sink"
-require "statsd/instrument/batched_udp_sink"
 require "statsd/instrument/capture_sink"
 require "statsd/instrument/log_sink"
 require "statsd/instrument/environment"
 require "statsd/instrument/helpers"
 require "statsd/instrument/assertions"
 require "statsd/instrument/expectation"
+require "statsd/instrument/uds_connection"
+require "statsd/instrument/udp_connection"
+require "statsd/instrument/sink"
+require "statsd/instrument/batched_sink"
 require "statsd/instrument/matchers" if defined?(RSpec)
 require "statsd/instrument/railtie" if defined?(Rails::Railtie)
 require "statsd/instrument/strict" if ENV["STATSD_STRICT_MODE"]
data/test/aggregator_test.rb
ADDED
@@ -0,0 +1,142 @@
+# frozen_string_literal: true
+
+require "test_helper"
+
+class AggregatorTest < Minitest::Test
+  def setup
+    @sink = StatsD::Instrument::CaptureSink.new(parent: StatsD::Instrument::NullSink.new)
+    @subject = StatsD::Instrument::Aggregator.new(
+      @sink, StatsD::Instrument::DatagramBuilder, nil, [], flush_interval: 0.1
+    )
+  end
+
+  def teardown
+    @sink.clear
+  end
+
+  def test_increment_simple
+    @subject.increment("foo", 1, tags: { foo: "bar" })
+    @subject.increment("foo", 1, tags: { foo: "bar" })
+    @subject.flush
+
+    datagram = @sink.datagrams.first
+    assert_equal("foo", datagram.name)
+    assert_equal(2, datagram.value)
+    assert_equal(1.0, datagram.sample_rate)
+    assert_equal(["foo:bar"], datagram.tags)
+  end
+
+  def test_distribution_simple
+    @subject.aggregate_timing("foo", 1, tags: { foo: "bar" })
+    @subject.aggregate_timing("foo", 100, tags: { foo: "bar" })
+    @subject.flush
+
+    datagram = @sink.datagrams.first
+    assert_equal("foo", datagram.name)
+    assert_equal(2, datagram.value.size)
+    assert_equal([1.0, 100.0], datagram.value)
+  end
+
+  def test_mixed_type_timings
+    @subject.aggregate_timing("foo_ms", 1, tags: { foo: "bar" }, type: :ms)
+    @subject.aggregate_timing("foo_ms", 100, tags: { foo: "bar" }, type: :ms)
+
+    @subject.aggregate_timing("foo_d", 100, tags: { foo: "bar" }, type: :d)
+    @subject.aggregate_timing("foo_d", 120, tags: { foo: "bar" }, type: :d)
+
+    @subject.flush
+
+    assert_equal(2, @sink.datagrams.size)
+    assert_equal(1, @sink.datagrams.filter { |d| d.name == "foo_ms" }.size)
+    assert_equal(1, @sink.datagrams.filter { |d| d.name == "foo_d" }.size)
+    assert_equal("ms", @sink.datagrams.find { |d| d.name == "foo_ms" }.type.to_s)
+    assert_equal("d", @sink.datagrams.find { |d| d.name == "foo_d" }.type.to_s)
+  end
+
+  def test_gauge_simple
+    @subject.gauge("foo", 1, tags: { foo: "bar" })
+    @subject.gauge("foo", 100, tags: { foo: "bar" })
+    @subject.flush
+
+    datagram = @sink.datagrams.first
+    assert_equal("foo", datagram.name)
+    assert_equal(100, datagram.value)
+    assert_equal(:g, datagram.type)
+  end
+
+  def test_increment_with_tags_in_different_orders
+    @subject.increment("foo", 1, tags: ["tag1:val1", "tag2:val2"])
+    @subject.increment("foo", 1, tags: ["tag2:val2", "tag1:val1"])
+    @subject.flush
+
+    assert_equal(2, @sink.datagrams.first.value)
+  end
+
+  def test_increment_with_different_tag_values
+    @subject.increment("foo", 1, tags: ["tag1:val1", "tag2:val2"])
+    @subject.increment("foo", 1, tags: { tag1: "val1", tag2: "val2" })
+
+    @subject.increment("bar")
+    @subject.flush
+
+    assert_equal(2, @sink.datagrams.first.value)
+    assert_equal(2, @sink.datagrams.size)
+    assert_equal(["tag1:val1", "tag2:val2"], @sink.datagrams.first.tags)
+  end
+
+  def test_increment_with_different_metric_names
+    @subject.increment("foo", 1, tags: ["tag1:val1", "tag2:val2"])
+    @subject.increment("bar", 1, tags: ["tag1:val1", "tag2:val2"])
+    @subject.flush
+
+    assert_equal(1, @sink.datagrams.find { |d| d.name == "foo" }.value)
+    assert_equal(1, @sink.datagrams.find { |d| d.name == "bar" }.value)
+  end
+
+  def test_increment_with_different_values
+    @subject.increment("foo", 1, tags: ["tag1:val1", "tag2:val2"])
+    @subject.increment("foo", 2, tags: ["tag1:val1", "tag2:val2"])
+    @subject.flush
+
+    assert_equal(3, @sink.datagrams.first.value)
+  end
+
+  def test_send_mixed_types_will_pass_through
+    @subject.increment("test_counter", 1, tags: ["tag1:val1", "tag2:val2"])
+    @subject.aggregate_timing("test_counter", 100, tags: ["tag1:val1", "tag2:val2"])
+
+    @subject.gauge("test_gauge", 100, tags: ["tag1:val1", "tag2:val2"])
+    @subject.increment("test_gauge", 1, tags: ["tag1:val1", "tag2:val2"])
+
+    @subject.aggregate_timing("test_timing", 100, tags: ["tag1:val1", "tag2:val2"])
+    @subject.gauge("test_timing", 100, tags: ["tag1:val1", "tag2:val2"])
+    @subject.flush
+
+    assert_equal(6, @sink.datagrams.size)
+
+    assert_equal(2, @sink.datagrams.filter { |d| d.name == "test_counter" }.size)
+    assert_equal(2, @sink.datagrams.filter { |d| d.name == "test_gauge" }.size)
+    assert_equal(2, @sink.datagrams.filter { |d| d.name == "test_timing" }.size)
+
+    assert_equal(:d, @sink.datagrams.find { |d| d.name == "test_timing" }.type)
+    assert_equal(:g, @sink.datagrams.find { |d| d.name == "test_gauge" }.type)
+    assert_equal(:c, @sink.datagrams.find { |d| d.name == "test_counter" }.type)
+  end
+
+  def test_with_prefix
+    aggregator = StatsD::Instrument::Aggregator.new(@sink, StatsD::Instrument::DatagramBuilder, "MyApp", [])
+
+    aggregator.increment("foo", 1, tags: ["tag1:val1", "tag2:val2"])
+    aggregator.increment("foo", 1, tags: ["tag1:val1", "tag2:val2"])
+
+    aggregator.increment("foo", 1, tags: ["tag1:val1", "tag2:val2"], no_prefix: true)
+    aggregator.flush
+
+    assert_equal(2, @sink.datagrams.size)
+    assert_equal("MyApp.foo", @sink.datagrams.first.name)
+    assert_equal(2, @sink.datagrams.first.value)
+
+    assert_equal("foo", @sink.datagrams.last.name)
+    assert_equal(1, @sink.datagrams.last.value)
+  end
+end
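In normal use the aggregator is created by the client when aggregation is enabled, but as the setup above shows it can also be driven directly; a condensed sketch of that pattern:

```ruby
sink = StatsD::Instrument::CaptureSink.new(parent: StatsD::Instrument::NullSink.new)
aggregator = StatsD::Instrument::Aggregator.new(
  sink, StatsD::Instrument::DatagramBuilder, nil, [], flush_interval: 0.1
)

aggregator.increment("requests", 1, tags: { status: "200" })
aggregator.increment("requests", 1, tags: { status: "200" })
aggregator.aggregate_timing("latency", 12.5, tags: { status: "200" }, type: :d)
aggregator.flush

counter = sink.datagrams.find { |d| d.name == "requests" }
counter.value # => 2, both increments were rolled into a single datagram
```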
data/test/client_test.rb
CHANGED
@@ -24,7 +24,7 @@ class ClientTest < Minitest::Test
     assert_equal(["shard:1", "env:production"], client.default_tags)
     assert_equal(StatsD::Instrument::StatsDDatagramBuilder, client.datagram_builder_class)
 
-    assert_kind_of(StatsD::Instrument::BatchedUDPSink, client.sink)
+    assert_kind_of(StatsD::Instrument::BatchedSink, client.sink)
     assert_equal("1.2.3.4", client.sink.host)
     assert_equal(8125, client.sink.port)
   end
@@ -63,6 +63,41 @@ class ClientTest < Minitest::Test
     assert_kind_of(StatsD::Instrument::NullSink, client.sink)
   end
 
+  def test_client_from_env_with_aggregation
+    env = StatsD::Instrument::Environment.new(
+      "STATSD_SAMPLE_RATE" => "0.1",
+      "STATSD_PREFIX" => "foo",
+      "STATSD_DEFAULT_TAGS" => "shard:1,env:production",
+      "STATSD_IMPLEMENTATION" => "statsd",
+      "STATSD_ENABLE_AGGREGATION" => "true",
+      "STATSD_BUFFER_CAPACITY" => "0",
+    )
+    client = StatsD::Instrument::Client.from_env(
+      env,
+      prefix: "bar",
+      implementation: "dogstatsd",
+      sink: StatsD::Instrument::CaptureSink.new(parent: StatsD::Instrument::NullSink.new),
+    )
+
+    assert_equal(0.1, client.default_sample_rate)
+    assert_equal("bar", client.prefix)
+    assert_equal(["shard:1", "env:production"], client.default_tags)
+    assert_equal(StatsD::Instrument::DogStatsDDatagramBuilder, client.datagram_builder_class)
+
+    client.increment("foo", 1, sample_rate: 0.5, tags: { foo: "bar" })
+    client.increment("foo", 1, sample_rate: 0.5, tags: { foo: "bar" })
+
+    client.measure("block_duration_example") { 1 + 1 }
+    client.force_flush
+
+    datagram = client.sink.datagrams.first
+    assert_equal("bar.foo", datagram.name)
+    assert_equal(2, datagram.value)
+
+    datagram = client.sink.datagrams.find { |d| d.name == "bar.block_duration_example" }
+    assert_equal(true, !datagram.nil?)
+  end
+
   def test_capture
     inner_datagrams = nil
 
data/test/datagram_builder_test.rb
CHANGED
@@ -121,4 +121,9 @@ class DatagramBuilderTest < Minitest::Test
     datagram = datagram_builder.c("bar", 1, nil, nil)
     assert_equal("bar:1|c|#foo", datagram)
   end
+
+  def test_timing_with_multiple_values
+    dt = @datagram_builder.timing_value_packed("test_distribution", "d", [0, 3, 6, 9, 12, 15, 18, 21, 24, 27], 1, nil)
+    assert_equal("test_distribution:0:3:6:9:12:15:18:21:24:27|d", dt)
+  end
 end
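The assertion above also documents the packed wire format the aggregator produces for timings: multiple values for the same metric and tag set are joined with colons into one datagram (`name:v1:v2:…|d`) rather than emitted as one datagram per sample.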
data/test/dispatcher_stats_test.rb
ADDED
@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+require "test_helper"
+
+class DispatcherStatsTest < Minitest::Test
+  include StatsD::Instrument::Assertions
+
+  def test_maybe_flush
+    stats = StatsD::Instrument::BatchedSink::DispatcherStats.new(0, "udp")
+
+    stats.increment_synchronous_sends
+    stats.increment_batched_sends(1, 1, 1)
+
+    expectations = [
+      StatsD::Instrument::Expectation.increment("statsd_instrument.batched_udp_sink.synchronous_sends", 1),
+      StatsD::Instrument::Expectation.increment("statsd_instrument.batched_udp_sink.batched_sends", 1),
+      StatsD::Instrument::Expectation.gauge("statsd_instrument.batched_udp_sink.avg_buffer_length", 1),
+      StatsD::Instrument::Expectation.gauge("statsd_instrument.batched_udp_sink.avg_batched_packet_size", 1),
+      StatsD::Instrument::Expectation.gauge("statsd_instrument.batched_udp_sink.avg_batch_length", 1),
+    ]
+    assert_statsd_expectations(expectations) { stats.maybe_flush! }
+    assert_equal(0, stats.instance_variable_get(:@synchronous_sends))
+    assert_equal(0, stats.instance_variable_get(:@batched_sends))
+    assert_equal(0, stats.instance_variable_get(:@avg_buffer_length))
+    assert_equal(0, stats.instance_variable_get(:@avg_batched_packet_size))
+    assert_equal(0, stats.instance_variable_get(:@avg_batch_length))
+
+    stats = StatsD::Instrument::BatchedSink::DispatcherStats.new(1, :udp)
+    stats.increment_batched_sends(1, 1, 1)
+    assert_no_statsd_calls { stats.maybe_flush! }
+  end
+
+  def test_calculations_are_correct
+    stats = StatsD::Instrument::BatchedSink::DispatcherStats.new(0, :udp)
+
+    5.times { stats.increment_synchronous_sends }
+    assert_equal(5, stats.instance_variable_get(:@synchronous_sends))
+
+    batches = [
+      { buffer_len: 100, packet_size: 1472, batch_len: 10 },
+      { buffer_len: 90, packet_size: 1300, batch_len: 20 },
+      { buffer_len: 110, packet_size: 1470, batch_len: 8 },
+      { buffer_len: 500, packet_size: 1000, batch_len: 1 },
+      { buffer_len: 100, packet_size: 30, batch_len: 99 },
+    ]
+    batches.each do |batch|
+      stats.increment_batched_sends(batch[:buffer_len], batch[:packet_size], batch[:batch_len])
+    end
+    assert_equal(batches.length, stats.instance_variable_get(:@batched_sends))
+    assert_equal(
+      batches.map { |b|
+        b[:buffer_len]
+      }.sum / batches.length,
+      stats.instance_variable_get(:@avg_buffer_length),
+    )
+    assert_equal(
+      batches.map { |b|
+        b[:packet_size]
+      }.sum / batches.length,
+      stats.instance_variable_get(:@avg_batched_packet_size),
+    )
+    assert_equal(
+      batches.map { |b|
+        b[:batch_len]
+      }.sum / batches.length,
+      stats.instance_variable_get(:@avg_batch_length),
+    )
+  end
+end
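Working through the sample batches in `test_calculations_are_correct`: the buffer lengths sum to 100 + 90 + 110 + 500 + 100 = 900, so the expected average is 900 / 5 = 180; the packet sizes sum to 5,272, giving 5272 / 5 = 1054 under Ruby's integer division; and the batch lengths sum to 138, giving 138 / 5 = 27.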
data/test/environment_test.rb
CHANGED
@@ -48,12 +48,12 @@ class EnvironmentTest < Minitest::Test
 
   def test_client_from_env_uses_batched_udp_sink_in_staging_environment
     env = StatsD::Instrument::Environment.new("STATSD_USE_NEW_CLIENT" => "1", "STATSD_ENV" => "staging")
-    assert_kind_of(StatsD::Instrument::BatchedUDPSink, env.client.sink)
+    assert_kind_of(StatsD::Instrument::BatchedSink, env.client.sink)
   end
 
   def test_client_from_env_uses_batched_udp_sink_in_production_environment
     env = StatsD::Instrument::Environment.new("STATSD_USE_NEW_CLIENT" => "1", "STATSD_ENV" => "production")
-    assert_kind_of(StatsD::Instrument::BatchedUDPSink, env.client.sink)
+    assert_kind_of(StatsD::Instrument::BatchedSink, env.client.sink)
   end
 
   def test_client_from_env_uses_regular_udp_sink_when_flush_interval_is_0
@@ -65,7 +65,7 @@ class EnvironmentTest < Minitest::Test
       "STATSD_ENV" => "staging",
       "STATSD_FLUSH_INTERVAL" => "0.0",
     )
-    assert_kind_of(StatsD::Instrument::UDPSink, env.client.sink)
+    assert_kind_of(StatsD::Instrument::Sink, env.client.sink)
   end
 
   def test_client_from_env_uses_regular_udp_sink_when_buffer_capacity_is_0
@@ -74,6 +74,6 @@ class EnvironmentTest < Minitest::Test
       "STATSD_ENV" => "staging",
      "STATSD_BUFFER_CAPACITY" => "0",
     )
-    assert_kind_of(StatsD::Instrument::UDPSink, env.client.sink)
+    assert_kind_of(StatsD::Instrument::Sink, env.client.sink)
   end
 end
|