statsd-instrument 3.8.0 → 3.9.1

This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
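The headline change in this release is an opt-in, client-side aggregation layer: instead of emitting one datagram per call, counters are summed, gauges keep their last value, and timing-like metrics are packed into multi-value datagrams that are flushed on a timer. The first hunk below adds the aggregator; the later hunks wire it into the client, the sinks, and the datagram parser. As a hedged sketch of the resulting API (the keyword arguments are taken from the hunks below; the address and prefix are invented for illustration):

    # Sketch only: a client with the new aggregation options enabled.
    client = StatsD::Instrument::Client.new(
      sink: StatsD::Instrument::Sink.for_addr("127.0.0.1:8125"),
      prefix: "my_app",
      enable_aggregation: true,
      aggregation_flush_interval: 2.0,
    )
    client.increment("jobs.processed") # buffered in memory, not sent immediately
    client.force_flush                 # drains the aggregator, then the sink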
@@ -0,0 +1,269 @@
+ # frozen_string_literal: true
+
+ module StatsD
+   module Instrument
+     class AggregationKey
+       attr_reader :name, :tags, :no_prefix, :type, :hash
+
+       def initialize(name, tags, no_prefix, type)
+         @name = name
+         @tags = tags
+         @no_prefix = no_prefix
+         @type = type
+         @hash = [@name, @tags, @no_prefix, @type].hash
+       end
+
+       def ==(other)
+         other.is_a?(self.class) &&
+           @name == other.name &&
+           @tags == other.tags &&
+           @no_prefix == other.no_prefix &&
+           @type == other.type
+       end
+       alias_method :eql?, :==
+     end
+
+     class Aggregator
+       DEFAULT_MAX_CONTEXT_SIZE = 250
+
+       CONST_SAMPLE_RATE = 1.0
+       COUNT = :c
+       DISTRIBUTION = :d
+       MEASURE = :ms
+       HISTOGRAM = :h
+       GAUGE = :g
+       private_constant :COUNT, :DISTRIBUTION, :MEASURE, :HISTOGRAM, :GAUGE, :CONST_SAMPLE_RATE
+
+       class << self
+         def finalize(aggregation_state, sink, datagram_builders, datagram_builder_class, default_tags)
+           proc do
+             aggregation_state.each do |key, agg_value|
+               no_prefix = key.no_prefix
+               datagram_builders[no_prefix] ||= datagram_builder_class.new(
+                 prefix: no_prefix ? nil : @metric_prefix,
+                 default_tags: default_tags,
+               )
+               case key.type
+               when COUNT
+                 sink << datagram_builders[no_prefix].c(
+                   key.name,
+                   agg_value,
+                   CONST_SAMPLE_RATE,
+                   key.tags,
+                 )
+               when DISTRIBUTION, MEASURE, HISTOGRAM
+                 sink << datagram_builders[no_prefix].timing_value_packed(
+                   key.name,
+                   key.type.to_s,
+                   agg_value,
+                   CONST_SAMPLE_RATE,
+                   key.tags,
+                 )
+               when GAUGE
+                 sink << datagram_builders[no_prefix].g(
+                   key.name,
+                   agg_value,
+                   CONST_SAMPLE_RATE,
+                   key.tags,
+                 )
+               else
+                 StatsD.logger.error { "[#{self.class.name}] Unknown aggregation type: #{key.type}" }
+               end
+             end
+             aggregation_state.clear
+           end
+         end
+       end
+
+       # @param sink [#<<] The sink to write the aggregated metrics to.
+       # @param datagram_builder_class [Class] The class to use for building datagrams.
+       # @param prefix [String] The prefix to add to all metrics.
+       # @param default_tags [Array<String>] The tags to add to all metrics.
+       # @param flush_interval [Float] The interval at which to flush the aggregated metrics.
+       # @param max_values [Integer] The maximum number of values to aggregate before flushing.
+       def initialize(
+         sink,
+         datagram_builder_class,
+         prefix,
+         default_tags,
+         flush_interval: 5.0,
+         max_values: DEFAULT_MAX_CONTEXT_SIZE
+       )
+         @sink = sink
+         @datagram_builder_class = datagram_builder_class
+         @metric_prefix = prefix
+         @default_tags = default_tags
+         @datagram_builders = {
+           true: nil,
+           false: nil,
+         }
+         @max_values = max_values
+
+         # Mutex protects the aggregation_state and flush_thread from concurrent access
+         @mutex = Mutex.new
+         @aggregation_state = {}
+
+         @pid = Process.pid
+         @flush_interval = flush_interval
+         @flush_thread = Thread.new do
+           Thread.current.abort_on_exception = true
+           loop do
+             sleep(@flush_interval)
+             thread_healthcheck
+             flush
+           rescue => e
+             StatsD.logger.error { "[#{self.class.name}] Error in flush thread: #{e}" }
+             raise e
+           end
+         end
+
+         ObjectSpace.define_finalizer(
+           self,
+           self.class.finalize(@aggregation_state, @sink, @datagram_builders, @datagram_builder_class, @default_tags),
+         )
+       end
+
+       # Increment a counter by a given value and save it for later flushing.
+       # @param name [String] The name of the counter.
+       # @param value [Integer] The value to increment the counter by.
+       # @param tags [Hash{String, Symbol => String},Array<String>] The tags to attach to the counter.
+       # @param no_prefix [Boolean] If true, the metric will not be prefixed.
+       # @return [void]
+       def increment(name, value = 1, tags: [], no_prefix: false)
+         unless thread_healthcheck
+           sink << datagram_builder(no_prefix: no_prefix).c(name, value, CONST_SAMPLE_RATE, tags)
+           return
+         end
+
+         tags = tags_sorted(tags)
+         key = packet_key(name, tags, no_prefix, COUNT)
+
+         @mutex.synchronize do
+           @aggregation_state[key] ||= 0
+           @aggregation_state[key] += value
+         end
+       end
+
+       def aggregate_timing(name, value, tags: [], no_prefix: false, type: DISTRIBUTION)
+         unless thread_healthcheck
+           sink << datagram_builder(no_prefix: no_prefix).timing_value_packed(
+             name, type, [value], CONST_SAMPLE_RATE, tags
+           )
+           return
+         end
+
+         tags = tags_sorted(tags)
+         key = packet_key(name, tags, no_prefix, type)
+
+         @mutex.synchronize do
+           values = @aggregation_state[key] ||= []
+           if values.size + 1 >= @max_values
+             do_flush
+           end
+           values << value
+         end
+       end
+
+       def gauge(name, value, tags: [], no_prefix: false)
+         unless thread_healthcheck
+           sink << datagram_builder(no_prefix: no_prefix).g(name, value, CONST_SAMPLE_RATE, tags)
+           return
+         end
+
+         tags = tags_sorted(tags)
+         key = packet_key(name, tags, no_prefix, GAUGE)
+
+         @mutex.synchronize do
+           @aggregation_state[key] = value
+         end
+       end
+
+       def flush
+         @mutex.synchronize { do_flush }
+       end
+
+       private
+
+       EMPTY_ARRAY = [].freeze
+
+       def do_flush
+         @aggregation_state.each do |key, value|
+           case key.type
+           when COUNT
+             @sink << datagram_builder(no_prefix: key.no_prefix).c(
+               key.name,
+               value,
+               CONST_SAMPLE_RATE,
+               key.tags,
+             )
+           when DISTRIBUTION, MEASURE, HISTOGRAM
+             @sink << datagram_builder(no_prefix: key.no_prefix).timing_value_packed(
+               key.name,
+               key.type.to_s,
+               value,
+               CONST_SAMPLE_RATE,
+               key.tags,
+             )
+           when GAUGE
+             @sink << datagram_builder(no_prefix: key.no_prefix).g(
+               key.name,
+               value,
+               CONST_SAMPLE_RATE,
+               key.tags,
+             )
+           else
+             StatsD.logger.error { "[#{self.class.name}] Unknown aggregation type: #{key.type}" }
+           end
+         end
+         @aggregation_state.clear
+       end
+
+       def tags_sorted(tags)
+         return "" if tags.nil? || tags.empty?
+
+         if tags.is_a?(Hash)
+           tags = tags.sort_by { |k, _v| k.to_s }.map! { |k, v| "#{k}:#{v}" }
+         else
+           tags.sort!
+         end
+         datagram_builder(no_prefix: false).normalize_tags(tags)
+       end
+
+       def packet_key(name, tags = "".b, no_prefix = false, type = COUNT)
+         AggregationKey.new(DatagramBuilder.normalize_string(name), tags, no_prefix, type).freeze
+       end
+
+       def datagram_builder(no_prefix:)
+         @datagram_builders[no_prefix] ||= @datagram_builder_class.new(
+           prefix: no_prefix ? nil : @metric_prefix,
+           default_tags: @default_tags,
+         )
+       end
+
+       def thread_healthcheck
+         @mutex.synchronize do
+           unless @flush_thread&.alive?
+             return false unless Thread.main.alive?
+
+             if @pid != Process.pid
+               StatsD.logger.info { "[#{self.class.name}] Restarting the flush thread after fork" }
+               @pid = Process.pid
+               @aggregation_state.clear
+             else
+               StatsD.logger.info { "[#{self.class.name}] Restarting the flush thread" }
+             end
+             @flush_thread = Thread.new do
+               Thread.current.abort_on_exception = true
+               loop do
+                 sleep(@flush_interval)
+                 thread_healthcheck
+                 flush
+               end
+             end
+           end
+           true
+         end
+       end
+     end
+   end
+ end
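Taken together, AggregationKey and the mutex-guarded aggregation state give each (name, tags, no_prefix, type) combination a single slot that is drained every flush interval. A hedged illustration of the effect on the wire, reusing the sketch client from above (exact datagram text depends on the datagram builder; names, tags, and values are invented):

    client.increment("http.requests", tags: { status: "200" })
    client.increment("http.requests", tags: { status: "200" })
    client.increment("http.requests", 3, tags: { status: "200" })
    # flushed as one counter datagram, roughly: http.requests:5|c|#status:200

    client.distribution("http.duration", 12.0)
    client.distribution("http.duration", 15.5)
    # flushed as one packed datagram, roughly: http.duration:12.0:15.5|d
    # (see timing_value_packed and the widened PARSER later in this diff)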
@@ -1,22 +1,30 @@
  # frozen_string_literal: true

+ require "forwardable"
+
  module StatsD
    module Instrument
-     # @note This class is part of the new Client implementation that is intended
-     #   to become the new default in the next major release of this library.
-     class BatchedUDPSink
+     class BatchedSink
+       extend Forwardable
+
+       def_delegator :@sink, :host
+       def_delegator :@sink, :port
+
        DEFAULT_THREAD_PRIORITY = 100
        DEFAULT_BUFFER_CAPACITY = 5_000
        # https://docs.datadoghq.com/developers/dogstatsd/high_throughput/?code-lang=ruby#ensure-proper-packet-sizes
        DEFAULT_MAX_PACKET_SIZE = 1472
        DEFAULT_STATISTICS_INTERVAL = 0 # in seconds, and 0 implies disabled-by-default.

-       attr_reader :host, :port
-
        class << self
          def for_addr(addr, **kwargs)
-           host, port_as_string = addr.split(":", 2)
-           new(host, Integer(port_as_string), **kwargs)
+           if addr.include?(":")
+             sink = StatsD::Instrument::Sink.for_addr(addr)
+             new(sink, **kwargs)
+           else
+             connection = UdsConnection.new(addr)
+             new(connection, **kwargs)
+           end
          end

          def finalize(dispatcher)
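BatchedSink now wraps a sink object rather than owning a UDP host/port pair, so UDP and UDS (Unix domain socket) traffic share one batching path: for_addr treats an address containing ":" as host:port and anything else as a socket path. A hedged usage sketch (the addresses are invented):

    udp_sink = StatsD::Instrument::BatchedSink.for_addr("127.0.0.1:8125")
    uds_sink = StatsD::Instrument::BatchedSink.for_addr("/var/run/datadog/dsd.socket")
    udp_sink << "my_app.jobs.processed:1|c" # buffered; the dispatcher thread flushes it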
@@ -25,18 +33,15 @@ module StatsD
        end

        def initialize(
-         host,
-         port,
+         sink,
          thread_priority: DEFAULT_THREAD_PRIORITY,
          buffer_capacity: DEFAULT_BUFFER_CAPACITY,
          max_packet_size: DEFAULT_MAX_PACKET_SIZE,
          statistics_interval: DEFAULT_STATISTICS_INTERVAL
        )
-         @host = host
-         @port = port
+         @sink = sink
          @dispatcher = Dispatcher.new(
-           host,
-           port,
+           @sink,
            buffer_capacity,
            thread_priority,
            max_packet_size,
@@ -62,6 +67,10 @@ module StatsD
          @dispatcher.flush(blocking: blocking)
        end

+       def connection
+         @sink.connection
+       end
+
        class Buffer < SizedQueue
          def push_nonblock(item)
            push(item, true)
@@ -81,7 +90,7 @@ module StatsD
        end

        class DispatcherStats
-         def initialize(interval)
+         def initialize(interval, type)
            # The number of times the batched udp sender needed to
            # send a statsd line synchronously, due to the buffer
            # being full.
@@ -98,6 +107,12 @@ module StatsD
            # The average number of statsd lines per batch.
            @avg_batch_length = 0

+           @sync_sends_metric = "statsd_instrument.batched_#{type}_sink.synchronous_sends"
+           @batched_sends_metric = "statsd_instrument.batched_#{type}_sink.batched_sends"
+           @avg_buffer_length_metric = "statsd_instrument.batched_#{type}_sink.avg_buffer_length"
+           @avg_batched_packet_size_metric = "statsd_instrument.batched_#{type}_sink.avg_batched_packet_size"
+           @avg_batch_length_metric = "statsd_instrument.batched_#{type}_sink.avg_batch_length"
+
            @mutex = Mutex.new

            @interval = interval
@@ -121,11 +136,11 @@ module StatsD
              @since = Process.clock_gettime(Process::CLOCK_MONOTONIC)
            end

-           StatsD.increment("statsd_instrument.batched_udp_sink.synchronous_sends", synchronous_sends)
-           StatsD.increment("statsd_instrument.batched_udp_sink.batched_sends", batched_sends)
-           StatsD.gauge("statsd_instrument.batched_udp_sink.avg_buffer_length", avg_buffer_length)
-           StatsD.gauge("statsd_instrument.batched_udp_sink.avg_batched_packet_size", avg_batched_packet_size)
-           StatsD.gauge("statsd_instrument.batched_udp_sink.avg_batch_length", avg_batch_length)
+           StatsD.increment(@sync_sends_metric, synchronous_sends)
+           StatsD.increment(@batched_sends_metric, batched_sends)
+           StatsD.gauge(@avg_buffer_length_metric, avg_buffer_length)
+           StatsD.gauge(@avg_batched_packet_size_metric, avg_batched_packet_size)
+           StatsD.gauge(@avg_batch_length_metric, avg_batch_length)
          end

          def increment_synchronous_sends
@@ -143,8 +158,8 @@ module StatsD
        end

        class Dispatcher
-         def initialize(host, port, buffer_capacity, thread_priority, max_packet_size, statistics_interval)
-           @udp_sink = UDPSink.new(host, port)
+         def initialize(sink, buffer_capacity, thread_priority, max_packet_size, statistics_interval)
+           @sink = sink
            @interrupted = false
            @thread_priority = thread_priority
            @max_packet_size = max_packet_size
@@ -153,7 +168,8 @@ module StatsD
            @dispatcher_thread = Thread.new { dispatch }
            @pid = Process.pid
            if statistics_interval > 0
-             @statistics = DispatcherStats.new(statistics_interval)
+             type = @sink.connection.type
+             @statistics = DispatcherStats.new(statistics_interval, type)
            end
          end

@@ -161,7 +177,7 @@ module StatsD
            if !thread_healthcheck || !@buffer.push_nonblock(datagram)
              # The buffer is full or the thread can't be respawned,
              # we'll send the datagram synchronously
-             @udp_sink << datagram
+             @sink << datagram

              @statistics&.increment_synchronous_sends
            end
@@ -206,7 +222,7 @@ module StatsD
            end

            packet_size = packet.bytesize
-           @udp_sink << packet
+           @sink << packet
            packet.clear

            @statistics&.increment_batched_sends(buffer_len, packet_size, batch_len)
@@ -40,6 +40,8 @@ module StatsD
              implementation: implementation,
              sink: sink,
              datagram_builder_class: datagram_builder_class,
+             enable_aggregation: env.experimental_aggregation_enabled?,
+             aggregation_flush_interval: env.aggregation_interval,
            )
          end

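Judging by the accessor names, aggregation is switched on from the environment. The exact variable names are not shown in this diff, so the following is an assumption based on experimental_aggregation_enabled? and aggregation_interval:

    # Assumed environment switches, not confirmed by this diff:
    # STATSD_ENABLE_AGGREGATION=true STATSD_AGGREGATION_INTERVAL=2.0 ruby app.rb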
@@ -82,7 +84,7 @@ module StatsD
        # Generally, you should use an instance of one of the following classes that
        # ship with this library:
        #
-       # - {StatsD::Instrument::UDPSink} A sink that will actually emit the provided
+       # - {StatsD::Instrument::Sink} A sink that will actually emit the provided
        #   datagrams over UDP.
        # - {StatsD::Instrument::NullSink} A sink that will simply swallow every
        #   datagram. This sink is for use when testing your application.
@@ -152,7 +154,10 @@ module StatsD
          default_tags: nil,
          implementation: "datadog",
          sink: StatsD::Instrument::NullSink.new,
-         datagram_builder_class: self.class.datagram_builder_class_for_implementation(implementation)
+         datagram_builder_class: self.class.datagram_builder_class_for_implementation(implementation),
+         enable_aggregation: false,
+         aggregation_flush_interval: 2.0,
+         aggregation_max_context_size: StatsD::Instrument::Aggregator::DEFAULT_MAX_CONTEXT_SIZE
        )
          @sink = sink
          @datagram_builder_class = datagram_builder_class
@@ -162,6 +167,19 @@ module StatsD
          @default_sample_rate = default_sample_rate

          @datagram_builder = { false => nil, true => nil }
+         @enable_aggregation = enable_aggregation
+         @aggregation_flush_interval = aggregation_flush_interval
+         if @enable_aggregation
+           @aggregator =
+             Aggregator.new(
+               @sink,
+               datagram_builder_class,
+               prefix,
+               default_tags,
+               flush_interval: @aggregation_flush_interval,
+               max_values: aggregation_max_context_size,
+             )
+         end
        end

        # @!group Metric Methods
@@ -201,6 +219,12 @@ module StatsD
        # @return [void]
        def increment(name, value = 1, sample_rate: nil, tags: nil, no_prefix: false)
          sample_rate ||= @default_sample_rate
+
+         if @enable_aggregation
+           @aggregator.increment(name, value, tags: tags, no_prefix: no_prefix)
+           return StatsD::Instrument::VOID
+         end
+
          if sample_rate.nil? || sample?(sample_rate)
            emit(datagram_builder(no_prefix: no_prefix).c(name, value, sample_rate, tags))
          end
@@ -215,14 +239,28 @@ module StatsD
        # @param tags (see #increment)
        # @return [void]
        def measure(name, value = nil, sample_rate: nil, tags: nil, no_prefix: false, &block)
+         sample_rate ||= @default_sample_rate
+         if sample_rate && !sample?(sample_rate)
+           # For all timing metrics, we have to use the sampling logic.
+           # Not doing so would impact performance and CPU usage.
+           # See Datadog's documentation for more details: https://github.com/DataDog/datadog-go/blob/20af2dbfabbbe6bd0347780cd57ed931f903f223/statsd/aggregator.go#L281-L283
+
+           if block_given?
+             return yield
+           end
+
+           return StatsD::Instrument::VOID
+         end
+
          if block_given?
            return latency(name, sample_rate: sample_rate, tags: tags, metric_type: :ms, no_prefix: no_prefix, &block)
          end

-         sample_rate ||= @default_sample_rate
-         if sample_rate.nil? || sample?(sample_rate)
-           emit(datagram_builder(no_prefix: no_prefix).ms(name, value, sample_rate, tags))
+         if @enable_aggregation
+           @aggregator.aggregate_timing(name, value, tags: tags, no_prefix: no_prefix, type: :ms)
+           return StatsD::Instrument::VOID
          end
+         emit(datagram_builder(no_prefix: no_prefix).ms(name, value, sample_rate, tags))
          StatsD::Instrument::VOID
        end

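Note the behavioral consequence of hoisting the sampling check in #measure: a sampled-out call still executes its block (so the measured work always runs), but skips all datagram and aggregation work. A hedged sketch, where run_query stands in for application code:

    client.measure("db.query", sample_rate: 0.1) do
      run_query # always runs; only ~10% of calls emit or aggregate a timing
    end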
@@ -240,6 +278,11 @@ module StatsD
        # @param tags (see #increment)
        # @return [void]
        def gauge(name, value, sample_rate: nil, tags: nil, no_prefix: false)
+         if @enable_aggregation
+           @aggregator.gauge(name, value, tags: tags, no_prefix: no_prefix)
+           return StatsD::Instrument::VOID
+         end
+
          sample_rate ||= @default_sample_rate
          if sample_rate.nil? || sample?(sample_rate)
            emit(datagram_builder(no_prefix: no_prefix).g(name, value, sample_rate, tags))
@@ -275,14 +318,29 @@ module StatsD
        # @param tags (see #increment)
        # @return [void]
        def distribution(name, value = nil, sample_rate: nil, tags: nil, no_prefix: false, &block)
+         sample_rate ||= @default_sample_rate
+         if sample_rate && !sample?(sample_rate)
+           # For all timing metrics, we have to use the sampling logic.
+           # Not doing so would impact performance and CPU usage.
+           # See Datadog's documentation for more details: https://github.com/DataDog/datadog-go/blob/20af2dbfabbbe6bd0347780cd57ed931f903f223/statsd/aggregator.go#L281-L283
+
+           if block_given?
+             return yield
+           end
+
+           return StatsD::Instrument::VOID
+         end
+
          if block_given?
            return latency(name, sample_rate: sample_rate, tags: tags, metric_type: :d, no_prefix: no_prefix, &block)
          end

-         sample_rate ||= @default_sample_rate
-         if sample_rate.nil? || sample?(sample_rate)
-           emit(datagram_builder(no_prefix: no_prefix).d(name, value, sample_rate, tags))
+         if @enable_aggregation
+           @aggregator.aggregate_timing(name, value, tags: tags, no_prefix: no_prefix, type: :d)
+           return StatsD::Instrument::VOID
          end
+
+         emit(datagram_builder(no_prefix: no_prefix).d(name, value, sample_rate, tags))
          StatsD::Instrument::VOID
        end

@@ -299,9 +357,19 @@ module StatsD
        # @return [void]
        def histogram(name, value, sample_rate: nil, tags: nil, no_prefix: false)
          sample_rate ||= @default_sample_rate
-         if sample_rate.nil? || sample?(sample_rate)
-           emit(datagram_builder(no_prefix: no_prefix).h(name, value, sample_rate, tags))
+         if sample_rate && !sample?(sample_rate)
+           # For all timing metrics, we have to use the sampling logic.
+           # Not doing so would impact performance and CPU usage.
+           # See Datadog's documentation for more details: https://github.com/DataDog/datadog-go/blob/20af2dbfabbbe6bd0347780cd57ed931f903f223/statsd/aggregator.go#L281-L283
+           return StatsD::Instrument::VOID
+         end
+
+         if @enable_aggregation
+           @aggregator.aggregate_timing(name, value, tags: tags, no_prefix: no_prefix, type: :h)
+           return StatsD::Instrument::VOID
          end
+
+         emit(datagram_builder(no_prefix: no_prefix).h(name, value, sample_rate, tags))
          StatsD::Instrument::VOID
        end

@@ -324,11 +392,15 @@ module StatsD
          ensure
            stop = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond)

-           sample_rate ||= @default_sample_rate
-           if sample_rate.nil? || sample?(sample_rate)
-             metric_type ||= datagram_builder(no_prefix: no_prefix).latency_metric_type
-             latency_in_ms = stop - start
-             emit(datagram_builder(no_prefix: no_prefix).send(metric_type, name, latency_in_ms, sample_rate, tags))
+           metric_type ||= datagram_builder(no_prefix: no_prefix).latency_metric_type
+           latency_in_ms = stop - start
+           if @enable_aggregation
+             @aggregator.aggregate_timing(name, latency_in_ms, tags: tags, no_prefix: no_prefix, type: metric_type)
+           else
+             sample_rate ||= @default_sample_rate
+             if sample_rate.nil? || sample?(sample_rate)
+               emit(datagram_builder(no_prefix: no_prefix).send(metric_type, name, latency_in_ms, sample_rate, tags))
+             end
            end
          end
        end
@@ -386,6 +458,18 @@ module StatsD
          ))
        end

+       # Forces the client to flush all metrics that are currently buffered, first flushes the aggregation
+       # if enabled.
+       #
+       # @return [void]
+       def force_flush
+         if @enable_aggregation
+           @aggregator.flush
+         end
+         @sink.flush(blocking: false)
+         StatsD::Instrument::VOID
+       end
+
        NO_CHANGE = Object.new

        # Instantiates a new StatsD client that uses the settings of the current client,
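Since aggregated metrics live in memory until the next flush tick, a hedged shutdown hook along these lines keeps the tail of a process's metrics from being dropped (assuming the library's usual StatsD.singleton_client accessor):

    at_exit { StatsD.singleton_client&.force_flush }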
@@ -427,6 +511,8 @@ module StatsD
            default_tags: default_tags == NO_CHANGE ? @default_tags : default_tags,
            datagram_builder_class:
              datagram_builder_class == NO_CHANGE ? @datagram_builder_class : datagram_builder_class,
+           enable_aggregation: @enable_aggregation,
+           aggregation_flush_interval: @aggregation_flush_interval,
          )
        end

@@ -31,7 +31,11 @@ module StatsD
          when :c
            Integer(parsed_datagram[:value])
          when :g, :h, :d, :kv, :ms
-           Float(parsed_datagram[:value])
+           if parsed_datagram[:value].include?(":")
+             parsed_datagram[:value].split(":").map { |v| Float(v) }
+           else
+             Float(parsed_datagram[:value])
+           end
          when :s
            String(parsed_datagram[:value])
          else
@@ -68,7 +72,7 @@ module StatsD

        PARSER = %r{
          \A
-         (?<name>[^\:\|\@]+)\:(?<value>[^\:\|\@]+)\|(?<type>c|ms|g|s|h|d)
+         (?<name>[^\:\|\@]+)\:(?<value>(?:[^\:\|\@]+:)*[^\:\|\@]+)\|(?<type>c|ms|g|s|h|d)
          (?:\|\@(?<sample_rate>\d*(?:\.\d*)?))?
          (?:\|\#(?<tags>(?:[^\|,]+(?:,[^\|,]+)*)))?
          \n? # In some implementations, the datagram may include a trailing newline.
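The widened value group accepts the colon-packed multi-value datagrams that the aggregator emits, and the value-parsing change above returns them as an array. A hedged example (the datagram text is invented):

    StatsD::Instrument::Datagram.new("page.load:12.0:15.5|d").value
    # => [12.0, 15.5]
    StatsD::Instrument::Datagram.new("page.load:12.0|d").value
    # => 12.0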
@@ -5,6 +5,7 @@ module StatsD
      # @note This class is part of the new Client implementation that is intended
      #   to become the new default in the next major release of this library.
      class DatagramBuilder
+       extend Forwardable
        class << self
          def unsupported_datagram_types(*types)
            types.each do |type|
@@ -17,6 +18,11 @@ module StatsD
          def datagram_class
            StatsD::Instrument::Datagram
          end
+
+         def normalize_string(string)
+           string = string.tr("|#", "_") if /[|#]/.match?(string)
+           string
+         end
        end

        def initialize(prefix: nil, default_tags: nil)
@@ -48,6 +54,12 @@ module StatsD
          generate_generic_datagram(name, value, "d", sample_rate, tags)
        end

+       def timing_value_packed(name, type, values, sample_rate, tags)
+         # here values is an array
+         values = values.join(":")
+         generate_generic_datagram(name, values, type, sample_rate, tags)
+       end
+
        def kv(name, value, sample_rate, tags)
          generate_generic_datagram(name, value, "kv", sample_rate, tags)
        end
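timing_value_packed is the serializer behind the aggregator's flush: it joins the buffered values with ":" and hands them to the existing generic datagram path. A hedged example of its output (tag and sample-rate formatting come from generate_generic_datagram, which this diff does not change; the metric and values are invented):

    builder = StatsD::Instrument::DatagramBuilder.new
    builder.timing_value_packed("page.load", "d", [120.0, 95.5], 1.0, nil)
    # => roughly "page.load:120.0:95.5|d"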
@@ -56,6 +68,10 @@ module StatsD
          :ms
        end

+       def normalize_tags(tags, buffer = "".b)
+         compile_tags(tags, buffer)
+       end
+
        protected

        # Utility function to remove invalid characters from a StatsD metric name
@@ -88,6 +104,11 @@ module StatsD
        end

        def compile_tags(tags, buffer = "".b)
+         if tags.is_a?(String)
+           tags = self.class.normalize_string(tags) if /[|,]/.match?(tags)
+           buffer << tags
+           return buffer
+         end
          if tags.is_a?(Hash)
            first = true
            tags.each do |key, value|
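The new String branch is a fast path for the aggregator, which pre-normalizes its tags into a sorted, comma-joined string once per aggregation key (see tags_sorted in the first hunk): on flush the string is appended verbatim, scrubbed by normalize_string only when it contains characters that would corrupt the datagram, instead of re-walking a Hash or Array on every call. A hedged illustration with an invented tag string:

    builder.send(:compile_tags, "shard:1,status:200")
    # => appends "shard:1,status:200" to the buffer unchanged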