statsd-instrument 3.8.0 → 3.9.0

This diff shows the changes between the publicly released 3.8.0 and 3.9.0 versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -0,0 +1,259 @@
+ # frozen_string_literal: true
+
+ module StatsD
+   module Instrument
+     class AggregationKey
+       attr_reader :name, :tags, :no_prefix, :type, :hash
+
+       def initialize(name, tags, no_prefix, type)
+         @name = name
+         @tags = tags
+         @no_prefix = no_prefix
+         @type = type
+         @hash = [@name, @tags, @no_prefix, @type].hash
+       end
+
+       def ==(other)
+         other.is_a?(self.class) &&
+           @name == other.name &&
+           @tags == other.tags &&
+           @no_prefix == other.no_prefix &&
+           @type == other.type
+       end
+       alias_method :eql?, :==
+     end
+
+     class Aggregator
+       CONST_SAMPLE_RATE = 1.0
+       COUNT = :c
+       DISTRIBUTION = :d
+       MEASURE = :ms
+       HISTOGRAM = :h
+       GAUGE = :g
+
+       class << self
+         def finalize(aggregation_state, sink, datagram_builders, datagram_builder_class, default_tags)
+           proc do
+             aggregation_state.each do |key, agg_value|
+               no_prefix = key.no_prefix
+               datagram_builders[no_prefix] ||= datagram_builder_class.new(
+                 prefix: no_prefix ? nil : @metric_prefix,
+                 default_tags: default_tags,
+               )
+               case key.type
+               when COUNT
+                 sink << datagram_builders[no_prefix].c(
+                   key.name,
+                   agg_value,
+                   CONST_SAMPLE_RATE,
+                   key.tags,
+                 )
+               when DISTRIBUTION, MEASURE, HISTOGRAM
+                 sink << datagram_builders[no_prefix].timing_value_packed(
+                   key.name,
+                   key.type.to_s,
+                   agg_value,
+                   CONST_SAMPLE_RATE,
+                   key.tags,
+                 )
+               when GAUGE
+                 sink << datagram_builders[no_prefix].g(
+                   key.name,
+                   agg_value,
+                   CONST_SAMPLE_RATE,
+                   key.tags,
+                 )
+               else
+                 StatsD.logger.error { "[#{self.class.name}] Unknown aggregation type: #{key.type}" }
+               end
+             end
+             aggregation_state.clear
+           end
+         end
+       end
+
+       # @param sink [#<<] The sink to write the aggregated metrics to.
+       # @param datagram_builder_class [Class] The class to use for building datagrams.
+       # @param prefix [String] The prefix to add to all metrics.
+       # @param default_tags [Array<String>] The tags to add to all metrics.
+       # @param flush_interval [Float] The interval at which to flush the aggregated metrics.
+       # @param max_values [Integer] The maximum number of values to aggregate before flushing.
+       def initialize(sink, datagram_builder_class, prefix, default_tags, flush_interval: 5.0, max_values: 100)
+         @sink = sink
+         @datagram_builder_class = datagram_builder_class
+         @metric_prefix = prefix
+         @default_tags = default_tags
+         @datagram_builders = {
+           true: nil,
+           false: nil,
+         }
+         @max_values = max_values
+
+         # Mutex protects the aggregation_state and flush_thread from concurrent access
+         @mutex = Mutex.new
+         @aggregation_state = {}
+
+         @pid = Process.pid
+         @flush_interval = flush_interval
+         @flush_thread = Thread.new do
+           Thread.current.abort_on_exception = true
+           loop do
+             sleep(@flush_interval)
+             thread_healthcheck
+             flush
+           rescue => e
+             StatsD.logger.error { "[#{self.class.name}] Error in flush thread: #{e}" }
+             raise e
+           end
+         end
+
+         ObjectSpace.define_finalizer(
+           self,
+           self.class.finalize(@aggregation_state, @sink, @datagram_builders, @datagram_builder_class, @default_tags),
+         )
+       end
+
+       # Increment a counter by a given value and save it for later flushing.
+       # @param name [String] The name of the counter.
+       # @param value [Integer] The value to increment the counter by.
+       # @param tags [Hash{String, Symbol => String},Array<String>] The tags to attach to the counter.
+       # @param no_prefix [Boolean] If true, the metric will not be prefixed.
+       # @return [void]
+       def increment(name, value = 1, tags: [], no_prefix: false)
+         unless thread_healthcheck
+           sink << datagram_builder(no_prefix: no_prefix).c(name, value, CONST_SAMPLE_RATE, tags)
+           return
+         end
+
+         tags = tags_sorted(tags)
+         key = packet_key(name, tags, no_prefix, COUNT)
+
+         @mutex.synchronize do
+           @aggregation_state[key] ||= 0
+           @aggregation_state[key] += value
+         end
+       end
+
+       def aggregate_timing(name, value, tags: [], no_prefix: false, type: DISTRIBUTION)
+         unless thread_healthcheck
+           sink << datagram_builder(no_prefix: no_prefix).timing_value_packed(
+             name, type, [value], CONST_SAMPLE_RATE, tags
+           )
+           return
+         end
+
+         tags = tags_sorted(tags)
+         key = packet_key(name, tags, no_prefix, type)
+
+         @mutex.synchronize do
+           values = @aggregation_state[key] ||= []
+           if values.size + 1 >= @max_values
+             do_flush
+           end
+           values << value
+         end
+       end
+
+       def gauge(name, value, tags: [], no_prefix: false)
+         unless thread_healthcheck
+           sink << datagram_builder(no_prefix: no_prefix).g(name, value, CONST_SAMPLE_RATE, tags)
+           return
+         end
+
+         tags = tags_sorted(tags)
+         key = packet_key(name, tags, no_prefix, GAUGE)
+
+         @mutex.synchronize do
+           @aggregation_state[key] = value
+         end
+       end
+
+       def flush
+         @mutex.synchronize { do_flush }
+       end
+
+       private
+
+       EMPTY_ARRAY = [].freeze
+
+       def do_flush
+         @aggregation_state.each do |key, value|
+           case key.type
+           when COUNT
+             @sink << datagram_builder(no_prefix: key.no_prefix).c(
+               key.name,
+               value,
+               CONST_SAMPLE_RATE,
+               key.tags,
+             )
+           when DISTRIBUTION, MEASURE, HISTOGRAM
+             @sink << datagram_builder(no_prefix: key.no_prefix).timing_value_packed(
+               key.name,
+               key.type.to_s,
+               value,
+               CONST_SAMPLE_RATE,
+               key.tags,
+             )
+           when GAUGE
+             @sink << datagram_builder(no_prefix: key.no_prefix).g(
+               key.name,
+               value,
+               CONST_SAMPLE_RATE,
+               key.tags,
+             )
+           else
+             StatsD.logger.error { "[#{self.class.name}] Unknown aggregation type: #{key.type}" }
+           end
+         end
+         @aggregation_state.clear
+       end
+
+       def tags_sorted(tags)
+         return "" if tags.nil? || tags.empty?
+
+         if tags.is_a?(Hash)
+           tags = tags.sort_by { |k, _v| k.to_s }.map! { |k, v| "#{k}:#{v}" }
+         else
+           tags.sort!
+         end
+         datagram_builder(no_prefix: false).normalize_tags(tags)
+       end
+
+       def packet_key(name, tags = "".b, no_prefix = false, type = COUNT)
+         AggregationKey.new(DatagramBuilder.normalize_string(name), tags, no_prefix, type).freeze
+       end
+
+       def datagram_builder(no_prefix:)
+         @datagram_builders[no_prefix] ||= @datagram_builder_class.new(
+           prefix: no_prefix ? nil : @metric_prefix,
+           default_tags: @default_tags,
+         )
+       end
+
+       def thread_healthcheck
+         @mutex.synchronize do
+           unless @flush_thread&.alive?
+             return false unless Thread.main.alive?
+
+             if @pid != Process.pid
+               StatsD.logger.info { "[#{self.class.name}] Restarting the flush thread after fork" }
+               @pid = Process.pid
+               @aggregation_state.clear
+             else
+               StatsD.logger.info { "[#{self.class.name}] Restarting the flush thread" }
+             end
+             @flush_thread = Thread.new do
+               Thread.current.abort_on_exception = true
+               loop do
+                 sleep(@flush_interval)
+                 thread_healthcheck
+                 flush
+               end
+             end
+           end
+           true
+         end
+       end
+     end
+   end
+ end
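
The Aggregator buffers metrics in-process between flushes: counters are summed, gauges keep their last value, and timing-style metrics accumulate into an array that is later emitted as a single packed datagram. A rough usage sketch (the sink, builder arguments, and metric names below are illustrative, not taken from the diff):

    aggregator = StatsD::Instrument::Aggregator.new(
      StatsD::Instrument::CaptureSink.new(parent: StatsD::Instrument::NullSink.new),
      StatsD::Instrument::DogStatsDDatagramBuilder,
      nil, # prefix
      [],  # default_tags
    )
    3.times { aggregator.increment("web.requests", tags: ["env:prod"]) }
    aggregator.aggregate_timing("web.latency", 12.0)
    aggregator.aggregate_timing("web.latency", 15.0)
    aggregator.flush
    # The sink receives roughly:
    #   "web.requests:3|c|#env:prod"
    #   "web.latency:12.0:15.0|d"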
@@ -1,22 +1,30 @@
  # frozen_string_literal: true

+ require "forwardable"
+
  module StatsD
    module Instrument
-     # @note This class is part of the new Client implementation that is intended
-     #   to become the new default in the next major release of this library.
-     class BatchedUDPSink
+     class BatchedSink
+       extend Forwardable
+
+       def_delegator :@sink, :host
+       def_delegator :@sink, :port
+
        DEFAULT_THREAD_PRIORITY = 100
        DEFAULT_BUFFER_CAPACITY = 5_000
        # https://docs.datadoghq.com/developers/dogstatsd/high_throughput/?code-lang=ruby#ensure-proper-packet-sizes
        DEFAULT_MAX_PACKET_SIZE = 1472
        DEFAULT_STATISTICS_INTERVAL = 0 # in seconds, and 0 implies disabled-by-default.

-       attr_reader :host, :port
-
        class << self
          def for_addr(addr, **kwargs)
-           host, port_as_string = addr.split(":", 2)
-           new(host, Integer(port_as_string), **kwargs)
+           if addr.include?(":")
+             sink = StatsD::Instrument::Sink.for_addr(addr)
+             new(sink, **kwargs)
+           else
+             connection = UdsConnection.new(addr)
+             new(connection, **kwargs)
+           end
          end

          def finalize(dispatcher)
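
BatchedSink.for_addr now dispatches on the address format: an address containing a colon is treated as a "host:port" UDP address, while anything else is treated as a Unix domain socket path. For example (addresses are illustrative):

    # UDP, because the address contains a colon:
    udp = StatsD::Instrument::BatchedSink.for_addr("127.0.0.1:8125")

    # UDS, because the address is a bare socket path:
    uds = StatsD::Instrument::BatchedSink.for_addr("/var/run/datadog/dsd.socket")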
@@ -25,18 +33,15 @@ module StatsD
        end

        def initialize(
-         host,
-         port,
+         sink,
          thread_priority: DEFAULT_THREAD_PRIORITY,
          buffer_capacity: DEFAULT_BUFFER_CAPACITY,
          max_packet_size: DEFAULT_MAX_PACKET_SIZE,
          statistics_interval: DEFAULT_STATISTICS_INTERVAL
        )
-         @host = host
-         @port = port
+         @sink = sink
          @dispatcher = Dispatcher.new(
-           host,
-           port,
+           @sink,
            buffer_capacity,
            thread_priority,
            max_packet_size,
@@ -62,6 +67,10 @@ module StatsD
          @dispatcher.flush(blocking: blocking)
        end

+       def connection
+         @sink.connection
+       end
+
        class Buffer < SizedQueue
          def push_nonblock(item)
            push(item, true)
@@ -81,7 +90,7 @@ module StatsD
        end

        class DispatcherStats
-         def initialize(interval)
+         def initialize(interval, type)
            # The number of times the batched udp sender needed to
            # send a statsd line synchronously, due to the buffer
            # being full.
@@ -98,6 +107,12 @@ module StatsD
            # The average number of statsd lines per batch.
            @avg_batch_length = 0

+           @sync_sends_metric = "statsd_instrument.batched_#{type}_sink.synchronous_sends"
+           @batched_sends_metric = "statsd_instrument.batched_#{type}_sink.batched_sends"
+           @avg_buffer_length_metric = "statsd_instrument.batched_#{type}_sink.avg_buffer_length"
+           @avg_batched_packet_size_metric = "statsd_instrument.batched_#{type}_sink.avg_batched_packet_size"
+           @avg_batch_length_metric = "statsd_instrument.batched_#{type}_sink.avg_batch_length"
+
            @mutex = Mutex.new

            @interval = interval
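
These metric names replace the hard-coded "batched_udp_sink" namespace, so each connection type reports its telemetry separately. Assuming the connection's type method returns :uds for a Unix domain socket (the Dispatcher change further down passes @sink.connection.type in here), the stats would come out as, for example:

    stats = DispatcherStats.new(30, :uds)
    # emits under statsd_instrument.batched_uds_sink.batched_sends,
    # statsd_instrument.batched_uds_sink.avg_batch_length, and so on.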
@@ -121,11 +136,11 @@ module StatsD
              @since = Process.clock_gettime(Process::CLOCK_MONOTONIC)
            end

-           StatsD.increment("statsd_instrument.batched_udp_sink.synchronous_sends", synchronous_sends)
-           StatsD.increment("statsd_instrument.batched_udp_sink.batched_sends", batched_sends)
-           StatsD.gauge("statsd_instrument.batched_udp_sink.avg_buffer_length", avg_buffer_length)
-           StatsD.gauge("statsd_instrument.batched_udp_sink.avg_batched_packet_size", avg_batched_packet_size)
-           StatsD.gauge("statsd_instrument.batched_udp_sink.avg_batch_length", avg_batch_length)
+           StatsD.increment(@sync_sends_metric, synchronous_sends)
+           StatsD.increment(@batched_sends_metric, batched_sends)
+           StatsD.gauge(@avg_buffer_length_metric, avg_buffer_length)
+           StatsD.gauge(@avg_batched_packet_size_metric, avg_batched_packet_size)
+           StatsD.gauge(@avg_batch_length_metric, avg_batch_length)
          end

          def increment_synchronous_sends
@@ -143,8 +158,8 @@ module StatsD
        end

        class Dispatcher
-         def initialize(host, port, buffer_capacity, thread_priority, max_packet_size, statistics_interval)
-           @udp_sink = UDPSink.new(host, port)
+         def initialize(sink, buffer_capacity, thread_priority, max_packet_size, statistics_interval)
+           @sink = sink
            @interrupted = false
            @thread_priority = thread_priority
            @max_packet_size = max_packet_size
@@ -153,7 +168,8 @@ module StatsD
            @dispatcher_thread = Thread.new { dispatch }
            @pid = Process.pid
            if statistics_interval > 0
-             @statistics = DispatcherStats.new(statistics_interval)
+             type = @sink.connection.type
+             @statistics = DispatcherStats.new(statistics_interval, type)
            end
          end

@@ -161,7 +177,7 @@ module StatsD
            if !thread_healthcheck || !@buffer.push_nonblock(datagram)
              # The buffer is full or the thread can't be respawned,
              # we'll send the datagram synchronously
-             @udp_sink << datagram
+             @sink << datagram

              @statistics&.increment_synchronous_sends
            end
@@ -206,7 +222,7 @@ module StatsD
            end

            packet_size = packet.bytesize
-           @udp_sink << packet
+           @sink << packet
            packet.clear

            @statistics&.increment_batched_sends(buffer_len, packet_size, batch_len)
@@ -40,6 +40,8 @@ module StatsD
            implementation: implementation,
            sink: sink,
            datagram_builder_class: datagram_builder_class,
+           enable_aggregation: env.experimental_aggregation_enabled?,
+           aggregation_flush_interval: env.aggregation_interval,
          )
        end

@@ -82,7 +84,7 @@ module StatsD
      # Generally, you should use an instance of one of the following classes that
      # ship with this library:
      #
-     # - {StatsD::Instrument::UDPSink} A sink that will actually emit the provided
+     # - {StatsD::Instrument::Sink} A sink that will actually emit the provided
      #   datagrams over UDP.
      # - {StatsD::Instrument::NullSink} A sink that will simply swallow every
      #   datagram. This sink is for use when testing your application.
@@ -152,7 +154,9 @@ module StatsD
        default_tags: nil,
        implementation: "datadog",
        sink: StatsD::Instrument::NullSink.new,
-       datagram_builder_class: self.class.datagram_builder_class_for_implementation(implementation)
+       datagram_builder_class: self.class.datagram_builder_class_for_implementation(implementation),
+       enable_aggregation: false,
+       aggregation_flush_interval: 2.0
      )
        @sink = sink
        @datagram_builder_class = datagram_builder_class
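
Aggregation stays off by default; it is opted into via the two new keyword arguments. A minimal construction sketch (the address is illustrative):

    client = StatsD::Instrument::Client.new(
      sink: StatsD::Instrument::Sink.for_addr("127.0.0.1:8125"),
      enable_aggregation: true,
      aggregation_flush_interval: 2.0, # seconds between background flushes
    )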
@@ -162,6 +166,18 @@ module StatsD
        @default_sample_rate = default_sample_rate

        @datagram_builder = { false => nil, true => nil }
+       @enable_aggregation = enable_aggregation
+       @aggregation_flush_interval = aggregation_flush_interval
+       if @enable_aggregation
+         @aggregator =
+           Aggregator.new(
+             @sink,
+             datagram_builder_class,
+             prefix,
+             default_tags,
+             flush_interval: @aggregation_flush_interval,
+           )
+       end
      end

      # @!group Metric Methods
@@ -201,6 +217,12 @@ module StatsD
      # @return [void]
      def increment(name, value = 1, sample_rate: nil, tags: nil, no_prefix: false)
        sample_rate ||= @default_sample_rate
+
+       if @enable_aggregation
+         @aggregator.increment(name, value, tags: tags, no_prefix: no_prefix)
+         return StatsD::Instrument::VOID
+       end
+
        if sample_rate.nil? || sample?(sample_rate)
          emit(datagram_builder(no_prefix: no_prefix).c(name, value, sample_rate, tags))
        end
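
Note that the aggregation branch returns before the sampling check, so every call is counted and repeated increments collapse into a single datagram per flush interval. For example (client and metric name are hypothetical):

    100.times { client.increment("jobs.processed") }
    # flushed as one "jobs.processed:100|c" datagram rather than
    # 100 separate "jobs.processed:1|c" datagrams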
@@ -219,6 +241,10 @@ module StatsD
          return latency(name, sample_rate: sample_rate, tags: tags, metric_type: :ms, no_prefix: no_prefix, &block)
        end

+       if @enable_aggregation
+         @aggregator.aggregate_timing(name, value, tags: tags, no_prefix: no_prefix, type: :ms)
+         return StatsD::Instrument::VOID
+       end
        sample_rate ||= @default_sample_rate
        if sample_rate.nil? || sample?(sample_rate)
          emit(datagram_builder(no_prefix: no_prefix).ms(name, value, sample_rate, tags))
@@ -240,6 +266,11 @@ module StatsD
      # @param tags (see #increment)
      # @return [void]
      def gauge(name, value, sample_rate: nil, tags: nil, no_prefix: false)
+       if @enable_aggregation
+         @aggregator.gauge(name, value, tags: tags, no_prefix: no_prefix)
+         return StatsD::Instrument::VOID
+       end
+
        sample_rate ||= @default_sample_rate
        if sample_rate.nil? || sample?(sample_rate)
          emit(datagram_builder(no_prefix: no_prefix).g(name, value, sample_rate, tags))
@@ -279,6 +310,11 @@ module StatsD
          return latency(name, sample_rate: sample_rate, tags: tags, metric_type: :d, no_prefix: no_prefix, &block)
        end

+       if @enable_aggregation
+         @aggregator.aggregate_timing(name, value, tags: tags, no_prefix: no_prefix, type: :d)
+         return StatsD::Instrument::VOID
+       end
+
        sample_rate ||= @default_sample_rate
        if sample_rate.nil? || sample?(sample_rate)
          emit(datagram_builder(no_prefix: no_prefix).d(name, value, sample_rate, tags))
@@ -298,6 +334,10 @@ module StatsD
      # @param tags (see #increment)
      # @return [void]
      def histogram(name, value, sample_rate: nil, tags: nil, no_prefix: false)
+       if @enable_aggregation
+         @aggregator.aggregate_timing(name, value, tags: tags, no_prefix: no_prefix, type: :h)
+       end
+
        sample_rate ||= @default_sample_rate
        if sample_rate.nil? || sample?(sample_rate)
          emit(datagram_builder(no_prefix: no_prefix).h(name, value, sample_rate, tags))
@@ -324,11 +364,15 @@ module StatsD
      ensure
        stop = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond)

-       sample_rate ||= @default_sample_rate
-       if sample_rate.nil? || sample?(sample_rate)
-         metric_type ||= datagram_builder(no_prefix: no_prefix).latency_metric_type
-         latency_in_ms = stop - start
-         emit(datagram_builder(no_prefix: no_prefix).send(metric_type, name, latency_in_ms, sample_rate, tags))
+       metric_type ||= datagram_builder(no_prefix: no_prefix).latency_metric_type
+       latency_in_ms = stop - start
+       if @enable_aggregation
+         @aggregator.aggregate_timing(name, latency_in_ms, tags: tags, no_prefix: no_prefix, type: metric_type)
+       else
+         sample_rate ||= @default_sample_rate
+         if sample_rate.nil? || sample?(sample_rate)
+           emit(datagram_builder(no_prefix: no_prefix).send(metric_type, name, latency_in_ms, sample_rate, tags))
+         end
        end
      end
    end
@@ -386,6 +430,18 @@ module StatsD
        ))
      end

+     # Forces the client to flush all metrics that are currently buffered, first flushes the aggregation
+     # if enabled.
+     #
+     # @return [void]
+     def force_flush
+       if @enable_aggregation
+         @aggregator.flush
+       end
+       @sink.flush(blocking: false)
+       StatsD::Instrument::VOID
+     end
+
      NO_CHANGE = Object.new

      # Instantiates a new StatsD client that uses the settings of the current client,
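
force_flush is useful wherever buffered or aggregated metrics would otherwise be lost, for instance just before a process exits. A minimal sketch (client is any instance with aggregation or batching enabled):

    at_exit { client.force_flush }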
@@ -427,6 +483,8 @@ module StatsD
          default_tags: default_tags == NO_CHANGE ? @default_tags : default_tags,
          datagram_builder_class:
            datagram_builder_class == NO_CHANGE ? @datagram_builder_class : datagram_builder_class,
+         enable_aggregation: @enable_aggregation,
+         aggregation_flush_interval: @aggregation_flush_interval,
        )
      end

@@ -31,7 +31,11 @@ module StatsD
      when :c
        Integer(parsed_datagram[:value])
      when :g, :h, :d, :kv, :ms
-       Float(parsed_datagram[:value])
+       if parsed_datagram[:value].include?(":")
+         parsed_datagram[:value].split(":").map { |v| Float(v) }
+       else
+         Float(parsed_datagram[:value])
+       end
      when :s
        String(parsed_datagram[:value])
      else
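
Because aggregated timings are packed into one datagram, a parsed value can now be an array of floats. A sketch of the resulting behavior (datagram strings are illustrative):

    StatsD::Instrument::Datagram.new("latency:12.0:15.0:8.0|d").value
    # => [12.0, 15.0, 8.0]
    StatsD::Instrument::Datagram.new("latency:12.0|d").value
    # => 12.0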
@@ -68,7 +72,7 @@ module StatsD

      PARSER = %r{
        \A
-       (?<name>[^\:\|\@]+)\:(?<value>[^\:\|\@]+)\|(?<type>c|ms|g|s|h|d)
+       (?<name>[^\:\|\@]+)\:(?<value>(?:[^\:\|\@]+:)*[^\:\|\@]+)\|(?<type>c|ms|g|s|h|d)
        (?:\|\@(?<sample_rate>\d*(?:\.\d*)?))?
        (?:\|\#(?<tags>(?:[^\|,]+(?:,[^\|,]+)*)))?
        \n? # In some implementations, the datagram may include a trailing newline.
@@ -5,6 +5,7 @@ module StatsD
    # @note This class is part of the new Client implementation that is intended
    #   to become the new default in the next major release of this library.
    class DatagramBuilder
+     extend Forwardable
      class << self
        def unsupported_datagram_types(*types)
          types.each do |type|
@@ -17,6 +18,11 @@ module StatsD
        def datagram_class
          StatsD::Instrument::Datagram
        end
+
+       def normalize_string(string)
+         string = string.tr("|#", "_") if /[|#]/.match?(string)
+         string
+       end
      end

      def initialize(prefix: nil, default_tags: nil)
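
normalize_string is what the aggregator's packet_key uses to strip characters that would corrupt a datagram. For example:

    StatsD::Instrument::DatagramBuilder.normalize_string("cache|hit#rate")
    # => "cache_hit_rate"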
@@ -48,6 +54,12 @@ module StatsD
        generate_generic_datagram(name, value, "d", sample_rate, tags)
      end

+     def timing_value_packed(name, type, values, sample_rate, tags)
+       # here values is an array
+       values = values.join(":")
+       generate_generic_datagram(name, values, type, sample_rate, tags)
+     end
+
      def kv(name, value, sample_rate, tags)
        generate_generic_datagram(name, value, "kv", sample_rate, tags)
      end
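
timing_value_packed joins all buffered values with ":" so an aggregated series travels as a single datagram. A sketch with the DogStatsD builder (arguments are illustrative):

    builder = StatsD::Instrument::DogStatsDDatagramBuilder.new
    builder.timing_value_packed("latency", "d", [12.0, 15.0, 8.0], 1.0, nil)
    # => "latency:12.0:15.0:8.0|d"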
@@ -56,6 +68,10 @@ module StatsD
        :ms
      end

+     def normalize_tags(tags, buffer = "".b)
+       compile_tags(tags, buffer)
+     end
+
      protected

      # Utility function to remove invalid characters from a StatsD metric name
@@ -88,6 +104,11 @@ module StatsD
      end

      def compile_tags(tags, buffer = "".b)
+       if tags.is_a?(String)
+         tags = self.class.normalize_string(tags) if /[|,]/.match?(tags)
+         buffer << tags
+         return buffer
+       end
        if tags.is_a?(Hash)
          first = true
          tags.each do |key, value|
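
The new String branch lets the aggregator hand compile_tags a tag string it already normalized once per AggregationKey, instead of re-compiling an array or hash on every flush. Reusing the builder from the previous sketch (values are illustrative):

    builder.normalize_tags(["env:prod", "shard:1"]) # => "env:prod,shard:1"
    builder.normalize_tags("env:prod,shard:1")      # => "env:prod,shard:1" (passed through)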