statsd-instrument 3.1.2 → 3.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 85b73b161dac9bc8839c3b820e9ea038d05078519fca692e971a48be7077e799
-  data.tar.gz: e2d5bfa763a5d53537494c8a5006327a93967d4d2b63f74a685094c551815c81
+  metadata.gz: 62f90038a90bccc54c0fe5ac2fe1a2449e8662183e9b9a0cac209e5ae22a07c7
+  data.tar.gz: e620363a10bff05710ce52f6869364e378a0979e2ef53cc86c56f8c7fe491d91
 SHA512:
-  metadata.gz: e6db90f921635692d1d19145e3b346029886022c679649fac3e6f5641411ddc557fcd2028baff7be4e0b47a23e53f97e4afe9af4ecb7e537dc0850b32352fbec
-  data.tar.gz: 6c8f12361a70596d49dc84b3feb6637bc0ff7e9d62f1e6e7876ec02111cc94a2b561947ac4bfda045f612f57951f9789e578447e67a0b8bda4beec4f0fc76576
+  metadata.gz: 18836250885562c7862db1e515c8f8433e43cf795b886800e8a40fae7e7ebead2a120656e6f7654d5c3b87c9f364d3861441593f42dfda7fa69479146b800842
+  data.tar.gz: 6915ee31b5bab72a8d52ef588f6fa90e1df4e821e1aaa96cf7523b1392d49bdc56003de4e3f09e818aee21e2d094e9671b2b2297e7b3f597ca193dafbf03824a
data/.github/workflows/benchmark.yml CHANGED
@@ -5,18 +5,16 @@ on: push
 jobs:
   test:
     name: Send metric over UDP
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
 
     steps:
     - uses: actions/checkout@v1
 
-    - name: Setup Ruby
-      uses: actions/setup-ruby@v1
+    - name: Set up Ruby
+      uses: ruby/setup-ruby@v1
       with:
         ruby-version: 2.6
-
-    - name: Install dependencies
-      run: gem install bundler && bundle install --jobs 4 --retry 3
+        bundler-cache: true
 
     - name: Run benchmark on branch
       run: benchmark/send-metrics-to-local-udp-receiver
data/.github/workflows/cla.yml ADDED
@@ -0,0 +1,22 @@
+name: Contributor License Agreement (CLA)
+
+on:
+  pull_request_target:
+    types: [opened, synchronize]
+  issue_comment:
+    types: [created]
+
+jobs:
+  cla:
+    runs-on: ubuntu-latest
+    if: |
+      (github.event.issue.pull_request
+        && !github.event.issue.pull_request.merged_at
+        && contains(github.event.comment.body, 'signed')
+      )
+      || (github.event.pull_request && !github.event.pull_request.merged)
+    steps:
+      - uses: Shopify/shopify-cla-action@v1
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          cla-token: ${{ secrets.CLA_TOKEN }}
data/.github/workflows/lint.yml CHANGED
@@ -5,18 +5,16 @@ on: push
 jobs:
   test:
     name: Rubocop
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
 
     steps:
     - uses: actions/checkout@v1
 
-    - name: Setup Ruby
-      uses: actions/setup-ruby@v1
+    - name: Set up Ruby
+      uses: ruby/setup-ruby@v1
       with:
-        ruby-version: 2.7
-
-    - name: Install dependencies
-      run: gem install bundler && bundle install --jobs 4 --retry 3
+        ruby-version: 2.6
+        bundler-cache: true
 
     - name: Run Rubocop
       run: bin/rubocop
data/.github/workflows/tests.yml CHANGED
@@ -4,12 +4,12 @@ on: push
 
 jobs:
   test:
-    name: Ruby ${{ matrix.ruby }} on ubuntu-18.04
-    runs-on: ubuntu-18.04
+    name: Ruby ${{ matrix.ruby }} on ubuntu-latest
+    runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       matrix:
-        ruby: ['2.6', '2.7', '3.0']
+        ruby: ['2.6', '2.7', '3.0', '3.1']
 
         # Windows on macOS builds started failing, so they are disabled for noew
         # platform: [windows-2019, macOS-10.14, ubuntu-18.04]
@@ -19,13 +19,11 @@ jobs:
     steps:
     - uses: actions/checkout@v1
 
-    - name: Setup Ruby
-      uses: actions/setup-ruby@v1
+    - name: Set up Ruby
+      uses: ruby/setup-ruby@v1
       with:
         ruby-version: ${{ matrix.ruby }}
-
-    - name: Install dependencies
-      run: gem install bundler && bundle install --jobs 4 --retry 3
+        bundler-cache: true
 
     - name: Run test suite
-      run: rake test
+      run: bundle exec rake test
data/.rubocop.yml CHANGED
@@ -17,6 +17,12 @@ Naming/FileName:
   Exclude:
     - lib/statsd-instrument.rb
 
+Metrics/ParameterLists:
+  Enabled: false
+
+Style/WhileUntilModifier:
+  Enabled: false
+
 # Enable our own cops on our own repo
 
 StatsD/MetricReturnValue:
data/CHANGELOG.md CHANGED
@@ -6,7 +6,23 @@ section below.
 
 ### Unreleased changes
 
-_Nothing yet_
+## Version 3.3.0
+
+- UDP Batching now has a max queue size and emitter threads will block if the queue
+  reaches the limit. This is to prevent the queue from growing unbounded.
+  More generally the UDP batching mode was optimized to improve throughput and to
+  flush the queue more eagerly (#309).
+- Added `STATSD_BUFFER_CAPACITY` configuration.
+- Added `STATSD_MAX_PACKET_SIZE` configuration.
+- Require `set` explicitly, to avoid breaking tests for users of this library (#311)
+
+## Version 3.2.1
+
+- Fix a bug in UDP batching that could cause the Ruby process to be stuck on exit (#291).
+
+## Version 3.2.0
+
+- Add `tag_error_class` option to `statsd_count_success` which tags the class of a thrown error
 
 ## Version 3.1.2
 
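For readers upgrading, here is a minimal sketch of how the batching settings introduced in 3.3.0 can be exercised through the environment-driven setup this gem documents. The address and values are illustrative only, not recommendations:

```ruby
require "statsd-instrument"

# Illustrative values; the defaults are STATSD_BUFFER_CAPACITY=5000 and
# STATSD_MAX_PACKET_SIZE=1472. A flush interval > 0.0 selects the batched
# UDP sink; "0" falls back to the synchronous UDPSink.
client = StatsD::Instrument::Environment.new(
  ENV.to_h.merge(
    "STATSD_ENV" => "production",         # production/staging actually send UDP
    "STATSD_ADDR" => "127.0.0.1:8125",    # hypothetical StatsD address
    "STATSD_FLUSH_INTERVAL" => "1.0",
    "STATSD_BUFFER_CAPACITY" => "10000",  # emitter threads block past this size
    "STATSD_MAX_PACKET_SIZE" => "1472",
  ),
).client

client.increment("jobs.processed")
```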
data/Gemfile CHANGED
@@ -8,6 +8,6 @@ gem "minitest"
 gem "rspec"
 gem "mocha"
 gem "yard"
-gem "rubocop", ">= 1.0"
+gem "rubocop", [">= 1.0", "< 1.30"] # TODO: Our cops are broken by rubocop 1.30, we need to figure out why
 gem "rubocop-shopify", require: false
 gem "benchmark-ips"
data/README.md CHANGED
@@ -27,7 +27,7 @@ The following environment variables are supported:
   explicitly, this will be determined based on other environment variables,
   like `RAILS_ENV` or `ENV`. The library will behave differently:
 
-  - In the **production** and **staging** environment, thre library will
+  - In the **production** and **staging** environment, the library will
     actually send UDP packets.
   - In the **test** environment, it will swallow all calls, but allows you to
     capture them for testing purposes. See below for notes on writing tests.
@@ -45,6 +45,14 @@ The following environment variables are supported:
 - `STATSD_FLUSH_INTERVAL`: (default: `1.0`) The interval in seconds at which
   events are sent in batch. Only applicable to the UDP configuration. If set
   to `0.0`, metrics are sent immediately.
+- `STATSD_BUFFER_CAPACITY`: (default: `5000`) The maximum amount of events that
+  may be buffered before emitting threads will start to block. Increasing this
+  value may help for application generating spikes of events. However if the
+  application emit events faster than they can be sent, increasing it won't help.
+- `STATSD_MAX_PACKET_SIZE`: (default: `1472`) The maximum size of UDP packets.
+  If your network is properly configured to handle larger packets you may try
+  to increase this value for better performance, but most network can't handle
+  larger packets.
 
 ## StatsD keys
 
data/benchmark/send-metrics-to-local-udp-receiver CHANGED
@@ -16,28 +16,47 @@ else
 end
 
 intermediate_results_filename = "#{Dir.tmpdir}/statsd-instrument-benchmarks/#{File.basename($PROGRAM_NAME)}"
+log_filename = "#{Dir.tmpdir}/statsd-instrument-benchmarks/#{File.basename($PROGRAM_NAME)}.log"
 FileUtils.mkdir_p(File.dirname(intermediate_results_filename))
 
 # Set up an UDP listener to which we can send StatsD packets
 receiver = UDPSocket.new
 receiver.bind("localhost", 0)
 
-StatsD.singleton_client = StatsD::Instrument::Environment.new(
+log_file = File.open(log_filename, "w+", level: Logger::WARN)
+StatsD.logger = Logger.new(log_file)
+
+udp_client = StatsD::Instrument::Environment.new(ENV.to_h.merge(
   "STATSD_ADDR" => "#{receiver.addr[2]}:#{receiver.addr[1]}",
   "STATSD_IMPLEMENTATION" => "dogstatsd",
   "STATSD_ENV" => "production",
-).client
+  "STATSD_FLUSH_INTERVAL" => "0",
+)).client
+
+batched_udp_client = StatsD::Instrument::Environment.new(ENV.to_h.merge(
+  "STATSD_ADDR" => "#{receiver.addr[2]}:#{receiver.addr[1]}",
+  "STATSD_IMPLEMENTATION" => "dogstatsd",
+  "STATSD_ENV" => "production",
+)).client
+
+def send_metrics(client)
+  client.increment("StatsD.increment", 10)
+  client.measure("StatsD.measure") { 1 + 1 }
+  client.gauge("StatsD.gauge", 12.0, tags: ["foo:bar", "quc"])
+  client.set("StatsD.set", "value", tags: { foo: "bar", baz: "quc" })
+  if client.datagram_builder_class == StatsD::Instrument::DogStatsDDatagramBuilder
+    client.event("StasD.event", "12345")
+    client.service_check("StatsD.service_check", "ok")
+  end
+end
 
 report = Benchmark.ips do |bench|
-  bench.report("StatsD metrics to local UDP receiver (branch: #{branch}, sha: #{revision[0, 7]})") do
-    StatsD.increment("StatsD.increment", 10)
-    StatsD.measure("StatsD.measure") { 1 + 1 }
-    StatsD.gauge("StatsD.gauge", 12.0, tags: ["foo:bar", "quc"])
-    StatsD.set("StatsD.set", "value", tags: { foo: "bar", baz: "quc" })
-    if StatsD.singleton_client.datagram_builder_class == StatsD::Instrument::DogStatsDDatagramBuilder
-      StatsD.event("StasD.event", "12345")
-      StatsD.service_check("StatsD.service_check", "ok")
-    end
+  bench.report("local UDP sync (branch: #{branch}, sha: #{revision[0, 7]})") do
+    send_metrics(udp_client)
+  end
+
+  bench.report("local UDP batched (branch: #{branch}, sha: #{revision[0, 7]})") do
+    send_metrics(batched_udp_client)
   end
 
   # Store the results in between runs
@@ -57,3 +76,11 @@ elsif ENV["KEEP_RESULTS"]
 else
   File.unlink(intermediate_results_filename)
 end
+
+log_file.close
+logs = File.read(log_filename)
+unless logs.empty?
+  puts
+  puts "==== logs ===="
+  puts logs
+end
data/lib/statsd/instrument/assertions.rb CHANGED
@@ -57,6 +57,7 @@ module StatsD
     def assert_no_statsd_calls(*metric_names, datagrams: nil, client: nil, &block)
       if datagrams.nil?
         raise LocalJumpError, "assert_no_statsd_calls requires a block" unless block_given?
+
         datagrams = capture_statsd_datagrams_with_exception_handling(client: client, &block)
       end
 
@@ -152,6 +153,7 @@ module StatsD
     def assert_statsd_expectations(expectations, datagrams: nil, client: nil, &block)
       if datagrams.nil?
         raise LocalJumpError, "assert_statsd_expectations requires a block" unless block_given?
+
        datagrams = capture_statsd_datagrams_with_exception_handling(client: client, &block)
       end
 
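Both methods touched here are part of the public test helpers; the added blank lines are cosmetic. A short usage sketch, with a hypothetical test and metric name:

```ruby
require "minitest/autorun"
require "statsd/instrument"

class CacheTest < Minitest::Test
  include StatsD::Instrument::Assertions

  def test_cache_hit_emits_no_metrics
    # Without a block (and without datagrams:), the guard above raises LocalJumpError.
    assert_no_statsd_calls("cache.miss") do
      # code under test goes here
    end
  end
end
```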
data/lib/statsd/instrument/batched_udp_sink.rb CHANGED
@@ -6,11 +6,15 @@ module StatsD
     # to become the new default in the next major release of this library.
     class BatchedUDPSink
       DEFAULT_FLUSH_INTERVAL = 1.0
-      MAX_PACKET_SIZE = 508
+      DEFAULT_THREAD_PRIORITY = 100
+      DEFAULT_FLUSH_THRESHOLD = 50
+      DEFAULT_BUFFER_CAPACITY = 5_000
+      # https://docs.datadoghq.com/developers/dogstatsd/high_throughput/?code-lang=ruby#ensure-proper-packet-sizes
+      DEFAULT_MAX_PACKET_SIZE = 1472
 
-      def self.for_addr(addr, flush_interval: DEFAULT_FLUSH_INTERVAL)
+      def self.for_addr(addr, **kwargs)
         host, port_as_string = addr.split(":", 2)
-        new(host, Integer(port_as_string), flush_interval: flush_interval)
+        new(host, Integer(port_as_string), **kwargs)
       end
 
       attr_reader :host, :port
@@ -21,10 +25,26 @@ module StatsD
         end
       end
 
-      def initialize(host, port, flush_interval: DEFAULT_FLUSH_INTERVAL)
+      def initialize(
+        host,
+        port,
+        flush_interval: DEFAULT_FLUSH_INTERVAL,
+        thread_priority: DEFAULT_THREAD_PRIORITY,
+        flush_threshold: DEFAULT_FLUSH_THRESHOLD,
+        buffer_capacity: DEFAULT_BUFFER_CAPACITY,
+        max_packet_size: DEFAULT_MAX_PACKET_SIZE
+      )
         @host = host
         @port = port
-        @dispatcher = Dispatcher.new(host, port, flush_interval)
+        @dispatcher = Dispatcher.new(
+          host,
+          port,
+          flush_interval,
+          flush_threshold,
+          buffer_capacity,
+          thread_priority,
+          max_packet_size,
+        )
         ObjectSpace.define_finalizer(self, self.class.finalize(@dispatcher))
       end
 
@@ -37,6 +57,10 @@ module StatsD
         self
       end
 
+      def shutdown(*args)
+        @dispatcher.shutdown(*args)
+      end
+
       class Dispatcher
         BUFFER_CLASS = if !::Object.const_defined?(:RUBY_ENGINE) || RUBY_ENGINE == "ruby"
           ::Array
@@ -50,24 +74,54 @@ module StatsD
           Concurrent::Array
         end
 
-        def initialize(host, port, flush_interval)
+        def initialize(host, port, flush_interval, flush_threshold, buffer_capacity, thread_priority, max_packet_size)
          @host = host
          @port = port
          @interrupted = false
          @flush_interval = flush_interval
+          @flush_threshold = flush_threshold
+          @buffer_capacity = buffer_capacity
+          @thread_priority = thread_priority
+          @max_packet_size = max_packet_size
          @buffer = BUFFER_CLASS.new
          @dispatcher_thread = Thread.new { dispatch }
+          @pid = Process.pid
+          @monitor = Monitor.new
+          @condition = @monitor.new_cond
        end
 
        def <<(datagram)
-          unless @dispatcher_thread&.alive?
-            # If the dispatcher thread is dead, we assume it is because
-            # the process was forked. So to avoid ending datagrams twice
-            # we clear the buffer.
-            @buffer.clear
-            @dispatcher_thread = Thread.new { dispatch }
+          if thread_healthcheck
+            @buffer << datagram
+
+            # To avoid sending too many signals when the thread is already flushing
+            # We only signal when the queue size is a multiple of `flush_threshold`
+            if @buffer.size % @flush_threshold == 0
+              wakeup_thread
+            end
+
+            # A SizedQueue would be perfect, except that it doesn't have a timeout
+            # Ref: https://bugs.ruby-lang.org/issues/18774
+            if @buffer.size >= @buffer_capacity
+              StatsD.logger.warn do
+                "[#{self.class.name}] Max buffer size reached (#{@buffer_capacity}), pausing " \
+                  "thread##{Thread.current.object_id}"
+              end
+              before = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond)
+              @monitor.synchronize do
+                while @buffer.size >= @buffer_capacity && @dispatcher_thread.alive?
+                  @condition.wait(0.01)
+                end
+              end
+              duration = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond) - before
+              StatsD.logger.warn do
+                "[#{self.class.name}] thread##{Thread.current.object_id} resumed after #{duration.round(2)}ms"
+              end
+            end
+          else
+            flush
          end
-          @buffer << datagram
+
          self
        end
 
@@ -82,6 +136,24 @@ module StatsD
 
        private
 
+        def wakeup_thread
+          begin
+            @monitor.synchronize do
+              @condition.signal
+            end
+          rescue ThreadError
+            # Can't synchronize from trap context
+            Thread.new { wakeup_thread }.join
+            return
+          end
+
+          begin
+            @dispatcher_thread&.run
+          rescue ThreadError # Somehow the thread just died
+            thread_healthcheck
+          end
+        end
+
        NEWLINE = "\n".b.freeze
        def flush
          return if @buffer.empty?
@@ -89,24 +161,54 @@ module StatsD
          datagrams = @buffer.shift(@buffer.size)
 
          until datagrams.empty?
-            packet = String.new(datagrams.pop, encoding: Encoding::BINARY, capacity: MAX_PACKET_SIZE)
+            packet = String.new(datagrams.shift, encoding: Encoding::BINARY, capacity: @max_packet_size)
 
-            until datagrams.empty? || packet.bytesize + datagrams.first.bytesize + 1 > MAX_PACKET_SIZE
+            until datagrams.empty? || packet.bytesize + datagrams.first.bytesize + 1 > @max_packet_size
              packet << NEWLINE << datagrams.shift
            end
-
            send_packet(packet)
          end
        end
 
+        def thread_healthcheck
+          # TODO: We have a race condition on JRuby / Truffle here. It could cause multiple
+          # dispatcher threads to be spawned, which would cause problems.
+          # However we can't simply lock here as we might be called from a trap context.
+          unless @dispatcher_thread&.alive?
+            # If the main the main thread is dead the VM is shutting down so we won't be able
+            # to spawn a new thread, so we fallback to sending our datagram directly.
+            return false unless Thread.main.alive?
+
+            # If the dispatcher thread is dead, it might be because the process was forked.
+            # So to avoid sending datagrams twice we clear the buffer.
+            if @pid != Process.pid
+              StatsD.logger.info { "[#{self.class.name}] Restarting the dispatcher thread after fork" }
+              @pid = Process.pid
+              @buffer.clear
+            else
+              StatsD.logger.info { "[#{self.class.name}] Restarting the dispatcher thread" }
+            end
+            @dispatcher_thread = Thread.new { dispatch }.tap { |t| t.priority = @thread_priority }
+          end
+          true
+        end
+
        def dispatch
          until @interrupted
            begin
              start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
              flush
+
+              # Other threads may have queued more events while we were doing IO
+              flush while @buffer.size > @flush_threshold
+
              next_sleep_duration = @flush_interval - (Process.clock_gettime(Process::CLOCK_MONOTONIC) - start)
 
-              sleep(next_sleep_duration) if next_sleep_duration > 0
+              if next_sleep_duration > 0
+                @monitor.synchronize do
+                  @condition.wait(next_sleep_duration)
+                end
+              end
            rescue => error
              report_error(error)
            end
@@ -124,19 +226,21 @@ module StatsD
 
        def send_packet(packet)
          retried = false
-          socket.send(packet, 0)
-        rescue SocketError, IOError, SystemCallError => error
-          StatsD.logger.debug do
-            "[#{self.class.name}] Resetting connection because of #{error.class}: #{error.message}"
-          end
-          invalidate_socket
-          if retried
-            StatsD.logger.warning do
-              "[#{self.class.name}] Events were dropped because of #{error.class}: #{error.message}"
+          begin
+            socket.send(packet, 0)
+          rescue SocketError, IOError, SystemCallError => error
+            StatsD.logger.debug do
+              "[#{self.class.name}] Resetting connection because of #{error.class}: #{error.message}"
+            end
+            invalidate_socket
+            if retried
+              StatsD.logger.warn do
+                "[#{self.class.name}] Events were dropped because of #{error.class}: #{error.message}"
+              end
+            else
+              retried = true
+              retry
            end
-          else
-            retried = true
-            retry
          end
        end
 
data/lib/statsd/instrument/client.rb CHANGED
@@ -200,6 +200,7 @@ module StatsD
      def increment(name, value = 1, sample_rate: nil, tags: nil, no_prefix: false)
        sample_rate ||= @default_sample_rate
        return StatsD::Instrument::VOID unless sample?(sample_rate)
+
        emit(datagram_builder(no_prefix: no_prefix).c(name, value, sample_rate, tags))
      end
 
@@ -217,6 +218,7 @@ module StatsD
 
        sample_rate ||= @default_sample_rate
        return StatsD::Instrument::VOID unless sample?(sample_rate)
+
        emit(datagram_builder(no_prefix: no_prefix).ms(name, value, sample_rate, tags))
      end
 
@@ -236,6 +238,7 @@ module StatsD
      def gauge(name, value, sample_rate: nil, tags: nil, no_prefix: false)
        sample_rate ||= @default_sample_rate
        return StatsD::Instrument::VOID unless sample?(sample_rate)
+
        emit(datagram_builder(no_prefix: no_prefix).g(name, value, sample_rate, tags))
      end
 
@@ -249,6 +252,7 @@ module StatsD
      def set(name, value, sample_rate: nil, tags: nil, no_prefix: false)
        sample_rate ||= @default_sample_rate
        return StatsD::Instrument::VOID unless sample?(sample_rate)
+
        emit(datagram_builder(no_prefix: no_prefix).s(name, value, sample_rate, tags))
      end
 
@@ -271,6 +275,7 @@ module StatsD
 
        sample_rate ||= @default_sample_rate
        return StatsD::Instrument::VOID unless sample?(sample_rate)
+
        emit(datagram_builder(no_prefix: no_prefix).d(name, value, sample_rate, tags))
      end
 
@@ -288,6 +293,7 @@ module StatsD
      def histogram(name, value, sample_rate: nil, tags: nil, no_prefix: false)
        sample_rate ||= @default_sample_rate
        return StatsD::Instrument::VOID unless sample?(sample_rate)
+
        emit(datagram_builder(no_prefix: no_prefix).h(name, value, sample_rate, tags))
      end
 
data/lib/statsd/instrument/datagram_builder.rb CHANGED
@@ -67,10 +67,12 @@ module StatsD
      # @return [Array<String>, nil] the list of tags in canonical form.
      def normalize_tags(tags)
        return [] unless tags
+
        tags = tags.map { |k, v| "#{k}:#{v}" } if tags.is_a?(Hash)
 
        # Fast path when no string replacement is needed
        return tags unless tags.any? { |tag| /[|,]/.match?(tag) }
+
        tags.map { |tag| tag.tr("|,", "") }
      end
 
@@ -78,6 +80,7 @@ module StatsD
      def normalize_name(name)
        # Fast path when no normalization is needed to avoid copying the string
        return name unless /[:|@]/.match?(name)
+
        name.tr(":|@", "_")
      end
 
data/lib/statsd/instrument/environment.rb CHANGED
@@ -79,7 +79,15 @@ module StatsD
      end
 
      def statsd_flush_interval
-        Float(env.fetch("STATSD_FLUSH_INTERVAL", 1.0))
+        Float(env.fetch("STATSD_FLUSH_INTERVAL", StatsD::Instrument::BatchedUDPSink::DEFAULT_FLUSH_INTERVAL))
+      end
+
+      def statsd_buffer_capacity
+        Float(env.fetch("STATSD_BUFFER_CAPACITY", StatsD::Instrument::BatchedUDPSink::DEFAULT_BUFFER_CAPACITY))
+      end
+
+      def statsd_max_packet_size
+        Float(env.fetch("STATSD_MAX_PACKET_SIZE", StatsD::Instrument::BatchedUDPSink::DEFAULT_MAX_PACKET_SIZE))
      end
 
      def client
@@ -90,7 +98,12 @@ module StatsD
        case environment
        when "production", "staging"
          if statsd_flush_interval > 0.0
-            StatsD::Instrument::BatchedUDPSink.for_addr(statsd_addr, flush_interval: statsd_flush_interval)
+            StatsD::Instrument::BatchedUDPSink.for_addr(
+              statsd_addr,
+              flush_interval: statsd_flush_interval,
+              buffer_capacity: statsd_buffer_capacity,
+              max_packet_size: statsd_max_packet_size,
+            )
          else
            StatsD::Instrument::UDPSink.for_addr(statsd_addr)
          end
data/lib/statsd/instrument/expectation.rb CHANGED
@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require "set"
+
 module StatsD
   module Instrument
     # @private
@@ -93,10 +95,12 @@ module StatsD
      # to ensure that this logic matches the logic of the active datagram builder.
      def normalize_tags(tags)
        return [] unless tags
+
        tags = tags.map { |k, v| "#{k}:#{v}" } if tags.is_a?(Hash)
 
        # Fast path when no string replacement is needed
        return tags unless tags.any? { |tag| /[|,]/.match?(tag) }
+
        tags.map { |tag| tag.tr("|,", "") }
      end
    end
data/lib/statsd/instrument/helpers.rb CHANGED
@@ -10,6 +10,22 @@ module StatsD
 
      # For backwards compatibility
      alias_method :capture_statsd_calls, :capture_statsd_datagrams
+
+      def self.add_tag(tags, key, value)
+        tags = tags.dup || {}
+
+        if tags.is_a?(String)
+          tags = tags.empty? ? "#{key}:#{value}" : "#{tags},#{key}:#{value}"
+        elsif tags.is_a?(Array)
+          tags << "#{key}:#{value}"
+        elsif tags.is_a?(Hash)
+          tags[key] = value
+        else
+          raise ArgumentError, "add_tag only supports string, array or hash, #{tags.class} provided"
+        end
+
+        tags
+      end
    end
  end
 end
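`add_tag` mirrors the three tag representations the client accepts, and never mutates its input. A quick sketch of its behavior, matching the tests in data/test/helpers_test.rb further down:

```ruby
StatsD::Instrument::Helpers.add_tag({ foo: "bar" }, :baz, "quc") # => { foo: "bar", baz: "quc" }
StatsD::Instrument::Helpers.add_tag(["foo:bar"], :baz, "quc")    # => ["foo:bar", "baz:quc"]
StatsD::Instrument::Helpers.add_tag("foo:bar", :baz, "quc")      # => "foo:bar,baz:quc"
StatsD::Instrument::Helpers.add_tag(nil, :baz, "quc")            # => { baz: "quc" }
```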
data/lib/statsd/instrument/matchers.rb CHANGED
@@ -49,7 +49,7 @@ module StatsD
        raise RSpec::Expectations::ExpectationNotMetError, "No StatsD calls for metric #{metric_name} were made."
      elsif options[:times] && options[:times] != metrics.length
        raise RSpec::Expectations::ExpectationNotMetError, "The numbers of StatsD calls for metric " \
-        "#{metric_name} was unexpected. Expected #{options[:times].inspect}, got #{metrics.length}"
+          "#{metric_name} was unexpected. Expected #{options[:times].inspect}, got #{metrics.length}"
      end
 
      [:sample_rate, :value, :tags].each do |expectation|
data/lib/statsd/instrument/rubocop/positional_arguments.rb CHANGED
@@ -41,7 +41,7 @@ module RuboCop
      end
 
      def autocorrect(node)
-        -> (corrector) do
+        ->(corrector) do
          positional_arguments = if node.arguments.last.type == :block_pass
            node.arguments[2...node.arguments.length - 1]
          else
data/lib/statsd/instrument/rubocop.rb CHANGED
@@ -39,6 +39,7 @@ module RuboCop
 
    def keyword_arguments(node)
      return nil if node.arguments.empty?
+
      last_argument = if node.arguments.last&.type == :block_pass
        node.arguments[node.arguments.length - 2]
      else
data/lib/statsd/instrument/statsd_datagram_builder.rb CHANGED
@@ -11,6 +11,7 @@ module StatsD
 
      def normalize_tags(tags)
        raise NotImplementedError, "#{self.class.name} does not support tags" if tags
+
        super
      end
    end
data/lib/statsd/instrument/strict.rb CHANGED
@@ -27,6 +27,7 @@ module StatsD
      def increment(key, value = 1, sample_rate: nil, tags: nil, no_prefix: false)
        raise ArgumentError, "StatsD.increment does not accept a block" if block_given?
        raise ArgumentError, "The value argument should be an integer, got #{value.inspect}" unless value.is_a?(Integer)
+
        check_tags_and_sample_rate(sample_rate, tags)
 
        super
@@ -35,6 +36,7 @@ module StatsD
      def gauge(key, value, sample_rate: nil, tags: nil, no_prefix: false)
        raise ArgumentError, "StatsD.increment does not accept a block" if block_given?
        raise ArgumentError, "The value argument should be an integer, got #{value.inspect}" unless value.is_a?(Numeric)
+
        check_tags_and_sample_rate(sample_rate, tags)
 
        super
@@ -43,6 +45,7 @@ module StatsD
      def histogram(key, value, sample_rate: nil, tags: nil, no_prefix: false)
        raise ArgumentError, "StatsD.increment does not accept a block" if block_given?
        raise ArgumentError, "The value argument should be an integer, got #{value.inspect}" unless value.is_a?(Numeric)
+
        check_tags_and_sample_rate(sample_rate, tags)
 
        super
@@ -50,6 +53,7 @@ module StatsD
 
      def set(key, value, sample_rate: nil, tags: nil, no_prefix: false)
        raise ArgumentError, "StatsD.set does not accept a block" if block_given?
+
        check_tags_and_sample_rate(sample_rate, tags)
 
        super
data/lib/statsd/instrument/udp_sink.rb CHANGED
@@ -26,24 +26,27 @@ module StatsD
      def <<(datagram)
        with_socket { |socket| socket.send(datagram, 0) }
        self
-
-      rescue ThreadError
-        # In cases where a TERM or KILL signal has been sent, and we send stats as
-        # part of a signal handler, locks cannot be acquired, so we do our best
-        # to try and send the datagram without a lock.
-        socket.send(datagram, 0) > 0
-
      rescue SocketError, IOError, SystemCallError => error
        StatsD.logger.debug do
          "[StatsD::Instrument::UDPSink] Resetting connection because of #{error.class}: #{error.message}"
        end
        invalidate_socket
+        self
      end
 
      private
 
+      def synchronize(&block)
+        @mutex.synchronize(&block)
+      rescue ThreadError
+        # In cases where a TERM or KILL signal has been sent, and we send stats as
+        # part of a signal handler, locks cannot be acquired, so we do our best
+        # to try and send the datagram without a lock.
+        yield
+      end
+
      def with_socket
-        @mutex.synchronize { yield(socket) }
+        synchronize { yield(socket) }
      end
 
      def socket
@@ -55,7 +58,7 @@ module StatsD
      end
 
      def invalidate_socket
-        @mutex.synchronize do
+        synchronize do
          @socket = nil
        end
      end
data/lib/statsd/instrument/version.rb CHANGED
@@ -2,6 +2,6 @@
 
 module StatsD
   module Instrument
-    VERSION = "3.1.2"
+    VERSION = "3.3.0"
   end
 end
data/lib/statsd/instrument.rb CHANGED
@@ -104,24 +104,26 @@ module StatsD
    # @param method (see #statsd_measure)
    # @param name (see #statsd_measure)
    # @param metric_options (see #statsd_measure)
+    # @param tag_error_class add a <tt>error_class</tt> tag with the error class when an error is thrown
    # @yield You can pass a block to this method if you want to define yourself what is a successful call
    #   based on the return value of the method.
    # @yieldparam result The return value of the instrumented method.
    # @yieldreturn [Boolean] Return true iff the return value is considered a success, false otherwise.
    # @return [void]
    # @see #statsd_count_if
-    def statsd_count_success(method, name, sample_rate: nil, tags: nil, no_prefix: false, client: nil)
+    def statsd_count_success(method, name, sample_rate: nil,
+      tags: nil, no_prefix: false, client: nil, tag_error_class: false)
      add_to_method(method, name, :count_success) do
        define_method(method) do |*args, &block|
          truthiness = result = super(*args, &block)
-        rescue
+        rescue => error
          truthiness = false
          raise
        else
          if block_given?
            begin
              truthiness = yield(result)
-            rescue
+            rescue => error
              truthiness = false
            end
          end
@@ -130,6 +132,9 @@ module StatsD
          client ||= StatsD.singleton_client
          suffix = truthiness == false ? "failure" : "success"
          key = StatsD::Instrument.generate_metric_name(name, self, *args)
+
+          tags = Helpers.add_tag(tags, :error_class, error.class.name) if tag_error_class && error
+
          client.increment("#{key}.#{suffix}", sample_rate: sample_rate, tags: tags, no_prefix: no_prefix)
        end
      end
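A usage sketch of the new `tag_error_class` option, following the tests below; the class and metric names are illustrative, and the `extend StatsD::Instrument` mix-in is the pattern this gem's README documents:

```ruby
class PaymentGateway
  extend StatsD::Instrument

  def charge
    raise "gateway timed out" # a RuntimeError
  end

  statsd_count_success(:charge, "payment.charge", tag_error_class: true)
end

begin
  PaymentGateway.new.charge
rescue RuntimeError
  # increments "payment.charge.failure" tagged error_class:RuntimeError
end
```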
data/test/helpers_test.rb CHANGED
@@ -35,8 +35,47 @@ class HelpersTest < Minitest::Test
    StatsD.gauge("gauge", 15)
 
    assert_equal(2, metrics.length)
-
  ensure
    StatsD.singleton_client = @old_client
  end
+
+  def test_add_tag_works_for_nil
+    assert_equal({ key: 123 }, StatsD::Instrument::Helpers.add_tag(nil, :key, 123))
+  end
+
+  def test_add_tag_works_for_hashes
+    assert_equal({ key: 123 }, StatsD::Instrument::Helpers.add_tag({}, :key, 123))
+
+    existing = { existing: 123 }
+    assert_equal({ existing: 123, new: 456 }, StatsD::Instrument::Helpers.add_tag(existing, :new, 456))
+
+    # ensure we do not modify the existing tags
+    assert_equal({ existing: 123 }, existing)
+  end
+
+  def test_add_tag_works_for_arrays
+    assert_equal(["key:123"], StatsD::Instrument::Helpers.add_tag([], :key, 123))
+
+    existing = ["existing:123"]
+    assert_equal(["existing:123", "new:456"], StatsD::Instrument::Helpers.add_tag(existing, :new, 456))
+
+    # ensure we do not modify the existing tags
+    assert_equal(["existing:123"], existing)
+  end
+
+  def test_add_tag_works_for_strings
+    assert_equal("key:123", StatsD::Instrument::Helpers.add_tag("", :key, 123))
+
+    existing = "existing:123"
+    assert_equal("existing:123,new:456", StatsD::Instrument::Helpers.add_tag(existing, :new, 456))
+
+    # ensure we do not modify the existing tags
+    assert_equal("existing:123", existing)
+  end
+
+  def test_add_tags_raises_for_other
+    assert_raises(ArgumentError, "add_tag only supports string, array or hash, Integer provided") do
+      StatsD::Instrument::Helpers.add_tag(1, :key, 123)
+    end
+  end
 end
data/test/statsd_instrumentation_test.rb CHANGED
@@ -159,6 +159,38 @@ class StatsDInstrumentationTest < Minitest::Test
    ActiveMerchant::UniqueGateway.statsd_remove_count_success(:ssl_post, "ActiveMerchant.Gateway")
  end
 
+  def test_statsd_count_success_tag_error_class
+    ActiveMerchant::Base.statsd_count_success(:ssl_post, "ActiveMerchant.Base", tag_error_class: true)
+
+    assert_statsd_increment("ActiveMerchant.Base.success", tags: nil) do
+      ActiveMerchant::Base.new.ssl_post(true)
+    end
+
+    assert_statsd_increment("ActiveMerchant.Base.failure", tags: ["error_class:RuntimeError"]) do
+      assert_raises(RuntimeError, "Not OK") do
+        ActiveMerchant::Base.new.ssl_post(false)
+      end
+    end
+  ensure
+    ActiveMerchant::Base.statsd_remove_count_success(:ssl_post, "ActiveMerchant.Base")
+  end
+
+  def test_statsd_count_success_tag_error_class_is_opt_in
+    ActiveMerchant::Base.statsd_count_success(:ssl_post, "ActiveMerchant.Base")
+
+    assert_statsd_increment("ActiveMerchant.Base.success", tags: nil) do
+      ActiveMerchant::Base.new.ssl_post(true)
+    end
+
+    assert_statsd_increment("ActiveMerchant.Base.failure", tags: nil) do
+      assert_raises(RuntimeError, "Not OK") do
+        ActiveMerchant::Base.new.ssl_post(false)
+      end
+    end
+  ensure
+    ActiveMerchant::Base.statsd_remove_count_success(:ssl_post, "ActiveMerchant.Base")
+  end
+
  def test_statsd_count
    ActiveMerchant::Gateway.statsd_count(:ssl_post, "ActiveMerchant.Gateway.ssl_post")
 
data/test/udp_sink_test.rb CHANGED
@@ -32,17 +32,19 @@ module UDPSinkTests
    refute(udp_sink.sample?(0.5))
  end
 
-  def test_parallelism
+  def test_concurrency
    udp_sink = build_sink(@host, @port)
-    50.times.map { |i| Thread.new { udp_sink << "foo:#{i}|c" << "bar:#{i}|c" } }
+    threads = 50.times.map { |i| Thread.new { udp_sink << "foo:#{i}|c" << "bar:#{i}|c" } }
    datagrams = []
 
    while @receiver.wait_readable(2)
-      datagram, _source = @receiver.recvfrom(4000)
+      datagram, _source = @receiver.recvfrom(4096)
      datagrams += datagram.split("\n")
    end
 
    assert_equal(100, datagrams.size)
+  ensure
+    threads&.each(&:kill)
  end
 
  class SimpleFormatter < ::Logger::Formatter
@@ -53,31 +55,39 @@ module UDPSinkTests
 
  def test_sends_datagram_in_signal_handler
    udp_sink = build_sink(@host, @port)
-    Signal.trap("USR1") { udp_sink << "exiting:1|c" }
-
-    pid = fork do
-      sleep(5)
+    Signal.trap("USR1") do
+      udp_sink << "exiting:1|c"
+      udp_sink << "exiting:1|d"
    end
 
+    Process.kill("USR1", Process.pid)
+    assert_equal(["exiting:1|c", "exiting:1|d"], read_datagrams(2))
+  ensure
    Signal.trap("USR1", "DEFAULT")
-
-    Process.kill("USR1", pid)
-    @receiver.wait_readable(1)
-    assert_equal("exiting:1|c", @receiver.recvfrom_nonblock(100).first)
-    Process.kill("KILL", pid)
-  rescue NotImplementedError
-    pass("Fork is not implemented on #{RUBY_PLATFORM}")
  end
 
  def test_sends_datagram_before_exit
    udp_sink = build_sink(@host, @port)
-    fork do
+    pid = fork do
      udp_sink << "exiting:1|c"
-      Process.exit(0)
+      udp_sink << "exiting:1|d"
    end
+    Process.wait(pid)
+    assert_equal(["exiting:1|c", "exiting:1|d"], read_datagrams(2))
+  rescue NotImplementedError
+    pass("Fork is not implemented on #{RUBY_PLATFORM}")
+  end
 
-    @receiver.wait_readable(1)
-    assert_equal("exiting:1|c", @receiver.recvfrom_nonblock(100).first)
+  def test_sends_datagram_in_at_exit_callback
+    udp_sink = build_sink(@host, @port)
+    pid = fork do
+      at_exit do
+        udp_sink << "exiting:1|c"
+        udp_sink << "exiting:1|d"
+      end
+    end
+    Process.wait(pid)
+    assert_equal(["exiting:1|c", "exiting:1|d"], read_datagrams(2))
  rescue NotImplementedError
    pass("Fork is not implemented on #{RUBY_PLATFORM}")
  end
@@ -86,11 +96,11 @@ module UDPSinkTests
    udp_sink = build_sink(@host, @port)
    fork do
      udp_sink << "exiting:1|c"
+      udp_sink << "exiting:1|d"
      Process.kill("TERM", Process.pid)
    end
 
-    @receiver.wait_readable(1)
-    assert_equal("exiting:1|c", @receiver.recvfrom_nonblock(100).first)
+    assert_equal(["exiting:1|c", "exiting:1|d"], read_datagrams(2))
  rescue NotImplementedError
    pass("Fork is not implemented on #{RUBY_PLATFORM}")
  end
@@ -101,6 +111,18 @@ module UDPSinkTests
    @sink_class.new(host, port)
  end
 
+  def read_datagrams(count, timeout: 2)
+    datagrams = []
+    count.times do
+      if @receiver.wait_readable(timeout)
+        datagrams += @receiver.recvfrom_nonblock(1000).first.lines(chomp: true)
+      else
+        break
+      end
+    end
+    datagrams
+  end
+
  class UDPSinkTest < Minitest::Test
    include UDPSinkTests
 
@@ -136,7 +158,7 @@ module UDPSinkTests
 
      assert_equal(
        "[#{@sink_class}] Resetting connection because of " \
-        "Errno::EDESTADDRREQ: Destination address required\n",
+          "Errno::EDESTADDRREQ: Destination address required\n",
        logs.string,
      )
    ensure
@@ -145,7 +167,7 @@ module UDPSinkTests
    end
  end
 
-  class BatchedUDPSinkTest < Minitest::Test
+  module BatchedUDPSinkTests
    include UDPSinkTests
 
    def setup
@@ -154,28 +176,63 @@ module UDPSinkTests
      @host = @receiver.addr[2]
      @port = @receiver.addr[1]
      @sink_class = StatsD::Instrument::BatchedUDPSink
+      @sinks = []
    end
 
    def teardown
      @receiver.close
+      @sinks.each(&:shutdown)
    end
 
-    def test_parallelism_buffering
+    private
+
+    def build_sink(host = @host, port = @port)
+      sink = @sink_class.new(host, port, flush_threshold: default_flush_threshold, buffer_capacity: 50)
+      @sinks << sink
+      sink
+    end
+
+    def default_flush_threshold
+      StatsD::Instrument::BatchedUDPSink::DEFAULT_FLUSH_THRESHOLD
+    end
+  end
+
+  class BatchedUDPSinkTest < Minitest::Test
+    include BatchedUDPSinkTests
+
+    def test_concurrency_buffering
      udp_sink = build_sink(@host, @port)
-      50.times.map do |i|
+      threads = 50.times.map do |i|
        Thread.new do
          udp_sink << "foo:#{i}|c" << "bar:#{i}|c" << "baz:#{i}|c" << "plop:#{i}|c"
        end
      end
+      threads.each(&:join)
+      assert_equal(200, read_datagrams(10, timeout: 2).size)
+    ensure
+      threads&.each(&:kill)
+    end
+  end
 
-      datagrams = []
+  class LowThresholdBatchedUDPSinkTest < Minitest::Test
+    include BatchedUDPSinkTests
 
-      while @receiver.wait_readable(2)
-        datagram, _source = @receiver.recvfrom(1000)
-        datagrams += datagram.split("\n")
-      end
+    def test_sends_datagram_when_termed
+      # When the main thread exit, the dispatcher thread is aborted
+      # and there's no exceptions or anything like that to rescue.
+      # So if the dispatcher thread poped some events from the buffer
+      # but didn't sent them yet, then they may be lost.
+      skip("Unfortunately this can't be guaranteed")
+    end
+    alias_method :test_sends_datagram_in_at_exit_callback, :test_sends_datagram_when_termed
+    alias_method :test_sends_datagram_before_exit, :test_sends_datagram_when_termed
+
+    private
 
-      assert_equal(200, datagrams.size)
+    # We run the same tests again, but this time we wake up the dispatcher
+    # thread on every call to make sure trap context is properly handled
+    def default_flush_threshold
+      1
    end
  end
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: statsd-instrument
 version: !ruby/object:Gem::Version
-  version: 3.1.2
+  version: 3.3.0
 platform: ruby
 authors:
 - Jesse Storimer
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-09-02 00:00:00.000000000 Z
+date: 2022-07-26 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: concurrent-ruby
@@ -35,8 +35,8 @@ extensions: []
 extra_rdoc_files: []
 files:
 - ".github/CODEOWNERS"
-- ".github/probots.yml"
 - ".github/workflows/benchmark.yml"
+- ".github/workflows/cla.yml"
 - ".github/workflows/lint.yml"
 - ".github/workflows/tests.yml"
 - ".gitignore"
@@ -134,7 +134,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.2.20
+rubygems_version: 3.3.3
 signing_key:
 specification_version: 4
 summary: A StatsD client for Ruby apps
data/.github/probots.yml DELETED
@@ -1,2 +0,0 @@
-enabled:
-- cla