ddtrace 0.53.0 → 0.54.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +77 -11
  3. data/ddtrace.gemspec +5 -2
  4. data/docs/GettingStarted.md +40 -3
  5. data/docs/ProfilingDevelopment.md +2 -2
  6. data/ext/ddtrace_profiling_native_extension/NativeExtensionDesign.md +86 -0
  7. data/ext/ddtrace_profiling_native_extension/clock_id.h +4 -0
  8. data/ext/ddtrace_profiling_native_extension/clock_id_from_pthread.c +52 -0
  9. data/ext/ddtrace_profiling_native_extension/clock_id_noop.c +14 -0
  10. data/ext/ddtrace_profiling_native_extension/extconf.rb +111 -3
  11. data/ext/ddtrace_profiling_native_extension/private_vm_api_access.c +35 -0
  12. data/ext/ddtrace_profiling_native_extension/private_vm_api_access.h +3 -0
  13. data/ext/ddtrace_profiling_native_extension/profiling.c +6 -1
  14. data/lib/datadog/ci/contrib/cucumber/formatter.rb +1 -0
  15. data/lib/datadog/ci/contrib/rspec/example.rb +1 -0
  16. data/lib/datadog/ci/ext/environment.rb +26 -21
  17. data/lib/datadog/ci/ext/test.rb +1 -0
  18. data/lib/datadog/ci/test.rb +5 -1
  19. data/lib/ddtrace/buffer.rb +28 -16
  20. data/lib/ddtrace/configuration/agent_settings_resolver.rb +27 -16
  21. data/lib/ddtrace/context.rb +10 -2
  22. data/lib/ddtrace/contrib/delayed_job/plugin.rb +2 -2
  23. data/lib/ddtrace/contrib/mongodb/instrumentation.rb +1 -1
  24. data/lib/ddtrace/contrib/mongodb/integration.rb +5 -0
  25. data/lib/ddtrace/contrib/rails/configuration/settings.rb +7 -0
  26. data/lib/ddtrace/contrib/rails/framework.rb +3 -2
  27. data/lib/ddtrace/contrib/redis/instrumentation.rb +90 -0
  28. data/lib/ddtrace/contrib/redis/patcher.rb +2 -84
  29. data/lib/ddtrace/contrib/resque/integration.rb +1 -5
  30. data/lib/ddtrace/ext/priority.rb +6 -4
  31. data/lib/ddtrace/ext/profiling.rb +1 -1
  32. data/lib/ddtrace/metrics.rb +2 -2
  33. data/lib/ddtrace/profiling/collectors/stack.rb +45 -45
  34. data/lib/ddtrace/profiling/encoding/profile.rb +1 -1
  35. data/lib/ddtrace/profiling/events/stack.rb +8 -8
  36. data/lib/ddtrace/profiling/native_extension.rb +23 -1
  37. data/lib/ddtrace/profiling/pprof/builder.rb +8 -2
  38. data/lib/ddtrace/profiling/pprof/stack_sample.rb +13 -16
  39. data/lib/ddtrace/profiling/pprof/template.rb +2 -2
  40. data/lib/ddtrace/profiling/tasks/setup.rb +21 -12
  41. data/lib/ddtrace/profiling/trace_identifiers/ddtrace.rb +9 -8
  42. data/lib/ddtrace/profiling/trace_identifiers/helper.rb +2 -2
  43. data/lib/ddtrace/profiling.rb +0 -2
  44. data/lib/ddtrace/sampler.rb +18 -8
  45. data/lib/ddtrace/sampling/rule_sampler.rb +13 -1
  46. data/lib/ddtrace/utils/time.rb +6 -0
  47. data/lib/ddtrace/version.rb +1 -1
  48. metadata +14 -9
  49. data/lib/ddtrace/profiling/ext/cpu.rb +0 -67
  50. data/lib/ddtrace/profiling/ext/cthread.rb +0 -156
data/ext/ddtrace_profiling_native_extension/profiling.c
@@ -1,5 +1,6 @@
  #include <ruby.h>
- #include <stdio.h>
+
+ #include "clock_id.h"

  static VALUE native_working_p(VALUE self);

@@ -10,8 +11,12 @@ void Init_ddtrace_profiling_native_extension(void) {

  rb_define_singleton_method(native_extension_module, "native_working?", native_working_p, 0);
  rb_funcall(native_extension_module, rb_intern("private_class_method"), 1, ID2SYM(rb_intern("native_working?")));
+
+ rb_define_singleton_method(native_extension_module, "clock_id_for", clock_id_for, 1); // from clock_id.h
  }

  static VALUE native_working_p(VALUE self) {
+ self_test_clock_id();
+
  return Qtrue;
  }
data/lib/datadog/ci/contrib/cucumber/formatter.rb
@@ -39,6 +39,7 @@ module Datadog
  service: configuration[:service_name]
  },
  framework: Ext::FRAMEWORK,
+ framework_version: Datadog::CI::Contrib::Cucumber::Integration.version.to_s,
  test_name: event.test_case.name,
  test_suite: event.test_case.location.file,
  test_type: Ext::TEST_TYPE
data/lib/datadog/ci/contrib/rspec/example.rb
@@ -37,6 +37,7 @@ module Datadog
  service: configuration[:service_name]
  },
  framework: Ext::FRAMEWORK,
+ framework_version: Datadog::CI::Contrib::RSpec::Integration.version.to_s,
  test_name: test_name,
  test_suite: file_path,
  test_type: Ext::TEST_TYPE
data/lib/datadog/ci/ext/environment.rb
@@ -40,22 +40,27 @@ module Datadog
  module_function

  def tags(env)
+ # Extract metadata from CI provider environment variables
  _, extractor = PROVIDERS.find { |provider_env_var, _| env.key?(provider_env_var) }
- if extractor
- tags = public_send(extractor, env)
-
- tags[Datadog::Ext::Git::TAG_TAG] = normalize_ref(tags[Datadog::Ext::Git::TAG_TAG])
- tags.delete(Datadog::Ext::Git::TAG_BRANCH) unless tags[Datadog::Ext::Git::TAG_TAG].nil?
- tags[Datadog::Ext::Git::TAG_BRANCH] = normalize_ref(tags[Datadog::Ext::Git::TAG_BRANCH])
- tags[Datadog::Ext::Git::TAG_REPOSITORY_URL] = filter_sensitive_info(tags[Datadog::Ext::Git::TAG_REPOSITORY_URL])
-
- # Expand ~
- workspace_path = tags[TAG_WORKSPACE_PATH]
- if !workspace_path.nil? && (workspace_path == '~' || workspace_path.start_with?('~/'))
- tags[TAG_WORKSPACE_PATH] = File.expand_path(workspace_path)
- end
- else
- tags = {}
+ tags = extractor ? public_send(extractor, env).reject { |_, v| v.nil? || v.strip.empty? } : {}
+ tags.delete(Datadog::Ext::Git::TAG_BRANCH) unless tags[Datadog::Ext::Git::TAG_TAG].nil?
+
+ # If user defined metadata is defined, overwrite
+ tags.merge!(extract_user_defined_git(env))
+ if !tags[Datadog::Ext::Git::TAG_BRANCH].nil? && tags[Datadog::Ext::Git::TAG_BRANCH].include?('tags/')
+ tags[Datadog::Ext::Git::TAG_TAG] = tags[Datadog::Ext::Git::TAG_BRANCH]
+ tags.delete(Datadog::Ext::Git::TAG_BRANCH)
+ end
+
+ # Normalize Git references
+ tags[Datadog::Ext::Git::TAG_TAG] = normalize_ref(tags[Datadog::Ext::Git::TAG_TAG])
+ tags[Datadog::Ext::Git::TAG_BRANCH] = normalize_ref(tags[Datadog::Ext::Git::TAG_BRANCH])
+ tags[Datadog::Ext::Git::TAG_REPOSITORY_URL] = filter_sensitive_info(tags[Datadog::Ext::Git::TAG_REPOSITORY_URL])
+
+ # Expand ~
+ workspace_path = tags[TAG_WORKSPACE_PATH]
+ if !workspace_path.nil? && (workspace_path == '~' || workspace_path.start_with?('~/'))
+ tags[TAG_WORKSPACE_PATH] = File.expand_path(workspace_path)
  end

  # Fill out tags from local git as fallback
@@ -63,9 +68,6 @@ module Datadog
  tags[key] ||= value
  end

- # If user defined metadata is defined, overwrite
- tags.merge!(extract_user_defined_git(env))
-
  tags.reject { |_, v| v.nil? }
  end

@@ -211,16 +213,19 @@ module Datadog
  ref = env['GITHUB_REF'] if ref.nil? || ref.empty?
  branch, tag = branch_or_tag(ref)

+ pipeline_url = "#{env['GITHUB_SERVER_URL']}/#{env['GITHUB_REPOSITORY']}/actions/runs/#{env['GITHUB_RUN_ID']}"
+ pipeline_url = "#{pipeline_url}/attempts/#{env['GITHUB_RUN_ATTEMPT']}" if env['GITHUB_RUN_ATTEMPT']
+
  {
  Datadog::Ext::Git::TAG_BRANCH => branch,
  Datadog::Ext::Git::TAG_COMMIT_SHA => env['GITHUB_SHA'],
- Datadog::Ext::Git::TAG_REPOSITORY_URL => "https://github.com/#{env['GITHUB_REPOSITORY']}.git",
+ Datadog::Ext::Git::TAG_REPOSITORY_URL => "#{env['GITHUB_SERVER_URL']}/#{env['GITHUB_REPOSITORY']}.git",
  Datadog::Ext::Git::TAG_TAG => tag,
- TAG_JOB_URL => "https://github.com/#{env['GITHUB_REPOSITORY']}/commit/#{env['GITHUB_SHA']}/checks",
+ TAG_JOB_URL => "#{env['GITHUB_SERVER_URL']}/#{env['GITHUB_REPOSITORY']}/commit/#{env['GITHUB_SHA']}/checks",
  TAG_PIPELINE_ID => env['GITHUB_RUN_ID'],
  TAG_PIPELINE_NAME => env['GITHUB_WORKFLOW'],
  TAG_PIPELINE_NUMBER => env['GITHUB_RUN_NUMBER'],
- TAG_PIPELINE_URL => "https://github.com/#{env['GITHUB_REPOSITORY']}/commit/#{env['GITHUB_SHA']}/checks",
+ TAG_PIPELINE_URL => pipeline_url,
  TAG_PROVIDER_NAME => 'github',
  TAG_WORKSPACE_PATH => env['GITHUB_WORKSPACE'],
  Datadog::Ext::Git::TAG_COMMIT_AUTHOR_NAME => env['BUILD_REQUESTEDFORID'],
data/lib/datadog/ci/ext/test.rb
@@ -8,6 +8,7 @@ module Datadog

  TAG_ARGUMENTS = 'test.arguments'.freeze
  TAG_FRAMEWORK = 'test.framework'.freeze
+ TAG_FRAMEWORK_VERSION = 'test.framework_version'.freeze
  TAG_NAME = 'test.name'.freeze
  TAG_SKIP_REASON = 'test.skip_reason'.freeze # DEV: Not populated yet
  TAG_STATUS = 'test.status'.freeze
data/lib/datadog/ci/test.rb
@@ -38,10 +38,14 @@ module Datadog
  span.context.origin = Ext::Test::CONTEXT_ORIGIN if span.context
  Datadog::Contrib::Analytics.set_measured(span)
  span.set_tag(Ext::Test::TAG_SPAN_KIND, Ext::AppTypes::TEST)
- Ext::Environment.tags(ENV).each { |k, v| span.set_tag(k, v) }
+
+ # Set environment tags
+ @environment_tags ||= Ext::Environment.tags(ENV)
+ @environment_tags.each { |k, v| span.set_tag(k, v) }

  # Set contextual tags
  span.set_tag(Ext::Test::TAG_FRAMEWORK, tags[:framework]) if tags[:framework]
+ span.set_tag(Ext::Test::TAG_FRAMEWORK_VERSION, tags[:framework_version]) if tags[:framework_version]
  span.set_tag(Ext::Test::TAG_NAME, tags[:test_name]) if tags[:test_name]
  span.set_tag(Ext::Test::TAG_SUITE, tags[:test_suite]) if tags[:test_suite]
  span.set_tag(Ext::Test::TAG_TYPE, tags[:test_type]) if tags[:test_type]
data/lib/ddtrace/buffer.rb
@@ -14,7 +14,11 @@ module Datadog
  end

  # Add a new ``item`` in the local queue. This method doesn't block the execution
- # even if the buffer is full. In that case, a random item is discarded.
+ # even if the buffer is full.
+ #
+ # When the buffer is full, we try to ensure that we are fairly sampling newly
+ # pushed traces by randomly inserting them into the buffer slots. This discards
+ # old traces randomly while trying to ensure that recent traces are still captured.
  def push(item)
  return if closed?

@@ -64,7 +68,7 @@ module Datadog

  protected

- # Segment items into two distinct segments: underflow and overflow.
+ # Segment items into two segments: underflow and overflow.
  # Underflow are items that will fit into buffer.
  # Overflow are items that will exceed capacity, after underflow is added.
  # Returns each array, and nil if there is no underflow/overflow.
@@ -176,9 +180,6 @@ module Datadog
  # Buffer that stores objects, has a maximum size, and
  # can be safely used concurrently with CRuby.
  #
- # Under extreme concurrency scenarios, this class can exceed
- # its +max_size+ by up to 4%.
- #
  # Because singular +Array+ operations are thread-safe in CRuby,
  # we can implement the trace buffer without an explicit lock,
  # while making the compromise of allowing the buffer to go
@@ -187,7 +188,6 @@ module Datadog
  # On the following scenario:
  # * 4.5 million spans/second.
  # * Pushed into a single CRubyTraceBuffer from 1000 threads.
- # The buffer can exceed its maximum size by no more than 4%.
  #
  # This implementation allocates less memory and is faster
  # than {Datadog::ThreadSafeBuffer}.
@@ -195,19 +195,31 @@ module Datadog
  # @see spec/ddtrace/benchmark/buffer_benchmark_spec.rb Buffer benchmarks
  # @see https://github.com/ruby-concurrency/concurrent-ruby/blob/c1114a0c6891d9634f019f1f9fe58dcae8658964/lib/concurrent-ruby/concurrent/array.rb#L23-L27
  class CRubyBuffer < Buffer
+ # A very large number to allow us to effectively
+ # drop all items when invoking `slice!(i, FIXNUM_MAX)`.
+ FIXNUM_MAX = (1 << 62) - 1
+
  # Add a new ``trace`` in the local queue. This method doesn't block the execution
  # even if the buffer is full. In that case, a random trace is discarded.
  def replace!(item)
- # we should replace a random trace with the new one
- replace_index = rand(@items.size)
- replaced_trace = @items.delete_at(replace_index)
- @items << item
-
- # We might have deleted an element right when the buffer
- # was drained, thus +replaced_trace+ will be +nil+.
- # In that case, nothing was replaced, and this method
- # performed a simple insertion into the buffer.
- replaced_trace
+ # Ensure buffer stays within +max_size+ items.
+ # This can happen when there's concurrent modification
+ # between a call the check in `full?` and the `add!` call in
+ # `full? ? replace!(item) : add!(item)`.
+ #
+ # We can still have `@items.size > @max_size` for a short period of
+ # time, but we will always try to correct it here.
+ #
+ # `slice!` is performed before `delete_at` & `<<` to avoid always
+ # removing the item that was just inserted.
+ #
+ # DEV: `slice!` with two integer arguments is ~10% faster than
+ # `slice!` with a {Range} argument.
+ @items.slice!(@max_size, FIXNUM_MAX)
+
+ # We should replace a random trace with the new one
+ replace_index = rand(@max_size)
+ @items[replace_index] = item
  end
  end

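The replace! rewrite above leans on the fact that each single Array operation (slice!, index assignment) is atomic on CRuby. A minimal standalone sketch of the same bounded random-replacement idea, using a hypothetical BoundedBuffer class rather than the library's actual classes:

    # Sketch only: hypothetical class illustrating the strategy used by CRubyBuffer#replace! above.
    class BoundedBuffer
      FIXNUM_MAX = (1 << 62) - 1 # huge length so slice! trims everything past max_size

      def initialize(max_size)
        @max_size = max_size
        @items = []
      end

      def push(item)
        if @items.size >= @max_size
          @items.slice!(@max_size, FIXNUM_MAX) # correct any temporary overflow first
          @items[rand(@max_size)] = item       # overwrite a random slot instead of growing
        else
          @items << item
        end
      end

      def to_a
        @items.dup
      end
    end

Overwriting an existing index, rather than delete_at followed by <<, keeps the array at max_size and avoids the case where a concurrent drain makes the deletion a no-op.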
data/lib/ddtrace/configuration/agent_settings_resolver.rb
@@ -130,35 +130,46 @@ module Datadog
  def configured_port
  return @configured_port if defined?(@configured_port)

- port_from_env = ENV[Datadog::Ext::Transport::HTTP::ENV_DEFAULT_PORT]
  parsed_port_from_env =
- if port_from_env
- begin
- Integer(port_from_env)
- rescue ArgumentError
- log_warning(
- "Invalid value for #{Datadog::Ext::Transport::HTTP::ENV_DEFAULT_PORT} environment variable " \
- "('#{port_from_env}'). Ignoring this configuration."
- )
- end
- end
+ try_parsing_as_integer(
+ friendly_name: "#{Datadog::Ext::Transport::HTTP::ENV_DEFAULT_PORT} environment variable",
+ value: ENV[Datadog::Ext::Transport::HTTP::ENV_DEFAULT_PORT],
+ )
+
+ parsed_settings_tracer_port =
+ try_parsing_as_integer(
+ friendly_name: '"c.tracer.port"',
+ value: settings.tracer.port,
+ )

  @configured_port = pick_from(
  DetectedConfiguration.new(
  friendly_name: '"c.tracer.port"',
- value: settings.tracer.port
+ value: parsed_settings_tracer_port,
  ),
  DetectedConfiguration.new(
  friendly_name: "#{Datadog::Ext::Transport::HTTP::ENV_DEFAULT_URL} environment variable",
- value: parsed_url && parsed_url.port
+ value: parsed_url && parsed_url.port,
  ),
  DetectedConfiguration.new(
  friendly_name: "#{Datadog::Ext::Transport::HTTP::ENV_DEFAULT_PORT} environment variable",
- value: parsed_port_from_env
+ value: parsed_port_from_env,
  )
  )
  end

+ def try_parsing_as_integer(value:, friendly_name:)
+ return unless value
+
+ begin
+ Integer(value)
+ rescue ArgumentError, TypeError
+ log_warning("Invalid value for #{friendly_name} (#{value.inspect}). Ignoring this configuration.")
+
+ nil
+ end
+ end
+
  def ssl?
  !parsed_url.nil? && parsed_url.scheme == 'https'
  end
@@ -264,8 +275,8 @@ module Datadog
  log_warning(
  'Configuration mismatch: values differ between ' \
  "#{detected_configurations_in_priority_order
- .map { |config| "#{config.friendly_name} ('#{config.value}')" }.join(' and ')}" \
- ". Using '#{detected_configurations_in_priority_order.first.value}'."
+ .map { |config| "#{config.friendly_name} (#{config.value.inspect})" }.join(' and ')}" \
+ ". Using #{detected_configurations_in_priority_order.first.value.inspect}."
  )
  end

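Kernel#Integer raises ArgumentError for strings that are not valid integers and TypeError for other non-convertible objects, which is why the new helper rescues both. A rough standalone sketch of the same parsing behaviour, without the resolver's logging (parse_port is a hypothetical name):

    # Hypothetical stand-in for try_parsing_as_integer, minus the log_warning call.
    def parse_port(value)
      return unless value

      Integer(value)
    rescue ArgumentError, TypeError
      nil # e.g. Integer('not-a-port') raises ArgumentError; Integer(Object.new) raises TypeError
    end

    parse_port('8126')       # => 8126
    parse_port('not-a-port') # => nil
    parse_port(nil)          # => nil (early return)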
data/lib/ddtrace/context.rb
@@ -78,13 +78,21 @@ module Datadog
  # earlier while child spans still need to finish their traced execution.
  def current_span
  @mutex.synchronize do
- return @current_span
+ @current_span
  end
  end

  def current_root_span
  @mutex.synchronize do
- return @current_root_span
+ @current_root_span
+ end
+ end
+
+ # Same as calling #current_span and #current_root_span, but works atomically thus preventing races when we need to
+ # retrieve both
+ def current_span_and_root_span
+ @mutex.synchronize do
+ [@current_span, @current_root_span]
  end
  end

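The combined accessor exists because two separate synchronized reads can interleave with another thread mutating the context between them. A hypothetical caller-side sketch of the difference:

    # Hypothetical helper showing why the atomic pair accessor is preferable.
    def span_ids_for_profiler(context)
      # Racy: the context may change between these two synchronized calls.
      #   span = context.current_span
      #   root_span = context.current_root_span

      # Consistent: both values come from a single critical section.
      span, root_span = context.current_span_and_root_span
      [span && span.span_id, root_span && root_span.span_id]
    end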
data/lib/ddtrace/contrib/delayed_job/plugin.rb
@@ -9,7 +9,7 @@ module Datadog
  # DelayedJob plugin that instruments invoke_job hook
  class Plugin < Delayed::Plugin
  def self.instrument_invoke(job, &block)
- return block.call(job) unless tracer && tracer.enabled
+ return yield(job) unless tracer && tracer.enabled

  tracer.trace(Ext::SPAN_JOB, service: configuration[:service_name], resource: job_name(job),
  on_error: configuration[:error_handler]) do |span|
@@ -29,7 +29,7 @@ module Datadog
  end

  def self.instrument_enqueue(job, &block)
- return block.call(job) unless tracer && tracer.enabled
+ return yield(job) unless tracer && tracer.enabled

  tracer.trace(Ext::SPAN_ENQUEUE, service: configuration[:client_service_name], resource: job_name(job)) do |span|
  set_sample_rate(span)
data/lib/ddtrace/contrib/mongodb/instrumentation.rb
@@ -52,7 +52,7 @@ module Datadog
  module InstanceMethods
  def datadog_pin
  @datadog_pin ||= begin
- service = Datadog.configuration[:mongo][:service_name]
+ service = Datadog.configuration[:mongo, seed][:service_name]

  Datadog::Pin.new(
  service,
data/lib/ddtrace/contrib/mongodb/integration.rb
@@ -1,5 +1,6 @@
  # typed: false
  require 'ddtrace/contrib/integration'
+ require 'ddtrace/contrib/configuration/resolvers/pattern_resolver'
  require 'ddtrace/contrib/mongodb/configuration/settings'
  require 'ddtrace/contrib/mongodb/patcher'

@@ -33,6 +34,10 @@ module Datadog
  def patcher
  Patcher
  end
+
+ def resolver
+ @resolver ||= Contrib::Configuration::Resolvers::PatternResolver.new
+ end
  end
  end
  end
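Switching the :mongo integration to PatternResolver lets settings be resolved against the connection description (the seed used in the Datadog.configuration[:mongo, seed] lookup shown earlier), so per-client configuration becomes possible. A hedged sketch, assuming the describes: multiplexing option used by other pattern-resolved integrations:

    # Sketch only: per-connection Mongo settings, assuming the describes: option applies here.
    Datadog.configure do |c|
      c.use :mongo, service_name: 'mongo'                                   # default for all clients
      c.use :mongo, describes: /analytics/, service_name: 'mongo-analytics' # clients whose seed matches the pattern
    end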
data/lib/ddtrace/contrib/rails/configuration/settings.rb
@@ -73,6 +73,13 @@ module Datadog
  end
  end

+ option :job_service do |o|
+ o.on_set do |value|
+ # Update ActiveJob service name too
+ Datadog.configuration[:active_job][:service_name] = value
+ end
+ end
+
  option :middleware, default: true
  option :middleware_names, default: false
  option :template_base_path do |o|
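With the new option, the ActiveJob service name can be driven from the Rails integration itself (the framework.rb change below wires it through). A hedged example of how this might be set in an initializer, with placeholder service names:

    # Sketch: setting the new job_service option (service names are placeholders).
    Datadog.configure do |c|
      c.use :rails,
            service_name: 'my-rails-app',
            job_service: 'my-rails-app-jobs' # forwarded to the :active_job integration's service_name
    end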
data/lib/ddtrace/contrib/rails/framework.rb
@@ -22,7 +22,7 @@ module Datadog
  # Rails framework code, used to essentially:
  # - handle configuration entries which are specific to Datadog tracing
  # - instrument parts of the framework when needed
- module Framework
+ module Framework # rubocop:disable Metrics/ModuleLength
  # After the Rails application finishes initializing, we configure the Rails
  # integration and all its sub-components with the application information
  # available.
@@ -65,6 +65,7 @@ module Datadog
  config[:database_service] ||= "#{config[:service_name]}-#{Contrib::ActiveRecord::Utils.adapter_name}"
  config[:controller_service] ||= config[:service_name]
  config[:cache_service] ||= "#{config[:service_name]}-cache"
+ config[:job_service] ||= "#{config[:service_name]}-#{Contrib::ActiveJob::Ext::SERVICE_NAME}"
  end
  end

@@ -132,7 +133,7 @@ module Datadog

  datadog_config.use(
  :active_job,
- service_name: "#{rails_config[:service_name]}-#{Contrib::ActiveJob::Ext::SERVICE_NAME}",
+ service_name: rails_config[:job_service],
  log_injection: rails_config[:log_injection]
  )
  end
data/lib/ddtrace/contrib/redis/instrumentation.rb
@@ -0,0 +1,90 @@
+ # typed: false
+ require 'ddtrace/contrib/patcher'
+ require 'ddtrace/contrib/redis/ext'
+ require 'ddtrace/contrib/redis/configuration/resolver'
+
+ module Datadog
+ module Contrib
+ module Redis
+ # Instrumentation for Redis
+ module Instrumentation
+ def self.included(base)
+ base.prepend(InstanceMethods)
+ end
+
+ # InstanceMethods - implementing instrumentation
+ module InstanceMethods
+ def call(*args, &block)
+ pin = Datadog::Pin.get_from(self)
+ return super unless pin && pin.tracer
+
+ response = nil
+ pin.tracer.trace(Datadog::Contrib::Redis::Ext::SPAN_COMMAND) do |span|
+ span.service = pin.service
+ span.span_type = Datadog::Contrib::Redis::Ext::TYPE
+ span.resource = get_command(args)
+ Datadog::Contrib::Redis::Tags.set_common_tags(self, span)
+
+ response = super
+ end
+
+ response
+ end
+
+ def call_pipeline(*args, &block)
+ pin = Datadog::Pin.get_from(self)
+ return super unless pin && pin.tracer
+
+ response = nil
+ pin.tracer.trace(Datadog::Contrib::Redis::Ext::SPAN_COMMAND) do |span|
+ span.service = pin.service
+ span.span_type = Datadog::Contrib::Redis::Ext::TYPE
+ commands = get_pipeline_commands(args)
+ span.resource = commands.join("\n")
+ span.set_metric Datadog::Contrib::Redis::Ext::METRIC_PIPELINE_LEN, commands.length
+ Datadog::Contrib::Redis::Tags.set_common_tags(self, span)
+
+ response = super
+ end
+
+ response
+ end
+
+ def datadog_pin
+ @datadog_pin ||= begin
+ pin = Datadog::Pin.new(
+ datadog_configuration[:service_name],
+ app: Ext::APP,
+ app_type: Datadog::Ext::AppTypes::DB,
+ tracer: -> { datadog_configuration[:tracer] }
+ )
+ pin.onto(self)
+ end
+ end
+
+ private
+
+ def get_command(args)
+ if datadog_configuration[:command_args]
+ Datadog::Contrib::Redis::Quantize.format_command_args(*args)
+ else
+ Datadog::Contrib::Redis::Quantize.get_verb(*args)
+ end
+ end
+
+ def get_pipeline_commands(args)
+ if datadog_configuration[:command_args]
+ args[0].commands.map { |c| Datadog::Contrib::Redis::Quantize.format_command_args(c) }
+ else
+ args[0].commands.map { |c| Datadog::Contrib::Redis::Quantize.get_verb(c) }
+ end
+ end
+
+ def datadog_configuration
+ Datadog.configuration[:redis, options]
+ end
+ end
+ end
+ end
+ end
+ end
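This new module instruments Redis::Client with Module#prepend (via the included hook) instead of the old alias_method rewiring, so each wrapper reaches the original method with a plain super call. A minimal generic sketch of that pattern, using hypothetical class and module names:

    # Generic prepend-based instrumentation pattern (names are hypothetical, not ddtrace APIs).
    class Client
      def call(command)
        "executed #{command}"
      end
    end

    module CallInstrumentation
      def call(command)
        started = Process.clock_gettime(Process::CLOCK_MONOTONIC)
        super # runs the original Client#call
      ensure
        elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - started
        puts "call(#{command.inspect}) took #{elapsed.round(6)}s"
      end
    end

    Client.prepend(CallInstrumentation)
    Client.new.call('PING') # prints the timing line, returns "executed PING"

Compared with alias_method plus remove_method, prepending leaves the original method in place on the ancestor chain and composes more safely with other libraries that patch the same methods.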
data/lib/ddtrace/contrib/redis/patcher.rb
@@ -23,92 +23,10 @@ module Datadog
  require 'ddtrace/ext/app_types'
  require 'ddtrace/contrib/redis/tags'
  require 'ddtrace/contrib/redis/quantize'
+ require 'ddtrace/contrib/redis/instrumentation'

- patch_redis_client
+ ::Redis::Client.include(Instrumentation)
  end
-
- # rubocop:disable Metrics/MethodLength
- # rubocop:disable Metrics/BlockLength
- # rubocop:disable Metrics/AbcSize
- def patch_redis_client
- ::Redis::Client.class_eval do
- alias_method :call_without_datadog, :call
- remove_method :call
- def call(*args, &block)
- pin = Datadog::Pin.get_from(self)
- return call_without_datadog(*args, &block) unless pin && pin.tracer
-
- response = nil
- pin.tracer.trace(Datadog::Contrib::Redis::Ext::SPAN_COMMAND) do |span|
- span.service = pin.service
- span.span_type = Datadog::Contrib::Redis::Ext::TYPE
- span.resource = get_command(args)
- Datadog::Contrib::Redis::Tags.set_common_tags(self, span)
-
- response = call_without_datadog(*args, &block)
- end
-
- response
- end
-
- alias_method :call_pipeline_without_datadog, :call_pipeline
- remove_method :call_pipeline
- def call_pipeline(*args, &block)
- pin = Datadog::Pin.get_from(self)
- return call_pipeline_without_datadog(*args, &block) unless pin && pin.tracer
-
- response = nil
- pin.tracer.trace(Datadog::Contrib::Redis::Ext::SPAN_COMMAND) do |span|
- span.service = pin.service
- span.span_type = Datadog::Contrib::Redis::Ext::TYPE
- commands = get_pipeline_commands(args)
- span.resource = commands.join("\n")
- span.set_metric Datadog::Contrib::Redis::Ext::METRIC_PIPELINE_LEN, commands.length
- Datadog::Contrib::Redis::Tags.set_common_tags(self, span)
-
- response = call_pipeline_without_datadog(*args, &block)
- end
-
- response
- end
-
- def datadog_pin
- @datadog_pin ||= begin
- pin = Datadog::Pin.new(
- datadog_configuration[:service_name],
- app: Ext::APP,
- app_type: Datadog::Ext::AppTypes::DB,
- tracer: -> { datadog_configuration[:tracer] }
- )
- pin.onto(self)
- end
- end
-
- private
-
- def get_command(args)
- if datadog_configuration[:command_args]
- Datadog::Contrib::Redis::Quantize.format_command_args(*args)
- else
- Datadog::Contrib::Redis::Quantize.get_verb(*args)
- end
- end
-
- def get_pipeline_commands(args)
- if datadog_configuration[:command_args]
- args[0].commands.map { |c| Datadog::Contrib::Redis::Quantize.format_command_args(c) }
- else
- args[0].commands.map { |c| Datadog::Contrib::Redis::Quantize.get_verb(c) }
- end
- end
-
- def datadog_configuration
- Datadog.configuration[:redis, options]
- end
- end
- end
- # rubocop:enable Metrics/MethodLength
- # rubocop:enable Metrics/BlockLength

  end
  end
data/lib/ddtrace/contrib/resque/integration.rb
@@ -11,8 +11,6 @@ module Datadog
  include Contrib::Integration

  MINIMUM_VERSION = Gem::Version.new('1.0')
- # Maximum is first version it's NOT compatible with (not inclusive)
- MAXIMUM_VERSION = Gem::Version.new('3.0')

  register_as :resque, auto_patch: true

@@ -25,9 +23,7 @@ module Datadog
  end

  def self.compatible?
- super \
- && version >= MINIMUM_VERSION \
- && version < MAXIMUM_VERSION
+ super && version >= MINIMUM_VERSION
  end

  def default_configuration
data/lib/ddtrace/ext/priority.rb
@@ -4,13 +4,15 @@ module Datadog
  # Priority is a hint given to the backend so that it knows which traces to reject or kept.
  # In a distributed context, it should be set before any context propagation (fork, RPC calls) to be effective.
  module Priority
- # Use this to explicitely inform the backend that a trace should be rejected and not stored.
+ # Use this to explicitly inform the backend that a trace MUST be rejected and not stored.
+ # This includes rules and rate limits configured by the user through the {RuleSampler}.
  USER_REJECT = -1
- # Used by the builtin sampler to inform the backend that a trace should be rejected and not stored.
+ # Used by the {PrioritySampler} to inform the backend that a trace should be rejected and not stored.
  AUTO_REJECT = 0
- # Used by the builtin sampler to inform the backend that a trace should be kept and stored.
+ # Used by the {PrioritySampler} to inform the backend that a trace should be kept and stored.
  AUTO_KEEP = 1
- # Use this to explicitely inform the backend that a trace should be kept and stored.
+ # Use this to explicitly inform the backend that a trace MUST be kept and stored.
+ # This includes rules and rate limits configured by the user through the {RuleSampler}.
  USER_KEEP = 2
  end
  end
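For context, USER_KEEP and USER_REJECT are the values an application assigns when it wants to override automatic sampling. A hedged sketch, assuming the span.context.sampling_priority API described in the 0.x documentation:

    # Sketch only: forcing a sampling decision from application code (0.x-style API assumed).
    Datadog.tracer.trace('checkout.process') do |span|
      # Guarantee this trace is kept, overriding the automatic samplers
      span.context.sampling_priority = Datadog::Ext::Priority::USER_KEEP

      # ...or explicitly drop it instead:
      # span.context.sampling_priority = Datadog::Ext::Priority::USER_REJECT
    end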
data/lib/ddtrace/ext/profiling.rb
@@ -9,9 +9,9 @@ module Datadog
  ENV_ENDPOINT_COLLECTION_ENABLED = 'DD_PROFILING_ENDPOINT_COLLECTION_ENABLED'.freeze

  module Pprof
+ LABEL_KEY_LOCAL_ROOT_SPAN_ID = 'local root span id'.freeze
  LABEL_KEY_SPAN_ID = 'span id'.freeze
  LABEL_KEY_THREAD_ID = 'thread id'.freeze
- LABEL_KEY_TRACE_ID = 'trace id'.freeze
  LABEL_KEY_TRACE_ENDPOINT = 'trace endpoint'.freeze
  SAMPLE_VALUE_NO_VALUE = 0
  VALUE_TYPE_CPU = 'cpu-time'.freeze