datadog 2.0.0.beta2 → 2.0.0.rc1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (49) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +57 -1
  3. data/ext/datadog_profiling_native_extension/NativeExtensionDesign.md +1 -1
  4. data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +8 -20
  5. data/ext/datadog_profiling_native_extension/collectors_thread_context.c +18 -10
  6. data/ext/datadog_profiling_native_extension/crashtracker.c +108 -0
  7. data/ext/datadog_profiling_native_extension/extconf.rb +9 -23
  8. data/ext/datadog_profiling_native_extension/heap_recorder.c +38 -3
  9. data/ext/datadog_profiling_native_extension/heap_recorder.h +5 -0
  10. data/ext/datadog_profiling_native_extension/http_transport.c +0 -93
  11. data/ext/datadog_profiling_native_extension/libdatadog_helpers.c +86 -0
  12. data/ext/datadog_profiling_native_extension/libdatadog_helpers.h +4 -0
  13. data/ext/datadog_profiling_native_extension/native_extension_helpers.rb +2 -12
  14. data/ext/datadog_profiling_native_extension/private_vm_api_access.c +25 -86
  15. data/ext/datadog_profiling_native_extension/profiling.c +2 -0
  16. data/ext/datadog_profiling_native_extension/ruby_helpers.h +3 -5
  17. data/ext/datadog_profiling_native_extension/stack_recorder.c +156 -55
  18. data/lib/datadog/appsec/contrib/devise/tracking.rb +8 -0
  19. data/lib/datadog/core/configuration/settings.rb +10 -79
  20. data/lib/datadog/core/remote/client.rb +1 -5
  21. data/lib/datadog/core/remote/configuration/repository.rb +1 -1
  22. data/lib/datadog/core/remote/dispatcher.rb +3 -3
  23. data/lib/datadog/core/telemetry/emitter.rb +1 -1
  24. data/lib/datadog/core/telemetry/http/response.rb +4 -0
  25. data/lib/datadog/opentelemetry/sdk/span_processor.rb +18 -1
  26. data/lib/datadog/profiling/component.rb +26 -2
  27. data/lib/datadog/profiling/crashtracker.rb +91 -0
  28. data/lib/datadog/profiling/exporter.rb +6 -3
  29. data/lib/datadog/profiling/http_transport.rb +7 -11
  30. data/lib/datadog/profiling/profiler.rb +9 -2
  31. data/lib/datadog/profiling/stack_recorder.rb +6 -2
  32. data/lib/datadog/profiling.rb +1 -0
  33. data/lib/datadog/tracing/component.rb +5 -1
  34. data/lib/datadog/tracing/configuration/dynamic.rb +39 -1
  35. data/lib/datadog/tracing/configuration/settings.rb +1 -0
  36. data/lib/datadog/tracing/contrib/active_record/configuration/resolver.rb +1 -0
  37. data/lib/datadog/tracing/contrib/active_record/integration.rb +10 -0
  38. data/lib/datadog/tracing/contrib/configuration/resolver.rb +43 -0
  39. data/lib/datadog/tracing/contrib/trilogy/instrumentation.rb +1 -1
  40. data/lib/datadog/tracing/remote.rb +5 -1
  41. data/lib/datadog/tracing/sampling/ext.rb +5 -1
  42. data/lib/datadog/tracing/sampling/matcher.rb +60 -31
  43. data/lib/datadog/tracing/sampling/rule.rb +12 -5
  44. data/lib/datadog/tracing/sampling/rule_sampler.rb +17 -1
  45. data/lib/datadog/tracing/sampling/span/matcher.rb +13 -41
  46. data/lib/datadog/tracing/span_link.rb +12 -6
  47. data/lib/datadog/tracing/span_operation.rb +6 -4
  48. data/lib/datadog/version.rb +1 -1
  49. metadata +7 -5
@@ -7,8 +7,8 @@ module Datadog
7
7
  class Dispatcher
8
8
  attr_reader :receivers
9
9
 
10
- def initialize
11
- @receivers = []
10
+ def initialize(receivers)
11
+ @receivers = receivers
12
12
  end
13
13
 
14
14
  def dispatch(changes, repository)
@@ -45,7 +45,7 @@ module Datadog
45
45
  @block.call(path)
46
46
  end
47
47
 
48
- # Matches on the produc's path
48
+ # Matches on the product's path
49
49
  class Product < Matcher
50
50
  def initialize(products)
51
51
  block = ->(path) { products.include?(path.product) }
@@ -26,7 +26,7 @@ module Datadog
26
26
  seq_id = self.class.sequence.next
27
27
  payload = Request.build_payload(event, seq_id)
28
28
  res = @http_transport.request(request_type: event.type, payload: payload.to_json)
29
- Datadog.logger.debug { "Telemetry sent for event `#{event.type}` (status code: #{res.code})" }
29
+ Datadog.logger.debug { "Telemetry sent for event `#{event.type}` (code: #{res.code.inspect})" }
30
30
  res
31
31
  rescue => e
32
32
  Datadog.logger.debug("Unable to send telemetry request for event `#{event.type rescue 'unknown'}`: #{e}")
@@ -34,6 +34,10 @@ module Datadog
34
34
  nil
35
35
  end
36
36
 
37
+ def code
38
+ nil
39
+ end
40
+
37
41
  def inspect
38
42
  "#{self.class} ok?:#{ok?} unsupported?:#{unsupported?}, " \
39
43
  "not_found?:#{not_found?}, client_error?:#{client_error?}, " \
@@ -1,6 +1,8 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  require_relative 'trace/span'
4
+ require_relative '../../tracing/span_link'
5
+ require_relative '../../tracing/trace_digest'
4
6
 
5
7
  module Datadog
6
8
  module OpenTelemetry
@@ -87,6 +89,21 @@ module Datadog
87
89
  datadog_span.set_error([nil, span.status.description]) unless span.status.ok?
88
90
  datadog_span.set_tags(span.attributes)
89
91
 
92
+ unless span.links.nil?
93
+ datadog_span.links = span.links.map do |link|
94
+ Datadog::Tracing::SpanLink.new(
95
+ Datadog::Tracing::TraceDigest.new(
96
+ trace_id: link.span_context.hex_trace_id.to_i(16),
97
+ span_id: link.span_context.hex_span_id.to_i(16),
98
+ trace_sampling_priority: (link.span_context.trace_flags&.sampled? ? 1 : 0),
99
+ trace_state: link.span_context.tracestate&.to_s,
100
+ span_remote: link.span_context.remote?,
101
+ ),
102
+ attributes: link.attributes
103
+ )
104
+ end
105
+ end
106
+
90
107
  datadog_span
91
108
  end
92
109
 
@@ -119,7 +136,7 @@ module Datadog
119
136
  end
120
137
  attributes.flatten!(1)
121
138
 
122
- kwargs[:tags] = attributes
139
+ kwargs[:tags] = attributes.to_h
123
140
 
124
141
  [name, kwargs]
125
142
  end
@@ -72,8 +72,10 @@ module Datadog
72
72
  exporter = build_profiler_exporter(settings, recorder, worker, internal_metadata: internal_metadata)
73
73
  transport = build_profiler_transport(settings, agent_settings)
74
74
  scheduler = Profiling::Scheduler.new(exporter: exporter, transport: transport, interval: upload_period_seconds)
75
+ crashtracker = build_crashtracker(settings, transport)
76
+ profiler = Profiling::Profiler.new(worker: worker, scheduler: scheduler, optional_crashtracker: crashtracker)
75
77
 
76
- [Profiling::Profiler.new(worker: worker, scheduler: scheduler), { profiling_enabled: true }]
78
+ [profiler, { profiling_enabled: true }]
77
79
  end
78
80
 
79
81
  private_class_method def self.build_thread_context_collector(settings, recorder, optional_tracer, timeline_enabled)
@@ -110,6 +112,28 @@ module Datadog
110
112
  )
111
113
  end
112
114
 
115
+ private_class_method def self.build_crashtracker(settings, transport)
116
+ return unless settings.profiling.advanced.experimental_crash_tracking_enabled
117
+
118
+ # By default, the transport is an instance of HttpTransport, which validates the configuration and makes
119
+ # it available for us to use here.
120
+ # But we support overriding the transport with a user-specific one, which may e.g. write stuff to a file,
121
+ # and thus can't really provide a valid configuration to talk to a Datadog agent. Thus, in this situation,
122
+ # we can't use the crashtracker, even if enabled.
123
+ unless transport.respond_to?(:exporter_configuration)
124
+ Datadog.logger.warn(
125
+ 'Cannot enable profiling crash tracking as a custom settings.profiling.exporter.transport is configured'
126
+ )
127
+ return
128
+ end
129
+
130
+ Datadog::Profiling::Crashtracker.new(
131
+ exporter_configuration: transport.exporter_configuration,
132
+ tags: Datadog::Profiling::TagBuilder.call(settings: settings),
133
+ upload_timeout_seconds: settings.profiling.upload.timeout_seconds,
134
+ )
135
+ end
136
+
113
137
  private_class_method def self.enable_gc_profiling?(settings)
114
138
  return false unless settings.profiling.advanced.gc_enabled
115
139
 
@@ -243,7 +267,7 @@ module Datadog
243
267
 
244
268
  private_class_method def self.no_signals_workaround_enabled?(settings) # rubocop:disable Metrics/MethodLength
245
269
  setting_value = settings.profiling.advanced.no_signals_workaround_enabled
246
- legacy_ruby_that_should_use_workaround = RUBY_VERSION.start_with?('2.3.', '2.4.', '2.5.')
270
+ legacy_ruby_that_should_use_workaround = RUBY_VERSION.start_with?('2.5.')
247
271
 
248
272
  unless [true, false, :auto].include?(setting_value)
249
273
  Datadog.logger.error(
@@ -0,0 +1,91 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'libdatadog'
4
+
5
+ module Datadog
6
+ module Profiling
7
+ # Used to report Ruby VM crashes.
8
+ # The interesting bits are implemented as native code and using libdatadog.
9
+ #
10
+ # NOTE: The crashtracker native state is a singleton; so even if you create multiple instances of `Crashtracker`
11
+ # and start them, it only works as "last writer wins". Same for stop -- there's only one state, so calling stop
12
+ # on it will stop the crash tracker, regardless of which instance started it.
13
+ #
14
+ # Methods prefixed with _native_ are implemented in `crashtracker.c`
15
+ class Crashtracker
16
+ private
17
+
18
+ attr_reader \
19
+ :exporter_configuration,
20
+ :tags_as_array,
21
+ :path_to_crashtracking_receiver_binary,
22
+ :ld_library_path,
23
+ :upload_timeout_seconds
24
+
25
+ public
26
+
27
+ def initialize(
28
+ exporter_configuration:,
29
+ tags:,
30
+ upload_timeout_seconds:,
31
+ path_to_crashtracking_receiver_binary: Libdatadog.path_to_crashtracking_receiver_binary,
32
+ ld_library_path: Libdatadog.ld_library_path
33
+ )
34
+ @exporter_configuration = exporter_configuration
35
+ @tags_as_array = tags.to_a
36
+ @upload_timeout_seconds = upload_timeout_seconds
37
+ @path_to_crashtracking_receiver_binary = path_to_crashtracking_receiver_binary
38
+ @ld_library_path = ld_library_path
39
+ end
40
+
41
+ def start
42
+ start_or_update_on_fork(action: :start)
43
+ end
44
+
45
+ def reset_after_fork
46
+ start_or_update_on_fork(action: :update_on_fork)
47
+ end
48
+
49
+ def stop
50
+ begin
51
+ self.class._native_stop
52
+ Datadog.logger.debug('Crash tracking stopped successfully')
53
+ rescue => e
54
+ Datadog.logger.error("Failed to stop crash tracking: #{e.message}")
55
+ end
56
+ end
57
+
58
+ private
59
+
60
+ def start_or_update_on_fork(action:)
61
+ unless path_to_crashtracking_receiver_binary
62
+ Datadog.logger.warn(
63
+ "Cannot #{action} profiling crash tracking as no path_to_crashtracking_receiver_binary was found"
64
+ )
65
+ return
66
+ end
67
+
68
+ unless ld_library_path
69
+ Datadog.logger.warn(
70
+ "Cannot #{action} profiling crash tracking as no ld_library_path was found"
71
+ )
72
+ return
73
+ end
74
+
75
+ begin
76
+ self.class._native_start_or_update_on_fork(
77
+ action: action,
78
+ exporter_configuration: exporter_configuration,
79
+ path_to_crashtracking_receiver_binary: path_to_crashtracking_receiver_binary,
80
+ ld_library_path: ld_library_path,
81
+ tags_as_array: tags_as_array,
82
+ upload_timeout_seconds: Integer(upload_timeout_seconds),
83
+ )
84
+ Datadog.logger.debug("Crash tracking #{action} successful")
85
+ rescue => e
86
+ Datadog.logger.error("Failed to #{action} crash tracking: #{e.message}")
87
+ end
88
+ end
89
+ end
90
+ end
91
+ end
@@ -54,10 +54,11 @@ module Datadog
54
54
 
55
55
  def flush
56
56
  worker_stats = @worker.stats_and_reset_not_thread_safe
57
- start, finish, compressed_pprof = pprof_recorder.serialize
58
- @last_flush_finish_at = finish
57
+ serialization_result = pprof_recorder.serialize
58
+ return if serialization_result.nil?
59
59
 
60
- return if compressed_pprof.nil? # We don't want to report empty profiles
60
+ start, finish, compressed_pprof, profile_stats = serialization_result
61
+ @last_flush_finish_at = finish
61
62
 
62
63
  if duration_below_threshold?(start, finish)
63
64
  Datadog.logger.debug('Skipped exporting profiling events as profile duration is below minimum')
@@ -77,6 +78,8 @@ module Datadog
77
78
  internal_metadata: internal_metadata.merge(
78
79
  {
79
80
  worker_stats: worker_stats,
81
+ profile_stats: profile_stats,
82
+ recorder_stats: pprof_recorder.stats,
80
83
  gc: GC.stat,
81
84
  }
82
85
  ),
@@ -7,6 +7,8 @@ module Datadog
7
7
  # Used to report profiling data to Datadog.
8
8
  # Methods prefixed with _native_ are implemented in `http_transport.c`
9
9
  class HttpTransport
10
+ attr_reader :exporter_configuration
11
+
10
12
  def initialize(agent_settings:, site:, api_key:, upload_timeout_seconds:)
11
13
  @upload_timeout_milliseconds = (upload_timeout_seconds * 1_000).to_i
12
14
 
@@ -14,19 +16,19 @@ module Datadog
14
16
 
15
17
  @exporter_configuration =
16
18
  if agentless?(site, api_key)
17
- [:agentless, site, api_key]
19
+ [:agentless, site, api_key].freeze
18
20
  else
19
- [:agent, base_url_from(agent_settings)]
21
+ [:agent, base_url_from(agent_settings)].freeze
20
22
  end
21
23
 
22
- status, result = validate_exporter(@exporter_configuration)
24
+ status, result = validate_exporter(exporter_configuration)
23
25
 
24
26
  raise(ArgumentError, "Failed to initialize transport: #{result}") if status == :error
25
27
  end
26
28
 
27
29
  def export(flush)
28
30
  status, result = do_export(
29
- exporter_configuration: @exporter_configuration,
31
+ exporter_configuration: exporter_configuration,
30
32
  upload_timeout_milliseconds: @upload_timeout_milliseconds,
31
33
 
32
34
  # why "timespec"?
@@ -66,12 +68,6 @@ module Datadog
66
68
  end
67
69
  end
68
70
 
69
- # Used to log soft failures in `ddog_Vec_tag_push` (e.g. we still report the profile in these cases)
70
- # Called from native code
71
- def self.log_failure_to_process_tag(failure_details)
72
- Datadog.logger.warn("Failed to add tag to profiling request: #{failure_details}")
73
- end
74
-
75
71
  private
76
72
 
77
73
  def base_url_from(agent_settings)
@@ -136,7 +132,7 @@ module Datadog
136
132
  end
137
133
 
138
134
  def config_without_api_key
139
- [@exporter_configuration[0..1]].to_h
135
+ [exporter_configuration[0..1]].to_h
140
136
  end
141
137
  end
142
138
  end
@@ -8,21 +8,24 @@ module Datadog
8
8
 
9
9
  private
10
10
 
11
- attr_reader :worker, :scheduler
11
+ attr_reader :worker, :scheduler, :optional_crashtracker
12
12
 
13
13
  public
14
14
 
15
- def initialize(worker:, scheduler:)
15
+ def initialize(worker:, scheduler:, optional_crashtracker:)
16
16
  @worker = worker
17
17
  @scheduler = scheduler
18
+ @optional_crashtracker = optional_crashtracker
18
19
  end
19
20
 
20
21
  def start
21
22
  after_fork! do
23
+ optional_crashtracker.reset_after_fork if optional_crashtracker
22
24
  worker.reset_after_fork
23
25
  scheduler.reset_after_fork
24
26
  end
25
27
 
28
+ optional_crashtracker.start if optional_crashtracker
26
29
  worker.start(on_failure_proc: proc { component_failed(:worker) })
27
30
  scheduler.start(on_failure_proc: proc { component_failed(:scheduler) })
28
31
  end
@@ -32,6 +35,7 @@ module Datadog
32
35
 
33
36
  stop_worker
34
37
  stop_scheduler
38
+ optional_crashtracker.stop if optional_crashtracker
35
39
  end
36
40
 
37
41
  private
@@ -51,6 +55,9 @@ module Datadog
51
55
  'See previous log messages for details.'
52
56
  )
53
57
 
58
+ # We explicitly do not stop the crash tracker in this situation, under the assumption that, if a component failed,
59
+ # we're operating in a degraded state and crash tracking may still be helpful.
60
+
54
61
  if failed_component == :worker
55
62
  stop_scheduler
56
63
  elsif failed_component == :scheduler
@@ -33,11 +33,11 @@ module Datadog
33
33
  status, result = @no_concurrent_synchronize_mutex.synchronize { self.class._native_serialize(self) }
34
34
 
35
35
  if status == :ok
36
- start, finish, encoded_pprof = result
36
+ start, finish, encoded_pprof, profile_stats = result
37
37
 
38
38
  Datadog.logger.debug { "Encoded profile covering #{start.iso8601} to #{finish.iso8601}" }
39
39
 
40
- [start, finish, encoded_pprof]
40
+ [start, finish, encoded_pprof, profile_stats]
41
41
  else
42
42
  error_message = result
43
43
 
@@ -64,6 +64,10 @@ module Datadog
64
64
  def reset_after_fork
65
65
  self.class._native_reset_after_fork(self)
66
66
  end
67
+
68
+ def stats
69
+ self.class._native_stats(self)
70
+ end
67
71
  end
68
72
  end
69
73
  end
@@ -143,6 +143,7 @@ module Datadog
143
143
  require_relative 'profiling/collectors/idle_sampling_helper'
144
144
  require_relative 'profiling/collectors/stack'
145
145
  require_relative 'profiling/collectors/thread_context'
146
+ require_relative 'profiling/crashtracker'
146
147
  require_relative 'profiling/stack_recorder'
147
148
  require_relative 'profiling/exporter'
148
149
  require_relative 'profiling/flush'
@@ -124,9 +124,13 @@ module Datadog
124
124
  end
125
125
 
126
126
  WRITER_RECORD_ENVIRONMENT_INFORMATION_CALLBACK = lambda do |_, responses|
127
- Tracing::Diagnostics::EnvironmentLogger.collect_and_log!(responses: responses)
127
+ WRITER_RECORD_ENVIRONMENT_INFORMATION_ONLY_ONCE.run do
128
+ Tracing::Diagnostics::EnvironmentLogger.collect_and_log!(responses: responses)
129
+ end
128
130
  end
129
131
 
132
+ WRITER_RECORD_ENVIRONMENT_INFORMATION_ONLY_ONCE = Core::Utils::OnlyOnce.new
133
+
130
134
  # Create new lambda for writer callback,
131
135
  # capture the current sampler in the callback closure.
132
136
  def writer_update_priority_sampler_rates_callback(sampler)
@@ -53,8 +53,46 @@ module Datadog
53
53
  end
54
54
  end
55
55
 
56
+ # Dynamic configuration for `DD_TRACE_SAMPLING_RULES`.
57
+ class TracingSamplingRules < SimpleOption
58
+ def initialize
59
+ super('tracing_sampling_rules', 'DD_TRACE_SAMPLING_RULES', :rules)
60
+ end
61
+
62
+ # Ensures sampler is rebuilt and new configuration is applied
63
+ def call(tracing_sampling_rules)
64
+ # Modify the remote configuration value that it matches the
65
+ # local environment variable it configures.
66
+ if tracing_sampling_rules
67
+ tracing_sampling_rules.each do |rule|
68
+ next unless (tags = rule['tags'])
69
+
70
+ # Tag maps come in as arrays of 'key' and `value_glob`.
71
+ # We need to convert them into a hash for local use.
72
+ tag_array = tags.map! do |tag|
73
+ [tag['key'], tag['value_glob']]
74
+ end
75
+
76
+ rule['tags'] = tag_array.to_h
77
+ end
78
+
79
+ # The configuration is stored as JSON, so we need to convert it back
80
+ tracing_sampling_rules = tracing_sampling_rules.to_json
81
+ end
82
+
83
+ super(tracing_sampling_rules)
84
+ Datadog.send(:components).reconfigure_live_sampler
85
+ end
86
+
87
+ protected
88
+
89
+ def configuration_object
90
+ Datadog.configuration.tracing.sampling
91
+ end
92
+ end
93
+
56
94
  # List of all tracing dynamic configuration options supported.
57
- OPTIONS = [LogInjectionEnabled, TracingHeaderTags, TracingSamplingRate].map do |option_class|
95
+ OPTIONS = [LogInjectionEnabled, TracingHeaderTags, TracingSamplingRate, TracingSamplingRules].map do |option_class|
58
96
  option = option_class.new
59
97
  [option.name, option.env_var, option]
60
98
  end
@@ -274,6 +274,7 @@ module Datadog
274
274
  # @return [String,nil]
275
275
  # @public_api
276
276
  option :rules do |o|
277
+ o.type :string, nilable: true
277
278
  o.default { ENV.fetch(Configuration::Ext::Sampling::ENV_RULES, nil) }
278
279
  end
279
280
 
@@ -32,6 +32,7 @@ module Datadog
32
32
  # based on addition order (`#add`).
33
33
  class Resolver < Contrib::Configuration::Resolver
34
34
  prepend MakaraResolver
35
+ prepend Contrib::Configuration::CachingResolver
35
36
 
36
37
  def initialize(active_record_configuration = nil)
37
38
  super()
@@ -4,6 +4,7 @@ require_relative 'configuration/resolver'
4
4
  require_relative 'configuration/settings'
5
5
  require_relative 'events'
6
6
  require_relative 'patcher'
7
+ require_relative '../component'
7
8
  require_relative '../integration'
8
9
  require_relative '../rails/ext'
9
10
  require_relative '../rails/utils'
@@ -50,6 +51,15 @@ module Datadog
50
51
  def resolver
51
52
  @resolver ||= Configuration::Resolver.new
52
53
  end
54
+
55
+ def reset_resolver_cache
56
+ @resolver&.reset_cache
57
+ end
58
+
59
+ Contrib::Component.register('activerecord') do |_config|
60
+ # Ensure resolver cache is reset on configuration change
61
+ Datadog.configuration.tracing.fetch_integration(:active_record).reset_resolver_cache
62
+ end
53
63
  end
54
64
  end
55
65
  end
@@ -79,6 +79,49 @@ module Datadog
79
79
  matcher
80
80
  end
81
81
  end
82
+
83
+ # The {CachingResolver} is a mixin that provides caching functionality to the {Resolver} class.
84
+ # This is useful when {Resolver#resolve} is called with values that are expensive to compute.
85
+ # This is a size-limited, FIFO cache.
86
+ #
87
+ # @example
88
+ # class MyResolver < Datadog::Tracing::Contrib::Configuration::Resolver
89
+ # prepend Datadog::Tracing::Contrib::Configuration::CachingResolver
90
+ # # ...
91
+ # end
92
+ module CachingResolver
93
+ # @param [Integer] cache_limit maximum number of entries to cache
94
+ def initialize(*args, cache_limit: 200)
95
+ super(*args)
96
+
97
+ @cache_limit = cache_limit
98
+ @cache = {}
99
+ end
100
+
101
+ # (see Resolver#resolve)
102
+ def resolve(value)
103
+ if @cache.key?(value)
104
+ @cache[value]
105
+ else
106
+ if @cache.size >= @cache_limit
107
+ @cache.shift # Remove the oldest entry if cache is full
108
+ end
109
+
110
+ @cache[value] = super
111
+ end
112
+ end
113
+
114
+ # (see Resolver#add)
115
+ def add(matcher, value)
116
+ reset_cache # Bust the cache when a new matcher is added
117
+ super
118
+ end
119
+
120
+ # Clears the internal cache.
121
+ def reset_cache
122
+ @cache.clear
123
+ end
124
+ end
82
125
  end
83
126
  end
84
127
  end
@@ -24,7 +24,7 @@ module Datadog
24
24
 
25
25
  Tracing.trace(Ext::SPAN_QUERY, service: service) do |span, trace_op|
26
26
  span.resource = sql
27
- span.span_type = Tracing::Metadata::Ext::SQL::TYPE
27
+ span.type = Tracing::Metadata::Ext::SQL::TYPE
28
28
 
29
29
  if datadog_configuration[:peer_service]
30
30
  span.set_tag(
@@ -12,12 +12,16 @@ module Datadog
12
12
  class << self
13
13
  PRODUCT = 'APM_TRACING'
14
14
 
15
+ CAPABILITIES = [
16
+ 1 << 29 # APM_TRACING_SAMPLE_RULES: Dynamic trace sampling rules configuration
17
+ ].freeze
18
+
15
19
  def products
16
20
  [PRODUCT]
17
21
  end
18
22
 
19
23
  def capabilities
20
- [] # No capabilities advertised
24
+ CAPABILITIES
21
25
  end
22
26
 
23
27
  def process_config(config, content)
@@ -40,7 +40,7 @@ module Datadog
40
40
  DEFAULT = '-0'
41
41
  # The sampling rate received in the agent's http response.
42
42
  AGENT_RATE = '-1'
43
- # Sampling rule or sampling rate based on tracer config.
43
+ # Locally configured rule.
44
44
  TRACE_SAMPLING_RULE = '-3'
45
45
  # User directly sets sampling priority via {Tracing.reject!} or {Tracing.keep!},
46
46
  # or by a custom sampler implementation.
@@ -49,6 +49,10 @@ module Datadog
49
49
  ASM = '-5'
50
50
  # Single Span Sampled.
51
51
  SPAN_SAMPLING_RATE = '-8'
52
+ # Dynamically configured rule, explicitly created by the user.
53
+ REMOTE_USER_RULE = '-11'
54
+ # Dynamically configured rule, automatically generated by Datadog.
55
+ REMOTE_DYNAMIC_RULE = '-12'
52
56
  end
53
57
  end
54
58
  end