ddtrace 0.34.2 → 0.35.0

Files changed (103)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +58 -9
  3. data/.circleci/images/primary/Dockerfile-jruby-9.2 +77 -0
  4. data/Appraisals +1 -1
  5. data/CHANGELOG.md +33 -1
  6. data/Rakefile +1 -1
  7. data/ddtrace.gemspec +5 -3
  8. data/docs/DevelopmentGuide.md +1 -1
  9. data/docs/GettingStarted.md +89 -36
  10. data/lib/ddtrace.rb +1 -1
  11. data/lib/ddtrace/buffer.rb +9 -9
  12. data/lib/ddtrace/chunker.rb +34 -0
  13. data/lib/ddtrace/configuration.rb +28 -5
  14. data/lib/ddtrace/configuration/components.rb +154 -0
  15. data/lib/ddtrace/configuration/settings.rb +131 -63
  16. data/lib/ddtrace/context.rb +6 -6
  17. data/lib/ddtrace/context_flush.rb +1 -1
  18. data/lib/ddtrace/contrib/action_cable/instrumentation.rb +1 -1
  19. data/lib/ddtrace/contrib/action_pack/action_controller/instrumentation.rb +2 -2
  20. data/lib/ddtrace/contrib/action_view/events/render_partial.rb +1 -1
  21. data/lib/ddtrace/contrib/action_view/events/render_template.rb +1 -1
  22. data/lib/ddtrace/contrib/action_view/instrumentation/partial_renderer.rb +1 -1
  23. data/lib/ddtrace/contrib/action_view/instrumentation/template_renderer.rb +2 -2
  24. data/lib/ddtrace/contrib/action_view/patcher.rb +1 -1
  25. data/lib/ddtrace/contrib/active_record/events/instantiation.rb +1 -1
  26. data/lib/ddtrace/contrib/active_record/events/sql.rb +1 -1
  27. data/lib/ddtrace/contrib/active_support/cache/instrumentation.rb +2 -2
  28. data/lib/ddtrace/contrib/active_support/notifications/subscription.rb +2 -2
  29. data/lib/ddtrace/contrib/analytics.rb +1 -1
  30. data/lib/ddtrace/contrib/dalli/patcher.rb +1 -1
  31. data/lib/ddtrace/contrib/dalli/quantize.rb +1 -1
  32. data/lib/ddtrace/contrib/elasticsearch/patcher.rb +1 -1
  33. data/lib/ddtrace/contrib/excon/middleware.rb +2 -2
  34. data/lib/ddtrace/contrib/faraday/patcher.rb +1 -1
  35. data/lib/ddtrace/contrib/grape/endpoint.rb +5 -5
  36. data/lib/ddtrace/contrib/grape/patcher.rb +1 -1
  37. data/lib/ddtrace/contrib/grpc/datadog_interceptor/client.rb +1 -1
  38. data/lib/ddtrace/contrib/grpc/datadog_interceptor/server.rb +2 -2
  39. data/lib/ddtrace/contrib/grpc/patcher.rb +1 -1
  40. data/lib/ddtrace/contrib/http/instrumentation.rb +1 -1
  41. data/lib/ddtrace/contrib/mongodb/subscribers.rb +2 -2
  42. data/lib/ddtrace/contrib/patchable.rb +1 -1
  43. data/lib/ddtrace/contrib/patcher.rb +3 -3
  44. data/lib/ddtrace/contrib/presto/instrumentation.rb +3 -3
  45. data/lib/ddtrace/contrib/presto/patcher.rb +1 -1
  46. data/lib/ddtrace/contrib/rack/middlewares.rb +2 -2
  47. data/lib/ddtrace/contrib/rack/patcher.rb +2 -2
  48. data/lib/ddtrace/contrib/rack/request_queue.rb +1 -1
  49. data/lib/ddtrace/contrib/rake/instrumentation.rb +2 -2
  50. data/lib/ddtrace/contrib/redis/quantize.rb +1 -1
  51. data/lib/ddtrace/contrib/resque/resque_job.rb +2 -2
  52. data/lib/ddtrace/contrib/sidekiq/tracing.rb +1 -1
  53. data/lib/ddtrace/contrib/sinatra/env.rb +20 -0
  54. data/lib/ddtrace/contrib/sinatra/ext.rb +6 -0
  55. data/lib/ddtrace/contrib/sinatra/patcher.rb +1 -0
  56. data/lib/ddtrace/contrib/sinatra/tracer.rb +98 -35
  57. data/lib/ddtrace/contrib/sinatra/tracer_middleware.rb +16 -13
  58. data/lib/ddtrace/correlation.rb +9 -6
  59. data/lib/ddtrace/diagnostics/health.rb +2 -6
  60. data/lib/ddtrace/encoding.rb +13 -39
  61. data/lib/ddtrace/event.rb +1 -1
  62. data/lib/ddtrace/ext/correlation.rb +1 -0
  63. data/lib/ddtrace/ext/diagnostics.rb +2 -0
  64. data/lib/ddtrace/ext/environment.rb +1 -0
  65. data/lib/ddtrace/ext/forced_tracing.rb +1 -1
  66. data/lib/ddtrace/logger.rb +3 -44
  67. data/lib/ddtrace/metrics.rb +5 -5
  68. data/lib/ddtrace/monkey.rb +1 -1
  69. data/lib/ddtrace/opentracer/global_tracer.rb +1 -1
  70. data/lib/ddtrace/pin.rb +1 -1
  71. data/lib/ddtrace/pipeline.rb +1 -1
  72. data/lib/ddtrace/propagation/http_propagator.rb +2 -2
  73. data/lib/ddtrace/runtime/cgroup.rb +1 -1
  74. data/lib/ddtrace/runtime/container.rb +1 -1
  75. data/lib/ddtrace/runtime/metrics.rb +5 -2
  76. data/lib/ddtrace/sampler.rb +2 -2
  77. data/lib/ddtrace/sampling/rule.rb +1 -1
  78. data/lib/ddtrace/sampling/rule_sampler.rb +1 -1
  79. data/lib/ddtrace/span.rb +4 -4
  80. data/lib/ddtrace/sync_writer.rb +3 -8
  81. data/lib/ddtrace/tracer.rb +26 -31
  82. data/lib/ddtrace/transport/http.rb +1 -1
  83. data/lib/ddtrace/transport/http/api/instance.rb +4 -0
  84. data/lib/ddtrace/transport/http/builder.rb +3 -5
  85. data/lib/ddtrace/transport/http/client.rb +7 -64
  86. data/lib/ddtrace/transport/http/response.rb +1 -1
  87. data/lib/ddtrace/transport/http/statistics.rb +1 -1
  88. data/lib/ddtrace/transport/http/traces.rb +10 -7
  89. data/lib/ddtrace/transport/io.rb +1 -1
  90. data/lib/ddtrace/transport/io/client.rb +2 -2
  91. data/lib/ddtrace/transport/io/response.rb +3 -1
  92. data/lib/ddtrace/transport/io/traces.rb +50 -3
  93. data/lib/ddtrace/transport/parcel.rb +0 -4
  94. data/lib/ddtrace/transport/statistics.rb +2 -2
  95. data/lib/ddtrace/transport/traces.rb +160 -10
  96. data/lib/ddtrace/utils.rb +1 -1
  97. data/lib/ddtrace/version.rb +2 -2
  98. data/lib/ddtrace/workers.rb +5 -13
  99. data/lib/ddtrace/workers/async.rb +2 -2
  100. data/lib/ddtrace/workers/runtime_metrics.rb +47 -0
  101. data/lib/ddtrace/workers/trace_writer.rb +199 -0
  102. data/lib/ddtrace/writer.rb +20 -27
  103. metadata +22 -32
@@ -41,7 +41,7 @@ module Datadog
  begin
    block.call(*args)
  rescue StandardError => e
- Datadog::Logger.log.debug("Error while handling '#{key}' for '#{name}' event: #{e.message}")
+ Datadog.logger.debug("Error while handling '#{key}' for '#{name}' event: #{e.message}")
  end
  end

@@ -2,6 +2,7 @@ module Datadog
  module Ext
  module Correlation
  ATTR_ENV = 'dd.env'.freeze
+ ATTR_SERVICE = 'dd.service'.freeze
  ATTR_SPAN_ID = 'dd.span_id'.freeze
  ATTR_TRACE_ID = 'dd.trace_id'.freeze
  ATTR_VERSION = 'dd.version'.freeze
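The new dd.service attribute joins dd.env, dd.trace_id, dd.span_id, and dd.version as correlation keys that can be injected into log lines. A minimal sketch of trace/log correlation using the existing active_correlation helper (whether the 0.35.0 identifier also exposes the service field is an assumption based on this hunk and the data/lib/ddtrace/correlation.rb change listed above):

  require 'ddtrace'
  require 'logger'

  logger = Logger.new(STDOUT)
  logger.formatter = proc do |severity, time, _progname, msg|
    # Interpolating the correlation identifier prepends the dd.* key=value
    # pairs so log lines can be joined with traces in Datadog.
    "[#{time}][#{severity}][#{Datadog.tracer.active_correlation}] #{msg}\n"
  end

  logger.info('request processed')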
@@ -23,6 +23,8 @@ module Datadog
  METRIC_QUEUE_SPANS = 'datadog.tracer.queue.spans'.freeze
  METRIC_SAMPLING_SERVICE_CACHE_LENGTH = 'datadog.tracer.sampling.service_cache_length'.freeze
  METRIC_TRACES_FILTERED = 'datadog.tracer.traces.filtered'.freeze
+ METRIC_TRANSPORT_CHUNKED = 'datadog.tracer.transport.chunked'.freeze
+ METRIC_TRANSPORT_TRACE_TOO_LARGE = 'datadog.tracer.transport.trace_too_large'.freeze
  METRIC_WRITER_CPU_TIME = 'datadog.tracer.writer.cpu_time'.freeze
  end
  end
@@ -7,6 +7,7 @@ module Datadog
  ENV_VERSION = 'DD_VERSION'.freeze

  TAG_ENV = 'env'.freeze
+ TAG_SERVICE = 'service'.freeze
  TAG_VERSION = 'version'.freeze
  end
  end
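Together with dd.service above, the new service tag rounds out env/service/version as first-class trace tags. A hedged configuration sketch using the existing c.tags setting (the same setting the tracer.rb hunk further down stops reading directly; wiring env and version through tags like this is an assumption, not necessarily the canonical 0.35.0 mechanism):

  require 'ddtrace'

  Datadog.configure do |c|
    # Keys mirror the TAG_ENV / TAG_VERSION constants in Datadog::Ext::Environment;
    # DD_VERSION is the variable named by ENV_VERSION in this hunk, DD_ENV is assumed.
    c.tags = { 'env' => ENV['DD_ENV'], 'version' => ENV['DD_VERSION'] }
  end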
@@ -12,7 +12,7 @@ module Datadog

  # Only log each deprecation warning once (safeguard against log spam)
  unless @deprecation_warning_shown
- Datadog::Logger.log.warn(
+ Datadog.logger.warn(
  'forced tracing: Datadog::Ext::ForcedTracing has been renamed to Datadog::Ext::ManualTracing'
  )
  @deprecation_warning_shown = true
@@ -1,57 +1,16 @@
  require 'logger'

  module Datadog
- LOG_PREFIX = 'ddtrace'.freeze
-
  # A custom logger with minor enhancements:
  # - progname defaults to ddtrace to clearly identify Datadog dd-trace-rb related messages
  # - adds last caller stack-trace info to know where the message comes from
  class Logger < ::Logger
- # Global, memoized, lazy initialized instance of a logger that is used within the the Datadog
- # namespace. This logger outputs to +STDOUT+ by default, and is considered thread-safe.
- class << self
- def log
- unless defined? @logger
- @logger = Datadog::Logger.new(STDOUT)
- @logger.level = Logger::WARN
- end
- @logger
- end
-
- # Override the default logger with a custom one.
- def log=(logger)
- return unless logger
- return unless logger.respond_to? :methods
- return unless logger.respond_to? :error
- if logger.respond_to? :methods
- unimplemented = new(STDOUT).methods - logger.methods
- unless unimplemented.empty?
- logger.error("logger #{logger} does not implement #{unimplemented}")
- return
- end
- end
- @logger = logger
- end
-
- # Activate the debug mode providing more information related to tracer usage
- # Default to Warn level unless using custom logger
- def debug_logging=(value)
- if value
- log.level = Logger::DEBUG
- elsif log.is_a?(Datadog::Logger)
- log.level = Logger::WARN
- end
- end
-
- # Return if the debug mode is activated or not
- def debug_logging
- log.level == Logger::DEBUG
- end
- end
+ PREFIX = 'ddtrace'.freeze

  def initialize(*args, &block)
  super
- self.progname = LOG_PREFIX
+ self.progname = PREFIX
+ self.level = ::Logger::WARN
  end

  def add(severity, message = nil, progname = nil, &block)
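This is the recurring change across the rest of the diff: the Datadog::Logger.log singleton and its debug_logging accessors are gone, and call sites go through Datadog.logger instead, with verbosity driven by configuration. A minimal sketch of the replacement usage, inferred from the hunks in this diff (treating c.diagnostics.debug, which appears in the tracer.rb write hunk below, as the supported debug toggle is an assumption):

  require 'ddtrace'

  # 0.34.x:
  #   Datadog::Logger.log.warn('something happened')
  #   Datadog::Logger.debug_logging = true

  # 0.35.0:
  Datadog.logger.warn('something happened')

  Datadog.configure do |c|
    c.diagnostics.debug = true # verbose tracer logging
  end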
@@ -63,7 +63,7 @@ module Datadog

  statsd.count(stat, value, metric_options(options))
  rescue StandardError => e
- Datadog::Logger.log.error("Failed to send count stat. Cause: #{e.message} Source: #{e.backtrace.first}")
+ Datadog.logger.error("Failed to send count stat. Cause: #{e.message} Source: #{e.backtrace.first}")
  end

  def distribution(stat, value = nil, options = nil, &block)
@@ -73,7 +73,7 @@ module Datadog

  statsd.distribution(stat, value, metric_options(options))
  rescue StandardError => e
- Datadog::Logger.log.error("Failed to send distribution stat. Cause: #{e.message} Source: #{e.backtrace.first}")
+ Datadog.logger.error("Failed to send distribution stat. Cause: #{e.message} Source: #{e.backtrace.first}")
  end

  def increment(stat, options = nil)
@@ -82,7 +82,7 @@ module Datadog

  statsd.increment(stat, metric_options(options))
  rescue StandardError => e
- Datadog::Logger.log.error("Failed to send increment stat. Cause: #{e.message} Source: #{e.backtrace.first}")
+ Datadog.logger.error("Failed to send increment stat. Cause: #{e.message} Source: #{e.backtrace.first}")
  end

  def gauge(stat, value = nil, options = nil, &block)
@@ -92,7 +92,7 @@ module Datadog

  statsd.gauge(stat, value, metric_options(options))
  rescue StandardError => e
- Datadog::Logger.log.error("Failed to send gauge stat. Cause: #{e.message} Source: #{e.backtrace.first}")
+ Datadog.logger.error("Failed to send gauge stat. Cause: #{e.message} Source: #{e.backtrace.first}")
  end

  def time(stat, options = nil)
@@ -108,7 +108,7 @@ module Datadog
  distribution(stat, ((finished - start) * 1000), options)
  end
  rescue StandardError => e
- Datadog::Logger.log.error("Failed to send time stat. Cause: #{e.message} Source: #{e.backtrace.first}")
+ Datadog.logger.error("Failed to send time stat. Cause: #{e.message} Source: #{e.backtrace.first}")
  end
  end

@@ -48,7 +48,7 @@ module Datadog
  end

  def log_deprecation_warning(method)
- Datadog::Logger.log.warn("#{method}:#{DEPRECATION_WARNING}")
+ Datadog.logger.warn("#{method}:#{DEPRECATION_WARNING}")
  end

  class << self
@@ -6,7 +6,7 @@ module Datadog
  super.tap do
  if tracer.class <= Datadog::OpenTracer::Tracer
  # Update the Datadog global tracer, too.
- Datadog.configuration.tracer = tracer.datadog_tracer
+ Datadog.configure { |c| c.tracer = tracer.datadog_tracer }
  end
  end
  end
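For OpenTracing users, the practical effect is that installing the global tracer now updates the Datadog side through Datadog.configure rather than by assigning Datadog.configuration.tracer directly. The usual setup is unchanged; a sketch, using the require paths ddtrace has documented for its OpenTracer support:

  require 'opentracing'
  require 'ddtrace'
  require 'ddtrace/opentracer'

  # Assigning the global tracer runs the hook patched above, keeping the
  # OpenTracing tracer and the Datadog tracer configuration in sync.
  OpenTracing.global_tracer = Datadog::OpenTracer::Tracer.new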
@@ -107,7 +107,7 @@ module Datadog
  def log_deprecation_warning(method_name)
  # Only log each deprecation warning once (safeguard against log spam)
  do_once(method_name) do
- Datadog::Logger.log.warn("#{method_name}:#{DEPRECATION_WARNING}")
+ Datadog.logger.warn("#{method_name}:#{DEPRECATION_WARNING}")
  end
  end
  end
@@ -34,7 +34,7 @@ module Datadog

  result || []
  rescue => e
- Datadog::Logger.log.debug(
+ Datadog.logger.debug(
  "trace dropped entirely due to `Pipeline.before_flush` error: #{e}"
  )

@@ -19,7 +19,7 @@ module Datadog
  def self.inject!(context, env)
  # Prevent propagation from being attempted if context provided is nil.
  if context.nil?
- ::Datadog::Logger.log.debug('Cannot inject context into env to propagate over HTTP: context is nil.'.freeze)
+ ::Datadog.logger.debug('Cannot inject context into env to propagate over HTTP: context is nil.'.freeze)
  return
  end

@@ -58,7 +58,7 @@ module Datadog
  # Return an empty/new context if we have a mismatch in values extracted
  msg = "#{context.trace_id} != #{extracted_context.trace_id} && " \
  "#{context.span_id} != #{extracted_context.span_id}"
- ::Datadog::Logger.log.debug("Cannot extract context from HTTP: extracted contexts differ, #{msg}".freeze)
+ ::Datadog.logger.debug("Cannot extract context from HTTP: extracted contexts differ, #{msg}".freeze)
  # DEV: This will return from `self.extract` not this `each` block
  return ::Datadog::Context.new
  end
@@ -27,7 +27,7 @@ module Datadog
  end
  end
  rescue StandardError => e
- Datadog::Logger.log.error("Error while parsing cgroup. Cause: #{e.message} Location: #{e.backtrace.first}")
+ Datadog.logger.error("Error while parsing cgroup. Cause: #{e.message} Location: #{e.backtrace.first}")
  end
  end
  end
@@ -61,7 +61,7 @@ module Datadog
  break
  end
  rescue StandardError => e
- Datadog::Logger.log.error(
+ Datadog.logger.error(
  "Error while parsing container info. Cause: #{e.message} Location: #{e.backtrace.first}"
  )
  end
@@ -14,8 +14,9 @@ module Datadog
  super

  # Initialize service list
- @services = Set.new
+ @services = Set.new(options.fetch(:services, []))
  @service_tags = nil
+ compile_service_tags!
  end

  def associate_with_span(span)
@@ -55,6 +56,8 @@ module Datadog
  def gc_metrics
  Hash[
  GC.stat.map do |k, v|
+ next if v.is_a?(Hash) # TODO: JRuby supports additional nested metrics
+
  ["#{Ext::Runtime::Metrics::METRIC_GC_PREFIX}.#{k}", v]
  end
  ]
@@ -63,7 +66,7 @@ module Datadog
  def try_flush
  yield
  rescue StandardError => e
- Datadog::Logger.log.error("Error while sending runtime metric. Cause: #{e.message}")
+ Datadog.logger.error("Error while sending runtime metric. Cause: #{e.message}")
  end

  def default_metric_options
@@ -48,7 +48,7 @@ module Datadog
  # sampled.
  def initialize(sample_rate = 1.0)
  unless sample_rate > 0.0 && sample_rate <= 1.0
- Datadog::Logger.log.error('sample rate is not between 0 and 1, disabling the sampler')
+ Datadog.logger.error('sample rate is not between 0 and 1, disabling the sampler')
  sample_rate = 1.0
  end

@@ -176,7 +176,7 @@ module Datadog
  update_all(rate_by_service)

  # Emit metric for service cache size
- Diagnostics::Health.metrics.sampling_service_cache_length(length)
+ Datadog.health_metrics.sampling_service_cache_length(length)
  end

  private
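Alongside the logger change, internal health metrics are now reached through Datadog.health_metrics rather than Diagnostics::Health.metrics (the span.rb hunk below makes the same swap). A hedged sketch of turning health metrics on (the diagnostics.health_metrics settings and the dogstatsd-ruby client shown here are assumptions based on the existing 0.3x configuration surface, not something this diff spells out):

  require 'ddtrace'
  require 'datadog/statsd'

  Datadog.configure do |c|
    c.diagnostics.health_metrics.enabled = true
    c.diagnostics.health_metrics.statsd = Datadog::Statsd.new('localhost', 8125)
  end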
@@ -28,7 +28,7 @@ module Datadog
  def match?(span)
  @matcher.match?(span)
  rescue => e
- Datadog::Logger.log.error("Matcher failed. Cause: #{e.message} Source: #{e.backtrace.first}")
+ Datadog.logger.error("Matcher failed. Cause: #{e.message} Source: #{e.backtrace.first}")
  nil
  end

@@ -109,7 +109,7 @@ module Datadog
  set_limiter_metrics(span, rate_limiter.effective_rate)
  end
  rescue StandardError => e
- Datadog::Logger.log.error("Rule sampling failed. Cause: #{e.message} Source: #{e.backtrace.first}")
+ Datadog.logger.error("Rule sampling failed. Cause: #{e.message} Source: #{e.backtrace.first}")
  yield(span)
  end

@@ -104,7 +104,7 @@ module Datadog
  @meta[key] = value.to_s
  end
  rescue StandardError => e
- Datadog::Logger.log.debug("Unable to set the tag #{key}, ignoring it. Caused by: #{e}")
+ Datadog.logger.debug("Unable to set the tag #{key}, ignoring it. Caused by: #{e}")
  end

  # This method removes a tag for the given key.
@@ -127,7 +127,7 @@ module Datadog
  value = Float(value)
  @metrics[key] = value
  rescue StandardError => e
- Datadog::Logger.log.debug("Unable to set the metric #{key}, ignoring it. Caused by: #{e}")
+ Datadog.logger.debug("Unable to set the metric #{key}, ignoring it. Caused by: #{e}")
  end

  # This method removes a metric for the given key. It acts like {#remove_tag}.
@@ -179,8 +179,8 @@ module Datadog
  @context.close_span(self)
  @tracer.record(self)
  rescue StandardError => e
- Datadog::Logger.log.debug("error recording finished trace: #{e}")
- Diagnostics::Health.metrics.error_span_finish(1, tags: ["error:#{e.class.name}"])
+ Datadog.logger.debug("error recording finished trace: #{e}")
+ Datadog.health_metrics.error_span_finish(1, tags: ["error:#{e.class.name}"])
  end
  self
  end
@@ -4,10 +4,10 @@ require 'ddtrace/runtime/metrics'

  module Datadog
  # SyncWriter flushes both services and traces synchronously
+ # DEV: To be replaced by Datadog::Workers::TraceWriter.
  class SyncWriter
  attr_reader \
  :priority_sampler,
- :runtime_metrics,
  :transport

  def initialize(options = {})
@@ -16,18 +16,13 @@ module Datadog
  Transport::HTTP.default(transport_options)
  end

- # Runtime metrics
- @runtime_metrics = options.fetch(:runtime_metrics) do
- Runtime::Metrics.new
- end
-
  @priority_sampler = options.fetch(:priority_sampler, nil)
  end

  def write(trace, services = nil)
  unless services.nil?
  Datadog::Patcher.do_once('SyncWriter#write') do
- Datadog::Logger.log.warn(%(
+ Datadog.logger.warn(%(
  write: Writing services has been deprecated and no longer need to be provided.
  write(traces, services) can be updted to write(traces)
  ))
@@ -38,7 +38,7 @@ module Datadog
  proc { flush_trace(trace) }
  )
  rescue => e
- Logger.log.debug(e)
+ Datadog.logger.debug(e)
  end

  # Added for interface completeness
@@ -30,7 +30,7 @@ module Datadog
  def services
  # Only log each deprecation warning once (safeguard against log spam)
  Datadog::Patcher.do_once('Tracer#set_service_info') do
- Datadog::Logger.log.warn('services: Usage of Tracer.services has been deprecated')
+ Datadog.logger.warn('services: Usage of Tracer.services has been deprecated')
  end

  {}
@@ -70,22 +70,23 @@ module Datadog
  # * +enabled+: set if the tracer submits or not spans to the local agent. It's enabled
  # by default.
  def initialize(options = {})
- @enabled = options.fetch(:enabled, true)
- @writer = options.fetch(:writer, Datadog::Writer.new)
- @sampler = options.fetch(:sampler, Datadog::AllSampler.new)
-
- @provider = options.fetch(:context_provider, Datadog::DefaultContextProvider.new)
- @provider ||= Datadog::DefaultContextProvider.new # @provider should never be nil
-
+ # Configurable options
  @context_flush = if options[:partial_flush]
  Datadog::ContextFlush::Partial.new(options)
  else
  Datadog::ContextFlush::Finished.new
  end

+ @default_service = options[:default_service]
+ @enabled = options.fetch(:enabled, true)
+ @provider = options.fetch(:context_provider, Datadog::DefaultContextProvider.new)
+ @sampler = options.fetch(:sampler, Datadog::AllSampler.new)
+ @tags = options.fetch(:tags, {})
+ @writer = options.fetch(:writer, Datadog::Writer.new)
+
+ # Instance variables
  @mutex = Mutex.new
- @tags = options.fetch(:tags, Datadog.configuration.tags)
- @default_service = options.fetch(:default_service, Datadog.configuration.service)
+ @provider ||= Datadog::DefaultContextProvider.new # @provider should never be nil

  # Enable priority sampling by default
  activate_priority_sampling!(@sampler)
@@ -114,11 +115,13 @@ module Datadog

  configure_writer(options)

- @context_flush = if options[:partial_flush]
- Datadog::ContextFlush::Partial.new(options)
- else
- Datadog::ContextFlush::Finished.new
- end
+ if options.key?(:partial_flush)
+ @context_flush = if options[:partial_flush]
+ Datadog::ContextFlush::Partial.new(options)
+ else
+ Datadog::ContextFlush::Finished.new
+ end
+ end
  end

  # Set the information about the given service. A valid example is:
@@ -129,7 +132,7 @@ module Datadog
  def set_service_info(service, app, app_type)
  # Only log each deprecation warning once (safeguard against log spam)
  Datadog::Patcher.do_once('Tracer#set_service_info') do
- Datadog::Logger.log.warn(%(
+ Datadog.logger.warn(%(
  set_service_info: Usage of set_service_info has been deprecated,
  service information no longer needs to be reported to the trace agent.
  ))
@@ -144,7 +147,7 @@ module Datadog
  begin
  @default_service = File.basename($PROGRAM_NAME, '.*')
  rescue StandardError => e
- Datadog::Logger.log.error("unable to guess default service: #{e}")
+ Datadog.logger.error("unable to guess default service: #{e}")
  @default_service = 'ruby'.freeze
  end
  @default_service
@@ -268,7 +271,7 @@ module Datadog
  span = start_span(name, options)
  # rubocop:disable Lint/UselessAssignment
  rescue StandardError => e
- Datadog::Logger.log.debug('Failed to start span: #{e}')
+ Datadog.logger.debug('Failed to start span: #{e}')
  ensure
  return_value = yield(span)
  end
@@ -334,11 +337,11 @@ module Datadog
  def write(trace)
  return if @writer.nil? || !@enabled

- if Datadog::Logger.debug_logging
- Datadog::Logger.log.debug("Writing #{trace.length} spans (enabled: #{@enabled})")
+ if Datadog.configuration.diagnostics.debug
+ Datadog.logger.debug("Writing #{trace.length} spans (enabled: #{@enabled})")
  str = String.new('')
  PP.pp(trace, str)
- Datadog::Logger.log.debug(str)
+ Datadog.logger.debug(str)
  end

  @writer.write(trace)
@@ -355,10 +358,10 @@ module Datadog
  sampler = options.fetch(:sampler, nil)
  priority_sampling = options.fetch(:priority_sampling, nil)
  writer = options.fetch(:writer, nil)
- transport_options = options.fetch(:transport_options, {})
+ transport_options = options.fetch(:transport_options, {}).dup

  # Compile writer options
- writer_options = options.fetch(:writer_options, {})
+ writer_options = options.fetch(:writer_options, {}).dup
  rebuild_writer = !writer_options.empty?

  # Re-build the sampler and writer if priority sampling is enabled,
@@ -395,14 +398,6 @@ module Datadog

  writer_options[:transport_options] = transport_options

- # ensure any configuration to runtime_metrics statsd client is
- # passed on when writer gets rebuilt
- unless writer_options.key?(:runtime_metrics)
- if @writer && !@writer.runtime_metrics.nil?
- writer_options[:runtime_metrics] = @writer.runtime_metrics
- end
- end
-
  if rebuild_writer || writer
  # Make sure old writer is shut down before throwing away.
  # Don't want additional threads running...