cw-datadog 2.23.0.4 → 2.23.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. checksums.yaml +4 -4
  2. data/ext/libdatadog_api/feature_flags.c +554 -0
  3. data/ext/libdatadog_api/feature_flags.h +5 -0
  4. data/ext/libdatadog_api/init.c +2 -0
  5. data/lib/datadog/core/cloudwise/client.rb +99 -4
  6. data/lib/datadog/core/cloudwise/component.rb +55 -13
  7. data/lib/datadog/core/cloudwise/docc_operation_worker.rb +11 -1
  8. data/lib/datadog/core/cloudwise/license_worker.rb +7 -0
  9. data/lib/datadog/core/cloudwise/time_sync_worker.rb +200 -0
  10. data/lib/datadog/core/configuration/settings.rb +12 -0
  11. data/lib/datadog/core/configuration/supported_configurations.rb +14 -0
  12. data/lib/datadog/core/environment/ext.rb +6 -0
  13. data/lib/datadog/core/environment/process.rb +79 -0
  14. data/lib/datadog/core/feature_flags.rb +61 -0
  15. data/lib/datadog/core/tag_normalizer.rb +84 -0
  16. data/lib/datadog/core/transport/http/adapters/net.rb +8 -0
  17. data/lib/datadog/core/utils/array.rb +29 -0
  18. data/lib/datadog/core/utils.rb +2 -0
  19. data/lib/datadog/data_streams/processor.rb +1 -1
  20. data/lib/datadog/di/transport/http.rb +6 -2
  21. data/lib/datadog/di/transport/input.rb +62 -2
  22. data/lib/datadog/open_feature/evaluation_engine.rb +19 -9
  23. data/lib/datadog/open_feature/ext.rb +1 -0
  24. data/lib/datadog/open_feature/native_evaluator.rb +38 -0
  25. data/lib/datadog/open_feature/noop_evaluator.rb +3 -3
  26. data/lib/datadog/open_feature/provider.rb +15 -8
  27. data/lib/datadog/open_feature/remote.rb +1 -1
  28. data/lib/datadog/opentelemetry/configuration/settings.rb +159 -0
  29. data/lib/datadog/opentelemetry/metrics.rb +110 -0
  30. data/lib/datadog/opentelemetry/sdk/configurator.rb +25 -1
  31. data/lib/datadog/opentelemetry/sdk/metrics_exporter.rb +38 -0
  32. data/lib/datadog/opentelemetry.rb +3 -0
  33. data/lib/datadog/tracing/configuration/ext.rb +1 -0
  34. data/lib/datadog/tracing/contrib/cloudwise/propagation.rb +143 -17
  35. data/lib/datadog/tracing/contrib/grape/endpoint.rb +141 -0
  36. data/lib/datadog/tracing/contrib/kafka/events/consumer/process_batch.rb +26 -0
  37. data/lib/datadog/tracing/contrib/kafka/events/consumer/process_message.rb +26 -0
  38. data/lib/datadog/tracing/contrib/kafka/instrumentation/consumer.rb +79 -9
  39. data/lib/datadog/tracing/contrib/kafka/instrumentation/producer.rb +29 -6
  40. data/lib/datadog/tracing/contrib/rack/middlewares.rb +6 -54
  41. data/lib/datadog/tracing/diagnostics/environment_logger.rb +1 -1
  42. data/lib/datadog/tracing/transport/serializable_trace.rb +8 -1
  43. data/lib/datadog/tracing/transport/trace_formatter.rb +11 -0
  44. data/lib/datadog/tracing/transport/traces.rb +3 -5
  45. data/lib/datadog/version.rb +1 -1
  46. metadata +29 -4
@@ -16,6 +16,9 @@ module Datadog
16
16
  KEY_RUN = 'datadog_grape_endpoint_run'
17
17
  KEY_RENDER = 'datadog_grape_endpoint_render'
18
18
 
19
+ # ✅ 优化:缓存 ENV 配置为常量,避免每次请求读取操作系统环境变量
20
+ CLOUDWISE_JS_ENABLED = ENV.fetch('CLOUDWISE_JS_CONFIG', 'false') == 'true'
21
+
19
22
  class << self
20
23
  def subscribe
21
24
  # subscribe when a Grape endpoint is hit
@@ -62,6 +65,45 @@ module Datadog
62
65
 
63
66
  span.set_tag(Tracing::Metadata::Ext::TAG_COMPONENT, Ext::TAG_COMPONENT)
64
67
  span.set_tag(Tracing::Metadata::Ext::TAG_OPERATION, Ext::TAG_OPERATION_ENDPOINT_RUN)
68
+ span.set_tag(Tracing::Metadata::Ext::TAG_KIND, Tracing::Metadata::Ext::SpanKind::TAG_SERVER)
69
+
70
+ # Set User-Agent
71
+ user_agent = env['HTTP_USER_AGENT']
72
+ span.set_tag(Tracing::Metadata::Ext::HTTP::TAG_USER_AGENT, user_agent) if user_agent
73
+
74
+ # Set base URL (scheme://host:port)
75
+ base_url = build_base_url_from_env(env)
76
+ span.set_tag(Tracing::Metadata::Ext::HTTP::TAG_BASE_URL, base_url) unless base_url.empty?
77
+
78
+ # 处理 CLOUDWISEREQUESTINFO(前端调用)
79
+ cloudwise_info = env['HTTP_CLOUDWISEREQUESTINFO']
80
+ request_id = nil
81
+
82
+ if cloudwise_info
83
+ # 提取 request_id
84
+ request_id = extract_request_id(cloudwise_info)
85
+ end
86
+
87
+ # 如果存在 request_id,设置到 span
88
+ if request_id
89
+ span.set_tag('cwsa_trace', request_id)
90
+ Datadog.logger.debug do
91
+ "Set cwsa_trace tag on span: #{request_id}"
92
+ end
93
+ end
94
+
95
+ # 处理 CLOUDWISE(外部服务调用,如 Java/PHP/Ruby)
96
+ cloudwise_header = env['HTTP_CLOUDWISE']
97
+
98
+ # 如果存在 CLOUDWISE 头(外部服务调用),提取并设置字段到 span
99
+ if cloudwise_header
100
+ require_relative '../cloudwise/propagation'
101
+ Cloudwise::Propagation.extract_and_tag_from_header!(span, cloudwise_header)
102
+ end
103
+
104
+ # 提取 CLOUDWISE-OTHER 头并添加到根 span
105
+ require_relative '../cloudwise/propagation'
106
+ Cloudwise::Propagation.extract_other_from_request!(span, env)
65
107
 
66
108
  if (grape_route = env['grape.routing_args']) && grape_route[:route_info]
67
109
  trace.set_tag(
@@ -95,6 +137,7 @@ module Datadog
95
137
  begin
96
138
  # collect endpoint details
97
139
  endpoint = payload.fetch(:endpoint)
140
+ env = payload.fetch(:env)
98
141
  api_view = api_view(endpoint.options[:for])
99
142
  request_method = endpoint.options.fetch(:method).first
100
143
  path = endpoint_expand_path(endpoint)
@@ -116,6 +159,9 @@ module Datadog
116
159
 
117
160
  span.set_tag(Tracing::Metadata::Ext::HTTP::TAG_METHOD, request_method)
118
161
  span.set_tag(Tracing::Metadata::Ext::HTTP::TAG_URL, path)
162
+
163
+ # 处理响应头注入
164
+ inject_response_headers(endpoint, env, span, trace)
119
165
  ensure
120
166
  span.start(start)
121
167
  span.finish(finish)
@@ -235,6 +281,101 @@ module Datadog
235
281
 
236
282
  private
237
283
 
284
+ # 构建 base URL (scheme://host:port)
285
+ def build_base_url_from_env(env)
286
+ # 获取 scheme
287
+ scheme = env['rack.url_scheme'] || 'http'
288
+
289
+ # 优先使用 HTTP_HOST(可能已包含端口)
290
+ host = env['HTTP_HOST']
291
+ if host
292
+ # HTTP_HOST 已经包含端口(如果有的话),直接使用
293
+ "#{scheme}://#{host}"
294
+ else
295
+ # 回退到 SERVER_NAME + SERVER_PORT
296
+ host = env['SERVER_NAME'] || 'localhost'
297
+ port = env['SERVER_PORT']
298
+
299
+ # 只有在非标准端口时才添加端口号
300
+ if port && ((scheme == 'http' && port != '80') || (scheme == 'https' && port != '443'))
301
+ "#{scheme}://#{host}:#{port}"
302
+ else
303
+ "#{scheme}://#{host}"
304
+ end
305
+ end
306
+ rescue => e
307
+ Datadog.logger.debug do
308
+ "Error building base URL: #{e.message}"
309
+ end
310
+ ''
311
+ end
312
+
313
+ # 注入响应头(CLOUDWISETRACE 和 CLOUDWISE)
314
+ def inject_response_headers(endpoint, env, span, trace)
315
+
316
+ cloudwise_info = env['HTTP_CLOUDWISEREQUESTINFO']
317
+ if cloudwise_info
318
+ endpoint.header('CLOUDWISETRACE', 'true')
319
+ Datadog.logger.debug do
320
+ "Added CLOUDWISETRACE response header"
321
+ end
322
+ end
323
+
324
+ # 2. 如果启用了 CLOUDWISE_JS_CONFIG,在响应头中添加 CLOUDWISE(用于 RUM 追踪)
325
+ if CLOUDWISE_JS_ENABLED
326
+ require_relative '../cloudwise/propagation'
327
+
328
+ # 构建 CLOUDWISE 响应头值
329
+ service_name = Datadog.configuration.service
330
+ if service_name && span
331
+ cloudwise_value = Cloudwise::Propagation.build_cloudwise_value(
332
+ span: span,
333
+ trace: trace,
334
+ service_name: service_name,
335
+ target_url: nil # 响应头不需要 target_url
336
+ )
337
+
338
+ endpoint.header('CLOUDWISE', cloudwise_value)
339
+ Datadog.logger.debug do
340
+ "Added CLOUDWISE response header for RUM: #{cloudwise_value}"
341
+ end
342
+ elsif service_name && span
343
+ Datadog.logger.debug do
344
+ "Skipped CLOUDWISE response header: Cloudwise probe suspended"
345
+ end
346
+ end
347
+ end
348
+ rescue => e
349
+ Datadog.logger.error do
350
+ "Error injecting response headers: #{e.message}"
351
+ end
352
+ end
353
+
354
+
355
+ # 从 CLOUDWISEREQUESTINFO 中提取 request_id
356
+ def extract_request_id(cloudwise_info)
357
+ return nil unless cloudwise_info.is_a?(String)
358
+
359
+ # 使用正则表达式提取:第一个 _ 之后,@SDK 之前的内容(不区分大小写)
360
+ match = cloudwise_info.match(/_([^_@]+)@SDK/i)
361
+
362
+ if match && match[1]
363
+ request_id = match[1]
364
+ return request_id
365
+ else
366
+ Datadog.logger.debug do
367
+ "Could not extract request_id from CLOUDWISEREQUESTINFO: #{cloudwise_info}"
368
+ end
369
+ end
370
+
371
+ nil
372
+ rescue => e
373
+ Datadog.logger.error do
374
+ "Error extracting request_id from CLOUDWISEREQUESTINFO: #{e.message}"
375
+ end
376
+ nil
377
+ end
378
+
238
379
  def handle_error(span, exception, status = nil)
239
380
  status ||= (exception.status if exception.respond_to?(:status))
240
381
  if status
@@ -3,6 +3,8 @@
3
3
  require_relative '../../ext'
4
4
  require_relative '../../event'
5
5
  require_relative '../../consumer_event'
6
+ require_relative '../../instrumentation/consumer'
7
+ require_relative '../../../cloudwise/propagation'
6
8
 
7
9
  module Datadog
8
10
  module Tracing
@@ -31,6 +33,17 @@ module Datadog
31
33
  span.set_tag(Ext::TAG_HIGHWATER_MARK_OFFSET, payload[:highwater_mark_offset])
32
34
  end
33
35
  span.set_tag(Ext::TAG_OFFSET_LAG, payload[:offset_lag]) if payload.key?(:offset_lag)
36
+
37
+ # 注意:CLOUDWISE headers 的提取移到 on_finish 中
38
+ # 因为 ruby-kafka 的执行顺序是:on_start -> block.call -> on_finish
39
+ # headers 在 block.call 中被存储到 Thread-local
40
+ end
41
+
42
+ # 在 span 结束时提取 CLOUDWISE headers
43
+ # 此时 wrapped_block 已经执行完毕,Thread-local 中有正确的 headers
44
+ def on_finish(span, _event, _id, _payload)
45
+ # 从 Thread-local 获取 CLOUDWISE headers 并提取到 span
46
+ extract_cloudwise_headers!(span)
34
47
  end
35
48
 
36
49
  def span_name
@@ -40,6 +53,19 @@ module Datadog
40
53
  def span_options
41
54
  super.merge({tags: {Tracing::Metadata::Ext::TAG_OPERATION => Ext::TAG_OPERATION_PROCESS_BATCH}})
42
55
  end
56
+
57
+ # 从 Thread-local 获取 CLOUDWISE headers 并提取到 span
58
+ # @param span [Datadog::Tracing::SpanOperation] 当前 span
59
+ def extract_cloudwise_headers!(span)
60
+ # 从 Thread-local 获取 consumer instrumentation 存储的 headers
61
+ # 使用 peek 而不是 fetch,因为可能有其他地方也需要这个 headers
62
+ headers = Instrumentation::Consumer.peek_cloudwise_headers
63
+ return unless headers
64
+
65
+ Cloudwise::Propagation.extract_kafka_cloudwise_headers!(span, headers)
66
+ rescue => e
67
+ Datadog.logger.debug("Error extracting CLOUDWISE headers from batch: #{e.class}: #{e}")
68
+ end
43
69
  end
44
70
  end
45
71
  end
@@ -3,6 +3,8 @@
3
3
  require_relative '../../ext'
4
4
  require_relative '../../event'
5
5
  require_relative '../../consumer_event'
6
+ require_relative '../../instrumentation/consumer'
7
+ require_relative '../../../cloudwise/propagation'
6
8
 
7
9
  module Datadog
8
10
  module Tracing
@@ -29,6 +31,17 @@ module Datadog
29
31
  span.set_tag(Ext::TAG_PARTITION, payload[:partition]) if payload.key?(:partition)
30
32
  span.set_tag(Ext::TAG_OFFSET, payload[:offset]) if payload.key?(:offset)
31
33
  span.set_tag(Ext::TAG_OFFSET_LAG, payload[:offset_lag]) if payload.key?(:offset_lag)
34
+
35
+ # 注意:CLOUDWISE headers 的提取移到 on_finish 中
36
+ # 因为 ruby-kafka 的执行顺序是:on_start -> block.call -> on_finish
37
+ # headers 在 block.call 中被存储到 Thread-local
38
+ end
39
+
40
+ # 在 span 结束时提取 CLOUDWISE headers
41
+ # 此时 wrapped_block 已经执行完毕,Thread-local 中有正确的 headers
42
+ def on_finish(span, _event, _id, _payload)
43
+ # 从 Thread-local 获取 CLOUDWISE headers 并提取到 span
44
+ extract_cloudwise_headers!(span)
32
45
  end
33
46
 
34
47
  def span_name
@@ -38,6 +51,19 @@ module Datadog
38
51
  def span_options
39
52
  super.merge({tags: {Tracing::Metadata::Ext::TAG_OPERATION => Ext::TAG_OPERATION_PROCESS_MESSAGE}})
40
53
  end
54
+
55
+ # 从 Thread-local 获取 CLOUDWISE headers 并提取到 span
56
+ # @param span [Datadog::Tracing::SpanOperation] 当前 span
57
+ def extract_cloudwise_headers!(span)
58
+ # 从 Thread-local 获取 consumer instrumentation 存储的 headers
59
+ # 使用 peek 而不是 fetch,因为可能有其他地方也需要这个 headers
60
+ headers = Instrumentation::Consumer.peek_cloudwise_headers
61
+ return unless headers
62
+
63
+ Cloudwise::Propagation.extract_kafka_cloudwise_headers!(span, headers)
64
+ rescue => e
65
+ Datadog.logger.debug("Error extracting CLOUDWISE headers: #{e.class}: #{e}")
66
+ end
41
67
  end
42
68
  end
43
69
  end
@@ -1,26 +1,59 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require_relative '../../cloudwise/propagation'
4
+
3
5
  module Datadog
4
6
  module Tracing
5
7
  module Contrib
6
8
  module Kafka
7
9
  module Instrumentation
8
10
  # Instrumentation for Kafka::Consumer
11
+ # 这个模块直接 prepend 到 ::Kafka::Consumer,实例方法直接定义在模块中
9
12
  module Consumer
10
- def self.prepended(base)
11
- base.prepend(InstanceMethods)
13
+ # Thread-local 变量用于存储当前消息的 CLOUDWISE headers
14
+ # 由于 ruby-kafka 的 process_message 事件在 block 调用时触发(instrument 包裹 block.call)
15
+ # 执行顺序是:on_start -> block.call -> on_finish
16
+ # 所以我们在 block 中存储 headers,在 on_finish 中提取
17
+ CLOUDWISE_HEADERS_KEY = :datadog_kafka_cloudwise_headers
18
+
19
+ class << self
20
+ # 存储当前消息的 CLOUDWISE headers 到 Thread-local
21
+ def store_cloudwise_headers(headers)
22
+ Thread.current[CLOUDWISE_HEADERS_KEY] = headers
23
+ end
24
+
25
+ # 获取 Thread-local 中存储的 CLOUDWISE headers(不清除)
26
+ # 用于 on_finish 中获取 headers
27
+ def peek_cloudwise_headers
28
+ Thread.current[CLOUDWISE_HEADERS_KEY]
29
+ end
30
+
31
+ # 获取并清除 Thread-local 中存储的 CLOUDWISE headers
32
+ def fetch_cloudwise_headers
33
+ headers = Thread.current[CLOUDWISE_HEADERS_KEY]
34
+ Thread.current[CLOUDWISE_HEADERS_KEY] = nil
35
+ headers
36
+ end
37
+
38
+ # 清除 Thread-local 中存储的 CLOUDWISE headers
39
+ def clear_cloudwise_headers
40
+ Thread.current[CLOUDWISE_HEADERS_KEY] = nil
41
+ end
12
42
  end
13
43
 
14
- # Instance methods for consumer instrumentation
15
- module InstanceMethods
44
+ # 实例方法直接定义在模块中,会被 prepend ::Kafka::Consumer
16
45
  def each_message(**kwargs, &block)
17
- return super unless Datadog::DataStreams.enabled?
46
+ return super unless block
18
47
 
48
+ original_block = block
19
49
  wrapped_block = proc do |message|
50
+ headers = message.headers || {}
51
+
52
+ # 处理 DSM checkpoint
53
+ if Datadog::DataStreams.enabled?
20
54
  Datadog.logger.debug { "Kafka each_message: DSM enabled for topic #{message.topic}" }
21
55
 
22
56
  begin
23
- headers = message.headers || {}
24
57
  Datadog::DataStreams.set_consume_checkpoint(
25
58
  type: 'kafka',
26
59
  source: message.topic,
@@ -29,17 +62,27 @@ module Datadog
29
62
  rescue => e
30
63
  Datadog.logger.debug("Error setting DSM checkpoint: #{e.class}: #{e}")
31
64
  end
65
+ end
66
+
67
+ # 存储 CLOUDWISE headers 到 Thread-local
68
+ # 这会在 process_message 事件的 on_start 之后、on_finish 之前执行
69
+ # 所以 on_finish 可以获取到正确的 headers
70
+ Datadog::Tracing::Contrib::Kafka::Instrumentation::Consumer.store_cloudwise_headers(headers)
32
71
 
33
- yield(message) if block
72
+ # 调用原始 block
73
+ original_block.call(message)
34
74
  end
35
75
 
36
76
  super(**kwargs, &wrapped_block)
37
77
  end
38
78
 
39
79
  def each_batch(**kwargs, &block)
40
- return super unless Datadog::DataStreams.enabled?
80
+ return super unless block
41
81
 
82
+ original_block = block
42
83
  wrapped_block = proc do |batch|
84
+ # 处理 DSM checkpoint
85
+ if Datadog::DataStreams.enabled?
43
86
  Datadog.logger.debug { "Kafka each_batch: DSM enabled for topic #{batch.topic}" }
44
87
 
45
88
  begin
@@ -51,12 +94,39 @@ module Datadog
51
94
  rescue => e
52
95
  Datadog.logger.debug("Error setting DSM checkpoint: #{e.class}: #{e}")
53
96
  end
97
+ end
54
98
 
55
- yield(batch) if block
99
+ # 从批量消息的第一条消息中提取 headers 并存储到 Thread-local
100
+ store_batch_cloudwise_headers!(batch)
101
+
102
+ # 调用原始 block
103
+ original_block.call(batch)
56
104
  end
57
105
 
58
106
  super(**kwargs, &wrapped_block)
59
107
  end
108
+
109
+ private
110
+
111
+ # 从批量消息的第一条消息中提取 headers 并存储到 Thread-local
112
+ # @param batch [Object] 批量消息对象
113
+ def store_batch_cloudwise_headers!(batch)
114
+ return unless batch
115
+
116
+ # 获取批量消息中的消息列表
117
+ messages = batch.respond_to?(:messages) ? batch.messages : nil
118
+ return unless messages && !messages.empty?
119
+
120
+ # 从第一条消息中提取 headers
121
+ first_message = messages.first
122
+ return unless first_message
123
+
124
+ headers = first_message.respond_to?(:headers) ? first_message.headers : nil
125
+ return unless headers
126
+
127
+ Datadog::Tracing::Contrib::Kafka::Instrumentation::Consumer.store_cloudwise_headers(headers)
128
+ rescue => e
129
+ Datadog.logger.debug("Error storing CLOUDWISE headers from batch: #{e.class}: #{e}")
60
130
  end
61
131
  end
62
132
  end
@@ -1,5 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require_relative '../../cloudwise/propagation'
4
+
3
5
  module Datadog
4
6
  module Tracing
5
7
  module Contrib
@@ -13,13 +15,15 @@ module Datadog
13
15
 
14
16
  module InstanceMethods
15
17
  def deliver_messages(**kwargs)
16
- if Datadog::DataStreams.enabled?
17
18
  begin
18
19
  pending_messages = instance_variable_get(:@pending_message_queue)
19
20
 
20
21
  if pending_messages && !pending_messages.empty?
21
22
  pending_messages.each do |message|
22
23
  message.headers ||= {}
24
+
25
+ # 注入 DSM checkpoint
26
+ if Datadog::DataStreams.enabled?
23
27
  Datadog::DataStreams.set_produce_checkpoint(
24
28
  type: 'kafka',
25
29
  destination: message.topic,
@@ -28,20 +32,25 @@ module Datadog
28
32
  message.headers[key] = value
29
33
  end
30
34
  end
35
+
36
+ # 注入 CLOUDWISE header
37
+ inject_cloudwise_headers!(message.headers, message.topic)
31
38
  end
32
- rescue => e
33
- Datadog.logger.debug("Error setting DSM checkpoint: #{e.class}: #{e}")
34
39
  end
40
+ rescue => e
41
+ Datadog.logger.debug("Error in Kafka producer instrumentation: #{e.class}: #{e}")
35
42
  end
36
43
 
37
44
  super
38
45
  end
39
46
 
40
47
  def send_messages(messages, **kwargs)
41
- if Datadog::DataStreams.enabled?
42
48
  begin
43
49
  messages.each do |message|
44
50
  message[:headers] ||= {}
51
+
52
+ # 注入 DSM checkpoint
53
+ if Datadog::DataStreams.enabled?
45
54
  Datadog::DataStreams.set_produce_checkpoint(
46
55
  type: 'kafka',
47
56
  destination: message[:topic],
@@ -50,13 +59,27 @@ module Datadog
50
59
  message[:headers][key] = value
51
60
  end
52
61
  end
53
- rescue => e
54
- Datadog.logger.debug("Error setting DSM checkpoint: #{e.class}: #{e}")
62
+
63
+ # 注入 CLOUDWISE header
64
+ inject_cloudwise_headers!(message[:headers], message[:topic])
55
65
  end
66
+ rescue => e
67
+ Datadog.logger.debug("Error in Kafka producer instrumentation: #{e.class}: #{e}")
56
68
  end
57
69
 
58
70
  super
59
71
  end
72
+
73
+ private
74
+
75
+ # 注入 CLOUDWISE headers 到 Kafka 消息
76
+ # @param headers [Hash] 消息 headers
77
+ # @param topic [String] Kafka topic 名称
78
+ def inject_cloudwise_headers!(headers, topic)
79
+ Cloudwise::Propagation.inject_kafka_headers!(headers, topic)
80
+ rescue => e
81
+ Datadog.logger.debug("Error injecting CLOUDWISE headers: #{e.class}: #{e}")
82
+ end
60
83
  end
61
84
  end
62
85
  end
@@ -28,6 +28,9 @@ module Datadog
28
28
  # application. If request tags are not set by the app, they will be set using
29
29
  # information available at the Rack level.
30
30
  class TraceMiddleware
31
+ # ✅ 优化:缓存 ENV 配置为常量,避免每次请求读取操作系统环境变量
32
+ CLOUDWISE_JS_ENABLED = ENV.fetch('CLOUDWISE_JS_CONFIG', 'false') == 'true'
33
+
31
34
  def initialize(app)
32
35
  @app = app
33
36
  end
@@ -47,7 +50,6 @@ module Datadog
47
50
  Tracing.continue_trace!(trace_digest) if trace_digest
48
51
  end
49
52
 
50
- # 检查请求头中是否包含 CLOUDWISEREQUESTINFO 和 CLOUDWISE
51
53
  # 需要在创建 span 之前提取信息
52
54
  request_headers = Header::RequestHeaderCollection.new(env)
53
55
 
@@ -121,15 +123,15 @@ module Datadog
121
123
 
122
124
  # 如果启用了 CLOUDWISE_JS_CONFIG,在响应头中添加 CLOUDWISE(用于 RUM 追踪)
123
125
  # 默认关闭,可通过环境变量 CLOUDWISE_JS_CONFIG=true 开启
124
- cloudwise_js_enabled = ENV.fetch('CLOUDWISE_JS_CONFIG', 'false') == 'true'
125
- if cloudwise_js_enabled
126
+ # 使用常量替代每次 ENV 读取
127
+ if CLOUDWISE_JS_ENABLED
126
128
  headers ||= {}
127
129
  require_relative '../cloudwise/propagation'
128
130
 
129
131
  # 构建 CLOUDWISE 响应头值
130
132
  # Only add CLOUDWISE header if Cloudwise is active (not suspended)
131
133
  service_name = Datadog.configuration.service
132
- if service_name && request_span && cloudwise_active?
134
+ if service_name && request_span
133
135
  cloudwise_value = Cloudwise::Propagation.build_cloudwise_value(
134
136
  span: request_span,
135
137
  trace: request_trace,
@@ -180,57 +182,7 @@ module Datadog
180
182
  end
181
183
  end
182
184
 
183
- # Check if Cloudwise probe is active (not suspended)
184
- def cloudwise_active?
185
- # Try to get components from tracer first (more reliable in request context)
186
- components = nil
187
-
188
- # First try: Get components through Tracing (which accesses the tracer's @components)
189
- if defined?(Datadog::Tracing) && Datadog::Tracing.respond_to?(:send)
190
- tracer = Datadog::Tracing.send(:tracer) rescue nil
191
- if tracer
192
- components = tracer.instance_variable_get(:@components) rescue nil
193
- Datadog.logger.debug { "Cloudwise active check: Got components from tracer" } if components
194
- end
195
- end
196
-
197
- # Second try: Global Datadog.components
198
- if components.nil? && defined?(Datadog.components)
199
- components = Datadog.components rescue nil
200
- Datadog.logger.debug { "Cloudwise active check: Got components from Datadog.components" } if components
201
- end
202
-
203
- unless components
204
- Datadog.logger.debug { "Cloudwise active check: No components available" }
205
- return false
206
- end
207
-
208
- unless components.respond_to?(:cloudwise)
209
- Datadog.logger.debug { "Cloudwise active check: Components doesn't respond to cloudwise" }
210
- return false
211
- end
212
-
213
- cloudwise = components.cloudwise
214
- unless cloudwise&.respond_to?(:probe_state)
215
- Datadog.logger.debug { "Cloudwise active check: Cloudwise doesn't respond to probe_state or is nil" }
216
- return false
217
- end
218
-
219
- suspended = cloudwise.probe_state.suspended?
220
- Datadog.logger.debug { "Cloudwise active check: suspended? = #{suspended}, active = #{!suspended}" }
221
-
222
- !suspended
223
- rescue => e
224
- # If any error occurs, assume not active
225
- Datadog.logger.debug { "Cloudwise active check error: #{e.message}" }
226
- Datadog.logger.debug { e.backtrace.first(3).join("\n") } if e.backtrace
227
- false
228
- end
229
-
230
- # ====== 🔥 新增:提取 request_id 的方法 ======
231
185
  # 从 CLOUDWISEREQUESTINFO 中提取 request_id
232
- # 格式: ...某些内容_4fa12aec-bd00-4824-8a58-9dde05466f4a@SDK...
233
- # 规则: 第一个 _ 开始,以 @SDK 结束
234
186
  def extract_request_id(cloudwise_info)
235
187
  return nil unless cloudwise_info.is_a?(String)
236
188
 
@@ -15,7 +15,7 @@ module Datadog
15
15
  def self.collect_and_log!(responses: nil)
16
16
  if log?
17
17
  log_configuration!('TRACING', EnvironmentCollector.collect_config!.to_json)
18
- log_debug!('TRACING INTEGRATIONS', EnvironmentCollector.collect_integrations_settings!.to_json)
18
+ # log_debug!('TRACING INTEGRATIONS', EnvironmentCollector.collect_integrations_settings!.to_json)
19
19
 
20
20
  if responses
21
21
  err_data = EnvironmentCollector.collect_errors!(responses)
@@ -136,8 +136,15 @@ module Datadog
136
136
 
137
137
  # Used for serialization
138
138
  # @return [Integer] in nanoseconds since Epoch
139
+ # 如果启用了时间同步,会自动应用时间偏移校正
139
140
  def time_nano(time)
140
- time.to_i * 1000000000 + time.nsec
141
+ local_time_ns = time.to_i * 1000000000 + time.nsec
142
+ # 应用时间同步偏移校正(如果启用且 TimeSyncWorker 已加载)
143
+ if defined?(Datadog::Core::Cloudwise::TimeSyncWorker)
144
+ Datadog::Core::Cloudwise::TimeSyncWorker.adjust_timestamp_ns(local_time_ns)
145
+ else
146
+ local_time_ns
147
+ end
141
148
  end
142
149
 
143
150
  def to_hash
@@ -1,6 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  require_relative '../../core/environment/identity'
4
+ require_relative '../../core/environment/process'
4
5
  require_relative '../../core/environment/socket'
5
6
  require_relative '../../core/environment/git'
6
7
  require_relative '../../core/git/ext'
@@ -63,6 +64,7 @@ module Datadog
63
64
  tag_cloudwise_metadata!
64
65
 
65
66
  if first_span
67
+ tag_process_tags!
66
68
  tag_git_repository_url!
67
69
  tag_git_commit_sha!
68
70
  end
@@ -260,6 +262,15 @@ module Datadog
260
262
  first_span.set_tag(Core::Git::Ext::TAG_COMMIT_SHA, git_commit_sha)
261
263
  end
262
264
 
265
+ def tag_process_tags!
266
+ return unless Datadog.configuration.experimental_propagate_process_tags_enabled
267
+
268
+ first_span.set_tag(
269
+ Core::Environment::Ext::TAG_PROCESS_TAGS,
270
+ Core::Environment::Process.serialized
271
+ )
272
+ end
273
+
263
274
  private
264
275
 
265
276
  def partial?
@@ -3,6 +3,7 @@
3
3
  require_relative '../../core/chunker'
4
4
  require_relative '../../core/transport/parcel'
5
5
  require_relative '../../core/transport/request'
6
+ require_relative '../../core/utils/array'
6
7
  require_relative 'serializable_trace'
7
8
  require_relative 'trace_formatter'
8
9
 
@@ -65,11 +66,8 @@ module Datadog
65
66
  # @return [Enumerable[Array[Bytes,Integer]]] list of encoded chunks: each containing a byte array and
66
67
  # number of traces
67
68
  def encode_in_chunks(traces)
68
- encoded_traces = if traces.respond_to?(:filter_map)
69
- # DEV Supported since Ruby 2.7, saves an intermediate object creation
70
- traces.filter_map { |t| encode_one(t) }
71
- else
72
- traces.map { |t| encode_one(t) }.reject(&:nil?)
69
+ encoded_traces = Core::Utils::Array.filter_map(traces) do |trace|
70
+ encode_one(trace)
73
71
  end
74
72
 
75
73
  Datadog::Core::Chunker.chunk_by_size(encoded_traces, max_size).map do |chunk|
@@ -5,7 +5,7 @@ module Datadog
5
5
  MAJOR = 2
6
6
  MINOR = 23
7
7
  PATCH = 0
8
- PATCH_MINOR = 4
8
+ PATCH_MINOR = 5
9
9
  PRE = nil
10
10
  BUILD = nil
11
11
  # PRE and BUILD above are modified for dev gems during gem build GHA workflow