datadog 2.8.0 → 2.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
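For anyone applying the upgrade rather than just reviewing it, the new version can be pinned in a Gemfile in the usual way — a minimal sketch (the pessimistic constraint style is an assumption, not a requirement of the release):

```ruby
# Gemfile — move from datadog 2.8.0 to the 2.10.x series
gem 'datadog', '~> 2.10.0'
```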
Files changed (128)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +62 -1
  3. data/ext/datadog_profiling_native_extension/clock_id.h +2 -2
  4. data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +66 -56
  5. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c +1 -1
  6. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h +1 -1
  7. data/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c +16 -16
  8. data/ext/datadog_profiling_native_extension/collectors_stack.c +7 -7
  9. data/ext/datadog_profiling_native_extension/collectors_stack.h +2 -2
  10. data/ext/datadog_profiling_native_extension/collectors_thread_context.c +221 -127
  11. data/ext/datadog_profiling_native_extension/heap_recorder.c +50 -92
  12. data/ext/datadog_profiling_native_extension/heap_recorder.h +2 -2
  13. data/ext/datadog_profiling_native_extension/http_transport.c +4 -4
  14. data/ext/datadog_profiling_native_extension/private_vm_api_access.c +3 -0
  15. data/ext/datadog_profiling_native_extension/private_vm_api_access.h +3 -1
  16. data/ext/datadog_profiling_native_extension/profiling.c +10 -8
  17. data/ext/datadog_profiling_native_extension/ruby_helpers.c +8 -8
  18. data/ext/datadog_profiling_native_extension/stack_recorder.c +63 -76
  19. data/ext/datadog_profiling_native_extension/stack_recorder.h +2 -2
  20. data/ext/datadog_profiling_native_extension/time_helpers.h +1 -1
  21. data/ext/datadog_profiling_native_extension/unsafe_api_calls_check.c +47 -0
  22. data/ext/datadog_profiling_native_extension/unsafe_api_calls_check.h +31 -0
  23. data/ext/libdatadog_api/crashtracker.c +3 -0
  24. data/lib/datadog/appsec/actions_handler.rb +27 -0
  25. data/lib/datadog/appsec/assets/waf_rules/recommended.json +355 -157
  26. data/lib/datadog/appsec/assets/waf_rules/strict.json +62 -32
  27. data/lib/datadog/appsec/component.rb +14 -8
  28. data/lib/datadog/appsec/configuration/settings.rb +9 -0
  29. data/lib/datadog/appsec/context.rb +74 -0
  30. data/lib/datadog/appsec/contrib/active_record/instrumentation.rb +12 -8
  31. data/lib/datadog/appsec/contrib/devise/patcher/authenticatable_patch.rb +6 -6
  32. data/lib/datadog/appsec/contrib/devise/patcher/registration_controller_patch.rb +4 -4
  33. data/lib/datadog/appsec/contrib/graphql/appsec_trace.rb +1 -7
  34. data/lib/datadog/appsec/contrib/graphql/gateway/watcher.rb +20 -30
  35. data/lib/datadog/appsec/contrib/graphql/reactive/multiplex.rb +6 -6
  36. data/lib/datadog/appsec/contrib/rack/gateway/response.rb +3 -3
  37. data/lib/datadog/appsec/contrib/rack/gateway/watcher.rb +67 -96
  38. data/lib/datadog/appsec/contrib/rack/reactive/request.rb +11 -11
  39. data/lib/datadog/appsec/contrib/rack/reactive/request_body.rb +6 -6
  40. data/lib/datadog/appsec/contrib/rack/reactive/response.rb +7 -7
  41. data/lib/datadog/appsec/contrib/rack/request_body_middleware.rb +10 -11
  42. data/lib/datadog/appsec/contrib/rack/request_middleware.rb +43 -60
  43. data/lib/datadog/appsec/contrib/rails/gateway/watcher.rb +23 -33
  44. data/lib/datadog/appsec/contrib/rails/patcher.rb +4 -14
  45. data/lib/datadog/appsec/contrib/rails/reactive/action.rb +7 -7
  46. data/lib/datadog/appsec/contrib/sinatra/gateway/watcher.rb +45 -65
  47. data/lib/datadog/appsec/contrib/sinatra/patcher.rb +5 -28
  48. data/lib/datadog/appsec/contrib/sinatra/reactive/routed.rb +6 -6
  49. data/lib/datadog/appsec/event.rb +6 -6
  50. data/lib/datadog/appsec/ext.rb +8 -1
  51. data/lib/datadog/appsec/metrics/collector.rb +38 -0
  52. data/lib/datadog/appsec/metrics/exporter.rb +35 -0
  53. data/lib/datadog/appsec/metrics/telemetry.rb +23 -0
  54. data/lib/datadog/appsec/metrics.rb +13 -0
  55. data/lib/datadog/appsec/monitor/gateway/watcher.rb +23 -32
  56. data/lib/datadog/appsec/monitor/reactive/set_user.rb +6 -6
  57. data/lib/datadog/appsec/processor/rule_loader.rb +0 -3
  58. data/lib/datadog/appsec/processor.rb +4 -3
  59. data/lib/datadog/appsec/response.rb +18 -80
  60. data/lib/datadog/appsec/security_engine/result.rb +67 -0
  61. data/lib/datadog/appsec/security_engine/runner.rb +88 -0
  62. data/lib/datadog/appsec/security_engine.rb +9 -0
  63. data/lib/datadog/appsec.rb +17 -8
  64. data/lib/datadog/auto_instrument.rb +3 -0
  65. data/lib/datadog/core/configuration/agent_settings_resolver.rb +39 -11
  66. data/lib/datadog/core/configuration/components.rb +4 -2
  67. data/lib/datadog/core/configuration.rb +1 -1
  68. data/lib/datadog/{tracing → core}/contrib/rails/utils.rb +1 -3
  69. data/lib/datadog/core/crashtracking/component.rb +1 -3
  70. data/lib/datadog/core/telemetry/event.rb +87 -3
  71. data/lib/datadog/core/telemetry/logging.rb +2 -2
  72. data/lib/datadog/core/telemetry/metric.rb +22 -0
  73. data/lib/datadog/core/telemetry/worker.rb +33 -0
  74. data/lib/datadog/di/base.rb +115 -0
  75. data/lib/datadog/di/code_tracker.rb +7 -4
  76. data/lib/datadog/di/component.rb +19 -11
  77. data/lib/datadog/di/configuration/settings.rb +11 -1
  78. data/lib/datadog/di/contrib/railtie.rb +15 -0
  79. data/lib/datadog/di/contrib.rb +26 -0
  80. data/lib/datadog/di/error.rb +5 -0
  81. data/lib/datadog/di/instrumenter.rb +39 -18
  82. data/lib/datadog/di/{init.rb → preload.rb} +2 -4
  83. data/lib/datadog/di/probe_manager.rb +4 -4
  84. data/lib/datadog/di/probe_notification_builder.rb +22 -2
  85. data/lib/datadog/di/probe_notifier_worker.rb +5 -6
  86. data/lib/datadog/di/redactor.rb +0 -1
  87. data/lib/datadog/di/remote.rb +30 -9
  88. data/lib/datadog/di/transport.rb +2 -4
  89. data/lib/datadog/di.rb +5 -108
  90. data/lib/datadog/kit/appsec/events.rb +3 -3
  91. data/lib/datadog/kit/identity.rb +4 -4
  92. data/lib/datadog/profiling/component.rb +55 -53
  93. data/lib/datadog/profiling/http_transport.rb +1 -26
  94. data/lib/datadog/tracing/contrib/action_cable/integration.rb +5 -2
  95. data/lib/datadog/tracing/contrib/action_mailer/integration.rb +6 -2
  96. data/lib/datadog/tracing/contrib/action_pack/integration.rb +5 -2
  97. data/lib/datadog/tracing/contrib/action_view/integration.rb +5 -2
  98. data/lib/datadog/tracing/contrib/active_job/integration.rb +5 -2
  99. data/lib/datadog/tracing/contrib/active_record/integration.rb +6 -2
  100. data/lib/datadog/tracing/contrib/active_support/cache/events/cache.rb +3 -1
  101. data/lib/datadog/tracing/contrib/active_support/cache/instrumentation.rb +3 -1
  102. data/lib/datadog/tracing/contrib/active_support/configuration/settings.rb +10 -0
  103. data/lib/datadog/tracing/contrib/active_support/integration.rb +5 -2
  104. data/lib/datadog/tracing/contrib/auto_instrument.rb +2 -2
  105. data/lib/datadog/tracing/contrib/aws/integration.rb +3 -0
  106. data/lib/datadog/tracing/contrib/concurrent_ruby/integration.rb +3 -0
  107. data/lib/datadog/tracing/contrib/extensions.rb +15 -3
  108. data/lib/datadog/tracing/contrib/http/integration.rb +3 -0
  109. data/lib/datadog/tracing/contrib/httprb/integration.rb +3 -0
  110. data/lib/datadog/tracing/contrib/kafka/integration.rb +3 -0
  111. data/lib/datadog/tracing/contrib/mongodb/integration.rb +3 -0
  112. data/lib/datadog/tracing/contrib/opensearch/integration.rb +3 -0
  113. data/lib/datadog/tracing/contrib/presto/integration.rb +3 -0
  114. data/lib/datadog/tracing/contrib/rack/integration.rb +2 -2
  115. data/lib/datadog/tracing/contrib/rails/framework.rb +2 -2
  116. data/lib/datadog/tracing/contrib/rails/patcher.rb +1 -1
  117. data/lib/datadog/tracing/contrib/rest_client/integration.rb +3 -0
  118. data/lib/datadog/tracing/span.rb +12 -4
  119. data/lib/datadog/tracing/span_event.rb +123 -3
  120. data/lib/datadog/tracing/span_operation.rb +6 -0
  121. data/lib/datadog/tracing/transport/serializable_trace.rb +24 -6
  122. data/lib/datadog/version.rb +1 -1
  123. metadata +40 -17
  124. data/lib/datadog/appsec/contrib/sinatra/ext.rb +0 -14
  125. data/lib/datadog/appsec/processor/context.rb +0 -107
  126. data/lib/datadog/appsec/reactive/operation.rb +0 -68
  127. data/lib/datadog/appsec/scope.rb +0 -58
  128. data/lib/datadog/core/crashtracking/agent_base_url.rb +0 -21
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 6f4b811f0c5014e6f325ac55406d225454a2101cc7576df15657ebc74cf47542
- data.tar.gz: 20c6095b149238c31501bd0be8eb4363a9b22011f750d5f2d68e3f8b730bb970
+ metadata.gz: 88fca1176a1b0fe35703ec00277925dd64a377c6736c52674f5aeb3195ec4d86
+ data.tar.gz: 92631bc4c25b6132821d30858e99e8cd8308445f99a258226508ed04f4a696ba
  SHA512:
- metadata.gz: af600463b83509c10417cc90fa808b4148baaa0961bde7aa2d1cdea98c6537a68fdde456af2d700a1e8d795a49fbc9aef3f61d68cc81949132abd8d9165ed19e
- data.tar.gz: 28977b792b9f957e57bf8386b759a59257d0a74c161038ae7f2f4a1b0f8016b8c3828cf71092920c6fd14fd6640e5ab4dbf987dec257fc2ff54061f7342c20bf
+ metadata.gz: 19a5fd9e66f759a1db3aee5cb1496abaa7393381809eaa956d27b8eb045fb993d7f77ef11c305a97b44bd627294e44fde236836d10c9458fc1543846d58e36e1
+ data.tar.gz: 42bbfab98df4de69947602daa14f75bbb2cdf170bf8203164fb93f496c16d824aea2f659d93688c67e4f0baf7d5221ea1908a4ab5f714d903a9268916a5387ce
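These checksums cover the metadata.gz and data.tar.gz entries inside the published .gem file (a tar archive). A sketch of reproducing the SHA256 values locally in Ruby, assuming the gem was first fetched with `gem fetch datadog --version 2.10.0`:

```ruby
require 'digest'
require 'rubygems/package'

# Walk the .gem tar archive and hash the two entries listed in checksums.yaml
Gem::Package::TarReader.new(File.open('datadog-2.10.0.gem', 'rb')) do |tar|
  tar.each do |entry|
    next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
    puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
  end
end
```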
data/CHANGELOG.md CHANGED
@@ -2,6 +2,46 @@

  ## [Unreleased]

+ ## [2.10.0] - 2025-02-04
+
+ ### Added
+
+ * AppSec: Add configuration option(`Datadog.configuration.appsec.rasp_enabled`) to enable/disable Runtime Application Self-Protection checks ([#4311][])
+ * AppSec: Add stack trace when SQL Injection attack is detected ([#4321][])
+
+ ### Changed
+
+ * Add `logger` gem as dependency ([#4257][])
+ * Bump minimum version of `datadog-ruby_core_source` to 3.4 ([#4323][])
+
+ ### Fixed
+
+ * Dynamic instrumentation: Fix report probe status when dynamic instrumentation probes fail to instrument ([#4301][])
+ * Dynamic instrumentation: Include variables named `env` in probe snapshots ([#4292][])
+ * Fix a concurrency issue during application boot ([#4303][])
+
+ ## [2.9.0] - 2025-01-15
+
+ ### Added
+
+ * Core: add support for Ruby 3.4 ([#4249][])
+ * Integrations: add a new option for `ActiveSupport` to disable adding the `cache_key` as a Span Tag with the `cache_key_enabled` option ([#4022][])
+
+ ### Changed
+
+ * Dynamic instrumentation: move DI preloading to `datadog/di/preload` ([#4288][])
+ * Dynamic instrumentation: dd-trace-rb now reports whether dynamic instrumentation is enabled in startup summary report ([#4285][])
+ * Dynamic instrumentation: improve loading of DI components ([#4272][], [#4239][])
+ * Dynamic instrumentation: logging of internal conditions is now done on debug level ([#4266][])
+ * Dynamic instrumentation: report instrumentation error for line probes when the target file is loaded but not in code tracker registry ([#4208][])
+ * Profiling: require datadog-ruby_core_source >= 3.3.7 to ensure Ruby 3.4 support ([#4228][])
+
+ ### Fixed
+
+ * Core: fix a crash in crashtracker when agent hostname is an IPv6 address ([#4237][])
+ * Profiling: fix allocation profiling + otel tracing causing Ruby crash ([#4240][])
+ * Profiling: fix profiling warnings being really hard to silence ([#4232][])
+
  ## [2.8.0] - 2024-12-10

  ### Added
@@ -3057,7 +3097,9 @@ Release notes: https://github.com/DataDog/dd-trace-rb/releases/tag/v0.3.1
  Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1


- [Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v2.8.0...master
+ [Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v2.10.0...master
+ [2.10.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.9.0...v2.10.0
+ [2.9.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.8.0...v2.9.0
  [2.8.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.7.1...v2.8.0
  [2.7.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.6.0...v2.7.0
  [2.6.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.5.0...v2.6.0
@@ -4499,6 +4541,7 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
  [#3997]: https://github.com/DataDog/dd-trace-rb/issues/3997
  [#4014]: https://github.com/DataDog/dd-trace-rb/issues/4014
  [#4020]: https://github.com/DataDog/dd-trace-rb/issues/4020
+ [#4022]: https://github.com/DataDog/dd-trace-rb/issues/4022
  [#4024]: https://github.com/DataDog/dd-trace-rb/issues/4024
  [#4027]: https://github.com/DataDog/dd-trace-rb/issues/4027
  [#4033]: https://github.com/DataDog/dd-trace-rb/issues/4033
@@ -4519,6 +4562,24 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
  [#4195]: https://github.com/DataDog/dd-trace-rb/issues/4195
  [#4196]: https://github.com/DataDog/dd-trace-rb/issues/4196
  [#4197]: https://github.com/DataDog/dd-trace-rb/issues/4197
+ [#4208]: https://github.com/DataDog/dd-trace-rb/issues/4208
+ [#4228]: https://github.com/DataDog/dd-trace-rb/issues/4228
+ [#4232]: https://github.com/DataDog/dd-trace-rb/issues/4232
+ [#4237]: https://github.com/DataDog/dd-trace-rb/issues/4237
+ [#4239]: https://github.com/DataDog/dd-trace-rb/issues/4239
+ [#4240]: https://github.com/DataDog/dd-trace-rb/issues/4240
+ [#4249]: https://github.com/DataDog/dd-trace-rb/issues/4249
+ [#4257]: https://github.com/DataDog/dd-trace-rb/issues/4257
+ [#4266]: https://github.com/DataDog/dd-trace-rb/issues/4266
+ [#4272]: https://github.com/DataDog/dd-trace-rb/issues/4272
+ [#4285]: https://github.com/DataDog/dd-trace-rb/issues/4285
+ [#4288]: https://github.com/DataDog/dd-trace-rb/issues/4288
+ [#4292]: https://github.com/DataDog/dd-trace-rb/issues/4292
+ [#4301]: https://github.com/DataDog/dd-trace-rb/issues/4301
+ [#4303]: https://github.com/DataDog/dd-trace-rb/issues/4303
+ [#4311]: https://github.com/DataDog/dd-trace-rb/issues/4311
+ [#4321]: https://github.com/DataDog/dd-trace-rb/issues/4321
+ [#4323]: https://github.com/DataDog/dd-trace-rb/issues/4323
  [@AdrianLC]: https://github.com/AdrianLC
  [@Azure7111]: https://github.com/Azure7111
  [@BabyGroot]: https://github.com/BabyGroot
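The 2.9.0 and 2.10.0 entries above add user-facing switches. A minimal sketch of wiring them up, using only the option names given in the changelog (the values shown are illustrative, not defaults; passing `cache_key_enabled` through `instrument` follows the gem's usual integration-option pattern and is an assumption here):

```ruby
# Dynamic instrumentation preloading moved to datadog/di/preload in 2.9.0
# (formerly datadog/di/init, per the file rename in this diff).
require 'datadog/di/preload'
require 'datadog'

Datadog.configure do |c|
  # 2.10.0: toggle Runtime Application Self-Protection checks
  c.appsec.enabled = true
  c.appsec.rasp_enabled = true

  # 2.9.0: stop tagging spans with the ActiveSupport cache_key (#4022)
  c.tracing.instrument :active_support, cache_key_enabled: false
end
```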
data/ext/datadog_profiling_native_extension/clock_id.h CHANGED
@@ -5,13 +5,13 @@
  #include <ruby.h>

  // Contains the operating-system specific identifier needed to fetch CPU-time, and a flag to indicate if we failed to fetch it
- typedef struct thread_cpu_time_id {
+ typedef struct {
  bool valid;
  clockid_t clock_id;
  } thread_cpu_time_id;

  // Contains the current cpu time, and a flag to indicate if we failed to fetch it
- typedef struct thread_cpu_time {
+ typedef struct {
  bool valid;
  long result_ns;
  } thread_cpu_time;
data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c CHANGED
@@ -17,7 +17,7 @@
  #include "setup_signal_handler.h"
  #include "time_helpers.h"

- // Used to trigger the execution of Collectors::ThreadState, which implements all of the sampling logic
+ // Used to trigger the execution of Collectors::ThreadContext, which implements all of the sampling logic
  // itself; this class only implements the "when to do it" part.
  //
  // This file implements the native bits of the Datadog::Profiling::Collectors::CpuAndWallTimeWorker class
@@ -33,7 +33,7 @@
  // Currently, sampling Ruby threads requires calling Ruby VM APIs that are only safe to call while holding on to the
  // global VM lock (and are not async-signal safe -- cannot be called from a signal handler).
  //
- // @ivoanjo: As a note, I don't think we should think of this constraint as set in stone. Since can reach into the Ruby
+ // @ivoanjo: As a note, I don't think we should think of this constraint as set in stone. Since we can reach inside the Ruby
  // internals, we may be able to figure out a way of overcoming it. But it's definitely going to be hard so for now
  // we're considering it as a given.
  //
@@ -92,7 +92,7 @@ unsigned int MAX_ALLOC_WEIGHT = 10000;
  #endif

  // Contains state for a single CpuAndWallTimeWorker instance
- struct cpu_and_wall_time_worker_state {
+ typedef struct {
  // These are immutable after initialization

  bool gc_profiling_enabled;
@@ -187,7 +187,7 @@ struct cpu_and_wall_time_worker_state {
  uint64_t gvl_sampling_time_ns_max;
  uint64_t gvl_sampling_time_ns_total;
  } stats;
- };
+ } cpu_and_wall_time_worker_state;

  static VALUE _native_new(VALUE klass);
  static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _self);
@@ -195,7 +195,7 @@ static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr);
  static VALUE _native_sampling_loop(VALUE self, VALUE instance);
  static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE worker_thread);
  static VALUE stop(VALUE self_instance, VALUE optional_exception);
- static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optional_exception);
+ static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception);
  static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext);
  static void *run_sampling_trigger_loop(void *state_ptr);
  static void interrupt_sampling_trigger_loop(void *state_ptr);
@@ -221,14 +221,14 @@ static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance);
  static VALUE _native_stats_reset_not_thread_safe(DDTRACE_UNUSED VALUE self, VALUE instance);
  void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused);
  static void grab_gvl_and_sample(void);
- static void reset_stats_not_thread_safe(struct cpu_and_wall_time_worker_state *state);
+ static void reset_stats_not_thread_safe(cpu_and_wall_time_worker_state *state);
  static void sleep_for(uint64_t time_ns);
  static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self);
  static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *unused2);
- static void disable_tracepoints(struct cpu_and_wall_time_worker_state *state);
+ static void disable_tracepoints(cpu_and_wall_time_worker_state *state);
  static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self);
  static VALUE rescued_sample_allocation(VALUE tracepoint_data);
- static void delayed_error(struct cpu_and_wall_time_worker_state *state, const char *error);
+ static void delayed_error(cpu_and_wall_time_worker_state *state, const char *error);
  static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg);
  static VALUE _native_hold_signals(DDTRACE_UNUSED VALUE self);
  static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self);
@@ -262,7 +262,7 @@ static VALUE _native_gvl_profiling_hook_active(DDTRACE_UNUSED VALUE self, VALUE
  // This global state is needed because a bunch of functions on this file need to access it from situations
  // (e.g. signal handler) where it's impossible or just awkward to pass it as an argument.
  static VALUE active_sampler_instance = Qnil;
- static struct cpu_and_wall_time_worker_state *active_sampler_instance_state = NULL;
+ static cpu_and_wall_time_worker_state *active_sampler_instance_state = NULL;

  // See handle_sampling_signal for details on what this does
  #ifdef NO_POSTPONED_TRIGGER
@@ -334,7 +334,7 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
  rb_define_singleton_method(testing_module, "_native_gvl_profiling_hook_active", _native_gvl_profiling_hook_active, 1);
  }

- // This structure is used to define a Ruby object that stores a pointer to a struct cpu_and_wall_time_worker_state
+ // This structure is used to define a Ruby object that stores a pointer to a cpu_and_wall_time_worker_state
  // See also https://github.com/ruby/ruby/blob/master/doc/extension.rdoc for how this works
  static const rb_data_type_t cpu_and_wall_time_worker_typed_data = {
  .wrap_struct_name = "Datadog::Profiling::Collectors::CpuAndWallTimeWorker",
@@ -350,7 +350,7 @@ static const rb_data_type_t cpu_and_wall_time_worker_typed_data = {
  static VALUE _native_new(VALUE klass) {
  long now = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);

- struct cpu_and_wall_time_worker_state *state = ruby_xcalloc(1, sizeof(struct cpu_and_wall_time_worker_state));
+ cpu_and_wall_time_worker_state *state = ruby_xcalloc(1, sizeof(cpu_and_wall_time_worker_state));

  // Note: Any exceptions raised from this note until the TypedData_Wrap_Struct call will lead to the state memory
  // being leaked.
@@ -414,8 +414,8 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel
  ENFORCE_BOOLEAN(gvl_profiling_enabled);
  ENFORCE_BOOLEAN(skip_idle_samples_for_testing)

- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  state->gc_profiling_enabled = (gc_profiling_enabled == Qtrue);
  state->no_signals_workaround_enabled = (no_signals_workaround_enabled == Qtrue);
@@ -445,7 +445,7 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel

  // Since our state contains references to Ruby objects, we need to tell the Ruby GC about them
  static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr) {
- struct cpu_and_wall_time_worker_state *state = (struct cpu_and_wall_time_worker_state *) state_ptr;
+ cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr;

  rb_gc_mark(state->thread_context_collector_instance);
  rb_gc_mark(state->idle_sampling_helper_instance);
@@ -457,8 +457,8 @@ static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr) {

  // Called in a background thread created in CpuAndWallTimeWorker#start
  static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  // If we already got a delayed exception registered even before starting, raise before starting
  if (state->failure_exception != Qnil) {
@@ -466,7 +466,7 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
  rb_exc_raise(state->failure_exception);
  }

- struct cpu_and_wall_time_worker_state *old_state = active_sampler_instance_state;
+ cpu_and_wall_time_worker_state *old_state = active_sampler_instance_state;
  if (old_state != NULL) {
  if (is_thread_alive(old_state->owner_thread)) {
  rb_raise(
@@ -546,15 +546,15 @@
  }

  static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE worker_thread) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  state->stop_thread = worker_thread;

  return stop(self_instance, /* optional_exception: */ Qnil);
  }

- static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optional_exception) {
+ static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception) {
  atomic_store(&state->should_run, false);
  state->failure_exception = optional_exception;

@@ -563,8 +563,8 @@ static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optio
  }

  static VALUE stop(VALUE self_instance, VALUE optional_exception) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  stop_state(state, optional_exception);

@@ -575,7 +575,7 @@
  // We need to be careful not to change any state that may be observed OR to restore it if we do. For instance, if anything
  // we do here can set `errno`, then we must be careful to restore the old `errno` after the fact.
  static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  // This can potentially happen if the CpuAndWallTimeWorker was stopped while the signal delivery was happening; nothing to do
  if (state == NULL) return;
@@ -650,7 +650,7 @@

  // The actual sampling trigger loop always runs **without** the global vm lock.
  static void *run_sampling_trigger_loop(void *state_ptr) {
- struct cpu_and_wall_time_worker_state *state = (struct cpu_and_wall_time_worker_state *) state_ptr;
+ cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr;

  uint64_t minimum_time_between_signals = MILLIS_AS_NS(10);

@@ -709,13 +709,13 @@

  // This is called by the Ruby VM when it wants to shut down the background thread
  static void interrupt_sampling_trigger_loop(void *state_ptr) {
- struct cpu_and_wall_time_worker_state *state = (struct cpu_and_wall_time_worker_state *) state_ptr;
+ cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr;

  atomic_store(&state->should_run, false);
  }

  static void sample_from_postponed_job(DDTRACE_UNUSED void *_unused) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do
  if (state == NULL) return;
@@ -735,8 +735,8 @@
  }

  static VALUE rescued_sample_from_postponed_job(VALUE self_instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  long wall_time_ns_before_sample = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);

@@ -791,8 +791,8 @@ static VALUE _native_current_sigprof_signal_handler(DDTRACE_UNUSED VALUE self) {
  }

  static VALUE release_gvl_and_run_sampling_trigger_loop(VALUE instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  // Final preparations: Setup signal handler and enable tracepoints. We run these here and not in `_native_sampling_loop`
  // because they may raise exceptions.
@@ -842,7 +842,7 @@
  // This method exists only to enable testing Datadog::Profiling::Collectors::CpuAndWallTimeWorker behavior using RSpec.
  // It SHOULD NOT be used for other purposes.
  static VALUE _native_is_running(DDTRACE_UNUSED VALUE self, VALUE instance) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  return (state != NULL && is_thread_alive(state->owner_thread) && state->self_instance == instance) ? Qtrue : Qfalse;
  }
@@ -875,8 +875,8 @@ static VALUE _native_trigger_sample(DDTRACE_UNUSED VALUE self) {
  // This method exists only to enable testing Datadog::Profiling::Collectors::CpuAndWallTimeWorker behavior using RSpec.
  // It SHOULD NOT be used for other purposes.
  static VALUE _native_gc_tracepoint(DDTRACE_UNUSED VALUE self, VALUE instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  return state->gc_tracepoint;
  }
@@ -902,7 +902,7 @@ static void on_gc_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused) {
  int event = rb_tracearg_event_flag(rb_tracearg_from_tracepoint(tracepoint_data));
  if (event != RUBY_INTERNAL_EVENT_GC_ENTER && event != RUBY_INTERNAL_EVENT_GC_EXIT) return; // Unknown event

- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  // This should not happen in a normal situation because the tracepoint is always enabled after the instance is set
  // and disabled before it is cleared, but just in case...
@@ -926,7 +926,7 @@ static void on_gc_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused) {
  }

  static void after_gc_from_postponed_job(DDTRACE_UNUSED void *_unused) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do
  if (state == NULL) return;
@@ -981,8 +981,8 @@ static VALUE _native_simulate_sample_from_postponed_job(DDTRACE_UNUSED VALUE sel
  // In the future, if we add more other components with tracepoints, we will need to coordinate stopping all such
  // tracepoints before doing the other cleaning steps.
  static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  // Disable all tracepoints, so that there are no more attempts to mutate the profile
  disable_tracepoints(state);
@@ -1000,8 +1000,8 @@ static VALUE _native_is_sigprof_blocked_in_current_thread(DDTRACE_UNUSED VALUE s
  }

  static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  unsigned long total_cpu_samples_attempted = state->stats.cpu_sampled + state->stats.cpu_skipped;
  VALUE effective_cpu_sample_rate =
@@ -1059,14 +1059,14 @@ static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance) {
  }

  static VALUE _native_stats_reset_not_thread_safe(DDTRACE_UNUSED VALUE self, VALUE instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
  reset_stats_not_thread_safe(state);
  return Qnil;
  }

  void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  // This can potentially happen if the CpuAndWallTimeWorker was stopped while the IdleSamplingHelper was trying to execute this action
  if (state == NULL) return NULL;
@@ -1082,7 +1082,7 @@ void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused) {

  static void grab_gvl_and_sample(void) { rb_thread_call_with_gvl(simulate_sampling_signal_delivery, NULL); }

- static void reset_stats_not_thread_safe(struct cpu_and_wall_time_worker_state *state) {
+ static void reset_stats_not_thread_safe(cpu_and_wall_time_worker_state *state) {
  // NOTE: This is not really thread safe so ongoing sampling operations that are concurrent with a reset can have their stats:
  // * Lost (writes after stats retrieval but before reset).
  // * Included in the previous stats window (writes before stats retrieval and reset).
@@ -1116,7 +1116,7 @@ static void sleep_for(uint64_t time_ns) {
  }

  static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state;
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state;

  bool are_allocations_being_tracked = state != NULL && state->allocation_profiling_enabled && state->allocation_counting_enabled;

@@ -1149,7 +1149,7 @@
  // call `rb_tracearg_from_tracepoint(anything)` anywhere during this function or its callees to get the data, so that's
  // why it's not being passed as an argument.
  static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *unused2) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  // This should not happen in a normal situation because the tracepoint is always enabled after the instance is set
  // and disabled before it is cleared, but just in case...
@@ -1171,6 +1171,16 @@ static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *u
  return;
  }

+ // If Ruby is in the middle of raising an exception, we don't want to try to sample. This is because if we accidentally
+ // trigger an exception inside the profiler code, bad things will happen (specifically, Ruby will try to kill off the
+ // thread even though we may try to catch the exception).
+ //
+ // Note that "in the middle of raising an exception" means the exception itself has already been allocated.
+ // What's getting allocated now is probably the backtrace objects (@ivoanjo or at least that's what I've observed)
+ if (is_raised_flag_set(rb_thread_current())) {
+ return;
+ }
+
  // Hot path: Dynamic sampling rate is usually enabled and the sampling decision is usually false
  if (RB_LIKELY(state->dynamic_sampling_rate_enabled && !discrete_dynamic_sampler_should_sample(&state->allocation_sampler))) {
  state->stats.allocation_skipped++;
@@ -1225,7 +1235,7 @@ static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *u
  state->during_sample = false;
  }

- static void disable_tracepoints(struct cpu_and_wall_time_worker_state *state) {
+ static void disable_tracepoints(cpu_and_wall_time_worker_state *state) {
  if (state->gc_tracepoint != Qnil) {
  rb_tracepoint_disable(state->gc_tracepoint);
  }
@@ -1254,7 +1264,7 @@ static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self) {
  }

  static VALUE rescued_sample_allocation(DDTRACE_UNUSED VALUE unused) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  // This should not happen in a normal situation because on_newobj_event already checked for this, but just in case...
  if (state == NULL) return Qnil;
@@ -1283,7 +1293,7 @@ static VALUE rescued_sample_allocation(DDTRACE_UNUSED VALUE unused) {
  return Qnil;
  }

- static void delayed_error(struct cpu_and_wall_time_worker_state *state, const char *error) {
+ static void delayed_error(cpu_and_wall_time_worker_state *state, const char *error) {
  // If we can't raise an immediate exception at the calling site, use the asynchronous flow through the main worker loop.
  stop_state(state, rb_exc_new_cstr(rb_eRuntimeError, error));
  }
@@ -1291,8 +1301,8 @@ static void delayed_error(struct cpu_and_wall_time_worker_state *state, const ch
  static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg) {
  ENFORCE_TYPE(error_msg, T_STRING);

- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  delayed_error(state, rb_string_value_cstr(&error_msg));

@@ -1345,7 +1355,7 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
  rb_postponed_job_register_one(0, after_gvl_running_from_postponed_job, NULL);
  #endif
  } else if (result == ON_GVL_RUNNING_DONT_SAMPLE) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  if (state == NULL) return; // This should not happen, but just in case...

@@ -1358,7 +1368,7 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
  }

  static void after_gvl_running_from_postponed_job(DDTRACE_UNUSED void *_unused) {
- struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
+ cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

  // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do
  if (state == NULL) return;
@@ -1372,8 +1382,8 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
  }

  static VALUE rescued_after_gvl_running_from_postponed_job(VALUE self_instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  long wall_time_ns_before_sample = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);
  thread_context_collector_sample_after_gvl_running(state->thread_context_collector_instance, rb_thread_current(), wall_time_ns_before_sample);
@@ -1394,8 +1404,8 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
  }

  static VALUE _native_gvl_profiling_hook_active(DDTRACE_UNUSED VALUE self, VALUE instance) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

  return state->gvl_profiling_hook != NULL ? Qtrue : Qfalse;
  }
data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c CHANGED
@@ -333,7 +333,7 @@ static VALUE _native_should_sample(VALUE self, VALUE now);
  static VALUE _native_after_sample(VALUE self, VALUE now);
  static VALUE _native_state_snapshot(VALUE self);

- typedef struct sampler_state {
+ typedef struct {
  discrete_dynamic_sampler sampler;
  } sampler_state;

data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h CHANGED
@@ -16,7 +16,7 @@
  // every event and is thus, in theory, susceptible to some pattern
  // biases. In practice, the dynamic readjustment of sampling interval
  // and randomized starting point should help with avoiding heavy biases.
- typedef struct discrete_dynamic_sampler {
+ typedef struct {
  // --- Config ---
  // Name of this sampler for debug logs.
  const char *debug_name;