datadog 2.25.0 → 2.27.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +48 -2
  3. data/ext/datadog_profiling_native_extension/clock_id_from_pthread.c +2 -1
  4. data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +100 -29
  5. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c +2 -2
  6. data/ext/datadog_profiling_native_extension/collectors_gc_profiling_helper.c +3 -2
  7. data/ext/datadog_profiling_native_extension/collectors_stack.c +6 -5
  8. data/ext/datadog_profiling_native_extension/collectors_thread_context.c +16 -12
  9. data/ext/datadog_profiling_native_extension/crashtracking_runtime_stacks.c +2 -2
  10. data/ext/datadog_profiling_native_extension/datadog_ruby_common.c +48 -1
  11. data/ext/datadog_profiling_native_extension/datadog_ruby_common.h +41 -0
  12. data/ext/datadog_profiling_native_extension/encoded_profile.c +2 -1
  13. data/ext/datadog_profiling_native_extension/heap_recorder.c +24 -24
  14. data/ext/datadog_profiling_native_extension/http_transport.c +10 -4
  15. data/ext/datadog_profiling_native_extension/libdatadog_helpers.c +3 -22
  16. data/ext/datadog_profiling_native_extension/libdatadog_helpers.h +0 -5
  17. data/ext/datadog_profiling_native_extension/private_vm_api_access.c +9 -8
  18. data/ext/datadog_profiling_native_extension/profiling.c +20 -15
  19. data/ext/datadog_profiling_native_extension/ruby_helpers.c +55 -44
  20. data/ext/datadog_profiling_native_extension/ruby_helpers.h +17 -5
  21. data/ext/datadog_profiling_native_extension/setup_signal_handler.c +8 -2
  22. data/ext/datadog_profiling_native_extension/setup_signal_handler.h +3 -0
  23. data/ext/datadog_profiling_native_extension/stack_recorder.c +16 -16
  24. data/ext/datadog_profiling_native_extension/unsafe_api_calls_check.c +2 -1
  25. data/ext/datadog_profiling_native_extension/unsafe_api_calls_check.h +5 -2
  26. data/ext/libdatadog_api/crashtracker.c +5 -8
  27. data/ext/libdatadog_api/datadog_ruby_common.c +48 -1
  28. data/ext/libdatadog_api/datadog_ruby_common.h +41 -0
  29. data/ext/libdatadog_api/ddsketch.c +4 -8
  30. data/ext/libdatadog_api/feature_flags.c +5 -5
  31. data/ext/libdatadog_api/helpers.h +27 -0
  32. data/ext/libdatadog_api/init.c +4 -0
  33. data/ext/libdatadog_extconf_helpers.rb +1 -1
  34. data/lib/datadog/appsec/api_security/endpoint_collection/rails_collector.rb +8 -1
  35. data/lib/datadog/appsec/api_security/endpoint_collection/rails_route_serializer.rb +9 -2
  36. data/lib/datadog/appsec/component.rb +1 -1
  37. data/lib/datadog/appsec/context.rb +3 -3
  38. data/lib/datadog/appsec/contrib/excon/integration.rb +1 -1
  39. data/lib/datadog/appsec/contrib/excon/patcher.rb +1 -1
  40. data/lib/datadog/appsec/contrib/excon/ssrf_detection_middleware.rb +47 -12
  41. data/lib/datadog/appsec/contrib/faraday/ssrf_detection_middleware.rb +32 -15
  42. data/lib/datadog/appsec/contrib/rest_client/integration.rb +1 -1
  43. data/lib/datadog/appsec/contrib/rest_client/patcher.rb +1 -1
  44. data/lib/datadog/appsec/contrib/rest_client/request_ssrf_detection_patch.rb +50 -14
  45. data/lib/datadog/appsec/ext.rb +2 -0
  46. data/lib/datadog/appsec/metrics/collector.rb +8 -3
  47. data/lib/datadog/appsec/metrics/exporter.rb +7 -0
  48. data/lib/datadog/appsec/metrics/telemetry.rb +7 -2
  49. data/lib/datadog/appsec/metrics.rb +5 -5
  50. data/lib/datadog/appsec/remote.rb +4 -4
  51. data/lib/datadog/appsec.rb +7 -1
  52. data/lib/datadog/core/configuration/components.rb +1 -0
  53. data/lib/datadog/core/configuration/settings.rb +17 -0
  54. data/lib/datadog/core/configuration/supported_configurations.rb +1 -0
  55. data/lib/datadog/core/runtime/metrics.rb +11 -1
  56. data/lib/datadog/core/telemetry/logger.rb +2 -0
  57. data/lib/datadog/core/telemetry/logging.rb +20 -2
  58. data/lib/datadog/profiling/collectors/cpu_and_wall_time_worker.rb +3 -2
  59. data/lib/datadog/profiling/component.rb +13 -0
  60. data/lib/datadog/profiling/exporter.rb +4 -0
  61. data/lib/datadog/profiling/ext/exec_monkey_patch.rb +32 -0
  62. data/lib/datadog/profiling/flush.rb +3 -0
  63. data/lib/datadog/profiling/profiler.rb +3 -5
  64. data/lib/datadog/profiling/scheduler.rb +8 -7
  65. data/lib/datadog/profiling/tag_builder.rb +1 -0
  66. data/lib/datadog/version.rb +1 -1
  67. metadata +10 -8
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: b31bd418a350c8999b821462c0699a19af1b02242a7a22f4fee10859cd520d4c
4
- data.tar.gz: 4a77d7cb714bfb82312d642b583d23f7b9407435c6577b864f1ca98751826ec6
3
+ metadata.gz: fec5b056193bb744192c4df3a9843e630d0675a53d88162ba51d5f250ca7a31d
4
+ data.tar.gz: bb542f2f2c8164afc1426c103bd49113b10a5282be3ee13872fc13d138fa6183
5
5
  SHA512:
6
- metadata.gz: f51a7d85ab5e057e5a485f7dffe7d329b4aacde56d784239a15dbb8a48508d082ba69dbecb992336d132965b642ccdf39cd4f4e2378d749174cab64496b1fe2f
7
- data.tar.gz: 83504124b5fad578d31002d5ab4d19450c14c8a857b7b553925bbff2cb2fdb53ee2dbeb2e7c774128c92203e69b3c484896591435ec4df01740e2028b70ff7fe
6
+ metadata.gz: cd063e5e9e617d771603c6eae2305b1c5677587b3ae75690e1e96d273957fc20dc911bdd03264982ded4353c263fd06423f990a00135be3ed310cfd9a1a9c259
7
+ data.tar.gz: dbeea8e08bdd6e1f2fb592532410008980a44a10d349437da297fae0f49f21aa93ec72f0af909e5844d5bf5ddf3fa7be36762627505508bf509d78f57c81220e
data/CHANGELOG.md CHANGED
@@ -2,6 +2,41 @@
2
2
 
3
3
  ## [Unreleased]
4
4
 
5
+ ## [2.27.0] - 2026-01-21
6
+
7
+ ### Added
8
+
9
+ * AppSec: Add analysis of downstream requests ([#5206][])
10
+ * Telemetry: Add static error reporting for native extensions ([#5076][])
11
+
12
+ ### Changed
13
+
14
+ * SSI: Update injector to v1.2.1 ([#5254][])
15
+ * SSI: Prepare for expanded platform support ([#5254][])
16
+ * SSI: Improve remote resolution with expanded platform support via fallback to local gems ([#5254][])
17
+ * SSI: Introduce experimental fully local resolution support ([#5254][])
18
+ * Profiling: Telemetry-safe error reporting for native extensions ([#5076][])
19
+
20
+
21
+ ### Fixed
22
+
23
+ * Profiler: Fix interrupting new processes with the message `Profiling timer expired` during `exec` ([#5246][])
24
+ * Profiler: Fix rare race in profiler causing flaky spec on Ruby 2.7 ([#5247][])
25
+ * AppSec: Fix reporting of multi-method routes for Endpoint Collection ([#5240][])
26
+ * AppSec: Fix reporting of Rails routes that accept multiple request methods. ([#5240][])
27
+
28
+ ## [2.26.0] - 2026-01-16
29
+
30
+ ### Added
31
+
32
+ * Core: Add process tags to runtime metrics when `DD_EXPERIMENTAL_PROPAGATE_PROCESS_TAGS_ENABLED` is enabled. ([#5210][])
33
+ * SSI: Add experimental dependency injection validation.
34
+
35
+ ### Changed
36
+
37
+ * Profiling: Improve profiler error reporting. ([#5237][])
38
+ * SSI: Improve injection debug error reporting. ([#5238][])
39
+
5
40
  ## [2.25.0] - 2026-01-13
6
41
 
7
42
  ### Added
@@ -3437,7 +3472,9 @@ Release notes: https://github.com/DataDog/dd-trace-rb/releases/tag/v0.3.1
3437
3472
  Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
3438
3473
 
3439
3474
 
3440
- [Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v2.25.0...master
3475
+ [Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v2.27.0...master
3476
+ [2.27.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.26.0...v2.27.0
3477
+ [2.26.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.25.0...v2.26.0
3441
3478
  [2.25.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.24.0...v2.25.0
3442
3479
  [2.24.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.23.0...v2.24.0
3443
3480
  [2.23.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.22.0...v2.23.0
@@ -5083,6 +5120,7 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
5083
5120
  [#5054]: https://github.com/DataDog/dd-trace-rb/issues/5054
5084
5121
  [#5058]: https://github.com/DataDog/dd-trace-rb/issues/5058
5085
5122
  [#5073]: https://github.com/DataDog/dd-trace-rb/issues/5073
5123
+ [#5076]: https://github.com/DataDog/dd-trace-rb/issues/5076
5086
5124
  [#5086]: https://github.com/DataDog/dd-trace-rb/issues/5086
5087
5125
  [#5091]: https://github.com/DataDog/dd-trace-rb/issues/5091
5088
5126
  [#5122]: https://github.com/DataDog/dd-trace-rb/issues/5122
@@ -5100,8 +5138,16 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
5100
5138
  [#5176]: https://github.com/DataDog/dd-trace-rb/issues/5176
5101
5139
  [#5194]: https://github.com/DataDog/dd-trace-rb/issues/5194
5102
5140
  [#5197]: https://github.com/DataDog/dd-trace-rb/issues/5197
5141
+ [#5206]: https://github.com/DataDog/dd-trace-rb/issues/5206
5142
+ [#5210]: https://github.com/DataDog/dd-trace-rb/issues/5210
5103
5143
  [#5215]: https://github.com/DataDog/dd-trace-rb/issues/5215
5104
5144
  [#5222]: https://github.com/DataDog/dd-trace-rb/issues/5222
5145
+ [#5237]: https://github.com/DataDog/dd-trace-rb/issues/5237
5146
+ [#5238]: https://github.com/DataDog/dd-trace-rb/issues/5238
5147
+ [#5240]: https://github.com/DataDog/dd-trace-rb/issues/5240
5148
+ [#5246]: https://github.com/DataDog/dd-trace-rb/issues/5246
5149
+ [#5247]: https://github.com/DataDog/dd-trace-rb/issues/5247
5150
+ [#5254]: https://github.com/DataDog/dd-trace-rb/issues/5254
5105
5151
  [@AdrianLC]: https://github.com/AdrianLC
5106
5152
  [@Azure7111]: https://github.com/Azure7111
5107
5153
  [@BabyGroot]: https://github.com/BabyGroot
@@ -5256,4 +5302,4 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
5256
5302
  [@y-yagi]: https://github.com/y-yagi
5257
5303
  [@yujideveloper]: https://github.com/yujideveloper
5258
5304
  [@yukimurasawa]: https://github.com/yukimurasawa
5259
- [@zachmccormick]: https://github.com/zachmccormick
5305
+ [@zachmccormick]: https://github.com/zachmccormick
@@ -11,6 +11,7 @@
11
11
  #include "clock_id.h"
12
12
  #include "helpers.h"
13
13
  #include "private_vm_api_access.h"
14
+ #include "ruby_helpers.h"
14
15
  #include "time_helpers.h"
15
16
 
16
17
  // Validate that our home-cooked pthread_id_for() matches pthread_self() for the current thread
@@ -18,7 +19,7 @@ void self_test_clock_id(void) {
18
19
  rb_nativethread_id_t expected_pthread_id = pthread_self();
19
20
  rb_nativethread_id_t actual_pthread_id = pthread_id_for(rb_thread_current());
20
21
 
21
- if (expected_pthread_id != actual_pthread_id) rb_raise(rb_eRuntimeError, "pthread_id_for() self-test failed");
22
+ if (expected_pthread_id != actual_pthread_id) raise_error(rb_eRuntimeError, "pthread_id_for() self-test failed");
22
23
  }
23
24
 
24
25
  // Safety: This function is assumed never to raise exceptions by callers
@@ -76,8 +76,6 @@
76
76
  //
77
77
  // ---
78
78
 
79
- #define ERR_CLOCK_FAIL "failed to get clock time"
80
-
81
79
  // Maximum allowed value for an allocation weight. Attempts to use higher values will result in clamping.
82
80
  // See https://docs.google.com/document/d/1lWLB714wlLBBq6T4xZyAc4a5wtWhSmr4-hgiPKeErlA/edit#heading=h.ugp0zxcj5iqh
83
81
  // (Datadog-only link) for research backing the choice of this value.
@@ -117,6 +115,7 @@ typedef struct {
117
115
  // When something goes wrong during sampling, we record the Ruby exception here, so that it can be "re-raised" on
118
116
  // the CpuAndWallTimeWorker thread
119
117
  VALUE failure_exception;
118
+ const char *failure_exception_during_operation;
120
119
  // Used by `_native_stop` to flag the worker thread to start (see comment on `_native_sampling_loop`)
121
120
  VALUE stop_thread;
122
121
 
@@ -191,17 +190,17 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel
191
190
  static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr);
192
191
  static VALUE _native_sampling_loop(VALUE self, VALUE instance);
193
192
  static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE worker_thread);
194
- static VALUE stop(VALUE self_instance, VALUE optional_exception);
195
- static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception);
193
+ static VALUE stop(VALUE self_instance, VALUE optional_exception, const char *optional_exception_during_operation);
194
+ static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception, const char *optional_operation_name);
196
195
  static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext);
197
196
  static void *run_sampling_trigger_loop(void *state_ptr);
198
197
  static void interrupt_sampling_trigger_loop(void *state_ptr);
199
198
  static void sample_from_postponed_job(DDTRACE_UNUSED void *_unused);
200
199
  static VALUE rescued_sample_from_postponed_job(VALUE self_instance);
201
- static VALUE handle_sampling_failure(VALUE self_instance, VALUE exception);
202
200
  static VALUE _native_current_sigprof_signal_handler(DDTRACE_UNUSED VALUE self);
203
201
  static VALUE release_gvl_and_run_sampling_trigger_loop(VALUE instance);
204
202
  static VALUE _native_is_running(DDTRACE_UNUSED VALUE self, VALUE instance);
203
+ static VALUE _native_failure_exception_during_operation(DDTRACE_UNUSED VALUE self, VALUE instance);
205
204
  static void testing_signal_handler(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext);
206
205
  static VALUE _native_install_testing_signal_handler(DDTRACE_UNUSED VALUE self);
207
206
  static VALUE _native_remove_testing_signal_handler(DDTRACE_UNUSED VALUE self);
@@ -209,7 +208,12 @@ static VALUE _native_trigger_sample(DDTRACE_UNUSED VALUE self);
209
208
  static VALUE _native_gc_tracepoint(DDTRACE_UNUSED VALUE self, VALUE instance);
210
209
  static void on_gc_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused);
211
210
  static void after_gc_from_postponed_job(DDTRACE_UNUSED void *_unused);
212
- static VALUE safely_call(VALUE (*function_to_call_safely)(VALUE), VALUE function_to_call_safely_arg, VALUE instance);
211
+ static VALUE safely_call(
212
+ VALUE (*function_to_call_safely)(VALUE),
213
+ VALUE function_to_call_safely_arg,
214
+ VALUE instance,
215
+ VALUE (*handle_sampling_failure)(VALUE, VALUE)
216
+ );
213
217
  static VALUE _native_simulate_handle_sampling_signal(DDTRACE_UNUSED VALUE self);
214
218
  static VALUE _native_simulate_sample_from_postponed_job(DDTRACE_UNUSED VALUE self);
215
219
  static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE instance);
@@ -226,6 +230,7 @@ static void disable_tracepoints(cpu_and_wall_time_worker_state *state);
226
230
  static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self);
227
231
  static VALUE rescued_sample_allocation(VALUE tracepoint_data);
228
232
  static void delayed_error(cpu_and_wall_time_worker_state *state, const char *error);
233
+ static void delayed_error_clock_failure(cpu_and_wall_time_worker_state *state);
229
234
  static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg);
230
235
  static VALUE _native_hold_signals(DDTRACE_UNUSED VALUE self);
231
236
  static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self);
@@ -235,6 +240,10 @@ static void after_gvl_running_from_postponed_job(DDTRACE_UNUSED void *_unused);
235
240
  #endif
236
241
  static VALUE rescued_after_gvl_running_from_postponed_job(VALUE self_instance);
237
242
  static VALUE _native_gvl_profiling_hook_active(DDTRACE_UNUSED VALUE self, VALUE instance);
243
+ static VALUE handle_sampling_failure_rescued_sample_from_postponed_job(VALUE self_instance, VALUE exception);
244
+ static VALUE handle_sampling_failure_thread_context_collector_sample_after_gc(VALUE self_instance, VALUE exception);
245
+ static VALUE handle_sampling_failure_rescued_sample_allocation(VALUE self_instance, VALUE exception);
246
+ static VALUE handle_sampling_failure_rescued_after_gvl_running_from_postponed_job(VALUE self_instance, VALUE exception);
238
247
  static inline void during_sample_enter(cpu_and_wall_time_worker_state* state);
239
248
  static inline void during_sample_exit(cpu_and_wall_time_worker_state* state);
240
249
 
@@ -262,6 +271,7 @@ static inline void during_sample_exit(cpu_and_wall_time_worker_state* state);
262
271
  // (e.g. signal handler) where it's impossible or just awkward to pass it as an argument.
263
272
  static VALUE active_sampler_instance = Qnil;
264
273
  static cpu_and_wall_time_worker_state *active_sampler_instance_state = NULL;
274
+ static VALUE clock_failure_exception_class = Qnil;
265
275
 
266
276
  // See handle_sampling_signal for details on what this does
267
277
  #ifdef NO_POSTPONED_TRIGGER
@@ -289,7 +299,7 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
289
299
  after_gc_from_postponed_job_handle == POSTPONED_JOB_HANDLE_INVALID ||
290
300
  after_gvl_running_from_postponed_job_handle == POSTPONED_JOB_HANDLE_INVALID
291
301
  ) {
292
- rb_raise(rb_eRuntimeError, "Failed to register profiler postponed jobs (got POSTPONED_JOB_HANDLE_INVALID)");
302
+ raise_error(rb_eRuntimeError, "Failed to register profiler postponed jobs (got POSTPONED_JOB_HANDLE_INVALID)");
293
303
  }
294
304
  #else
295
305
  gc_finalize_deferred_workaround = objspace_ptr_for_gc_finalize_deferred_workaround();
@@ -299,6 +309,8 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
299
309
  VALUE collectors_cpu_and_wall_time_worker_class = rb_define_class_under(collectors_module, "CpuAndWallTimeWorker", rb_cObject);
300
310
  // Hosts methods used for testing the native code using RSpec
301
311
  VALUE testing_module = rb_define_module_under(collectors_cpu_and_wall_time_worker_class, "Testing");
312
+ clock_failure_exception_class = rb_define_class_under(collectors_cpu_and_wall_time_worker_class, "ClockFailure", rb_eRuntimeError);
313
+ rb_gc_register_mark_object(clock_failure_exception_class);
302
314
 
303
315
  // Instances of the CpuAndWallTimeWorker class are "TypedData" objects.
304
316
  // "TypedData" objects are special objects in the Ruby VM that can wrap C structs.
@@ -318,6 +330,7 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
318
330
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_stats_reset_not_thread_safe", _native_stats_reset_not_thread_safe, 1);
319
331
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_allocation_count", _native_allocation_count, 0);
320
332
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_is_running?", _native_is_running, 1);
333
+ rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_failure_exception_during_operation", _native_failure_exception_during_operation, 1);
321
334
  rb_define_singleton_method(testing_module, "_native_current_sigprof_signal_handler", _native_current_sigprof_signal_handler, 0);
322
335
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_hold_signals", _native_hold_signals, 0);
323
336
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_resume_signals", _native_resume_signals, 0);
@@ -370,6 +383,7 @@ static VALUE _native_new(VALUE klass) {
370
383
 
371
384
  atomic_init(&state->should_run, false);
372
385
  state->failure_exception = Qnil;
386
+ state->failure_exception_during_operation = NULL;
373
387
  state->stop_thread = Qnil;
374
388
 
375
389
  during_sample_exit(state);
@@ -472,10 +486,7 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
472
486
  cpu_and_wall_time_worker_state *old_state = active_sampler_instance_state;
473
487
  if (old_state != NULL) {
474
488
  if (is_thread_alive(old_state->owner_thread)) {
475
- rb_raise(
476
- rb_eRuntimeError,
477
- "Could not start CpuAndWallTimeWorker: There's already another instance of CpuAndWallTimeWorker active in a different thread"
478
- );
489
+ raise_error(rb_eRuntimeError, "Could not start CpuAndWallTimeWorker: There's already another instance of CpuAndWallTimeWorker active in a different thread");
479
490
  } else {
480
491
  // The previously active thread seems to have died without cleaning up after itself.
481
492
  // In this case, we can still go ahead and start the profiler BUT we make sure to disable any existing tracepoint
@@ -554,22 +565,24 @@ static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE
554
565
 
555
566
  state->stop_thread = worker_thread;
556
567
 
557
- return stop(self_instance, /* optional_exception: */ Qnil);
568
+ return stop(self_instance, Qnil, NULL);
558
569
  }
559
570
 
560
- static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception) {
571
+ // When providing an `optional_exception`, `optional_exception_during_operation` should be provided as well
572
+ static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception, const char *optional_exception_during_operation) {
561
573
  atomic_store(&state->should_run, false);
562
574
  state->failure_exception = optional_exception;
575
+ state->failure_exception_during_operation = optional_exception_during_operation;
563
576
 
564
577
  // Disable the tracepoints as soon as possible, so the VM doesn't keep on calling them
565
578
  disable_tracepoints(state);
566
579
  }
567
580
 
568
- static VALUE stop(VALUE self_instance, VALUE optional_exception) {
581
+ static VALUE stop(VALUE self_instance, VALUE optional_exception, const char *optional_exception_during_operation) {
569
582
  cpu_and_wall_time_worker_state *state;
570
583
  TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
571
584
 
572
- stop_state(state, optional_exception);
585
+ stop_state(state, optional_exception, optional_exception_during_operation);
573
586
 
574
587
  return Qtrue;
575
588
  }
@@ -669,6 +682,10 @@ static void *run_sampling_trigger_loop(void *state_ptr) {
669
682
  // we're doing this, so we may still not signal the correct thread from time to time, but our signal handler
670
683
  // includes a check to see if it got called in the right thread
671
684
  state->stats.interrupt_thread_attempts++;
685
+
686
+ // Pick up any last-minute attempts to stop before we send the signal
687
+ if (!atomic_load(&state->should_run)) return NULL;
688
+
672
689
  pthread_kill(owner.owner, SIGPROF);
673
690
  } else {
674
691
  if (state->skip_idle_samples_for_testing) {
@@ -726,7 +743,12 @@ static void sample_from_postponed_job(DDTRACE_UNUSED void *_unused) {
726
743
  during_sample_enter(state);
727
744
 
728
745
  // Rescue against any exceptions that happen during sampling
729
- safely_call(rescued_sample_from_postponed_job, state->self_instance, state->self_instance);
746
+ safely_call(
747
+ rescued_sample_from_postponed_job,
748
+ state->self_instance,
749
+ state->self_instance,
750
+ handle_sampling_failure_rescued_sample_from_postponed_job
751
+ );
730
752
 
731
753
  during_sample_exit(state);
732
754
  }
@@ -763,11 +785,6 @@ static VALUE rescued_sample_from_postponed_job(VALUE self_instance) {
763
785
  return Qnil;
764
786
  }
765
787
 
766
- static VALUE handle_sampling_failure(VALUE self_instance, VALUE exception) {
767
- stop(self_instance, exception);
768
- return Qnil;
769
- }
770
-
771
788
  // This method exists only to enable testing Datadog::Profiling::Collectors::CpuAndWallTimeWorker behavior using RSpec.
772
789
  // It SHOULD NOT be used for other purposes.
773
790
  static VALUE _native_current_sigprof_signal_handler(DDTRACE_UNUSED VALUE self) {
@@ -821,7 +838,7 @@ static VALUE release_gvl_and_run_sampling_trigger_loop(VALUE instance) {
821
838
  NULL
822
839
  );
823
840
  #else
824
- rb_raise(rb_eArgError, "GVL profiling is not supported in this Ruby version");
841
+ raise_error(rb_eArgError, "GVL profiling is not supported in this Ruby version");
825
842
  #endif
826
843
  }
827
844
 
@@ -844,6 +861,15 @@ static VALUE _native_is_running(DDTRACE_UNUSED VALUE self, VALUE instance) {
844
861
  return (state != NULL && is_thread_alive(state->owner_thread) && state->self_instance == instance) ? Qtrue : Qfalse;
845
862
  }
846
863
 
864
+ static VALUE _native_failure_exception_during_operation(DDTRACE_UNUSED VALUE self, VALUE instance) {
865
+ cpu_and_wall_time_worker_state *state;
866
+ TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
867
+
868
+ if (state->failure_exception_during_operation == NULL) return Qnil;
869
+
870
+ return rb_str_new_cstr(state->failure_exception_during_operation);
871
+ }
872
+
847
873
  static void testing_signal_handler(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext) {
848
874
  /* Does nothing on purpose */
849
875
  }
@@ -936,14 +962,24 @@ static void after_gc_from_postponed_job(DDTRACE_UNUSED void *_unused) {
936
962
 
937
963
  during_sample_enter(state);
938
964
 
939
- safely_call(thread_context_collector_sample_after_gc, state->thread_context_collector_instance, state->self_instance);
965
+ safely_call(
966
+ thread_context_collector_sample_after_gc,
967
+ state->thread_context_collector_instance,
968
+ state->self_instance,
969
+ handle_sampling_failure_thread_context_collector_sample_after_gc
970
+ );
940
971
 
941
972
  during_sample_exit(state);
942
973
  }
943
974
 
944
975
  // Equivalent to Ruby begin/rescue call, where we call a C function and jump to the exception handler if an
945
976
  // exception gets raised within
946
- static VALUE safely_call(VALUE (*function_to_call_safely)(VALUE), VALUE function_to_call_safely_arg, VALUE instance) {
977
+ static VALUE safely_call(
978
+ VALUE (*function_to_call_safely)(VALUE),
979
+ VALUE function_to_call_safely_arg,
980
+ VALUE instance,
981
+ VALUE (*handle_sampling_failure)(VALUE, VALUE)
982
+ ) {
947
983
  VALUE exception_handler_function_arg = instance;
948
984
  return rb_rescue2(
949
985
  function_to_call_safely,
@@ -1119,7 +1155,7 @@ static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self) {
1119
1155
  #define HANDLE_CLOCK_FAILURE(call) ({ \
1120
1156
  long _result = (call); \
1121
1157
  if (_result == 0) { \
1122
- delayed_error(state, ERR_CLOCK_FAIL); \
1158
+ delayed_error_clock_failure(state); \
1123
1159
  return; \
1124
1160
  } \
1125
1161
  _result; \
@@ -1203,12 +1239,17 @@ static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *u
1203
1239
  during_sample_enter(state);
1204
1240
 
1205
1241
  // Rescue against any exceptions that happen during sampling
1206
- safely_call(rescued_sample_allocation, Qnil, state->self_instance);
1242
+ safely_call(
1243
+ rescued_sample_allocation,
1244
+ Qnil,
1245
+ state->self_instance,
1246
+ handle_sampling_failure_rescued_sample_allocation
1247
+ );
1207
1248
 
1208
1249
  if (state->dynamic_sampling_rate_enabled) {
1209
1250
  long now = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
1210
1251
  if (now == 0) {
1211
- delayed_error(state, ERR_CLOCK_FAIL);
1252
+ delayed_error_clock_failure(state);
1212
1253
  // NOTE: Not short-circuiting here to make sure cleanup happens
1213
1254
  }
1214
1255
  uint64_t sampling_time_ns = discrete_dynamic_sampler_after_sample(&state->allocation_sampler, now);
@@ -1284,7 +1325,12 @@ static VALUE rescued_sample_allocation(DDTRACE_UNUSED VALUE unused) {
1284
1325
 
1285
1326
  static void delayed_error(cpu_and_wall_time_worker_state *state, const char *error) {
1286
1327
  // If we can't raise an immediate exception at the calling site, use the asynchronous flow through the main worker loop.
1287
- stop_state(state, rb_exc_new_cstr(rb_eRuntimeError, error));
1328
+ stop_state(state, rb_exc_new_cstr(rb_eRuntimeError, error), "delayed_error");
1329
+ }
1330
+
1331
+ static void delayed_error_clock_failure(cpu_and_wall_time_worker_state *state) {
1332
+ // If we can't raise an immediate exception at the calling site, use the asynchronous flow through the main worker loop.
1333
+ stop_state(state, rb_exc_new_cstr(clock_failure_exception_class, "failed to get clock time"), "delayed_error_clock_failure");
1288
1334
  }
1289
1335
 
1290
1336
  static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg) {
@@ -1365,7 +1411,12 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
1365
1411
  during_sample_enter(state);
1366
1412
 
1367
1413
  // Rescue against any exceptions that happen during sampling
1368
- safely_call(rescued_after_gvl_running_from_postponed_job, state->self_instance, state->self_instance);
1414
+ safely_call(
1415
+ rescued_after_gvl_running_from_postponed_job,
1416
+ state->self_instance,
1417
+ state->self_instance,
1418
+ handle_sampling_failure_rescued_after_gvl_running_from_postponed_job
1419
+ );
1369
1420
 
1370
1421
  during_sample_exit(state);
1371
1422
  }
@@ -1404,6 +1455,26 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
1404
1455
  }
1405
1456
  #endif
1406
1457
 
1458
+ static VALUE handle_sampling_failure_rescued_sample_from_postponed_job(VALUE self_instance, VALUE exception) {
1459
+ stop(self_instance, exception, "rescued_sample_from_postponed_job");
1460
+ return Qnil;
1461
+ }
1462
+
1463
+ static VALUE handle_sampling_failure_thread_context_collector_sample_after_gc(VALUE self_instance, VALUE exception) {
1464
+ stop(self_instance, exception, "thread_context_collector_sample_after_gc");
1465
+ return Qnil;
1466
+ }
1467
+
1468
+ static VALUE handle_sampling_failure_rescued_sample_allocation(VALUE self_instance, VALUE exception) {
1469
+ stop(self_instance, exception, "rescued_sample_allocation");
1470
+ return Qnil;
1471
+ }
1472
+
1473
+ static VALUE handle_sampling_failure_rescued_after_gvl_running_from_postponed_job(VALUE self_instance, VALUE exception) {
1474
+ stop(self_instance, exception, "rescued_after_gvl_running_from_postponed_job");
1475
+ return Qnil;
1476
+ }
1477
+
1407
1478
  static inline void during_sample_enter(cpu_and_wall_time_worker_state* state) {
1408
1479
  // Tell the compiler it's not allowed to reorder the `during_sample` flag with anything that happens after.
1409
1480
  //
@@ -51,7 +51,7 @@ void discrete_dynamic_sampler_reset(discrete_dynamic_sampler *sampler, long now_
51
51
 
52
52
  void discrete_dynamic_sampler_set_overhead_target_percentage(discrete_dynamic_sampler *sampler, double target_overhead, long now_ns) {
53
53
  if (target_overhead <= 0 || target_overhead > 100) {
54
- rb_raise(rb_eArgError, "Target overhead must be a double between ]0,100] was %f", target_overhead);
54
+ raise_error(rb_eArgError, "Target overhead must be a double between ]0,100] was %f", target_overhead);
55
55
  }
56
56
  sampler->target_overhead = target_overhead;
57
57
  return discrete_dynamic_sampler_reset(sampler, now_ns);
@@ -369,7 +369,7 @@ static VALUE _native_new(VALUE klass) {
369
369
 
370
370
  long now_ns = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
371
371
  if (now_ns == 0) {
372
- rb_raise(rb_eRuntimeError, "failed to get clock time");
372
+ raise_error(rb_eRuntimeError, "failed to get clock time");
373
373
  }
374
374
  discrete_dynamic_sampler_init(&state->sampler, "test sampler", now_ns);
375
375
 
@@ -2,6 +2,7 @@
2
2
  #include <datadog/profiling.h>
3
3
 
4
4
  #include "collectors_gc_profiling_helper.h"
5
+ #include "ruby_helpers.h"
5
6
 
6
7
  // This helper is used by the Datadog::Profiling::Collectors::ThreadContext to profile garbage collection.
7
8
  // It's tested through that class' interfaces.
@@ -71,7 +72,7 @@ uint8_t gc_profiling_set_metadata(ddog_prof_Label *labels, int labels_length) {
71
72
  1; // gc type
72
73
 
73
74
  if (max_label_count > labels_length) {
74
- rb_raise(rb_eArgError, "BUG: gc_profiling_set_metadata invalid labels_length (%d) < max_label_count (%d)", labels_length, max_label_count);
75
+ raise_error(rb_eArgError, "BUG: gc_profiling_set_metadata invalid labels_length (%d) < max_label_count (%d)", labels_length, max_label_count);
75
76
  }
76
77
 
77
78
  uint8_t label_pos = 0;
@@ -119,7 +120,7 @@ uint8_t gc_profiling_set_metadata(ddog_prof_Label *labels, int labels_length) {
119
120
  };
120
121
 
121
122
  if (label_pos > max_label_count) {
122
- rb_raise(rb_eRuntimeError, "BUG: gc_profiling_set_metadata unexpected label_pos (%d) > max_label_count (%d)", label_pos, max_label_count);
123
+ raise_error(rb_eRuntimeError, "BUG: gc_profiling_set_metadata unexpected label_pos (%d) > max_label_count (%d)", label_pos, max_label_count);
123
124
  }
124
125
 
125
126
  return label_pos;
@@ -17,6 +17,7 @@
17
17
 
18
18
  #include "datadog_ruby_common.h"
19
19
  #include "private_vm_api_access.h"
20
+ #include "ruby_helpers.h"
20
21
  #include "stack_recorder.h"
21
22
  #include "collectors_stack.h"
22
23
 
@@ -284,11 +285,11 @@ void sample_thread(
284
285
  // here, but >= 0 makes this easier to understand/debug.
285
286
  bool only_wall_time = cpu_or_wall_sample && values.cpu_time_ns == 0 && values.wall_time_ns >= 0;
286
287
 
287
- if (cpu_or_wall_sample && state_label == NULL) rb_raise(rb_eRuntimeError, "BUG: Unexpected missing state_label");
288
+ if (cpu_or_wall_sample && state_label == NULL) raise_error(rb_eRuntimeError, "BUG: Unexpected missing state_label");
288
289
 
289
290
  if (has_cpu_time) {
290
291
  state_label->str = DDOG_CHARSLICE_C("had cpu");
291
- if (labels.is_gvl_waiting_state) rb_raise(rb_eRuntimeError, "BUG: Unexpected combination of cpu-time with is_gvl_waiting");
292
+ if (labels.is_gvl_waiting_state) raise_error(rb_eRuntimeError, "BUG: Unexpected combination of cpu-time with is_gvl_waiting");
292
293
  }
293
294
 
294
295
  int top_of_stack_position = captured_frames - 1;
@@ -612,8 +613,8 @@ bool prepare_sample_thread(VALUE thread, sampling_buffer *buffer) {
612
613
  }
613
614
 
614
615
  uint16_t sampling_buffer_check_max_frames(int max_frames) {
615
- if (max_frames < 5) rb_raise(rb_eArgError, "Invalid max_frames: value must be >= 5");
616
- if (max_frames > MAX_FRAMES_LIMIT) rb_raise(rb_eArgError, "Invalid max_frames: value must be <= " MAX_FRAMES_LIMIT_AS_STRING);
616
+ if (max_frames < 5) raise_error(rb_eArgError, "Invalid max_frames: value must be >= 5");
617
+ if (max_frames > MAX_FRAMES_LIMIT) raise_error(rb_eArgError, "Invalid max_frames: value must be <= " MAX_FRAMES_LIMIT_AS_STRING);
617
618
  return max_frames;
618
619
  }
619
620
 
@@ -630,7 +631,7 @@ void sampling_buffer_initialize(sampling_buffer *buffer, uint16_t max_frames, dd
630
631
 
631
632
  void sampling_buffer_free(sampling_buffer *buffer) {
632
633
  if (buffer->max_frames == 0 || buffer->locations == NULL || buffer->stack_buffer == NULL) {
633
- rb_raise(rb_eArgError, "sampling_buffer_free called with invalid buffer");
634
+ raise_error(rb_eArgError, "sampling_buffer_free called with invalid buffer");
634
635
  }
635
636
 
636
637
  ruby_xfree(buffer->stack_buffer);
@@ -8,6 +8,7 @@
8
8
  #include "helpers.h"
9
9
  #include "libdatadog_helpers.h"
10
10
  #include "private_vm_api_access.h"
11
+ #include "ruby_helpers.h"
11
12
  #include "stack_recorder.h"
12
13
  #include "time_helpers.h"
13
14
  #include "unsafe_api_calls_check.h"
@@ -292,7 +293,7 @@ static bool handle_gvl_waiting(
292
293
  static VALUE _native_on_gvl_waiting(DDTRACE_UNUSED VALUE self, VALUE thread);
293
294
  static VALUE _native_gvl_waiting_at_for(DDTRACE_UNUSED VALUE self, VALUE thread);
294
295
  static VALUE _native_on_gvl_running(DDTRACE_UNUSED VALUE self, VALUE thread);
295
- static VALUE _native_sample_after_gvl_running(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE thread);
296
+ static VALUE _native_sample_after_gvl_running(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE thread, VALUE allow_exception);
296
297
  static VALUE _native_apply_delta_to_cpu_time_at_previous_sample_ns(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE thread, VALUE delta_ns);
297
298
  static void otel_without_ddtrace_trace_identifiers_for(
298
299
  thread_context_collector_state *state,
@@ -342,7 +343,7 @@ void collectors_thread_context_init(VALUE profiling_module) {
342
343
  rb_define_singleton_method(testing_module, "_native_on_gvl_waiting", _native_on_gvl_waiting, 1);
343
344
  rb_define_singleton_method(testing_module, "_native_gvl_waiting_at_for", _native_gvl_waiting_at_for, 1);
344
345
  rb_define_singleton_method(testing_module, "_native_on_gvl_running", _native_on_gvl_running, 1);
345
- rb_define_singleton_method(testing_module, "_native_sample_after_gvl_running", _native_sample_after_gvl_running, 2);
346
+ rb_define_singleton_method(testing_module, "_native_sample_after_gvl_running", _native_sample_after_gvl_running, 3);
346
347
  rb_define_singleton_method(testing_module, "_native_apply_delta_to_cpu_time_at_previous_sample_ns", _native_apply_delta_to_cpu_time_at_previous_sample_ns, 3);
347
348
  #endif
348
349
 
@@ -518,7 +519,7 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel
518
519
  } else if (otel_context_enabled == ID2SYM(rb_intern("both"))) {
519
520
  state->otel_context_enabled = OTEL_CONTEXT_ENABLED_BOTH;
520
521
  } else {
521
- rb_raise(rb_eArgError, "Unexpected value for otel_context_enabled: %+" PRIsVALUE, otel_context_enabled);
522
+ raise_error(rb_eArgError, "Unexpected value for otel_context_enabled: %+" PRIsVALUE, otel_context_enabled);
522
523
  }
523
524
 
524
525
  global_waiting_for_gvl_threshold_ns = NUM2UINT(waiting_for_gvl_threshold_ns);
@@ -539,7 +540,7 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel
539
540
  static VALUE _native_sample(DDTRACE_UNUSED VALUE _self, VALUE collector_instance, VALUE profiler_overhead_stack_thread, VALUE allow_exception) {
540
541
  ENFORCE_BOOLEAN(allow_exception);
541
542
 
542
- if (!is_thread_alive(profiler_overhead_stack_thread)) rb_raise(rb_eArgError, "Unexpected: profiler_overhead_stack_thread is not alive");
543
+ if (!is_thread_alive(profiler_overhead_stack_thread)) raise_error(rb_eArgError, "Unexpected: profiler_overhead_stack_thread is not alive");
543
544
 
544
545
  if (allow_exception == Qfalse) debug_enter_unsafe_context();
545
546
 
@@ -831,7 +832,7 @@ VALUE thread_context_collector_sample_after_gc(VALUE self_instance) {
831
832
  TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state);
832
833
 
833
834
  if (state->gc_tracking.wall_time_at_previous_gc_ns == INVALID_TIME) {
834
- rb_raise(rb_eRuntimeError, "BUG: Unexpected call to sample_after_gc without valid GC information available");
835
+ raise_error(rb_eRuntimeError, "BUG: Unexpected call to sample_after_gc without valid GC information available");
835
836
  }
836
837
 
837
838
  int max_labels_needed_for_gc = 7; // Magic number gets validated inside gc_profiling_set_metadata
@@ -998,7 +999,7 @@ static void trigger_sample_for_thread(
998
999
  // @ivoanjo: I wonder if C compilers are smart enough to statically prove this check never triggers unless someone
999
1000
  // changes the code erroneously and remove it entirely?
1000
1001
  if (label_pos > max_label_count) {
1001
- rb_raise(rb_eRuntimeError, "BUG: Unexpected label_pos (%d) > max_label_count (%d)", label_pos, max_label_count);
1002
+ raise_error(rb_eRuntimeError, "BUG: Unexpected label_pos (%d) > max_label_count (%d)", label_pos, max_label_count);
1002
1003
  }
1003
1004
 
1004
1005
  ddog_prof_Slice_Label slice_labels = {.ptr = labels, .len = label_pos};
@@ -1295,7 +1296,7 @@ static long update_time_since_previous_sample(long *time_at_previous_sample_ns,
1295
1296
  elapsed_time_ns = 0;
1296
1297
  } else {
1297
1298
  // We don't expect non-wall time to go backwards, so let's flag this as a bug
1298
- rb_raise(rb_eRuntimeError, "BUG: Unexpected negative elapsed_time_ns between samples");
1299
+ raise_error(rb_eRuntimeError, "BUG: Unexpected negative elapsed_time_ns between samples");
1299
1300
  }
1300
1301
  }
1301
1302
 
@@ -1961,7 +1962,7 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) {
1961
1962
  thread_context_collector_state *state;
1962
1963
  TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state);
1963
1964
 
1964
- if (!state->timeline_enabled) rb_raise(rb_eRuntimeError, "GVL profiling requires timeline to be enabled");
1965
+ if (!state->timeline_enabled) raise_error(rb_eRuntimeError, "GVL profiling requires timeline to be enabled");
1965
1966
 
1966
1967
  intptr_t gvl_waiting_at = gvl_profiling_state_thread_object_get(current_thread);
1967
1968
 
@@ -2131,10 +2132,13 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) {
2131
2132
  return result;
2132
2133
  }
2133
2134
 
2134
- static VALUE _native_sample_after_gvl_running(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE thread) {
2135
+ static VALUE _native_sample_after_gvl_running(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE thread, VALUE allow_exception) {
2135
2136
  ENFORCE_THREAD(thread);
2137
+ ENFORCE_BOOLEAN(allow_exception);
2138
+
2136
2139
 
2137
- debug_enter_unsafe_context();
2140
+
2141
+ if (allow_exception == Qfalse) debug_enter_unsafe_context();
2138
2142
 
2139
2143
  VALUE result = thread_context_collector_sample_after_gvl_running(
2140
2144
  collector_instance,
@@ -2142,7 +2146,7 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) {
2142
2146
  monotonic_wall_time_now_ns(RAISE_ON_FAILURE)
2143
2147
  );
2144
2148
 
2145
- debug_leave_unsafe_context();
2149
+ if (allow_exception == Qfalse) debug_leave_unsafe_context();
2146
2150
 
2147
2151
  return result;
2148
2152
  }
@@ -2154,7 +2158,7 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) {
2154
2158
  TypedData_Get_Struct(collector_instance, thread_context_collector_state, &thread_context_collector_typed_data, state);
2155
2159
 
2156
2160
  per_thread_context *thread_context = get_context_for(thread, state);
2157
- if (thread_context == NULL) rb_raise(rb_eArgError, "Unexpected: This method cannot be used unless the per-thread context for the thread already exists");
2161
+ if (thread_context == NULL) raise_error(rb_eArgError, "Unexpected: This method cannot be used unless the per-thread context for the thread already exists");
2158
2162
 
2159
2163
  thread_context->cpu_time_at_previous_sample_ns += NUM2LONG(delta_ns);
2160
2164
 
@@ -217,12 +217,12 @@ void crashtracking_runtime_stacks_init(void) {
217
217
  if (crashtracker_thread_data_type == NULL) {
218
218
  VALUE current_thread = rb_thread_current();
219
219
  if (current_thread == Qnil) {
220
- rb_raise(rb_eRuntimeError, "crashtracking_runtime_stacks_init: rb_thread_current returned Qnil");
220
+ raise_error(rb_eRuntimeError, "crashtracking_runtime_stacks_init: rb_thread_current returned Qnil");
221
221
  }
222
222
 
223
223
  const rb_data_type_t *thread_data_type = RTYPEDDATA_TYPE(current_thread);
224
224
  if (!thread_data_type) {
225
- rb_raise(rb_eRuntimeError, "crashtracking_runtime_stacks_init: RTYPEDDATA_TYPE returned NULL");
225
+ raise_error(rb_eRuntimeError, "crashtracking_runtime_stacks_init: RTYPEDDATA_TYPE returned NULL");
226
226
  }
227
227
 
228
228
  crashtracker_thread_data_type = thread_data_type;