datadog 2.8.0 → 2.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108):
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +36 -1
  3. data/ext/datadog_profiling_native_extension/clock_id.h +2 -2
  4. data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +64 -54
  5. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c +1 -1
  6. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h +1 -1
  7. data/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c +16 -16
  8. data/ext/datadog_profiling_native_extension/collectors_stack.c +7 -7
  9. data/ext/datadog_profiling_native_extension/collectors_thread_context.c +219 -122
  10. data/ext/datadog_profiling_native_extension/heap_recorder.h +1 -1
  11. data/ext/datadog_profiling_native_extension/http_transport.c +4 -4
  12. data/ext/datadog_profiling_native_extension/private_vm_api_access.c +3 -0
  13. data/ext/datadog_profiling_native_extension/private_vm_api_access.h +3 -1
  14. data/ext/datadog_profiling_native_extension/profiling.c +10 -8
  15. data/ext/datadog_profiling_native_extension/ruby_helpers.c +8 -8
  16. data/ext/datadog_profiling_native_extension/stack_recorder.c +54 -54
  17. data/ext/datadog_profiling_native_extension/stack_recorder.h +1 -1
  18. data/ext/datadog_profiling_native_extension/time_helpers.h +1 -1
  19. data/ext/datadog_profiling_native_extension/unsafe_api_calls_check.c +47 -0
  20. data/ext/datadog_profiling_native_extension/unsafe_api_calls_check.h +31 -0
  21. data/ext/libdatadog_api/crashtracker.c +3 -0
  22. data/lib/datadog/appsec/assets/waf_rules/recommended.json +355 -157
  23. data/lib/datadog/appsec/assets/waf_rules/strict.json +62 -32
  24. data/lib/datadog/appsec/context.rb +54 -0
  25. data/lib/datadog/appsec/contrib/active_record/instrumentation.rb +7 -7
  26. data/lib/datadog/appsec/contrib/devise/patcher/authenticatable_patch.rb +6 -6
  27. data/lib/datadog/appsec/contrib/devise/patcher/registration_controller_patch.rb +4 -4
  28. data/lib/datadog/appsec/contrib/graphql/gateway/watcher.rb +19 -28
  29. data/lib/datadog/appsec/contrib/graphql/reactive/multiplex.rb +5 -5
  30. data/lib/datadog/appsec/contrib/rack/gateway/response.rb +3 -3
  31. data/lib/datadog/appsec/contrib/rack/gateway/watcher.rb +64 -96
  32. data/lib/datadog/appsec/contrib/rack/reactive/request.rb +10 -10
  33. data/lib/datadog/appsec/contrib/rack/reactive/request_body.rb +5 -5
  34. data/lib/datadog/appsec/contrib/rack/reactive/response.rb +6 -6
  35. data/lib/datadog/appsec/contrib/rack/request_body_middleware.rb +10 -11
  36. data/lib/datadog/appsec/contrib/rack/request_middleware.rb +43 -49
  37. data/lib/datadog/appsec/contrib/rails/gateway/watcher.rb +21 -32
  38. data/lib/datadog/appsec/contrib/rails/patcher.rb +1 -1
  39. data/lib/datadog/appsec/contrib/rails/reactive/action.rb +6 -6
  40. data/lib/datadog/appsec/contrib/sinatra/gateway/watcher.rb +41 -63
  41. data/lib/datadog/appsec/contrib/sinatra/patcher.rb +2 -2
  42. data/lib/datadog/appsec/contrib/sinatra/reactive/routed.rb +5 -5
  43. data/lib/datadog/appsec/event.rb +6 -6
  44. data/lib/datadog/appsec/ext.rb +3 -1
  45. data/lib/datadog/appsec/monitor/gateway/watcher.rb +22 -32
  46. data/lib/datadog/appsec/monitor/reactive/set_user.rb +5 -5
  47. data/lib/datadog/appsec/processor/rule_loader.rb +0 -3
  48. data/lib/datadog/appsec.rb +3 -3
  49. data/lib/datadog/auto_instrument.rb +3 -0
  50. data/lib/datadog/core/configuration/agent_settings_resolver.rb +39 -11
  51. data/lib/datadog/core/configuration/components.rb +4 -2
  52. data/lib/datadog/core/configuration.rb +1 -1
  53. data/lib/datadog/{tracing → core}/contrib/rails/utils.rb +1 -3
  54. data/lib/datadog/core/crashtracking/component.rb +1 -3
  55. data/lib/datadog/core/telemetry/event.rb +87 -3
  56. data/lib/datadog/core/telemetry/logging.rb +2 -2
  57. data/lib/datadog/core/telemetry/metric.rb +22 -0
  58. data/lib/datadog/core/telemetry/worker.rb +33 -0
  59. data/lib/datadog/di/base.rb +115 -0
  60. data/lib/datadog/di/code_tracker.rb +7 -4
  61. data/lib/datadog/di/component.rb +17 -11
  62. data/lib/datadog/di/configuration/settings.rb +11 -1
  63. data/lib/datadog/di/contrib/railtie.rb +15 -0
  64. data/lib/datadog/di/contrib.rb +26 -0
  65. data/lib/datadog/di/error.rb +5 -0
  66. data/lib/datadog/di/instrumenter.rb +39 -18
  67. data/lib/datadog/di/{init.rb → preload.rb} +2 -4
  68. data/lib/datadog/di/probe_manager.rb +4 -4
  69. data/lib/datadog/di/probe_notification_builder.rb +16 -2
  70. data/lib/datadog/di/probe_notifier_worker.rb +5 -6
  71. data/lib/datadog/di/remote.rb +4 -4
  72. data/lib/datadog/di/transport.rb +2 -4
  73. data/lib/datadog/di.rb +5 -108
  74. data/lib/datadog/kit/appsec/events.rb +3 -3
  75. data/lib/datadog/kit/identity.rb +4 -4
  76. data/lib/datadog/profiling/component.rb +55 -53
  77. data/lib/datadog/profiling/http_transport.rb +1 -26
  78. data/lib/datadog/tracing/contrib/action_cable/integration.rb +5 -2
  79. data/lib/datadog/tracing/contrib/action_mailer/integration.rb +6 -2
  80. data/lib/datadog/tracing/contrib/action_pack/integration.rb +5 -2
  81. data/lib/datadog/tracing/contrib/action_view/integration.rb +5 -2
  82. data/lib/datadog/tracing/contrib/active_job/integration.rb +5 -2
  83. data/lib/datadog/tracing/contrib/active_record/integration.rb +6 -2
  84. data/lib/datadog/tracing/contrib/active_support/cache/events/cache.rb +3 -1
  85. data/lib/datadog/tracing/contrib/active_support/cache/instrumentation.rb +3 -1
  86. data/lib/datadog/tracing/contrib/active_support/configuration/settings.rb +10 -0
  87. data/lib/datadog/tracing/contrib/active_support/integration.rb +5 -2
  88. data/lib/datadog/tracing/contrib/auto_instrument.rb +2 -2
  89. data/lib/datadog/tracing/contrib/aws/integration.rb +3 -0
  90. data/lib/datadog/tracing/contrib/concurrent_ruby/integration.rb +3 -0
  91. data/lib/datadog/tracing/contrib/httprb/integration.rb +3 -0
  92. data/lib/datadog/tracing/contrib/kafka/integration.rb +3 -0
  93. data/lib/datadog/tracing/contrib/mongodb/integration.rb +3 -0
  94. data/lib/datadog/tracing/contrib/opensearch/integration.rb +3 -0
  95. data/lib/datadog/tracing/contrib/presto/integration.rb +3 -0
  96. data/lib/datadog/tracing/contrib/rack/integration.rb +2 -2
  97. data/lib/datadog/tracing/contrib/rails/framework.rb +2 -2
  98. data/lib/datadog/tracing/contrib/rails/patcher.rb +1 -1
  99. data/lib/datadog/tracing/contrib/rest_client/integration.rb +3 -0
  100. data/lib/datadog/tracing/span.rb +12 -4
  101. data/lib/datadog/tracing/span_event.rb +123 -3
  102. data/lib/datadog/tracing/span_operation.rb +6 -0
  103. data/lib/datadog/tracing/transport/serializable_trace.rb +24 -6
  104. data/lib/datadog/version.rb +1 -1
  105. metadata +19 -10
  106. data/lib/datadog/appsec/reactive/operation.rb +0 -68
  107. data/lib/datadog/appsec/scope.rb +0 -58
  108. data/lib/datadog/core/crashtracking/agent_base_url.rb +0 -21
@@ -17,7 +17,7 @@
17
17
  typedef struct heap_recorder heap_recorder;
18
18
 
19
19
  // Extra data associated with each live object being tracked.
20
- typedef struct live_object_data {
20
+ typedef struct {
21
21
  // The weight of this object from a sampling perspective.
22
22
  //
23
23
  // A notion of weight is preserved for each tracked object to allow for an approximate
@@ -13,13 +13,13 @@ static VALUE error_symbol = Qnil; // :error in Ruby
13
13
 
14
14
  static VALUE library_version_string = Qnil;
15
15
 
16
- struct call_exporter_without_gvl_arguments {
16
+ typedef struct {
17
17
  ddog_prof_Exporter *exporter;
18
18
  ddog_prof_Exporter_Request_BuildResult *build_result;
19
19
  ddog_CancellationToken *cancel_token;
20
20
  ddog_prof_Exporter_SendResult result;
21
21
  bool send_ran;
22
- };
22
+ } call_exporter_without_gvl_arguments;
23
23
 
24
24
  static inline ddog_ByteSlice byte_slice_from_ruby_string(VALUE string);
25
25
  static VALUE _native_validate_exporter(VALUE self, VALUE exporter_configuration);
@@ -165,7 +165,7 @@ static VALUE perform_export(
165
165
 
166
166
  // We'll release the Global VM Lock while we're calling send, so that the Ruby VM can continue to work while this
167
167
  // is pending
168
- struct call_exporter_without_gvl_arguments args =
168
+ call_exporter_without_gvl_arguments args =
169
169
  {.exporter = exporter, .build_result = &build_result, .cancel_token = cancel_token, .send_ran = false};
170
170
 
171
171
  // We use rb_thread_call_without_gvl2 instead of rb_thread_call_without_gvl as the gvl2 variant never raises any
@@ -300,7 +300,7 @@ static VALUE _native_do_export(
300
300
  }
301
301
 
302
302
  static void *call_exporter_without_gvl(void *call_args) {
303
- struct call_exporter_without_gvl_arguments *args = (struct call_exporter_without_gvl_arguments*) call_args;
303
+ call_exporter_without_gvl_arguments *args = (call_exporter_without_gvl_arguments*) call_args;
304
304
 
305
305
  args->result = ddog_prof_Exporter_send(args->exporter, &args->build_result->ok, args->cancel_token);
306
306
  args->send_ran = true;
@@ -800,3 +800,6 @@ static inline int ddtrace_imemo_type(VALUE imemo) {
800
800
  return current_thread;
801
801
  }
802
802
  #endif
803
+
804
+ // Is the VM smack in the middle of raising an exception?
805
+ bool is_raised_flag_set(VALUE thread) { return thread_struct_from_object(thread)->ec->raised_flag > 0; }
@@ -18,7 +18,7 @@ typedef struct {
18
18
  rb_nativethread_id_t owner;
19
19
  } current_gvl_owner;
20
20
 
21
- typedef struct frame_info {
21
+ typedef struct {
22
22
  union {
23
23
  struct {
24
24
  VALUE iseq;
@@ -68,3 +68,5 @@ const char *imemo_kind(VALUE imemo);
68
68
 
69
69
  #define ENFORCE_THREAD(value) \
70
70
  { if (RB_UNLIKELY(!rb_typeddata_is_kind_of(value, RTYPEDDATA_TYPE(rb_thread_current())))) raise_unexpected_type(value, ADD_QUOTES(value), "Thread", __FILE__, __LINE__, __func__); }
71
+
72
+ bool is_raised_flag_set(VALUE thread);
@@ -11,6 +11,7 @@
11
11
  #include "ruby_helpers.h"
12
12
  #include "setup_signal_handler.h"
13
13
  #include "time_helpers.h"
14
+ #include "unsafe_api_calls_check.h"
14
15
 
15
16
  // Each class/module here is implemented in their separate file
16
17
  void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module);
@@ -56,6 +57,7 @@ void DDTRACE_EXPORT Init_datadog_profiling_native_extension(void) {
56
57
  collectors_thread_context_init(profiling_module);
57
58
  http_transport_init(profiling_module);
58
59
  stack_recorder_init(profiling_module);
60
+ unsafe_api_calls_check_init();
59
61
 
60
62
  // Hosts methods used for testing the native code using RSpec
61
63
  VALUE testing_module = rb_define_module_under(native_extension_module, "Testing");
@@ -83,16 +85,16 @@ static VALUE native_working_p(DDTRACE_UNUSED VALUE _self) {
83
85
  return Qtrue;
84
86
  }
85
87
 
86
- struct trigger_grab_gvl_and_raise_arguments {
88
+ typedef struct {
87
89
  VALUE exception_class;
88
90
  char *test_message;
89
91
  int test_message_arg;
90
- };
92
+ } trigger_grab_gvl_and_raise_arguments;
91
93
 
92
94
  static VALUE _native_grab_gvl_and_raise(DDTRACE_UNUSED VALUE _self, VALUE exception_class, VALUE test_message, VALUE test_message_arg, VALUE release_gvl) {
93
95
  ENFORCE_TYPE(test_message, T_STRING);
94
96
 
95
- struct trigger_grab_gvl_and_raise_arguments args;
97
+ trigger_grab_gvl_and_raise_arguments args;
96
98
 
97
99
  args.exception_class = exception_class;
98
100
  args.test_message = StringValueCStr(test_message);
@@ -108,7 +110,7 @@ static VALUE _native_grab_gvl_and_raise(DDTRACE_UNUSED VALUE _self, VALUE except
108
110
  }
109
111
 
110
112
  static void *trigger_grab_gvl_and_raise(void *trigger_args) {
111
- struct trigger_grab_gvl_and_raise_arguments *args = (struct trigger_grab_gvl_and_raise_arguments *) trigger_args;
113
+ trigger_grab_gvl_and_raise_arguments *args = (trigger_grab_gvl_and_raise_arguments *) trigger_args;
112
114
 
113
115
  if (args->test_message_arg >= 0) {
114
116
  grab_gvl_and_raise(args->exception_class, "%s%d", args->test_message, args->test_message_arg);
@@ -119,16 +121,16 @@ static void *trigger_grab_gvl_and_raise(void *trigger_args) {
119
121
  return NULL;
120
122
  }
121
123
 
122
- struct trigger_grab_gvl_and_raise_syserr_arguments {
124
+ typedef struct {
123
125
  int syserr_errno;
124
126
  char *test_message;
125
127
  int test_message_arg;
126
- };
128
+ } trigger_grab_gvl_and_raise_syserr_arguments;
127
129
 
128
130
  static VALUE _native_grab_gvl_and_raise_syserr(DDTRACE_UNUSED VALUE _self, VALUE syserr_errno, VALUE test_message, VALUE test_message_arg, VALUE release_gvl) {
129
131
  ENFORCE_TYPE(test_message, T_STRING);
130
132
 
131
- struct trigger_grab_gvl_and_raise_syserr_arguments args;
133
+ trigger_grab_gvl_and_raise_syserr_arguments args;
132
134
 
133
135
  args.syserr_errno = NUM2INT(syserr_errno);
134
136
  args.test_message = StringValueCStr(test_message);
@@ -144,7 +146,7 @@ static VALUE _native_grab_gvl_and_raise_syserr(DDTRACE_UNUSED VALUE _self, VALUE
144
146
  }
145
147
 
146
148
  static void *trigger_grab_gvl_and_raise_syserr(void *trigger_args) {
147
- struct trigger_grab_gvl_and_raise_syserr_arguments *args = (struct trigger_grab_gvl_and_raise_syserr_arguments *) trigger_args;
149
+ trigger_grab_gvl_and_raise_syserr_arguments *args = (trigger_grab_gvl_and_raise_syserr_arguments *) trigger_args;
148
150
 
149
151
  if (args->test_message_arg >= 0) {
150
152
  grab_gvl_and_raise_syserr(args->syserr_errno, "%s%d", args->test_message, args->test_message_arg);
@@ -23,18 +23,18 @@ void ruby_helpers_init(void) {
23
23
 
24
24
  #define MAX_RAISE_MESSAGE_SIZE 256
25
25
 
26
- struct raise_arguments {
26
+ typedef struct {
27
27
  VALUE exception_class;
28
28
  char exception_message[MAX_RAISE_MESSAGE_SIZE];
29
- };
29
+ } raise_args;
30
30
 
31
31
  static void *trigger_raise(void *raise_arguments) {
32
- struct raise_arguments *args = (struct raise_arguments *) raise_arguments;
32
+ raise_args *args = (raise_args *) raise_arguments;
33
33
  rb_raise(args->exception_class, "%s", args->exception_message);
34
34
  }
35
35
 
36
36
  void grab_gvl_and_raise(VALUE exception_class, const char *format_string, ...) {
37
- struct raise_arguments args;
37
+ raise_args args;
38
38
 
39
39
  args.exception_class = exception_class;
40
40
 
@@ -55,18 +55,18 @@ void grab_gvl_and_raise(VALUE exception_class, const char *format_string, ...) {
55
55
  rb_bug("[ddtrace] Unexpected: Reached the end of grab_gvl_and_raise while raising '%s'\n", args.exception_message);
56
56
  }
57
57
 
58
- struct syserr_raise_arguments {
58
+ typedef struct {
59
59
  int syserr_errno;
60
60
  char exception_message[MAX_RAISE_MESSAGE_SIZE];
61
- };
61
+ } syserr_raise_args;
62
62
 
63
63
  static void *trigger_syserr_raise(void *syserr_raise_arguments) {
64
- struct syserr_raise_arguments *args = (struct syserr_raise_arguments *) syserr_raise_arguments;
64
+ syserr_raise_args *args = (syserr_raise_args *) syserr_raise_arguments;
65
65
  rb_syserr_fail(args->syserr_errno, args->exception_message);
66
66
  }
67
67
 
68
68
  void grab_gvl_and_raise_syserr(int syserr_errno, const char *format_string, ...) {
69
- struct syserr_raise_arguments args;
69
+ syserr_raise_args args;
70
70
 
71
71
  args.syserr_errno = syserr_errno;
72
72
 
@@ -173,18 +173,18 @@ static const uint8_t all_value_types_positions[] =
173
173
 
174
174
  // Struct for storing stats related to a profile in a particular slot.
175
175
  // These stats will share the same lifetime as the data in that profile slot.
176
- typedef struct slot_stats {
176
+ typedef struct {
177
177
  // How many individual samples were recorded into this slot (un-weighted)
178
178
  uint64_t recorded_samples;
179
179
  } stats_slot;
180
180
 
181
- typedef struct profile_slot {
181
+ typedef struct {
182
182
  ddog_prof_Profile profile;
183
183
  stats_slot stats;
184
184
  } profile_slot;
185
185
 
186
186
  // Contains native state for each instance
187
- struct stack_recorder_state {
187
+ typedef struct {
188
188
  // Heap recorder instance
189
189
  heap_recorder *heap_recorder;
190
190
  bool heap_clean_after_gc_enabled;
@@ -210,17 +210,17 @@ struct stack_recorder_state {
210
210
  long serialization_time_ns_max;
211
211
  uint64_t serialization_time_ns_total;
212
212
  } stats_lifetime;
213
- };
213
+ } stack_recorder_state;
214
214
 
215
215
  // Used to group mutex and the corresponding profile slot for easy unlocking after work is done.
216
- typedef struct locked_profile_slot {
216
+ typedef struct {
217
217
  pthread_mutex_t *mutex;
218
218
  profile_slot *data;
219
219
  } locked_profile_slot;
220
220
 
221
- struct call_serialize_without_gvl_arguments {
221
+ typedef struct {
222
222
  // Set by caller
223
- struct stack_recorder_state *state;
223
+ stack_recorder_state *state;
224
224
  ddog_Timespec finish_timestamp;
225
225
 
226
226
  // Set by callee
@@ -231,26 +231,26 @@ struct call_serialize_without_gvl_arguments {
231
231
 
232
232
  // Set by both
233
233
  bool serialize_ran;
234
- };
234
+ } call_serialize_without_gvl_arguments;
235
235
 
236
236
  static VALUE _native_new(VALUE klass);
237
- static void initialize_slot_concurrency_control(struct stack_recorder_state *state);
238
- static void initialize_profiles(struct stack_recorder_state *state, ddog_prof_Slice_ValueType sample_types);
237
+ static void initialize_slot_concurrency_control(stack_recorder_state *state);
238
+ static void initialize_profiles(stack_recorder_state *state, ddog_prof_Slice_ValueType sample_types);
239
239
  static void stack_recorder_typed_data_free(void *data);
240
240
  static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _self);
241
241
  static VALUE _native_serialize(VALUE self, VALUE recorder_instance);
242
242
  static VALUE ruby_time_from(ddog_Timespec ddprof_time);
243
243
  static void *call_serialize_without_gvl(void *call_args);
244
- static locked_profile_slot sampler_lock_active_profile(struct stack_recorder_state *state);
244
+ static locked_profile_slot sampler_lock_active_profile(stack_recorder_state *state);
245
245
  static void sampler_unlock_active_profile(locked_profile_slot active_slot);
246
- static profile_slot* serializer_flip_active_and_inactive_slots(struct stack_recorder_state *state);
246
+ static profile_slot* serializer_flip_active_and_inactive_slots(stack_recorder_state *state);
247
247
  static VALUE _native_active_slot(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance);
248
248
  static VALUE _native_is_slot_one_mutex_locked(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance);
249
249
  static VALUE _native_is_slot_two_mutex_locked(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance);
250
250
  static VALUE test_slot_mutex_state(VALUE recorder_instance, int slot);
251
251
  static ddog_Timespec system_epoch_now_timespec(void);
252
252
  static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE recorder_instance);
253
- static void serializer_set_start_timestamp_for_next_profile(struct stack_recorder_state *state, ddog_Timespec start_time);
253
+ static void serializer_set_start_timestamp_for_next_profile(stack_recorder_state *state, ddog_Timespec start_time);
254
254
  static VALUE _native_record_endpoint(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE local_root_span_id, VALUE endpoint);
255
255
  static void reset_profile_slot(profile_slot *slot, ddog_Timespec *start_time /* Can be null */);
256
256
  static VALUE _native_track_object(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE new_obj, VALUE weight, VALUE alloc_class);
@@ -316,7 +316,7 @@ static const rb_data_type_t stack_recorder_typed_data = {
316
316
  };
317
317
 
318
318
  static VALUE _native_new(VALUE klass) {
319
- struct stack_recorder_state *state = ruby_xcalloc(1, sizeof(struct stack_recorder_state));
319
+ stack_recorder_state *state = ruby_xcalloc(1, sizeof(stack_recorder_state));
320
320
 
321
321
  // Note: Any exceptions raised from this note until the TypedData_Wrap_Struct call will lead to the state memory
322
322
  // being leaked.
@@ -354,7 +354,7 @@ static VALUE _native_new(VALUE klass) {
354
354
  return stack_recorder;
355
355
  }
356
356
 
357
- static void initialize_slot_concurrency_control(struct stack_recorder_state *state) {
357
+ static void initialize_slot_concurrency_control(stack_recorder_state *state) {
358
358
  state->mutex_slot_one = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
359
359
  state->mutex_slot_two = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
360
360
 
@@ -364,7 +364,7 @@ static void initialize_slot_concurrency_control(struct stack_recorder_state *sta
364
364
  state->active_slot = 1;
365
365
  }
366
366
 
367
- static void initialize_profiles(struct stack_recorder_state *state, ddog_prof_Slice_ValueType sample_types) {
367
+ static void initialize_profiles(stack_recorder_state *state, ddog_prof_Slice_ValueType sample_types) {
368
368
  ddog_prof_Profile_NewResult slot_one_profile_result =
369
369
  ddog_prof_Profile_new(sample_types, NULL /* period is optional */, NULL /* start_time is optional */);
370
370
 
@@ -391,7 +391,7 @@ static void initialize_profiles(struct stack_recorder_state *state, ddog_prof_Sl
391
391
  }
392
392
 
393
393
  static void stack_recorder_typed_data_free(void *state_ptr) {
394
- struct stack_recorder_state *state = (struct stack_recorder_state *) state_ptr;
394
+ stack_recorder_state *state = (stack_recorder_state *) state_ptr;
395
395
 
396
396
  pthread_mutex_destroy(&state->mutex_slot_one);
397
397
  ddog_prof_Profile_drop(&state->profile_slot_one.profile);
@@ -426,8 +426,8 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel
426
426
  ENFORCE_BOOLEAN(timeline_enabled);
427
427
  ENFORCE_BOOLEAN(heap_clean_after_gc_enabled);
428
428
 
429
- struct stack_recorder_state *state;
430
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
429
+ stack_recorder_state *state;
430
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
431
431
 
432
432
  state->heap_clean_after_gc_enabled = (heap_clean_after_gc_enabled == Qtrue);
433
433
 
@@ -517,8 +517,8 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel
517
517
  }
518
518
 
519
519
  static VALUE _native_serialize(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) {
520
- struct stack_recorder_state *state;
521
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
520
+ stack_recorder_state *state;
521
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
522
522
 
523
523
  ddog_Timespec finish_timestamp = system_epoch_now_timespec();
524
524
  // Need to do this while still holding on to the Global VM Lock; see comments on method for why
@@ -532,7 +532,7 @@ static VALUE _native_serialize(DDTRACE_UNUSED VALUE _self, VALUE recorder_instan
532
532
 
533
533
  // We'll release the Global VM Lock while we're calling serialize, so that the Ruby VM can continue to work while this
534
534
  // is pending
535
- struct call_serialize_without_gvl_arguments args = {
535
+ call_serialize_without_gvl_arguments args = {
536
536
  .state = state,
537
537
  .finish_timestamp = finish_timestamp,
538
538
  .serialize_ran = false
@@ -597,8 +597,8 @@ static VALUE ruby_time_from(ddog_Timespec ddprof_time) {
597
597
  }
598
598
 
599
599
  void record_sample(VALUE recorder_instance, ddog_prof_Slice_Location locations, sample_values values, sample_labels labels) {
600
- struct stack_recorder_state *state;
601
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
600
+ stack_recorder_state *state;
601
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
602
602
 
603
603
  locked_profile_slot active_slot = sampler_lock_active_profile(state);
604
604
 
@@ -652,8 +652,8 @@ void record_sample(VALUE recorder_instance, ddog_prof_Slice_Location locations,
652
652
  }
653
653
 
654
654
  void track_object(VALUE recorder_instance, VALUE new_object, unsigned int sample_weight, ddog_CharSlice *alloc_class) {
655
- struct stack_recorder_state *state;
656
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
655
+ stack_recorder_state *state;
656
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
657
657
  // FIXME: Heap sampling currently has to be done in 2 parts because the construction of locations is happening
658
658
  // very late in the allocation-sampling path (which is shared with the cpu sampling path). This can
659
659
  // be fixed with some refactoring but for now this leads to a less impactful change.
@@ -661,8 +661,8 @@ void track_object(VALUE recorder_instance, VALUE new_object, unsigned int sample
661
661
  }
662
662
 
663
663
  void record_endpoint(VALUE recorder_instance, uint64_t local_root_span_id, ddog_CharSlice endpoint) {
664
- struct stack_recorder_state *state;
665
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
664
+ stack_recorder_state *state;
665
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
666
666
 
667
667
  locked_profile_slot active_slot = sampler_lock_active_profile(state);
668
668
 
@@ -676,8 +676,8 @@ void record_endpoint(VALUE recorder_instance, uint64_t local_root_span_id, ddog_
676
676
  }
677
677
 
678
678
  void recorder_after_gc_step(VALUE recorder_instance) {
679
- struct stack_recorder_state *state;
680
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
679
+ stack_recorder_state *state;
680
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
681
681
 
682
682
  if (state->heap_clean_after_gc_enabled) heap_recorder_update_young_objects(state->heap_recorder);
683
683
  }
@@ -687,7 +687,7 @@ void recorder_after_gc_step(VALUE recorder_instance) {
687
687
  // Heap recorder iteration context allows us access to stack recorder state and profile being serialized
688
688
  // during iteration of heap recorder live objects.
689
689
  typedef struct heap_recorder_iteration_context {
690
- struct stack_recorder_state *state;
690
+ stack_recorder_state *state;
691
691
  profile_slot *slot;
692
692
 
693
693
  bool error;
@@ -749,7 +749,7 @@ static bool add_heap_sample_to_active_profile_without_gvl(heap_recorder_iteratio
749
749
  return true;
750
750
  }
751
751
 
752
- static void build_heap_profile_without_gvl(struct stack_recorder_state *state, profile_slot *slot) {
752
+ static void build_heap_profile_without_gvl(stack_recorder_state *state, profile_slot *slot) {
753
753
  heap_recorder_iteration_context iteration_context = {
754
754
  .state = state,
755
755
  .slot = slot,
@@ -770,7 +770,7 @@ static void build_heap_profile_without_gvl(struct stack_recorder_state *state, p
770
770
  }
771
771
 
772
772
  static void *call_serialize_without_gvl(void *call_args) {
773
- struct call_serialize_without_gvl_arguments *args = (struct call_serialize_without_gvl_arguments *) call_args;
773
+ call_serialize_without_gvl_arguments *args = (call_serialize_without_gvl_arguments *) call_args;
774
774
 
775
775
  long serialize_no_gvl_start_time_ns = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
776
776
 
@@ -796,7 +796,7 @@ VALUE enforce_recorder_instance(VALUE object) {
796
796
  return object;
797
797
  }
798
798
 
799
- static locked_profile_slot sampler_lock_active_profile(struct stack_recorder_state *state) {
799
+ static locked_profile_slot sampler_lock_active_profile(stack_recorder_state *state) {
800
800
  int error;
801
801
 
802
802
  for (int attempts = 0; attempts < 2; attempts++) {
@@ -823,7 +823,7 @@ static void sampler_unlock_active_profile(locked_profile_slot active_slot) {
823
823
  ENFORCE_SUCCESS_GVL(pthread_mutex_unlock(active_slot.mutex));
824
824
  }
825
825
 
826
- static profile_slot* serializer_flip_active_and_inactive_slots(struct stack_recorder_state *state) {
826
+ static profile_slot* serializer_flip_active_and_inactive_slots(stack_recorder_state *state) {
827
827
  int previously_active_slot = state->active_slot;
828
828
 
829
829
  if (previously_active_slot != 1 && previously_active_slot != 2) {
@@ -849,8 +849,8 @@ static profile_slot* serializer_flip_active_and_inactive_slots(struct stack_reco
849
849
  // This method exists only to enable testing Datadog::Profiling::StackRecorder behavior using RSpec.
850
850
  // It SHOULD NOT be used for other purposes.
851
851
  static VALUE _native_active_slot(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) {
852
- struct stack_recorder_state *state;
853
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
852
+ stack_recorder_state *state;
853
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
854
854
 
855
855
  return INT2NUM(state->active_slot);
856
856
  }
@@ -864,8 +864,8 @@ static VALUE _native_is_slot_one_mutex_locked(DDTRACE_UNUSED VALUE _self, VALUE
864
864
  static VALUE _native_is_slot_two_mutex_locked(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) { return test_slot_mutex_state(recorder_instance, 2); }
865
865
 
866
866
  static VALUE test_slot_mutex_state(VALUE recorder_instance, int slot) {
867
- struct stack_recorder_state *state;
868
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
867
+ stack_recorder_state *state;
868
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
869
869
 
870
870
  pthread_mutex_t *slot_mutex = (slot == 1) ? &state->mutex_slot_one : &state->mutex_slot_two;
871
871
 
@@ -895,8 +895,8 @@ static ddog_Timespec system_epoch_now_timespec(void) {
895
895
  // Assumption: This method gets called BEFORE restarting profiling -- e.g. there are no components attempting to
896
896
  // trigger samples at the same time.
897
897
  static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE recorder_instance) {
898
- struct stack_recorder_state *state;
899
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
898
+ stack_recorder_state *state;
899
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
900
900
 
901
901
  // In case the fork happened halfway through `serializer_flip_active_and_inactive_slots` execution and the
902
902
  // resulting state is inconsistent, we make sure to reset it back to the initial state.
@@ -912,7 +912,7 @@ static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE recorder_
912
912
 
913
913
  // Assumption 1: This method is called with the GVL being held, because `ddog_prof_Profile_reset` mutates the profile and must
914
914
  // not be interrupted part-way through by a VM fork.
915
- static void serializer_set_start_timestamp_for_next_profile(struct stack_recorder_state *state, ddog_Timespec start_time) {
915
+ static void serializer_set_start_timestamp_for_next_profile(stack_recorder_state *state, ddog_Timespec start_time) {
916
916
  // Before making this profile active, we reset it so that it uses the correct start_time for its start
917
917
  profile_slot *next_profile_slot = (state->active_slot == 1) ? &state->profile_slot_two : &state->profile_slot_one;
918
918
  reset_profile_slot(next_profile_slot, &start_time);
@@ -972,8 +972,8 @@ static void reset_profile_slot(profile_slot *slot, ddog_Timespec *start_time /*
972
972
  // This method exists only to enable testing Datadog::Profiling::StackRecorder behavior using RSpec.
973
973
  // It SHOULD NOT be used for other purposes.
974
974
  static VALUE _native_start_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) {
975
- struct stack_recorder_state *state;
976
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
975
+ stack_recorder_state *state;
976
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
977
977
 
978
978
  heap_recorder_prepare_iteration(state->heap_recorder);
979
979
 
@@ -983,8 +983,8 @@ static VALUE _native_start_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _se
983
983
  // This method exists only to enable testing Datadog::Profiling::StackRecorder behavior using RSpec.
984
984
  // It SHOULD NOT be used for other purposes.
985
985
  static VALUE _native_end_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) {
986
- struct stack_recorder_state *state;
987
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
986
+ stack_recorder_state *state;
987
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
988
988
 
989
989
  heap_recorder_finish_iteration(state->heap_recorder);
990
990
 
@@ -994,15 +994,15 @@ static VALUE _native_end_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _self
994
994
  // This method exists only to enable testing Datadog::Profiling::StackRecorder behavior using RSpec.
995
995
  // It SHOULD NOT be used for other purposes.
996
996
  static VALUE _native_debug_heap_recorder(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) {
997
- struct stack_recorder_state *state;
998
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
997
+ stack_recorder_state *state;
998
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
999
999
 
1000
1000
  return heap_recorder_testonly_debug(state->heap_recorder);
1001
1001
  }
1002
1002
 
1003
1003
  static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE recorder_instance) {
1004
- struct stack_recorder_state *state;
1005
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
1004
+ stack_recorder_state *state;
1005
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
1006
1006
 
1007
1007
  uint64_t total_serializations = state->stats_lifetime.serialization_successes + state->stats_lifetime.serialization_failures;
1008
1008
 
@@ -1040,15 +1040,15 @@ static VALUE build_profile_stats(profile_slot *slot, long serialization_time_ns,
1040
1040
  static VALUE _native_is_object_recorded(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE obj_id) {
1041
1041
  ENFORCE_TYPE(obj_id, T_FIXNUM);
1042
1042
 
1043
- struct stack_recorder_state *state;
1044
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
1043
+ stack_recorder_state *state;
1044
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
1045
1045
 
1046
1046
  return heap_recorder_testonly_is_object_recorded(state->heap_recorder, obj_id);
1047
1047
  }
1048
1048
 
1049
1049
  static VALUE _native_heap_recorder_reset_last_update(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) {
1050
- struct stack_recorder_state *state;
1051
- TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state);
1050
+ stack_recorder_state *state;
1051
+ TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state);
1052
1052
 
1053
1053
  heap_recorder_testonly_reset_last_update(state->heap_recorder);
1054
1054
 
@@ -13,7 +13,7 @@ typedef struct {
13
13
  int64_t timeline_wall_time_ns;
14
14
  } sample_values;
15
15
 
16
- typedef struct sample_labels {
16
+ typedef struct {
17
17
  ddog_prof_Slice_Label labels;
18
18
 
19
19
  // This is used to allow the `Collectors::Stack` to modify the existing label, if any. This MUST be NULL or point
@@ -39,7 +39,7 @@ static inline long system_epoch_time_now_ns(raise_on_failure_setting raise_on_fa
39
39
  // https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_for_real_time/7/html/reference_guide/sect-posix_clocks#Using_clock_getres_to_compare_clock_resolution
40
40
  // We introduce here a separate type for it, so as to make it harder to misuse/more explicit when these timestamps are used
41
41
 
42
- typedef struct coarse_instant {
42
+ typedef struct {
43
43
  long timestamp_ns;
44
44
  } coarse_instant;
45
45
 
@@ -0,0 +1,47 @@
1
+ #include <ruby.h>
2
+ #include <ruby/debug.h>
3
+ #include <stdbool.h>
4
+
5
+ #include "datadog_ruby_common.h"
6
+ #include "unsafe_api_calls_check.h"
7
+ #include "extconf.h"
8
+
9
+ static bool inside_unsafe_context = false;
10
+
11
+ #ifndef NO_POSTPONED_TRIGGER
12
+ static rb_postponed_job_handle_t check_for_unsafe_api_calls_handle;
13
+ #endif
14
+
15
+ static void check_for_unsafe_api_calls(DDTRACE_UNUSED void *_unused);
16
+
17
+ void unsafe_api_calls_check_init(void) {
18
+ #ifndef NO_POSTPONED_TRIGGER
19
+ int unused_flags = 0;
20
+
21
+ check_for_unsafe_api_calls_handle = rb_postponed_job_preregister(unused_flags, check_for_unsafe_api_calls, NULL);
22
+
23
+ if (check_for_unsafe_api_calls_handle == POSTPONED_JOB_HANDLE_INVALID) {
24
+ rb_raise(rb_eRuntimeError, "Failed to register check_for_unsafe_api_calls_handle postponed job (got POSTPONED_JOB_HANDLE_INVALID)");
25
+ }
26
+ #endif
27
+ }
28
+
29
+ void debug_enter_unsafe_context(void) {
30
+ inside_unsafe_context = true;
31
+
32
+ #ifndef NO_POSTPONED_TRIGGER
33
+ rb_postponed_job_trigger(check_for_unsafe_api_calls_handle);
34
+ #else
35
+ rb_postponed_job_register(0, check_for_unsafe_api_calls, NULL);
36
+ #endif
37
+ }
38
+
39
+ void debug_leave_unsafe_context(void) {
40
+ inside_unsafe_context = false;
41
+ }
42
+
43
+ static void check_for_unsafe_api_calls(DDTRACE_UNUSED void *_unused) {
44
+ if (inside_unsafe_context) rb_bug(
45
+ "Datadog Ruby profiler detected callback nested inside sample. Please report this at https://github.com/datadog/dd-trace-rb/blob/master/CONTRIBUTING.md#found-a-bug"
46
+ );
47
+ }
@@ -0,0 +1,31 @@
1
+ #pragma once
2
+
3
+ // This checker is used to detect accidental thread scheduling switching points happening during profiling sampling.
4
+ //
5
+ // Specifically, when the profiler is sampling, we're never supposed to call into Ruby code (e.g. methods
6
+ // implemented using Ruby code) or allocate Ruby objects.
7
+ // That's because those events introduce thread switch points, and really we don't the VM switching between threads
8
+ // in the middle of the profiler sampling.
9
+ // This includes raising exceptions, unless we're trying to stop the profiler, and even then we must be careful.
10
+ //
11
+ // The above is especially true in situations such as GC profiling or allocation/heap profiling, as in those situations
12
+ // we can even crash the Ruby VM if we switch away at the wrong time.
13
+ //
14
+ // The below APIs can be used to detect these situations. They work by relying on the following observation:
15
+ // in most (all?) thread switch points, Ruby will check for interrupts and run the postponed jobs.
16
+ //
17
+ // Thus, if we set a flag while we're sampling (inside_unsafe_context), trigger the postponed job, and then only unset
18
+ // the flag after sampling, the correct thing to happen is that the postponed job should never see the flag.
19
+ //
20
+ // If, however, we have a bug and there's a thread switch point, our postponed job will see the flag and immediately
21
+ // stop the Ruby VM before further damage happens (and hopefully giving us a stack trace clearly pointing to the culprit).
22
+
23
+ void unsafe_api_calls_check_init(void);
24
+
25
+ // IMPORTANT: This method **MUST** only be called from test code, as it causes an immediate hard-crash on the Ruby VM
26
+ // when it detects a potential issue, and that's not something we want for production apps.
27
+ //
28
+ // In the future we may introduce some kind of setting (off by default) to also allow this to be safely used
29
+ // in production code if needed.
30
+ void debug_enter_unsafe_context(void);
31
+ void debug_leave_unsafe_context(void);
@@ -54,6 +54,9 @@ static VALUE _native_start_or_update_on_fork(int argc, VALUE *argv, DDTRACE_UNUS
54
54
  // Tags and endpoint are heap-allocated, so after here we can't raise exceptions otherwise we'll leak this memory
55
55
  // Start of exception-free zone to prevent leaks {{
56
56
  ddog_Endpoint *endpoint = ddog_endpoint_from_url(char_slice_from_ruby_string(agent_base_url));
57
+ if (endpoint == NULL) {
58
+ rb_raise(rb_eRuntimeError, "Failed to create endpoint from agent_base_url: %"PRIsVALUE, agent_base_url);
59
+ }
57
60
  ddog_Vec_Tag tags = convert_tags(tags_as_array);
58
61
 
59
62
  ddog_crasht_Config config = {