ddtrace 1.19.0 → 1.21.0

Files changed (194)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +93 -2
  3. data/LICENSE-3rdparty.csv +1 -1
  4. data/bin/ddprofrb +15 -0
  5. data/bin/ddtracerb +3 -1
  6. data/ext/{ddtrace_profiling_loader/ddtrace_profiling_loader.c → datadog_profiling_loader/datadog_profiling_loader.c} +2 -2
  7. data/ext/{ddtrace_profiling_loader → datadog_profiling_loader}/extconf.rb +3 -3
  8. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_cpu_and_wall_time_worker.c +237 -65
  9. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c +422 -0
  10. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h +101 -0
  11. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_thread_context.c +92 -2
  12. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/extconf.rb +5 -2
  13. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/helpers.h +4 -0
  14. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/http_transport.c +10 -14
  15. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/native_extension_helpers.rb +4 -4
  16. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/private_vm_api_access.c +14 -0
  17. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/private_vm_api_access.h +4 -0
  18. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/profiling.c +17 -1
  19. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/ruby_helpers.c +10 -0
  20. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/ruby_helpers.h +2 -0
  21. data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/time_helpers.h +2 -0
  22. data/lib/datadog/appsec/contrib/rack/request_middleware.rb +2 -1
  23. data/lib/datadog/core/configuration/components.rb +5 -5
  24. data/lib/datadog/core/configuration/option.rb +1 -1
  25. data/lib/datadog/core/configuration/settings.rb +107 -46
  26. data/lib/datadog/core/diagnostics/environment_logger.rb +4 -3
  27. data/lib/datadog/core/environment/class_count.rb +6 -6
  28. data/lib/datadog/core/environment/git.rb +25 -0
  29. data/lib/datadog/core/environment/identity.rb +18 -48
  30. data/lib/datadog/core/git/ext.rb +2 -23
  31. data/lib/datadog/core/remote/component.rb +25 -12
  32. data/lib/datadog/core/remote/ext.rb +1 -0
  33. data/lib/datadog/core/remote/negotiation.rb +2 -2
  34. data/lib/datadog/core/remote/tie/tracing.rb +39 -0
  35. data/lib/datadog/core/remote/tie.rb +27 -0
  36. data/lib/datadog/core/remote/worker.rb +7 -4
  37. data/lib/datadog/core/transport/ext.rb +2 -0
  38. data/lib/datadog/core/utils/url.rb +25 -0
  39. data/lib/datadog/opentelemetry/sdk/propagator.rb +3 -2
  40. data/lib/datadog/opentelemetry.rb +3 -0
  41. data/lib/datadog/profiling/collectors/cpu_and_wall_time_worker.rb +6 -2
  42. data/lib/datadog/profiling/collectors/info.rb +101 -0
  43. data/lib/datadog/profiling/component.rb +14 -30
  44. data/lib/datadog/profiling/exporter.rb +19 -5
  45. data/lib/datadog/profiling/ext.rb +2 -0
  46. data/lib/datadog/profiling/flush.rb +6 -3
  47. data/lib/datadog/profiling/http_transport.rb +5 -1
  48. data/lib/datadog/profiling/load_native_extension.rb +5 -5
  49. data/lib/datadog/profiling/native_extension.rb +1 -1
  50. data/lib/datadog/profiling/tag_builder.rb +5 -0
  51. data/lib/datadog/profiling/tasks/exec.rb +3 -3
  52. data/lib/datadog/profiling/tasks/help.rb +3 -3
  53. data/lib/datadog/profiling.rb +2 -2
  54. data/lib/datadog/tracing/configuration/ext.rb +0 -1
  55. data/lib/datadog/tracing/configuration/settings.rb +2 -1
  56. data/lib/datadog/tracing/contrib/action_cable/configuration/settings.rb +1 -0
  57. data/lib/datadog/tracing/contrib/action_cable/ext.rb +1 -0
  58. data/lib/datadog/tracing/contrib/action_mailer/configuration/settings.rb +1 -0
  59. data/lib/datadog/tracing/contrib/action_mailer/ext.rb +1 -0
  60. data/lib/datadog/tracing/contrib/action_pack/configuration/settings.rb +1 -0
  61. data/lib/datadog/tracing/contrib/action_pack/ext.rb +1 -0
  62. data/lib/datadog/tracing/contrib/action_view/configuration/settings.rb +1 -0
  63. data/lib/datadog/tracing/contrib/action_view/ext.rb +1 -0
  64. data/lib/datadog/tracing/contrib/active_job/configuration/settings.rb +1 -0
  65. data/lib/datadog/tracing/contrib/active_job/ext.rb +1 -0
  66. data/lib/datadog/tracing/contrib/active_model_serializers/configuration/settings.rb +1 -0
  67. data/lib/datadog/tracing/contrib/active_model_serializers/ext.rb +1 -0
  68. data/lib/datadog/tracing/contrib/active_record/configuration/settings.rb +1 -0
  69. data/lib/datadog/tracing/contrib/active_record/ext.rb +1 -0
  70. data/lib/datadog/tracing/contrib/active_support/configuration/settings.rb +1 -0
  71. data/lib/datadog/tracing/contrib/active_support/ext.rb +1 -0
  72. data/lib/datadog/tracing/contrib/analytics.rb +0 -1
  73. data/lib/datadog/tracing/contrib/aws/configuration/settings.rb +1 -0
  74. data/lib/datadog/tracing/contrib/aws/ext.rb +1 -0
  75. data/lib/datadog/tracing/contrib/concurrent_ruby/async_patch.rb +20 -0
  76. data/lib/datadog/tracing/contrib/concurrent_ruby/patcher.rb +11 -1
  77. data/lib/datadog/tracing/contrib/dalli/configuration/settings.rb +1 -0
  78. data/lib/datadog/tracing/contrib/dalli/ext.rb +1 -0
  79. data/lib/datadog/tracing/contrib/delayed_job/configuration/settings.rb +1 -0
  80. data/lib/datadog/tracing/contrib/delayed_job/ext.rb +1 -0
  81. data/lib/datadog/tracing/contrib/elasticsearch/configuration/settings.rb +1 -0
  82. data/lib/datadog/tracing/contrib/elasticsearch/ext.rb +1 -0
  83. data/lib/datadog/tracing/contrib/ethon/configuration/settings.rb +1 -0
  84. data/lib/datadog/tracing/contrib/ethon/ext.rb +1 -0
  85. data/lib/datadog/tracing/contrib/excon/configuration/settings.rb +1 -0
  86. data/lib/datadog/tracing/contrib/excon/ext.rb +1 -0
  87. data/lib/datadog/tracing/contrib/extensions.rb +6 -2
  88. data/lib/datadog/tracing/contrib/faraday/configuration/settings.rb +7 -0
  89. data/lib/datadog/tracing/contrib/faraday/ext.rb +1 -0
  90. data/lib/datadog/tracing/contrib/faraday/middleware.rb +1 -1
  91. data/lib/datadog/tracing/contrib/grape/configuration/settings.rb +1 -0
  92. data/lib/datadog/tracing/contrib/grape/endpoint.rb +5 -0
  93. data/lib/datadog/tracing/contrib/grape/ext.rb +1 -0
  94. data/lib/datadog/tracing/contrib/graphql/configuration/settings.rb +1 -0
  95. data/lib/datadog/tracing/contrib/graphql/ext.rb +1 -0
  96. data/lib/datadog/tracing/contrib/grpc/configuration/settings.rb +1 -0
  97. data/lib/datadog/tracing/contrib/grpc/ext.rb +1 -0
  98. data/lib/datadog/tracing/contrib/http/configuration/settings.rb +1 -0
  99. data/lib/datadog/tracing/contrib/http/distributed/fetcher.rb +2 -2
  100. data/lib/datadog/tracing/contrib/http/ext.rb +1 -0
  101. data/lib/datadog/tracing/contrib/httpclient/configuration/settings.rb +1 -0
  102. data/lib/datadog/tracing/contrib/httpclient/ext.rb +1 -0
  103. data/lib/datadog/tracing/contrib/httprb/configuration/settings.rb +1 -0
  104. data/lib/datadog/tracing/contrib/httprb/ext.rb +1 -0
  105. data/lib/datadog/tracing/contrib/kafka/configuration/settings.rb +1 -0
  106. data/lib/datadog/tracing/contrib/kafka/ext.rb +1 -0
  107. data/lib/datadog/tracing/contrib/mongodb/configuration/settings.rb +1 -0
  108. data/lib/datadog/tracing/contrib/mongodb/ext.rb +1 -0
  109. data/lib/datadog/tracing/contrib/mysql2/configuration/settings.rb +1 -0
  110. data/lib/datadog/tracing/contrib/mysql2/ext.rb +1 -0
  111. data/lib/datadog/tracing/contrib/opensearch/configuration/settings.rb +1 -0
  112. data/lib/datadog/tracing/contrib/opensearch/ext.rb +1 -0
  113. data/lib/datadog/tracing/contrib/pg/configuration/settings.rb +1 -0
  114. data/lib/datadog/tracing/contrib/pg/ext.rb +1 -0
  115. data/lib/datadog/tracing/contrib/pg/instrumentation.rb +11 -4
  116. data/lib/datadog/tracing/contrib/presto/configuration/settings.rb +1 -0
  117. data/lib/datadog/tracing/contrib/presto/ext.rb +1 -0
  118. data/lib/datadog/tracing/contrib/qless/configuration/settings.rb +1 -0
  119. data/lib/datadog/tracing/contrib/qless/ext.rb +1 -0
  120. data/lib/datadog/tracing/contrib/que/configuration/settings.rb +1 -0
  121. data/lib/datadog/tracing/contrib/que/ext.rb +1 -0
  122. data/lib/datadog/tracing/contrib/racecar/configuration/settings.rb +1 -0
  123. data/lib/datadog/tracing/contrib/racecar/ext.rb +1 -0
  124. data/lib/datadog/tracing/contrib/rack/configuration/settings.rb +1 -0
  125. data/lib/datadog/tracing/contrib/rack/ext.rb +1 -0
  126. data/lib/datadog/tracing/contrib/rack/middlewares.rb +37 -6
  127. data/lib/datadog/tracing/contrib/rails/configuration/settings.rb +1 -0
  128. data/lib/datadog/tracing/contrib/rails/ext.rb +1 -0
  129. data/lib/datadog/tracing/contrib/rails/patcher.rb +16 -0
  130. data/lib/datadog/tracing/contrib/rake/configuration/settings.rb +1 -0
  131. data/lib/datadog/tracing/contrib/rake/ext.rb +1 -0
  132. data/lib/datadog/tracing/contrib/redis/configuration/settings.rb +1 -0
  133. data/lib/datadog/tracing/contrib/redis/ext.rb +1 -0
  134. data/lib/datadog/tracing/contrib/redis/instrumentation.rb +2 -2
  135. data/lib/datadog/tracing/contrib/redis/patcher.rb +34 -21
  136. data/lib/datadog/tracing/contrib/resque/configuration/settings.rb +1 -0
  137. data/lib/datadog/tracing/contrib/resque/ext.rb +1 -0
  138. data/lib/datadog/tracing/contrib/rest_client/configuration/settings.rb +1 -0
  139. data/lib/datadog/tracing/contrib/rest_client/ext.rb +1 -0
  140. data/lib/datadog/tracing/contrib/roda/configuration/settings.rb +1 -0
  141. data/lib/datadog/tracing/contrib/roda/ext.rb +1 -0
  142. data/lib/datadog/tracing/contrib/sequel/configuration/settings.rb +1 -0
  143. data/lib/datadog/tracing/contrib/sequel/ext.rb +1 -0
  144. data/lib/datadog/tracing/contrib/shoryuken/configuration/settings.rb +1 -0
  145. data/lib/datadog/tracing/contrib/shoryuken/ext.rb +1 -0
  146. data/lib/datadog/tracing/contrib/sidekiq/configuration/settings.rb +1 -0
  147. data/lib/datadog/tracing/contrib/sidekiq/ext.rb +1 -0
  148. data/lib/datadog/tracing/contrib/sinatra/configuration/settings.rb +1 -0
  149. data/lib/datadog/tracing/contrib/sinatra/ext.rb +1 -0
  150. data/lib/datadog/tracing/contrib/sinatra/tracer.rb +6 -3
  151. data/lib/datadog/tracing/contrib/sneakers/configuration/settings.rb +1 -0
  152. data/lib/datadog/tracing/contrib/sneakers/ext.rb +1 -0
  153. data/lib/datadog/tracing/contrib/stripe/configuration/settings.rb +1 -0
  154. data/lib/datadog/tracing/contrib/stripe/ext.rb +1 -0
  155. data/lib/datadog/tracing/contrib/sucker_punch/configuration/settings.rb +1 -0
  156. data/lib/datadog/tracing/contrib/sucker_punch/ext.rb +1 -0
  157. data/lib/datadog/tracing/contrib/trilogy/configuration/settings.rb +58 -0
  158. data/lib/datadog/tracing/contrib/trilogy/ext.rb +27 -0
  159. data/lib/datadog/tracing/contrib/trilogy/instrumentation.rb +94 -0
  160. data/lib/datadog/tracing/contrib/trilogy/integration.rb +43 -0
  161. data/lib/datadog/tracing/contrib/trilogy/patcher.rb +31 -0
  162. data/lib/datadog/tracing/contrib.rb +1 -0
  163. data/lib/datadog/tracing/metadata/ext.rb +2 -0
  164. data/lib/datadog/tracing/trace_operation.rb +1 -2
  165. data/lib/datadog/tracing/transport/http.rb +1 -0
  166. data/lib/datadog/tracing/transport/trace_formatter.rb +31 -0
  167. data/lib/datadog/tracing.rb +8 -2
  168. data/lib/ddtrace/version.rb +1 -1
  169. metadata +62 -50
  170. data/ext/ddtrace_profiling_native_extension/pid_controller.c +0 -57
  171. data/ext/ddtrace_profiling_native_extension/pid_controller.h +0 -45
  172. data/lib/datadog/profiling/diagnostics/environment_logger.rb +0 -39
  173. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/NativeExtensionDesign.md +0 -0
  174. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/clock_id.h +0 -0
  175. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/clock_id_from_pthread.c +0 -0
  176. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/clock_id_noop.c +0 -0
  177. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_dynamic_sampling_rate.c +0 -0
  178. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_dynamic_sampling_rate.h +0 -0
  179. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_gc_profiling_helper.c +0 -0
  180. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_gc_profiling_helper.h +0 -0
  181. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_idle_sampling_helper.c +0 -0
  182. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_idle_sampling_helper.h +0 -0
  183. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_stack.c +0 -0
  184. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_stack.h +0 -0
  185. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_thread_context.h +0 -0
  186. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/heap_recorder.c +0 -0
  187. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/heap_recorder.h +0 -0
  188. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/libdatadog_helpers.c +0 -0
  189. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/libdatadog_helpers.h +0 -0
  190. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/setup_signal_handler.c +0 -0
  191. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/setup_signal_handler.h +0 -0
  192. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/stack_recorder.c +0 -0
  193. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/stack_recorder.h +0 -0
  194. /data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/time_helpers.c +0 -0
data/ext/{ddtrace_profiling_native_extension → datadog_profiling_native_extension}/collectors_cpu_and_wall_time_worker.c

@@ -12,10 +12,16 @@
  #include "collectors_thread_context.h"
  #include "collectors_dynamic_sampling_rate.h"
  #include "collectors_idle_sampling_helper.h"
+ #include "collectors_discrete_dynamic_sampler.h"
  #include "private_vm_api_access.h"
  #include "setup_signal_handler.h"
  #include "time_helpers.h"

+ #define ERR_CLOCK_FAIL "failed to get clock time"
+
+ // Maximum allowed value for an allocation weight. Attempts to use higher values will result in clamping.
+ unsigned int MAX_ALLOC_WEIGHT = 65535;
+
  // Used to trigger the execution of Collectors::ThreadState, which implements all of the sampling logic
  // itself; this class only implements the "when to do it" part.
  //
@@ -89,13 +95,13 @@ struct cpu_and_wall_time_worker_state {
  bool gc_profiling_enabled;
  bool no_signals_workaround_enabled;
  bool dynamic_sampling_rate_enabled;
- int allocation_sample_every;
  bool allocation_profiling_enabled;
  VALUE self_instance;
  VALUE thread_context_collector_instance;
  VALUE idle_sampling_helper_instance;
  VALUE owner_thread;
- dynamic_sampling_rate_state dynamic_sampling_rate;
+ dynamic_sampling_rate_state cpu_dynamic_sampling_rate;
+ discrete_dynamic_sampler allocation_sampler;
  VALUE gc_tracepoint; // Used to get gc start/finish information
  VALUE object_allocation_tracepoint; // Used to get allocation counts and allocation profiling

@@ -115,6 +121,7 @@ struct cpu_and_wall_time_worker_state {
  bool during_sample;

  struct stats {
+ // # Generic stats
  // How many times we tried to trigger a sample
  unsigned int trigger_sample_attempts;
  // How many times we tried to simulate signal delivery
@@ -125,25 +132,36 @@ struct cpu_and_wall_time_worker_state {
  unsigned int signal_handler_enqueued_sample;
  // How many times the signal handler was called from the wrong thread
  unsigned int signal_handler_wrong_thread;
- // How many times we actually sampled (except GC samples)
- unsigned int sampled;
- // How many times we skipped a sample because of the dynamic sampling rate mechanism
- unsigned int skipped_sample_because_of_dynamic_sampling_rate;

- // Stats for the results of calling rb_postponed_job_register_one
- // The same function was already waiting to be executed
+ // # Stats for the results of calling rb_postponed_job_register_one
+ // The same function was already waiting to be executed
  unsigned int postponed_job_skipped_already_existed;
- // The function was added to the queue successfully
+ // The function was added to the queue successfully
  unsigned int postponed_job_success;
- // The queue was full
+ // The queue was full
  unsigned int postponed_job_full;
- // The function returned an unknown result code
+ // The function returned an unknown result code
  unsigned int postponed_job_unknown_result;

- // Min/max/total wall-time spent sampling (except GC samples)
- uint64_t sampling_time_ns_min;
- uint64_t sampling_time_ns_max;
- uint64_t sampling_time_ns_total;
+ // # CPU/Walltime sampling stats
+ // How many times we actually CPU/wall sampled
+ unsigned int cpu_sampled;
+ // How many times we skipped a CPU/wall sample because of the dynamic sampling rate mechanism
+ unsigned int cpu_skipped;
+ // Min/max/total wall-time spent on CPU/wall sampling
+ uint64_t cpu_sampling_time_ns_min;
+ uint64_t cpu_sampling_time_ns_max;
+ uint64_t cpu_sampling_time_ns_total;
+
+ // # Allocation sampling stats
+ // How many times we actually allocation sampled
+ uint64_t allocation_sampled;
+ // How many times we skipped an allocation sample because of the dynamic sampling rate mechanism
+ uint64_t allocation_skipped;
+ // Min/max/total wall-time spent on allocation sampling
+ uint64_t allocation_sampling_time_ns_min;
+ uint64_t allocation_sampling_time_ns_max;
+ uint64_t allocation_sampling_time_ns_total;
  // How many times we saw allocations being done inside a sample
  unsigned int allocations_during_sample;
  } stats;
@@ -159,13 +177,13 @@ static VALUE _native_initialize(
  VALUE no_signals_workaround_enabled,
  VALUE dynamic_sampling_rate_enabled,
  VALUE dynamic_sampling_rate_overhead_target_percentage,
- VALUE allocation_sample_every,
  VALUE allocation_profiling_enabled
  );
  static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr);
  static VALUE _native_sampling_loop(VALUE self, VALUE instance);
  static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE worker_thread);
  static VALUE stop(VALUE self_instance, VALUE optional_exception);
+ static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optional_exception);
  static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext);
  static void *run_sampling_trigger_loop(void *state_ptr);
  static void interrupt_sampling_trigger_loop(void *state_ptr);
@@ -188,15 +206,18 @@ static VALUE _native_simulate_sample_from_postponed_job(DDTRACE_UNUSED VALUE sel
  static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE instance);
  static VALUE _native_is_sigprof_blocked_in_current_thread(DDTRACE_UNUSED VALUE self);
  static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance);
+ static VALUE _native_stats_reset_not_thread_safe(DDTRACE_UNUSED VALUE self, VALUE instance);
  void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused);
  static void grab_gvl_and_sample(void);
- static void reset_stats(struct cpu_and_wall_time_worker_state *state);
+ static void reset_stats_not_thread_safe(struct cpu_and_wall_time_worker_state *state);
  static void sleep_for(uint64_t time_ns);
  static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self);
  static void on_newobj_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused);
  static void disable_tracepoints(struct cpu_and_wall_time_worker_state *state);
  static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self);
  static VALUE rescued_sample_allocation(VALUE tracepoint_data);
+ static void delayed_error(struct cpu_and_wall_time_worker_state *state, const char *error);
+ static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg);

  // Note on sampler global state safety:
  //
@@ -209,6 +230,11 @@ static VALUE rescued_sample_allocation(VALUE tracepoint_data);
  static VALUE active_sampler_instance = Qnil;
  static struct cpu_and_wall_time_worker_state *active_sampler_instance_state = NULL;

+ // See handle_sampling_signal for details on what this does
+ #ifdef NO_POSTPONED_TRIGGER
+ static void *gc_finalize_deferred_workaround;
+ #endif
+
  // Used to implement CpuAndWallTimeWorker._native_allocation_count . To be able to use cheap thread-local variables
  // (here with `__thread`, see https://gcc.gnu.org/onlinedocs/gcc/Thread-Local.html), this needs to be global.
  //
@@ -227,6 +253,8 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
  if (sample_from_postponed_job_handle == POSTPONED_JOB_HANDLE_INVALID || after_gc_from_postponed_job_handle == POSTPONED_JOB_HANDLE_INVALID) {
  rb_raise(rb_eRuntimeError, "Failed to register profiler postponed jobs (got POSTPONED_JOB_HANDLE_INVALID)");
  }
+ #else
+ gc_finalize_deferred_workaround = objspace_ptr_for_gc_finalize_deferred_workaround();
  #endif

  VALUE collectors_module = rb_define_module_under(profiling_module, "Collectors");
@@ -244,11 +272,12 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
  // https://bugs.ruby-lang.org/issues/18007 for a discussion around this.
  rb_define_alloc_func(collectors_cpu_and_wall_time_worker_class, _native_new);

- rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_initialize", _native_initialize, 9);
+ rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_initialize", _native_initialize, 8);
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_sampling_loop", _native_sampling_loop, 1);
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_stop", _native_stop, 2);
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_reset_after_fork", _native_reset_after_fork, 1);
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_stats", _native_stats, 1);
+ rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_stats_reset_not_thread_safe", _native_stats_reset_not_thread_safe, 1);
  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_allocation_count", _native_allocation_count, 0);
  rb_define_singleton_method(testing_module, "_native_current_sigprof_signal_handler", _native_current_sigprof_signal_handler, 0);
  rb_define_singleton_method(testing_module, "_native_is_running?", _native_is_running, 1);
@@ -260,6 +289,7 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
  rb_define_singleton_method(testing_module, "_native_simulate_sample_from_postponed_job", _native_simulate_sample_from_postponed_job, 0);
  rb_define_singleton_method(testing_module, "_native_is_sigprof_blocked_in_current_thread", _native_is_sigprof_blocked_in_current_thread, 0);
  rb_define_singleton_method(testing_module, "_native_with_blocked_sigprof", _native_with_blocked_sigprof, 0);
+ rb_define_singleton_method(testing_module, "_native_delayed_error", _native_delayed_error, 2);
  }

  // This structure is used to define a Ruby object that stores a pointer to a struct cpu_and_wall_time_worker_state
@@ -284,12 +314,11 @@ static VALUE _native_new(VALUE klass) {
  state->gc_profiling_enabled = false;
  state->no_signals_workaround_enabled = false;
  state->dynamic_sampling_rate_enabled = true;
- state->allocation_sample_every = 0;
  state->allocation_profiling_enabled = false;
  state->thread_context_collector_instance = Qnil;
  state->idle_sampling_helper_instance = Qnil;
  state->owner_thread = Qnil;
- dynamic_sampling_rate_init(&state->dynamic_sampling_rate);
+ dynamic_sampling_rate_init(&state->cpu_dynamic_sampling_rate);
  state->gc_tracepoint = Qnil;
  state->object_allocation_tracepoint = Qnil;

@@ -299,7 +328,15 @@ static VALUE _native_new(VALUE klass) {

  state->during_sample = false;

- reset_stats(state);
+ reset_stats_not_thread_safe(state);
+
+ long now = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
+ if (now == 0) {
+ ruby_xfree(state);
+ rb_raise(rb_eRuntimeError, ERR_CLOCK_FAIL);
+ }
+
+ discrete_dynamic_sampler_init(&state->allocation_sampler, "allocation", now);

  return state->self_instance = TypedData_Wrap_Struct(klass, &cpu_and_wall_time_worker_typed_data, state);
  }
@@ -313,13 +350,11 @@ static VALUE _native_initialize(
  VALUE no_signals_workaround_enabled,
  VALUE dynamic_sampling_rate_enabled,
  VALUE dynamic_sampling_rate_overhead_target_percentage,
- VALUE allocation_sample_every,
  VALUE allocation_profiling_enabled
  ) {
  ENFORCE_BOOLEAN(gc_profiling_enabled);
  ENFORCE_BOOLEAN(no_signals_workaround_enabled);
  ENFORCE_BOOLEAN(dynamic_sampling_rate_enabled);
- ENFORCE_TYPE(allocation_sample_every, T_FIXNUM);
  ENFORCE_TYPE(dynamic_sampling_rate_overhead_target_percentage, T_FLOAT);
  ENFORCE_BOOLEAN(allocation_profiling_enabled);

@@ -329,12 +364,17 @@
  state->gc_profiling_enabled = (gc_profiling_enabled == Qtrue);
  state->no_signals_workaround_enabled = (no_signals_workaround_enabled == Qtrue);
  state->dynamic_sampling_rate_enabled = (dynamic_sampling_rate_enabled == Qtrue);
- dynamic_sampling_rate_set_overhead_target_percentage(&state->dynamic_sampling_rate, NUM2DBL(dynamic_sampling_rate_overhead_target_percentage));
- state->allocation_sample_every = NUM2INT(allocation_sample_every);
  state->allocation_profiling_enabled = (allocation_profiling_enabled == Qtrue);

- if (state->allocation_sample_every <= 0) {
- rb_raise(rb_eArgError, "Unexpected value for allocation_sample_every: %d. This value must be > 0.", state->allocation_sample_every);
+ double total_overhead_target_percentage = NUM2DBL(dynamic_sampling_rate_overhead_target_percentage);
+ if (!state->allocation_profiling_enabled) {
+ dynamic_sampling_rate_set_overhead_target_percentage(&state->cpu_dynamic_sampling_rate, total_overhead_target_percentage);
+ } else {
+ // TODO: May be nice to offer customization here? Distribute available "overhead" margin with a bias towards one or the other
+ // sampler.
+ dynamic_sampling_rate_set_overhead_target_percentage(&state->cpu_dynamic_sampling_rate, total_overhead_target_percentage / 2);
+ long now = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);
+ discrete_dynamic_sampler_set_overhead_target_percentage(&state->allocation_sampler, total_overhead_target_percentage / 2, now);
  }

  state->thread_context_collector_instance = enforce_thread_context_collector_instance(thread_context_collector_instance);
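For context on the hunk above: when allocation profiling is enabled, the single overhead target is split evenly between the CPU/wall sampler and the allocation sampler. A minimal standalone sketch of that arithmetic (names are illustrative, not part of the gem's API):

    #include <stdbool.h>
    #include <stdio.h>

    // Illustrative only: mirrors the 50/50 split of the overhead target shown above.
    static void split_overhead_target(double total_pct, bool allocation_profiling_enabled,
                                      double *cpu_pct, double *alloc_pct) {
      if (!allocation_profiling_enabled) {
        *cpu_pct = total_pct; // CPU/wall sampler gets the whole budget
        *alloc_pct = 0.0;
      } else {
        *cpu_pct = total_pct / 2;   // e.g. 2.0% total => 1.0% for CPU/wall sampling
        *alloc_pct = total_pct / 2; // ...and 1.0% for allocation sampling
      }
    }

    int main(void) {
      double cpu, alloc;
      split_overhead_target(2.0, true, &cpu, &alloc);
      printf("cpu=%.1f%% alloc=%.1f%%\n", cpu, alloc); // prints cpu=1.0% alloc=1.0%
      return 0;
    }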
@@ -363,6 +403,12 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
  struct cpu_and_wall_time_worker_state *state;
  TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

+ // If we already got a delayed exception registered even before starting, raise before starting
+ if (state->failure_exception != Qnil) {
+ disable_tracepoints(state);
+ rb_exc_raise(state->failure_exception);
+ }
+
  struct cpu_and_wall_time_worker_state *old_state = active_sampler_instance_state;
  if (old_state != NULL) {
  if (is_thread_alive(old_state->owner_thread)) {
@@ -387,7 +433,9 @@
  if (state->stop_thread == rb_thread_current()) return Qnil;

  // Reset the dynamic sampling rate state, if any (reminder: the monotonic clock reference may change after a fork)
- dynamic_sampling_rate_reset(&state->dynamic_sampling_rate);
+ dynamic_sampling_rate_reset(&state->cpu_dynamic_sampling_rate);
+ long now = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);
+ discrete_dynamic_sampler_reset(&state->allocation_sampler, now);

  // This write to a global is thread-safe BECAUSE we're still holding on to the global VM lock at this point
  active_sampler_instance_state = state;
@@ -449,15 +497,19 @@ static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE
  return stop(self_instance, /* optional_exception: */ Qnil);
  }

- static VALUE stop(VALUE self_instance, VALUE optional_exception) {
- struct cpu_and_wall_time_worker_state *state;
- TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
-
+ static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optional_exception) {
  atomic_store(&state->should_run, false);
  state->failure_exception = optional_exception;

  // Disable the tracepoints as soon as possible, so the VM doesn't keep on calling them
  disable_tracepoints(state);
+ }
+
+ static VALUE stop(VALUE self_instance, VALUE optional_exception) {
+ struct cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+
+ stop_state(state, optional_exception);

  return Qtrue;
  }
@@ -497,7 +549,32 @@ static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED si
  rb_postponed_job_trigger(sample_from_postponed_job_handle);
  state->stats.postponed_job_success++; // Always succeeds
  #else
- int result = rb_postponed_job_register_one(0, sample_from_postponed_job, NULL);
+
+ // This is a workaround for https://bugs.ruby-lang.org/issues/19991 (for Ruby < 3.3)
+ //
+ // TL;DR the `rb_postponed_job_register_one` API is not atomic (which is why it got replaced by `rb_postponed_job_trigger`)
+ // and in rare cases can cause VM crashes.
+ //
+ // Specifically, if we're interrupting `rb_postponed_job_flush` (the function that processes postponed jobs), the way
+ // that this function reads the jobs is not atomic, and can cause our call to
+ // `rb_postponed_job_register(function, arg)` to clobber an existing job that is getting dequeued.
+ // Clobbering an existing job is somewhat annoying, but the worst part is that it can happen that we clobber only
+ // the existing job's arguments.
+ // As surveyed in https://github.com/ruby/ruby/pull/8949#issuecomment-1821441370 clobbering the arguments turns out
+ // to not matter in many cases as usually `rb_postponed_job_register` calls in the VM and ecosystem ignore the argument.
+ //
+ // https://bugs.ruby-lang.org/issues/19991 is the exception: inside Ruby's `gc.c`, when dealing with object
+ // finalizers, Ruby calls `gc_finalize_deferred_register` which internally calls
+ // `rb_postponed_job_register_one(gc_finalize_deferred, objspace)`.
+ // Clobbering this call means that `gc_finalize_deferred` would get called with `NULL`, causing a segmentation fault.
+ //
+ // Note that this is quite rare: our signal needs to land at exactly the point where the VM has read the function
+ // to execute, but has yet to read the arguments. @ivoanjo: I could only reproduce it by manually changing the VM
+ // code to simulate this happening.
+ //
+ // Thus, our workaround is simple: we pass in objspace as our argument, just in case the clobbering happens.
+ // In the happy path, we never use this argument so it makes no difference. In the buggy path, we avoid crashing the VM.
+ int result = rb_postponed_job_register(0, sample_from_postponed_job, gc_finalize_deferred_workaround /* instead of NULL */);

  // Officially, the result of rb_postponed_job_register_one is documented as being opaque, but in practice it does not
  // seem to have changed between Ruby 2.3 and 3.2, and so we track it as a debugging mechanism
@@ -560,7 +637,7 @@ static void *run_sampling_trigger_loop(void *state_ptr) {
  // Note that we deliberately should NOT combine this sleep_for with the one above because the result of
  // `dynamic_sampling_rate_get_sleep` may have changed while the above sleep was ongoing.
  uint64_t extra_sleep =
- dynamic_sampling_rate_get_sleep(&state->dynamic_sampling_rate, monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE));
+ dynamic_sampling_rate_get_sleep(&state->cpu_dynamic_sampling_rate, monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE));
  if (state->dynamic_sampling_rate_enabled && extra_sleep > 0) sleep_for(extra_sleep);
  }

@@ -600,12 +677,12 @@ static VALUE rescued_sample_from_postponed_job(VALUE self_instance) {

  long wall_time_ns_before_sample = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);

- if (state->dynamic_sampling_rate_enabled && !dynamic_sampling_rate_should_sample(&state->dynamic_sampling_rate, wall_time_ns_before_sample)) {
- state->stats.skipped_sample_because_of_dynamic_sampling_rate++;
+ if (state->dynamic_sampling_rate_enabled && !dynamic_sampling_rate_should_sample(&state->cpu_dynamic_sampling_rate, wall_time_ns_before_sample)) {
+ state->stats.cpu_skipped++;
  return Qnil;
  }

- state->stats.sampled++;
+ state->stats.cpu_sampled++;

  VALUE profiler_overhead_stack_thread = state->owner_thread; // Used to attribute profiler overhead to a different stack
  thread_context_collector_sample(state->thread_context_collector_instance, wall_time_ns_before_sample, profiler_overhead_stack_thread);
@@ -616,11 +693,11 @@ static VALUE rescued_sample_from_postponed_job(VALUE self_instance) {
  // Guard against wall-time going backwards, see https://github.com/DataDog/dd-trace-rb/pull/2336 for discussion.
  uint64_t sampling_time_ns = delta_ns < 0 ? 0 : delta_ns;

- state->stats.sampling_time_ns_min = uint64_min_of(sampling_time_ns, state->stats.sampling_time_ns_min);
- state->stats.sampling_time_ns_max = uint64_max_of(sampling_time_ns, state->stats.sampling_time_ns_max);
- state->stats.sampling_time_ns_total += sampling_time_ns;
+ state->stats.cpu_sampling_time_ns_min = uint64_min_of(sampling_time_ns, state->stats.cpu_sampling_time_ns_min);
+ state->stats.cpu_sampling_time_ns_max = uint64_max_of(sampling_time_ns, state->stats.cpu_sampling_time_ns_max);
+ state->stats.cpu_sampling_time_ns_total += sampling_time_ns;

- dynamic_sampling_rate_after_sample(&state->dynamic_sampling_rate, wall_time_ns_after_sample, sampling_time_ns);
+ dynamic_sampling_rate_after_sample(&state->cpu_dynamic_sampling_rate, wall_time_ns_after_sample, sampling_time_ns);

  // Return a dummy VALUE because we're called from rb_rescue2 which requires it
  return Qnil;
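The min/max/total bookkeeping in the hunk above only works because the min field starts at UINT64_MAX (set in reset_stats_not_thread_safe further down), so the first observed sample always wins. A self-contained sketch of that pattern, with local helpers standing in for the gem's uint64_min_of/uint64_max_of:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t u64_min(uint64_t a, uint64_t b) { return a < b ? a : b; }
    static uint64_t u64_max(uint64_t a, uint64_t b) { return a > b ? a : b; }

    int main(void) {
      // Min starts at UINT64_MAX so the first sample always becomes the minimum; max/total start at zero.
      uint64_t min = UINT64_MAX, max = 0, total = 0;
      uint64_t samples[] = {1200, 800, 950}; // sampling durations in nanoseconds (made-up values)

      for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        min = u64_min(samples[i], min);
        max = u64_max(samples[i], max);
        total += samples[i];
      }

      // Prints min=800 max=1200 total=2950
      printf("min=%" PRIu64 " max=%" PRIu64 " total=%" PRIu64 "\n", min, max, total);
      return 0;
    }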
@@ -817,7 +894,7 @@ static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE instance)
  // Disable all tracepoints, so that there are no more attempts to mutate the profile
  disable_tracepoints(state);

- reset_stats(state);
+ reset_stats_not_thread_safe(state);

  // Remove all state from the `Collectors::ThreadState` and connected downstream components
  rb_funcall(state->thread_context_collector_instance, rb_intern("reset_after_fork"), 0);
@@ -833,11 +910,27 @@ static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance) {
  struct cpu_and_wall_time_worker_state *state;
  TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);

- VALUE pretty_sampling_time_ns_min = state->stats.sampling_time_ns_min == UINT64_MAX ? Qnil : ULL2NUM(state->stats.sampling_time_ns_min);
- VALUE pretty_sampling_time_ns_max = state->stats.sampling_time_ns_max == 0 ? Qnil : ULL2NUM(state->stats.sampling_time_ns_max);
- VALUE pretty_sampling_time_ns_total = state->stats.sampling_time_ns_total == 0 ? Qnil : ULL2NUM(state->stats.sampling_time_ns_total);
- VALUE pretty_sampling_time_ns_avg =
- state->stats.sampled == 0 ? Qnil : DBL2NUM(((double) state->stats.sampling_time_ns_total) / state->stats.sampled);
+ VALUE pretty_cpu_sampling_time_ns_min = state->stats.cpu_sampling_time_ns_min == UINT64_MAX ? Qnil : ULL2NUM(state->stats.cpu_sampling_time_ns_min);
+ VALUE pretty_cpu_sampling_time_ns_max = state->stats.cpu_sampling_time_ns_max == 0 ? Qnil : ULL2NUM(state->stats.cpu_sampling_time_ns_max);
+ VALUE pretty_cpu_sampling_time_ns_total = state->stats.cpu_sampling_time_ns_total == 0 ? Qnil : ULL2NUM(state->stats.cpu_sampling_time_ns_total);
+ VALUE pretty_cpu_sampling_time_ns_avg =
+ state->stats.cpu_sampled == 0 ? Qnil : DBL2NUM(((double) state->stats.cpu_sampling_time_ns_total) / state->stats.cpu_sampled);
+
+ VALUE pretty_allocation_sampling_time_ns_min = state->stats.allocation_sampling_time_ns_min == UINT64_MAX ? Qnil : ULL2NUM(state->stats.allocation_sampling_time_ns_min);
+ VALUE pretty_allocation_sampling_time_ns_max = state->stats.allocation_sampling_time_ns_max == 0 ? Qnil : ULL2NUM(state->stats.allocation_sampling_time_ns_max);
+ VALUE pretty_allocation_sampling_time_ns_total = state->stats.allocation_sampling_time_ns_total == 0 ? Qnil : ULL2NUM(state->stats.allocation_sampling_time_ns_total);
+ VALUE pretty_allocation_sampling_time_ns_avg =
+ state->stats.allocation_sampled == 0 ? Qnil : DBL2NUM(((double) state->stats.allocation_sampling_time_ns_total) / state->stats.allocation_sampled);
+
+ unsigned long total_cpu_samples_attempted = state->stats.cpu_sampled + state->stats.cpu_skipped;
+ VALUE effective_cpu_sample_rate =
+ total_cpu_samples_attempted == 0 ? Qnil : DBL2NUM(((double) state->stats.cpu_sampled) / total_cpu_samples_attempted);
+ unsigned long total_allocation_samples_attempted = state->stats.allocation_sampled + state->stats.allocation_skipped;
+ VALUE effective_allocation_sample_rate =
+ total_allocation_samples_attempted == 0 ? Qnil : DBL2NUM(((double) state->stats.allocation_sampled) / total_allocation_samples_attempted);
+
+ VALUE allocation_sampler_snapshot = state->allocation_profiling_enabled && state->dynamic_sampling_rate_enabled ?
+ discrete_dynamic_sampler_state_snapshot(&state->allocation_sampler) : Qnil;

  VALUE stats_as_hash = rb_hash_new();
  VALUE arguments[] = {
@@ -846,22 +939,42 @@ static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance) {
  ID2SYM(rb_intern("simulated_signal_delivery")), /* => */ UINT2NUM(state->stats.simulated_signal_delivery),
  ID2SYM(rb_intern("signal_handler_enqueued_sample")), /* => */ UINT2NUM(state->stats.signal_handler_enqueued_sample),
  ID2SYM(rb_intern("signal_handler_wrong_thread")), /* => */ UINT2NUM(state->stats.signal_handler_wrong_thread),
- ID2SYM(rb_intern("sampled")), /* => */ UINT2NUM(state->stats.sampled),
- ID2SYM(rb_intern("skipped_sample_because_of_dynamic_sampling_rate")), /* => */ UINT2NUM(state->stats.skipped_sample_because_of_dynamic_sampling_rate),
  ID2SYM(rb_intern("postponed_job_skipped_already_existed")), /* => */ UINT2NUM(state->stats.postponed_job_skipped_already_existed),
  ID2SYM(rb_intern("postponed_job_success")), /* => */ UINT2NUM(state->stats.postponed_job_success),
  ID2SYM(rb_intern("postponed_job_full")), /* => */ UINT2NUM(state->stats.postponed_job_full),
  ID2SYM(rb_intern("postponed_job_unknown_result")), /* => */ UINT2NUM(state->stats.postponed_job_unknown_result),
- ID2SYM(rb_intern("sampling_time_ns_min")), /* => */ pretty_sampling_time_ns_min,
- ID2SYM(rb_intern("sampling_time_ns_max")), /* => */ pretty_sampling_time_ns_max,
- ID2SYM(rb_intern("sampling_time_ns_total")), /* => */ pretty_sampling_time_ns_total,
- ID2SYM(rb_intern("sampling_time_ns_avg")), /* => */ pretty_sampling_time_ns_avg,
- ID2SYM(rb_intern("allocations_during_sample")), /* => */ UINT2NUM(state->stats.allocations_during_sample),
+
+ // CPU Stats
+ ID2SYM(rb_intern("cpu_sampled")), /* => */ UINT2NUM(state->stats.cpu_sampled),
+ ID2SYM(rb_intern("cpu_skipped")), /* => */ UINT2NUM(state->stats.cpu_skipped),
+ ID2SYM(rb_intern("cpu_effective_sample_rate")), /* => */ effective_cpu_sample_rate,
+ ID2SYM(rb_intern("cpu_sampling_time_ns_min")), /* => */ pretty_cpu_sampling_time_ns_min,
+ ID2SYM(rb_intern("cpu_sampling_time_ns_max")), /* => */ pretty_cpu_sampling_time_ns_max,
+ ID2SYM(rb_intern("cpu_sampling_time_ns_total")), /* => */ pretty_cpu_sampling_time_ns_total,
+ ID2SYM(rb_intern("cpu_sampling_time_ns_avg")), /* => */ pretty_cpu_sampling_time_ns_avg,
+
+ // Allocation stats
+ ID2SYM(rb_intern("allocation_sampled")), /* => */ state->allocation_profiling_enabled ? ULONG2NUM(state->stats.allocation_sampled) : Qnil,
+ ID2SYM(rb_intern("allocation_skipped")), /* => */ state->allocation_profiling_enabled ? ULONG2NUM(state->stats.allocation_skipped) : Qnil,
+ ID2SYM(rb_intern("allocation_effective_sample_rate")), /* => */ effective_allocation_sample_rate,
+ ID2SYM(rb_intern("allocation_sampling_time_ns_min")), /* => */ pretty_allocation_sampling_time_ns_min,
+ ID2SYM(rb_intern("allocation_sampling_time_ns_max")), /* => */ pretty_allocation_sampling_time_ns_max,
+ ID2SYM(rb_intern("allocation_sampling_time_ns_total")), /* => */ pretty_allocation_sampling_time_ns_total,
+ ID2SYM(rb_intern("allocation_sampling_time_ns_avg")), /* => */ pretty_allocation_sampling_time_ns_avg,
+ ID2SYM(rb_intern("allocation_sampler_snapshot")), /* => */ allocation_sampler_snapshot,
+ ID2SYM(rb_intern("allocations_during_sample")), /* => */ state->allocation_profiling_enabled ? UINT2NUM(state->stats.allocations_during_sample) : Qnil,
  };
  for (long unsigned int i = 0; i < VALUE_COUNT(arguments); i += 2) rb_hash_aset(stats_as_hash, arguments[i], arguments[i+1]);
  return stats_as_hash;
  }

+ static VALUE _native_stats_reset_not_thread_safe(DDTRACE_UNUSED VALUE self, VALUE instance) {
+ struct cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+ reset_stats_not_thread_safe(state);
+ return Qnil;
+ }
+
  void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused) {
  struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above

@@ -879,9 +992,17 @@

  static void grab_gvl_and_sample(void) { rb_thread_call_with_gvl(simulate_sampling_signal_delivery, NULL); }

- static void reset_stats(struct cpu_and_wall_time_worker_state *state) {
- state->stats = (struct stats) {}; // Resets all stats back to zero
- state->stats.sampling_time_ns_min = UINT64_MAX; // Since we always take the min between existing and latest sample
+ static void reset_stats_not_thread_safe(struct cpu_and_wall_time_worker_state *state) {
+ // NOTE: This is not really thread safe so ongoing sampling operations that are concurrent with a reset can have their stats:
+ // * Lost (writes after stats retrieval but before reset).
+ // * Included in the previous stats window (writes before stats retrieval and reset).
+ // * Included in the following stats window (writes after stats retrieval and reset).
+ // Given the expected infrequency of resetting (~once per 60s profile) and the auxiliary/non-critical nature of these stats
+ // this momentary loss of accuracy is deemed acceptable to keep overhead to a minimum.
+ state->stats = (struct stats) {
+ .cpu_sampling_time_ns_min = UINT64_MAX, // Since we always take the min between existing and latest sample
+ .allocation_sampling_time_ns_min = UINT64_MAX, // Since we always take the min between existing and latest sample
+ };
  }

  static void sleep_for(uint64_t time_ns) {
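The effective sample rates exposed by _native_stats two hunks above are plain ratios: samples taken divided by samples attempted (taken plus skipped), with the stat reported as nil when nothing was attempted. A small illustrative computation, not part of the gem's API:

    #include <stdio.h>

    // Illustrative only: effective sample rate as reported by _native_stats.
    // Returns a negative value to stand in for the Qnil case when no samples were attempted.
    static double effective_sample_rate(unsigned long sampled, unsigned long skipped) {
      unsigned long attempted = sampled + skipped;
      if (attempted == 0) return -1.0;
      return (double) sampled / attempted;
    }

    int main(void) {
      // e.g. 600 samples taken and 1400 skipped by the dynamic sampling rate => 0.30
      printf("%.2f\n", effective_sample_rate(600, 1400));
      return 0;
    }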
@@ -931,25 +1052,53 @@ static void on_newobj_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused)
  return;
  }

+ if (state->dynamic_sampling_rate_enabled) {
+ long now = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
+ if (now == 0) {
+ delayed_error(state, ERR_CLOCK_FAIL);
+ return;
+ }
+ if (!discrete_dynamic_sampler_should_sample(&state->allocation_sampler, now)) {
+ state->stats.allocation_skipped++;
+ return;
+ }
+ }
+
  // @ivoanjo: Strictly speaking, this is not needed because Ruby should not call the same tracepoint while a previous
  // invocation is still pending, (e.g. it wouldn't call `on_newobj_event` while it's already running), but I decided
  // to keep this here for consistency -- every call to the thread context (other than the special gc calls which are
  // defined as not being able to allocate) sets this.
  state->during_sample = true;

- // TODO: This is a placeholder sampling decision strategy. We plan to replace it with a better one soon (e.g. before
- // beta), and having something here allows us to test the rest of feature, sampling decision aside.
- if (allocation_count % state->allocation_sample_every == 0) {
- // Rescue against any exceptions that happen during sampling
- safely_call(rescued_sample_allocation, tracepoint_data, state->self_instance);
+ // Rescue against any exceptions that happen during sampling
+ safely_call(rescued_sample_allocation, tracepoint_data, state->self_instance);
+
+ if (state->dynamic_sampling_rate_enabled) {
+ long now = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
+ if (now == 0) {
+ delayed_error(state, ERR_CLOCK_FAIL);
+ // NOTE: Not short-circuiting here to make sure cleanup happens
+ }
+ uint64_t sampling_time_ns = discrete_dynamic_sampler_after_sample(&state->allocation_sampler, now);
+ // NOTE: To keep things lean when dynamic sampling rate is disabled we skip clock interactions which is
+ // why we're fine with having this inside this conditional.
+ state->stats.allocation_sampling_time_ns_min = uint64_min_of(sampling_time_ns, state->stats.allocation_sampling_time_ns_min);
+ state->stats.allocation_sampling_time_ns_max = uint64_max_of(sampling_time_ns, state->stats.allocation_sampling_time_ns_max);
+ state->stats.allocation_sampling_time_ns_total += sampling_time_ns;
  }

+ state->stats.allocation_sampled++;
+
  state->during_sample = false;
  }

  static void disable_tracepoints(struct cpu_and_wall_time_worker_state *state) {
- rb_tracepoint_disable(state->gc_tracepoint);
- rb_tracepoint_disable(state->object_allocation_tracepoint);
+ if (state->gc_tracepoint != Qnil) {
+ rb_tracepoint_disable(state->gc_tracepoint);
+ }
+ if (state->object_allocation_tracepoint != Qnil) {
+ rb_tracepoint_disable(state->object_allocation_tracepoint);
+ }
  }

  static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self) {
@@ -974,8 +1123,31 @@ static VALUE rescued_sample_allocation(VALUE tracepoint_data) {
  rb_trace_arg_t *data = rb_tracearg_from_tracepoint(tracepoint_data);
  VALUE new_object = rb_tracearg_object(data);

- thread_context_collector_sample_allocation(state->thread_context_collector_instance, state->allocation_sample_every, new_object);
+ unsigned long allocations_since_last_sample = state->dynamic_sampling_rate_enabled ?
+ // if we're doing dynamic sampling, ask the sampler how many events since last sample
+ discrete_dynamic_sampler_events_since_last_sample(&state->allocation_sampler) :
+ // if we aren't, then we're sampling every event
+ 1;
+ // TODO: Signal in the profile that clamping happened?
+ unsigned int weight = allocations_since_last_sample > MAX_ALLOC_WEIGHT ? MAX_ALLOC_WEIGHT : (unsigned int) allocations_since_last_sample;
+ thread_context_collector_sample_allocation(state->thread_context_collector_instance, weight, new_object);

  // Return a dummy VALUE because we're called from rb_rescue2 which requires it
  return Qnil;
  }
+
+ static void delayed_error(struct cpu_and_wall_time_worker_state *state, const char *error) {
+ // If we can't raise an immediate exception at the calling site, use the asynchronous flow through the main worker loop.
+ stop_state(state, rb_exc_new_cstr(rb_eRuntimeError, error));
+ }
+
+ static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg) {
+ ENFORCE_TYPE(error_msg, T_STRING);
+
+ struct cpu_and_wall_time_worker_state *state;
+ TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+
+ delayed_error(state, rb_string_value_cstr(&error_msg));
+
+ return Qnil;
+ }
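As a worked example of the weight clamping in rescued_sample_allocation above: each sampled allocation is recorded with a weight equal to the number of allocations seen since the last sample, clamped to MAX_ALLOC_WEIGHT (65535). A standalone sketch of just that clamping step (illustrative, not the gem's API):

    #include <stdio.h>

    // Illustrative only: mirrors the clamp applied before thread_context_collector_sample_allocation.
    static const unsigned int MAX_ALLOC_WEIGHT = 65535;

    static unsigned int clamp_allocation_weight(unsigned long allocations_since_last_sample) {
      return allocations_since_last_sample > MAX_ALLOC_WEIGHT
        ? MAX_ALLOC_WEIGHT
        : (unsigned int) allocations_since_last_sample;
    }

    int main(void) {
      printf("%u\n", clamp_allocation_weight(1000));   // 1000: below the cap, used as-is
      printf("%u\n", clamp_allocation_weight(100000)); // 65535: clamped to MAX_ALLOC_WEIGHT
      return 0;
    }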