datadog 2.2.0 → 2.3.0

Files changed (113)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +51 -2
  3. data/ext/datadog_profiling_loader/extconf.rb +15 -15
  4. data/ext/datadog_profiling_native_extension/clock_id.h +1 -0
  5. data/ext/datadog_profiling_native_extension/clock_id_from_pthread.c +1 -2
  6. data/ext/datadog_profiling_native_extension/clock_id_noop.c +1 -2
  7. data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +113 -43
  8. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c +49 -26
  9. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h +34 -4
  10. data/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c +4 -0
  11. data/ext/datadog_profiling_native_extension/collectors_stack.c +49 -37
  12. data/ext/datadog_profiling_native_extension/collectors_stack.h +2 -2
  13. data/ext/datadog_profiling_native_extension/collectors_thread_context.c +81 -19
  14. data/ext/datadog_profiling_native_extension/collectors_thread_context.h +1 -0
  15. data/ext/datadog_profiling_native_extension/datadog_ruby_common.c +110 -0
  16. data/ext/datadog_profiling_native_extension/datadog_ruby_common.h +57 -0
  17. data/ext/datadog_profiling_native_extension/extconf.rb +65 -60
  18. data/ext/datadog_profiling_native_extension/heap_recorder.c +34 -6
  19. data/ext/datadog_profiling_native_extension/heap_recorder.h +3 -1
  20. data/ext/datadog_profiling_native_extension/helpers.h +6 -17
  21. data/ext/datadog_profiling_native_extension/http_transport.c +3 -3
  22. data/ext/datadog_profiling_native_extension/libdatadog_helpers.c +0 -86
  23. data/ext/datadog_profiling_native_extension/libdatadog_helpers.h +2 -23
  24. data/ext/datadog_profiling_native_extension/native_extension_helpers.rb +61 -172
  25. data/ext/datadog_profiling_native_extension/private_vm_api_access.c +64 -138
  26. data/ext/datadog_profiling_native_extension/private_vm_api_access.h +17 -11
  27. data/ext/datadog_profiling_native_extension/profiling.c +0 -2
  28. data/ext/datadog_profiling_native_extension/ruby_helpers.c +0 -33
  29. data/ext/datadog_profiling_native_extension/ruby_helpers.h +1 -26
  30. data/ext/datadog_profiling_native_extension/setup_signal_handler.h +1 -0
  31. data/ext/datadog_profiling_native_extension/stack_recorder.c +14 -2
  32. data/ext/datadog_profiling_native_extension/stack_recorder.h +1 -0
  33. data/ext/datadog_profiling_native_extension/time_helpers.c +0 -15
  34. data/ext/datadog_profiling_native_extension/time_helpers.h +36 -6
  35. data/ext/{datadog_profiling_native_extension → libdatadog_api}/crashtracker.c +19 -6
  36. data/ext/libdatadog_api/datadog_ruby_common.c +110 -0
  37. data/ext/libdatadog_api/datadog_ruby_common.h +57 -0
  38. data/ext/libdatadog_api/extconf.rb +108 -0
  39. data/ext/libdatadog_api/macos_development.md +26 -0
  40. data/ext/libdatadog_extconf_helpers.rb +130 -0
  41. data/lib/datadog/appsec/contrib/graphql/appsec_trace.rb +49 -0
  42. data/lib/datadog/appsec/contrib/graphql/gateway/multiplex.rb +73 -0
  43. data/lib/datadog/appsec/contrib/graphql/gateway/watcher.rb +68 -0
  44. data/lib/datadog/appsec/contrib/graphql/integration.rb +41 -0
  45. data/lib/datadog/appsec/contrib/graphql/patcher.rb +37 -0
  46. data/lib/datadog/appsec/contrib/graphql/reactive/multiplex.rb +59 -0
  47. data/lib/datadog/appsec/contrib/rack/gateway/request.rb +1 -1
  48. data/lib/datadog/appsec/processor/actions.rb +1 -1
  49. data/lib/datadog/appsec/response.rb +15 -1
  50. data/lib/datadog/appsec.rb +1 -0
  51. data/lib/datadog/core/configuration/components.rb +14 -12
  52. data/lib/datadog/core/configuration/settings.rb +54 -7
  53. data/lib/datadog/core/crashtracking/agent_base_url.rb +21 -0
  54. data/lib/datadog/core/crashtracking/component.rb +111 -0
  55. data/lib/datadog/core/crashtracking/tag_builder.rb +39 -0
  56. data/lib/datadog/core/diagnostics/environment_logger.rb +8 -11
  57. data/lib/datadog/core/telemetry/component.rb +49 -2
  58. data/lib/datadog/core/telemetry/emitter.rb +9 -11
  59. data/lib/datadog/core/telemetry/event.rb +32 -1
  60. data/lib/datadog/core/telemetry/ext.rb +1 -0
  61. data/lib/datadog/core/telemetry/http/adapters/net.rb +10 -12
  62. data/lib/datadog/core/telemetry/http/ext.rb +3 -0
  63. data/lib/datadog/core/telemetry/http/transport.rb +38 -9
  64. data/lib/datadog/core/telemetry/logging.rb +35 -0
  65. data/lib/datadog/core/utils/at_fork_monkey_patch.rb +102 -0
  66. data/lib/datadog/kit/appsec/events.rb +2 -4
  67. data/lib/datadog/opentelemetry/sdk/span_processor.rb +10 -0
  68. data/lib/datadog/opentelemetry/sdk/trace/span.rb +23 -0
  69. data/lib/datadog/profiling/collectors/code_provenance.rb +7 -7
  70. data/lib/datadog/profiling/collectors/cpu_and_wall_time_worker.rb +17 -17
  71. data/lib/datadog/profiling/collectors/idle_sampling_helper.rb +11 -13
  72. data/lib/datadog/profiling/collectors/info.rb +3 -3
  73. data/lib/datadog/profiling/collectors/thread_context.rb +4 -2
  74. data/lib/datadog/profiling/component.rb +69 -91
  75. data/lib/datadog/profiling/exporter.rb +3 -3
  76. data/lib/datadog/profiling/ext/dir_monkey_patches.rb +3 -3
  77. data/lib/datadog/profiling/ext.rb +21 -21
  78. data/lib/datadog/profiling/flush.rb +1 -1
  79. data/lib/datadog/profiling/http_transport.rb +8 -6
  80. data/lib/datadog/profiling/load_native_extension.rb +5 -5
  81. data/lib/datadog/profiling/preload.rb +1 -1
  82. data/lib/datadog/profiling/profiler.rb +5 -8
  83. data/lib/datadog/profiling/scheduler.rb +31 -25
  84. data/lib/datadog/profiling/tag_builder.rb +2 -2
  85. data/lib/datadog/profiling/tasks/exec.rb +5 -5
  86. data/lib/datadog/profiling/tasks/setup.rb +16 -35
  87. data/lib/datadog/profiling.rb +4 -5
  88. data/lib/datadog/tracing/contrib/active_record/events/sql.rb +1 -0
  89. data/lib/datadog/tracing/contrib/ext.rb +14 -0
  90. data/lib/datadog/tracing/contrib/graphql/unified_trace.rb +1 -1
  91. data/lib/datadog/tracing/contrib/graphql/unified_trace_patcher.rb +4 -1
  92. data/lib/datadog/tracing/contrib/lograge/patcher.rb +16 -0
  93. data/lib/datadog/tracing/contrib/mysql2/configuration/settings.rb +5 -0
  94. data/lib/datadog/tracing/contrib/mysql2/instrumentation.rb +17 -13
  95. data/lib/datadog/tracing/contrib/pg/configuration/settings.rb +5 -0
  96. data/lib/datadog/tracing/contrib/pg/instrumentation.rb +4 -1
  97. data/lib/datadog/tracing/contrib/propagation/sql_comment/ext.rb +28 -0
  98. data/lib/datadog/tracing/contrib/propagation/sql_comment/mode.rb +5 -1
  99. data/lib/datadog/tracing/contrib/propagation/sql_comment.rb +22 -10
  100. data/lib/datadog/tracing/contrib/trilogy/configuration/settings.rb +5 -0
  101. data/lib/datadog/tracing/contrib/trilogy/instrumentation.rb +4 -1
  102. data/lib/datadog/tracing/diagnostics/environment_logger.rb +14 -16
  103. data/lib/datadog/tracing/metadata/errors.rb +9 -1
  104. data/lib/datadog/tracing/metadata/ext.rb +4 -0
  105. data/lib/datadog/tracing/pipeline/span_filter.rb +2 -2
  106. data/lib/datadog/tracing/span.rb +9 -2
  107. data/lib/datadog/tracing/span_event.rb +41 -0
  108. data/lib/datadog/tracing/span_operation.rb +6 -2
  109. data/lib/datadog/tracing/transport/serializable_trace.rb +3 -0
  110. data/lib/datadog/version.rb +1 -1
  111. metadata +28 -10
  112. data/lib/datadog/profiling/crashtracker.rb +0 -91
  113. data/lib/datadog/profiling/ext/forking.rb +0 -98

data/ext/datadog_profiling_native_extension/collectors_thread_context.c
@@ -92,7 +92,8 @@ struct thread_context_collector_state {
   // "Update this when modifying state struct"
 
   // Required by Datadog::Profiling::Collectors::Stack as a scratch buffer during sampling
-  sampling_buffer *sampling_buffer;
+  ddog_prof_Location *locations;
+  uint16_t max_frames;
   // Hashmap <Thread Object, struct per_thread_context>
   st_table *hash_map_per_thread_context;
   // Datadog::Profiling::StackRecorder instance
@@ -138,6 +139,7 @@ struct thread_context_collector_state {
 
 // Tracks per-thread state
 struct per_thread_context {
+  sampling_buffer *sampling_buffer;
   char thread_id[THREAD_ID_LIMIT_CHARS];
   ddog_CharSlice thread_id_char_slice;
   char thread_invoke_location[THREAD_INVOKE_LOCATION_LIMIT_CHARS];
@@ -184,8 +186,9 @@ static VALUE _native_sample_after_gc(DDTRACE_UNUSED VALUE self, VALUE collector_
 void update_metrics_and_sample(
   struct thread_context_collector_state *state,
   VALUE thread_being_sampled,
-  VALUE profiler_overhead_stack_thread,
+  VALUE stack_from_thread,
   struct per_thread_context *thread_context,
+  sampling_buffer* sampling_buffer,
   long current_cpu_time_ns,
   long current_monotonic_wall_time_ns
 );
@@ -194,6 +197,7 @@ static void trigger_sample_for_thread(
   VALUE thread,
   VALUE stack_from_thread,
   struct per_thread_context *thread_context,
+  sampling_buffer* sampling_buffer,
   sample_values values,
   long current_monotonic_wall_time_ns,
   ddog_CharSlice *ruby_vm_type,
@@ -203,6 +207,7 @@ static VALUE _native_thread_list(VALUE self);
 static struct per_thread_context *get_or_create_context_for(VALUE thread, struct thread_context_collector_state *state);
 static struct per_thread_context *get_context_for(VALUE thread, struct thread_context_collector_state *state);
 static void initialize_context(VALUE thread, struct per_thread_context *thread_context, struct thread_context_collector_state *state);
+static void free_context(struct per_thread_context* thread_context);
 static VALUE _native_inspect(VALUE self, VALUE collector_instance);
 static VALUE per_thread_context_st_table_as_ruby_hash(struct thread_context_collector_state *state);
 static int per_thread_context_as_ruby_hash(st_data_t key_thread, st_data_t value_context, st_data_t result_hash);
@@ -231,6 +236,7 @@ static void ddtrace_otel_trace_identifiers_for(
   VALUE active_span,
   VALUE otel_values
 );
+static VALUE _native_sample_skipped_allocation_samples(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE skipped_samples);
 
 void collectors_thread_context_init(VALUE profiling_module) {
   VALUE collectors_module = rb_define_module_under(profiling_module, "Collectors");
@@ -261,6 +267,7 @@ void collectors_thread_context_init(VALUE profiling_module) {
   rb_define_singleton_method(testing_module, "_native_stats", _native_stats, 1);
   rb_define_singleton_method(testing_module, "_native_gc_tracking", _native_gc_tracking, 1);
   rb_define_singleton_method(testing_module, "_native_new_empty_thread", _native_new_empty_thread, 0);
+  rb_define_singleton_method(testing_module, "_native_sample_skipped_allocation_samples", _native_sample_skipped_allocation_samples, 2);
 
   at_active_span_id = rb_intern_const("@active_span");
   at_active_trace_id = rb_intern_const("@active_trace");
@@ -308,7 +315,7 @@ static void thread_context_collector_typed_data_free(void *state_ptr) {
 
   // Important: Remember that we're only guaranteed to see here what's been set in _native_new, aka
   // pointers that have been set NULL there may still be NULL here.
-  if (state->sampling_buffer != NULL) sampling_buffer_free(state->sampling_buffer);
+  if (state->locations != NULL) ruby_xfree(state->locations);
 
   // Free each entry in the map
   st_foreach(state->hash_map_per_thread_context, hash_map_per_thread_context_free_values, 0 /* unused */);
@@ -327,8 +334,8 @@ static int hash_map_per_thread_context_mark(st_data_t key_thread, DDTRACE_UNUSED
 
 // Used to clear each of the per_thread_contexts inside the hash_map_per_thread_context
 static int hash_map_per_thread_context_free_values(DDTRACE_UNUSED st_data_t _thread, st_data_t value_per_thread_context, DDTRACE_UNUSED st_data_t _argument) {
-  struct per_thread_context *per_thread_context = (struct per_thread_context*) value_per_thread_context;
-  ruby_xfree(per_thread_context);
+  struct per_thread_context *thread_context = (struct per_thread_context*) value_per_thread_context;
+  free_context(thread_context);
   return ST_CONTINUE;
 }
 
@@ -339,23 +346,35 @@ static VALUE _native_new(VALUE klass) {
   // being leaked.
 
   // Update this when modifying state struct
-  state->sampling_buffer = NULL;
+  state->locations = NULL;
+  state->max_frames = 0;
   state->hash_map_per_thread_context =
    // "numtable" is an awful name, but TL;DR it's what should be used when keys are `VALUE`s.
    st_init_numtable();
   state->recorder_instance = Qnil;
   state->tracer_context_key = MISSING_TRACER_CONTEXT_KEY;
-  state->thread_list_buffer = rb_ary_new();
+  VALUE thread_list_buffer = rb_ary_new();
+  state->thread_list_buffer = thread_list_buffer;
   state->endpoint_collection_enabled = true;
   state->timeline_enabled = true;
   state->allocation_type_enabled = true;
   state->time_converter_state = (monotonic_to_system_epoch_state) MONOTONIC_TO_SYSTEM_EPOCH_INITIALIZER;
-  state->main_thread = rb_thread_main();
+  VALUE main_thread = rb_thread_main();
+  state->main_thread = main_thread;
   state->otel_current_span_key = Qnil;
   state->gc_tracking.wall_time_at_previous_gc_ns = INVALID_TIME;
   state->gc_tracking.wall_time_at_last_flushed_gc_event_ns = 0;
 
-  return TypedData_Wrap_Struct(klass, &thread_context_collector_typed_data, state);
+  // Note: Remember to keep any new allocated objects that get stored in the state also on the stack + mark them with
+  // RB_GC_GUARD -- otherwise it's possible for a GC to run and
+  // since the instance representing the state does not yet exist, such objects will not get marked.
+
+  VALUE instance = TypedData_Wrap_Struct(klass, &thread_context_collector_typed_data, state);
+
+  RB_GC_GUARD(thread_list_buffer);
+  RB_GC_GUARD(main_thread); // Arguably not needed, but perhaps can be move in some future Ruby release?
+
+  return instance;
 }
 
 static VALUE _native_initialize(
@@ -375,11 +394,9 @@ static VALUE _native_initialize(
   struct thread_context_collector_state *state;
   TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state);
 
-  int max_frames_requested = NUM2INT(max_frames);
-  if (max_frames_requested < 0) rb_raise(rb_eArgError, "Invalid max_frames: value must not be negative");
-
   // Update this when modifying state struct
-  state->sampling_buffer = sampling_buffer_new(max_frames_requested);
+  state->max_frames = sampling_buffer_check_max_frames(NUM2INT(max_frames));
+  state->locations = ruby_xcalloc(state->max_frames, sizeof(ddog_prof_Location));
   // hash_map_per_thread_context is already initialized, nothing to do here
   state->recorder_instance = enforce_recorder_instance(recorder_instance);
   state->endpoint_collection_enabled = (endpoint_collection_enabled == Qtrue);
@@ -461,6 +478,7 @@ void thread_context_collector_sample(VALUE self_instance, long current_monotonic
       /* thread_being_sampled: */ thread,
       /* stack_from_thread: */ thread,
       thread_context,
+      thread_context->sampling_buffer,
       current_cpu_time_ns,
       current_monotonic_wall_time_ns
     );
@@ -477,6 +495,8 @@ void thread_context_collector_sample(VALUE self_instance, long current_monotonic
     /* thread_being_sampled: */ current_thread,
     /* stack_from_thread: */ profiler_overhead_stack_thread,
     current_thread_context,
+    // Here we use the overhead thread's sampling buffer so as to not invalidate the cache in the buffer of the thread being sampled
+    get_or_create_context_for(profiler_overhead_stack_thread, state)->sampling_buffer,
     cpu_time_now_ns(current_thread_context),
     monotonic_wall_time_now_ns(RAISE_ON_FAILURE)
   );
@@ -487,6 +507,7 @@ void update_metrics_and_sample(
   VALUE thread_being_sampled,
   VALUE stack_from_thread, // This can be different when attributing profiler overhead using a different stack
   struct per_thread_context *thread_context,
+  sampling_buffer* sampling_buffer,
   long current_cpu_time_ns,
   long current_monotonic_wall_time_ns
 ) {
@@ -512,6 +533,7 @@ void update_metrics_and_sample(
     thread_being_sampled,
     stack_from_thread,
     thread_context,
+    sampling_buffer,
    (sample_values) {.cpu_time_ns = cpu_time_elapsed_ns, .cpu_or_wall_samples = 1, .wall_time_ns = wall_time_elapsed_ns},
     current_monotonic_wall_time_ns,
     NULL,
@@ -661,7 +683,6 @@ VALUE thread_context_collector_sample_after_gc(VALUE self_instance) {
   }
 
   record_placeholder_stack(
-    state->sampling_buffer,
     state->recorder_instance,
     (sample_values) {
       // This event gets both a regular cpu/wall-time duration, as a normal cpu/wall-time sample would, as well as a
@@ -692,6 +713,7 @@ static void trigger_sample_for_thread(
   VALUE thread,
   VALUE stack_from_thread, // This can be different when attributing profiler overhead using a different stack
   struct per_thread_context *thread_context,
+  sampling_buffer* sampling_buffer,
   sample_values values,
   long current_monotonic_wall_time_ns,
   // These two labels are only used for allocation profiling; @ivoanjo: may want to refactor this at some point?
@@ -812,7 +834,7 @@ static void trigger_sample_for_thread(
 
   sample_thread(
     stack_from_thread,
-    state->sampling_buffer,
+    sampling_buffer,
     state->recorder_instance,
     values,
     (sample_labels) {.labels = slice_labels, .state_label = state_label, .end_timestamp_ns = end_timestamp_ns}
@@ -875,6 +897,8 @@ static bool is_logging_gem_monkey_patch(VALUE invoke_file_location) {
 }
 
 static void initialize_context(VALUE thread, struct per_thread_context *thread_context, struct thread_context_collector_state *state) {
+  thread_context->sampling_buffer = sampling_buffer_new(state->max_frames, state->locations);
+
   snprintf(thread_context->thread_id, THREAD_ID_LIMIT_CHARS, "%"PRIu64" (%lu)", native_thread_id_for(thread), (unsigned long) thread_id_for(thread));
   thread_context->thread_id_char_slice = (ddog_CharSlice) {.ptr = thread_context->thread_id, .len = strlen(thread_context->thread_id)};
 
@@ -915,6 +939,11 @@ static void initialize_context(VALUE thread, struct per_thread_c
   thread_context->gc_tracking.wall_time_at_start_ns = INVALID_TIME;
 }
 
+static void free_context(struct per_thread_context* thread_context) {
+  sampling_buffer_free(thread_context->sampling_buffer);
+  ruby_xfree(thread_context);
+}
+
 static VALUE _native_inspect(DDTRACE_UNUSED VALUE _self, VALUE collector_instance) {
   struct thread_context_collector_state *state;
   TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state);
@@ -922,6 +951,7 @@ static VALUE _native_inspect(DDTRACE_UNUSED VALUE _self, VALUE collector_instanc
   VALUE result = rb_str_new2(" (native state)");
 
   // Update this when modifying state struct
+  rb_str_concat(result, rb_sprintf(" max_frames=%d", state->max_frames));
   rb_str_concat(result, rb_sprintf(" hash_map_per_thread_context=%"PRIsVALUE, per_thread_context_st_table_as_ruby_hash(state)));
   rb_str_concat(result, rb_sprintf(" recorder_instance=%"PRIsVALUE, state->recorder_instance));
   VALUE tracer_context_key = state->tracer_context_key == MISSING_TRACER_CONTEXT_KEY ? Qnil : ID2SYM(state->tracer_context_key);
@@ -1006,7 +1036,7 @@ static int remove_if_dead_thread(st_data_t key_thread, st_data_t value_context,
 
   if (is_thread_alive(thread)) return ST_CONTINUE;
 
-  ruby_xfree(thread_context);
+  free_context(thread_context);
   return ST_DELETE;
 }
 
@@ -1257,7 +1287,7 @@ void thread_context_collector_sample_allocation(VALUE self_instance, unsigned in
   // Thus, we need to make sure there's actually a class before getting its name.
 
   if (klass != 0) {
-    const char *name = rb_obj_classname(new_object);
+    const char *name = rb_class2name(klass);
     size_t name_length = name != NULL ? strlen(name) : 0;
 
     if (name_length > 0) {
@@ -1285,12 +1315,15 @@
 
   track_object(state->recorder_instance, new_object, sample_weight, optional_class_name);
 
+  struct per_thread_context *thread_context = get_or_create_context_for(current_thread, state);
+
   trigger_sample_for_thread(
     state,
     /* thread: */ current_thread,
     /* stack_from_thread: */ current_thread,
-    get_or_create_context_for(current_thread, state),
-    (sample_values) {.alloc_samples = sample_weight, .alloc_samples_unscaled = 1},
+    thread_context,
+    thread_context->sampling_buffer,
+    (sample_values) {.alloc_samples = sample_weight, .alloc_samples_unscaled = 1, .heap_sample = true},
     INVALID_TIME, // For now we're not collecting timestamps for allocation events, as per profiling team internal discussions
     &ruby_vm_type,
     optional_class_name
@@ -1400,3 +1433,32 @@ static void ddtrace_otel_trace_identifiers_for(
   *active_trace = current_trace;
   *numeric_span_id = resolved_numeric_span_id;
 }
+
+void thread_context_collector_sample_skipped_allocation_samples(VALUE self_instance, unsigned int skipped_samples) {
+  struct thread_context_collector_state *state;
+  TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state);
+
+  ddog_prof_Label labels[] = {
+    // Providing .num = 0 should not be needed but the tracer-2.7 docker image ships a buggy gcc that complains about this
+    {.key = DDOG_CHARSLICE_C("thread id"), .str = DDOG_CHARSLICE_C("SS"), .num = 0},
+    {.key = DDOG_CHARSLICE_C("thread name"), .str = DDOG_CHARSLICE_C("Skipped Samples"), .num = 0},
+    {.key = DDOG_CHARSLICE_C("allocation class"), .str = DDOG_CHARSLICE_C("(Skipped Samples)"), .num = 0},
+  };
+  ddog_prof_Slice_Label slice_labels = {.ptr = labels, .len = sizeof(labels) / sizeof(labels[0])};
+
+  record_placeholder_stack(
+    state->recorder_instance,
+    (sample_values) {.alloc_samples = skipped_samples},
+    (sample_labels) {
+      .labels = slice_labels,
+      .state_label = NULL,
+      .end_timestamp_ns = 0, // For now we're not collecting timestamps for allocation events
+    },
+    DDOG_CHARSLICE_C("Skipped Samples")
+  );
+}
+
+static VALUE _native_sample_skipped_allocation_samples(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE skipped_samples) {
+  thread_context_collector_sample_skipped_allocation_samples(collector_instance, NUM2UINT(skipped_samples));
+  return Qtrue;
+}

data/ext/datadog_profiling_native_extension/collectors_thread_context.h
@@ -9,6 +9,7 @@ void thread_context_collector_sample(
   VALUE profiler_overhead_stack_thread
 );
 void thread_context_collector_sample_allocation(VALUE self_instance, unsigned int sample_weight, VALUE new_object);
+void thread_context_collector_sample_skipped_allocation_samples(VALUE self_instance, unsigned int skipped_samples);
 VALUE thread_context_collector_sample_after_gc(VALUE self_instance);
 void thread_context_collector_on_gc_start(VALUE self_instance);
 bool thread_context_collector_on_gc_finish(VALUE self_instance);
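
For reference, a minimal sketch of how a caller could use the API added above; the wrapper function and the point at which skipped samples get counted are assumptions for illustration, not code from this release:

#include "collectors_thread_context.h"

// Hypothetical helper: report allocation samples that were skipped (e.g. when sampling is throttled)
// as a single "Skipped Samples" placeholder, using the function declared in the header above.
static void example_flush_skipped_allocation_samples(VALUE collector_instance, unsigned int skipped_so_far) {
  if (skipped_so_far == 0) return; // nothing was skipped, nothing to record
  thread_context_collector_sample_skipped_allocation_samples(collector_instance, skipped_so_far);
}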

data/ext/*/datadog_ruby_common.c (new file; identical copy shipped in both native extensions)
@@ -0,0 +1,110 @@
+#include "datadog_ruby_common.h"
+
+// IMPORTANT: Currently this file is copy-pasted between extensions. Make sure to update all versions when doing any change!
+
+void raise_unexpected_type(VALUE value, const char *value_name, const char *type_name, const char *file, int line, const char* function_name) {
+  rb_exc_raise(
+    rb_exc_new_str(
+      rb_eTypeError,
+      rb_sprintf("wrong argument %"PRIsVALUE" for '%s' (expected a %s) at %s:%d:in `%s'",
+        rb_inspect(value),
+        value_name,
+        type_name,
+        file,
+        line,
+        function_name
+      )
+    )
+  );
+}
+
+VALUE datadog_gem_version(void) {
+  VALUE ddtrace_module = rb_const_get(rb_cObject, rb_intern("Datadog"));
+  ENFORCE_TYPE(ddtrace_module, T_MODULE);
+  VALUE version_module = rb_const_get(ddtrace_module, rb_intern("VERSION"));
+  ENFORCE_TYPE(version_module, T_MODULE);
+  VALUE version_string = rb_const_get(version_module, rb_intern("STRING"));
+  ENFORCE_TYPE(version_string, T_STRING);
+  return version_string;
+}
+
+__attribute__((warn_unused_result))
+ddog_prof_Endpoint endpoint_from(VALUE exporter_configuration) {
+  ENFORCE_TYPE(exporter_configuration, T_ARRAY);
+
+  VALUE exporter_working_mode = rb_ary_entry(exporter_configuration, 0);
+  ENFORCE_TYPE(exporter_working_mode, T_SYMBOL);
+  ID working_mode = SYM2ID(exporter_working_mode);
+
+  ID agentless_id = rb_intern("agentless");
+  ID agent_id = rb_intern("agent");
+
+  if (working_mode != agentless_id && working_mode != agent_id) {
+    rb_raise(rb_eArgError, "Failed to initialize transport: Unexpected working mode, expected :agentless or :agent");
+  }
+
+  if (working_mode == agentless_id) {
+    VALUE site = rb_ary_entry(exporter_configuration, 1);
+    VALUE api_key = rb_ary_entry(exporter_configuration, 2);
+
+    return ddog_prof_Endpoint_agentless(char_slice_from_ruby_string(site), char_slice_from_ruby_string(api_key));
+  } else { // agent_id
+    VALUE base_url = rb_ary_entry(exporter_configuration, 1);
+
+    return ddog_prof_Endpoint_agent(char_slice_from_ruby_string(base_url));
+  }
+}
+
+static VALUE log_failure_to_process_tag(VALUE err_details) {
+  VALUE datadog_module = rb_const_get(rb_cObject, rb_intern("Datadog"));
+  VALUE logger = rb_funcall(datadog_module, rb_intern("logger"), 0);
+
+  return rb_funcall(logger, rb_intern("warn"), 1, rb_sprintf("Failed to convert tag: %"PRIsVALUE, err_details));
+}
+
+__attribute__((warn_unused_result))
+ddog_Vec_Tag convert_tags(VALUE tags_as_array) {
+  ENFORCE_TYPE(tags_as_array, T_ARRAY);
+
+  long tags_count = RARRAY_LEN(tags_as_array);
+  ddog_Vec_Tag tags = ddog_Vec_Tag_new();
+
+  for (long i = 0; i < tags_count; i++) {
+    VALUE name_value_pair = rb_ary_entry(tags_as_array, i);
+
+    if (!RB_TYPE_P(name_value_pair, T_ARRAY)) {
+      ddog_Vec_Tag_drop(tags);
+      ENFORCE_TYPE(name_value_pair, T_ARRAY);
+    }
+
+    // Note: We can index the array without checking its size first because rb_ary_entry returns Qnil if out of bounds
+    VALUE tag_name = rb_ary_entry(name_value_pair, 0);
+    VALUE tag_value = rb_ary_entry(name_value_pair, 1);
+
+    if (!(RB_TYPE_P(tag_name, T_STRING) && RB_TYPE_P(tag_value, T_STRING))) {
+      ddog_Vec_Tag_drop(tags);
+      ENFORCE_TYPE(tag_name, T_STRING);
+      ENFORCE_TYPE(tag_value, T_STRING);
+    }
+
+    ddog_Vec_Tag_PushResult push_result =
+      ddog_Vec_Tag_push(&tags, char_slice_from_ruby_string(tag_name), char_slice_from_ruby_string(tag_value));
+
+    if (push_result.tag == DDOG_VEC_TAG_PUSH_RESULT_ERR) {
+      // libdatadog validates tags and may catch invalid tags that ddtrace didn't actually catch.
+      // We warn users about such tags, and then just ignore them.
+
+      int exception_state;
+      rb_protect(log_failure_to_process_tag, get_error_details_and_drop(&push_result.err), &exception_state);
+
+      // Since we are calling into Ruby code, it may raise an exception. Ensure that dynamically-allocated tags
+      // get cleaned before propagating the exception.
+      if (exception_state) {
+        ddog_Vec_Tag_drop(tags);
+        rb_jump_tag(exception_state); // "Re-raise" exception
+      }
+    }
+  }
+
+  return tags;
+}
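
As a rough usage sketch for the two converters above (the wrapper function and variable names are hypothetical; only endpoint_from, convert_tags and ddog_Vec_Tag_drop come from the code in this file):

#include "datadog_ruby_common.h"

// Expected input shapes, per the checks above:
//   exporter_configuration: [:agent, base_url] or [:agentless, site, api_key]
//   tags_as_array: [[name, value], ...] where both name and value are Strings
static VALUE example_build_exporter_inputs(VALUE exporter_configuration, VALUE tags_as_array) {
  ddog_prof_Endpoint endpoint = endpoint_from(exporter_configuration);
  ddog_Vec_Tag tags = convert_tags(tags_as_array);

  // ...hand endpoint and tags to the libdatadog exporter here...

  ddog_Vec_Tag_drop(tags); // the caller owns the returned vector and needs to drop it when done
  (void) endpoint;
  return Qnil;
}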

data/ext/*/datadog_ruby_common.h (new file; identical copy shipped in both native extensions)
@@ -0,0 +1,57 @@
+#pragma once
+
+// IMPORTANT: Currently this file is copy-pasted between extensions. Make sure to update all versions when doing any change!
+
+#include <ruby.h>
+#include <datadog/profiling.h>
+
+// Used to mark symbols to be exported to the outside of the extension.
+// Consider very carefully before tagging a function with this.
+#define DDTRACE_EXPORT __attribute__ ((visibility ("default")))
+
+// Used to mark function arguments that are deliberately left unused
+#ifdef __GNUC__
+  #define DDTRACE_UNUSED __attribute__((unused))
+#else
+  #define DDTRACE_UNUSED
+#endif
+
+#define ADD_QUOTES_HELPER(x) #x
+#define ADD_QUOTES(x) ADD_QUOTES_HELPER(x)
+
+// Ruby has a Check_Type(value, type) that is roughly equivalent to this BUT Ruby's version is rather cryptic when it fails
+// e.g. "wrong argument type nil (expected String)". This is a replacement that prints more information to help debugging.
+#define ENFORCE_TYPE(value, type) \
+  { if (RB_UNLIKELY(!RB_TYPE_P(value, type))) raise_unexpected_type(value, ADD_QUOTES(value), ADD_QUOTES(type), __FILE__, __LINE__, __func__); }
+
+#define ENFORCE_BOOLEAN(value) \
+  { if (RB_UNLIKELY(value != Qtrue && value != Qfalse)) raise_unexpected_type(value, ADD_QUOTES(value), "true or false", __FILE__, __LINE__, __func__); }
+
+// Called by ENFORCE_TYPE; should not be used directly
+NORETURN(void raise_unexpected_type(VALUE value, const char *value_name, const char *type_name, const char *file, int line, const char* function_name));
+
+// Helper to retrieve Datadog::VERSION::STRING
+VALUE datadog_gem_version(void);
+
+static inline ddog_CharSlice char_slice_from_ruby_string(VALUE string) {
+  ENFORCE_TYPE(string, T_STRING);
+  ddog_CharSlice char_slice = {.ptr = RSTRING_PTR(string), .len = RSTRING_LEN(string)};
+  return char_slice;
+}
+
+__attribute__((warn_unused_result))
+ddog_prof_Endpoint endpoint_from(VALUE exporter_configuration);
+
+__attribute__((warn_unused_result))
+ddog_Vec_Tag convert_tags(VALUE tags_as_array);
+
+static inline VALUE ruby_string_from_error(const ddog_Error *error) {
+  ddog_CharSlice char_slice = ddog_Error_message(error);
+  return rb_str_new(char_slice.ptr, char_slice.len);
+}
+
+static inline VALUE get_error_details_and_drop(ddog_Error *error) {
+  VALUE result = ruby_string_from_error(error);
+  ddog_Error_drop(error);
+  return result;
+}
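
And a small sketch of how the macros and inline helpers above are meant to be combined inside a native method (the method itself is hypothetical):

#include "datadog_ruby_common.h"

static VALUE _native_example(DDTRACE_UNUSED VALUE _self, VALUE name, VALUE enabled) {
  ENFORCE_TYPE(name, T_STRING); // raises a TypeError that includes the value, file, line and function name
  ENFORCE_BOOLEAN(enabled);

  // Borrows the String's bytes; the slice is only valid while `name` stays alive and unmodified
  ddog_CharSlice name_slice = char_slice_from_ruby_string(name);

  return rb_str_new(name_slice.ptr, name_slice.len);
}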