datadog 2.3.0 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (173) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +64 -2
  3. data/ext/datadog_profiling_loader/datadog_profiling_loader.c +9 -1
  4. data/ext/datadog_profiling_loader/extconf.rb +10 -22
  5. data/ext/datadog_profiling_native_extension/NativeExtensionDesign.md +3 -3
  6. data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +198 -41
  7. data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c +4 -2
  8. data/ext/datadog_profiling_native_extension/collectors_stack.c +89 -46
  9. data/ext/datadog_profiling_native_extension/collectors_thread_context.c +645 -107
  10. data/ext/datadog_profiling_native_extension/collectors_thread_context.h +15 -1
  11. data/ext/datadog_profiling_native_extension/datadog_ruby_common.c +0 -27
  12. data/ext/datadog_profiling_native_extension/datadog_ruby_common.h +0 -4
  13. data/ext/datadog_profiling_native_extension/extconf.rb +42 -25
  14. data/ext/datadog_profiling_native_extension/gvl_profiling_helper.c +50 -0
  15. data/ext/datadog_profiling_native_extension/gvl_profiling_helper.h +75 -0
  16. data/ext/datadog_profiling_native_extension/heap_recorder.c +194 -34
  17. data/ext/datadog_profiling_native_extension/heap_recorder.h +11 -0
  18. data/ext/datadog_profiling_native_extension/http_transport.c +38 -6
  19. data/ext/datadog_profiling_native_extension/native_extension_helpers.rb +1 -1
  20. data/ext/datadog_profiling_native_extension/private_vm_api_access.c +53 -2
  21. data/ext/datadog_profiling_native_extension/private_vm_api_access.h +3 -0
  22. data/ext/datadog_profiling_native_extension/profiling.c +1 -1
  23. data/ext/datadog_profiling_native_extension/ruby_helpers.c +14 -11
  24. data/ext/datadog_profiling_native_extension/stack_recorder.c +58 -22
  25. data/ext/datadog_profiling_native_extension/stack_recorder.h +2 -0
  26. data/ext/libdatadog_api/crashtracker.c +20 -18
  27. data/ext/libdatadog_api/datadog_ruby_common.c +0 -27
  28. data/ext/libdatadog_api/datadog_ruby_common.h +0 -4
  29. data/ext/libdatadog_extconf_helpers.rb +1 -1
  30. data/lib/datadog/appsec/assets/waf_rules/recommended.json +2184 -108
  31. data/lib/datadog/appsec/assets/waf_rules/strict.json +1430 -2
  32. data/lib/datadog/appsec/component.rb +29 -8
  33. data/lib/datadog/appsec/configuration/settings.rb +10 -2
  34. data/lib/datadog/appsec/contrib/devise/patcher/authenticatable_patch.rb +1 -0
  35. data/lib/datadog/appsec/contrib/devise/patcher/rememberable_patch.rb +21 -0
  36. data/lib/datadog/appsec/contrib/devise/patcher.rb +12 -2
  37. data/lib/datadog/appsec/contrib/graphql/appsec_trace.rb +0 -14
  38. data/lib/datadog/appsec/contrib/graphql/gateway/multiplex.rb +67 -31
  39. data/lib/datadog/appsec/contrib/graphql/gateway/watcher.rb +14 -15
  40. data/lib/datadog/appsec/contrib/graphql/integration.rb +14 -1
  41. data/lib/datadog/appsec/contrib/graphql/reactive/multiplex.rb +7 -20
  42. data/lib/datadog/appsec/contrib/rack/gateway/request.rb +2 -5
  43. data/lib/datadog/appsec/contrib/rack/gateway/watcher.rb +9 -15
  44. data/lib/datadog/appsec/contrib/rack/reactive/request.rb +6 -18
  45. data/lib/datadog/appsec/contrib/rack/reactive/request_body.rb +7 -20
  46. data/lib/datadog/appsec/contrib/rack/reactive/response.rb +5 -18
  47. data/lib/datadog/appsec/contrib/rack/request_middleware.rb +3 -1
  48. data/lib/datadog/appsec/contrib/rails/gateway/watcher.rb +3 -5
  49. data/lib/datadog/appsec/contrib/rails/reactive/action.rb +5 -18
  50. data/lib/datadog/appsec/contrib/sinatra/gateway/watcher.rb +6 -10
  51. data/lib/datadog/appsec/contrib/sinatra/reactive/routed.rb +7 -20
  52. data/lib/datadog/appsec/event.rb +25 -1
  53. data/lib/datadog/appsec/ext.rb +4 -0
  54. data/lib/datadog/appsec/monitor/gateway/watcher.rb +3 -5
  55. data/lib/datadog/appsec/monitor/reactive/set_user.rb +7 -20
  56. data/lib/datadog/appsec/processor/context.rb +109 -0
  57. data/lib/datadog/appsec/processor/rule_loader.rb +3 -1
  58. data/lib/datadog/appsec/processor/rule_merger.rb +33 -15
  59. data/lib/datadog/appsec/processor.rb +42 -107
  60. data/lib/datadog/appsec/rate_limiter.rb +25 -40
  61. data/lib/datadog/appsec/remote.rb +7 -3
  62. data/lib/datadog/appsec/scope.rb +1 -4
  63. data/lib/datadog/appsec/utils/trace_operation.rb +15 -0
  64. data/lib/datadog/appsec/utils.rb +2 -0
  65. data/lib/datadog/appsec.rb +3 -2
  66. data/lib/datadog/core/configuration/agent_settings_resolver.rb +26 -25
  67. data/lib/datadog/core/configuration/components.rb +4 -3
  68. data/lib/datadog/core/configuration/settings.rb +96 -5
  69. data/lib/datadog/core/configuration.rb +1 -3
  70. data/lib/datadog/core/crashtracking/component.rb +9 -6
  71. data/lib/datadog/core/environment/execution.rb +5 -5
  72. data/lib/datadog/core/environment/yjit.rb +5 -0
  73. data/lib/datadog/core/metrics/client.rb +7 -0
  74. data/lib/datadog/core/rate_limiter.rb +183 -0
  75. data/lib/datadog/core/remote/client/capabilities.rb +4 -3
  76. data/lib/datadog/core/remote/component.rb +4 -2
  77. data/lib/datadog/core/remote/negotiation.rb +4 -4
  78. data/lib/datadog/core/remote/tie.rb +2 -0
  79. data/lib/datadog/core/remote/transport/http.rb +5 -0
  80. data/lib/datadog/core/remote/worker.rb +1 -1
  81. data/lib/datadog/core/runtime/ext.rb +1 -0
  82. data/lib/datadog/core/runtime/metrics.rb +5 -1
  83. data/lib/datadog/core/semaphore.rb +35 -0
  84. data/lib/datadog/core/telemetry/component.rb +2 -0
  85. data/lib/datadog/core/telemetry/event.rb +12 -7
  86. data/lib/datadog/core/telemetry/logger.rb +51 -0
  87. data/lib/datadog/core/telemetry/logging.rb +50 -14
  88. data/lib/datadog/core/telemetry/request.rb +13 -1
  89. data/lib/datadog/core/transport/ext.rb +1 -0
  90. data/lib/datadog/core/utils/time.rb +12 -0
  91. data/lib/datadog/core/workers/async.rb +1 -1
  92. data/lib/datadog/di/code_tracker.rb +166 -0
  93. data/lib/datadog/di/configuration/settings.rb +163 -0
  94. data/lib/datadog/di/configuration.rb +11 -0
  95. data/lib/datadog/di/error.rb +31 -0
  96. data/lib/datadog/di/extensions.rb +16 -0
  97. data/lib/datadog/di/instrumenter.rb +301 -0
  98. data/lib/datadog/di/probe.rb +162 -0
  99. data/lib/datadog/di/probe_builder.rb +47 -0
  100. data/lib/datadog/di/probe_notification_builder.rb +207 -0
  101. data/lib/datadog/di/probe_notifier_worker.rb +244 -0
  102. data/lib/datadog/di/redactor.rb +188 -0
  103. data/lib/datadog/di/serializer.rb +215 -0
  104. data/lib/datadog/di/transport.rb +67 -0
  105. data/lib/datadog/di/utils.rb +39 -0
  106. data/lib/datadog/di.rb +57 -0
  107. data/lib/datadog/opentelemetry/sdk/propagator.rb +2 -0
  108. data/lib/datadog/profiling/collectors/cpu_and_wall_time_worker.rb +12 -10
  109. data/lib/datadog/profiling/collectors/info.rb +12 -3
  110. data/lib/datadog/profiling/collectors/thread_context.rb +32 -8
  111. data/lib/datadog/profiling/component.rb +21 -4
  112. data/lib/datadog/profiling/http_transport.rb +6 -1
  113. data/lib/datadog/profiling/scheduler.rb +2 -0
  114. data/lib/datadog/profiling/stack_recorder.rb +40 -9
  115. data/lib/datadog/single_step_instrument.rb +12 -0
  116. data/lib/datadog/tracing/component.rb +13 -0
  117. data/lib/datadog/tracing/contrib/action_cable/instrumentation.rb +8 -12
  118. data/lib/datadog/tracing/contrib/action_pack/action_controller/instrumentation.rb +5 -0
  119. data/lib/datadog/tracing/contrib/action_pack/action_dispatch/instrumentation.rb +78 -0
  120. data/lib/datadog/tracing/contrib/action_pack/action_dispatch/patcher.rb +33 -0
  121. data/lib/datadog/tracing/contrib/action_pack/patcher.rb +2 -0
  122. data/lib/datadog/tracing/contrib/active_record/configuration/resolver.rb +4 -0
  123. data/lib/datadog/tracing/contrib/active_record/events/instantiation.rb +3 -1
  124. data/lib/datadog/tracing/contrib/active_record/events/sql.rb +3 -1
  125. data/lib/datadog/tracing/contrib/active_support/cache/events/cache.rb +5 -1
  126. data/lib/datadog/tracing/contrib/aws/instrumentation.rb +5 -0
  127. data/lib/datadog/tracing/contrib/elasticsearch/patcher.rb +6 -1
  128. data/lib/datadog/tracing/contrib/ethon/easy_patch.rb +4 -0
  129. data/lib/datadog/tracing/contrib/excon/middleware.rb +3 -0
  130. data/lib/datadog/tracing/contrib/faraday/middleware.rb +12 -0
  131. data/lib/datadog/tracing/contrib/grape/endpoint.rb +24 -2
  132. data/lib/datadog/tracing/contrib/graphql/patcher.rb +9 -12
  133. data/lib/datadog/tracing/contrib/graphql/trace_patcher.rb +3 -3
  134. data/lib/datadog/tracing/contrib/graphql/tracing_patcher.rb +3 -3
  135. data/lib/datadog/tracing/contrib/graphql/unified_trace.rb +13 -9
  136. data/lib/datadog/tracing/contrib/graphql/unified_trace_patcher.rb +6 -3
  137. data/lib/datadog/tracing/contrib/http/circuit_breaker.rb +9 -0
  138. data/lib/datadog/tracing/contrib/http/instrumentation.rb +22 -15
  139. data/lib/datadog/tracing/contrib/httpclient/instrumentation.rb +10 -5
  140. data/lib/datadog/tracing/contrib/httpclient/patcher.rb +1 -14
  141. data/lib/datadog/tracing/contrib/httprb/instrumentation.rb +9 -0
  142. data/lib/datadog/tracing/contrib/httprb/patcher.rb +1 -14
  143. data/lib/datadog/tracing/contrib/lograge/patcher.rb +1 -2
  144. data/lib/datadog/tracing/contrib/mongodb/subscribers.rb +2 -0
  145. data/lib/datadog/tracing/contrib/opensearch/patcher.rb +13 -6
  146. data/lib/datadog/tracing/contrib/patcher.rb +2 -1
  147. data/lib/datadog/tracing/contrib/presto/patcher.rb +1 -13
  148. data/lib/datadog/tracing/contrib/rack/middlewares.rb +27 -0
  149. data/lib/datadog/tracing/contrib/rails/runner.rb +1 -1
  150. data/lib/datadog/tracing/contrib/redis/tags.rb +4 -0
  151. data/lib/datadog/tracing/contrib/rest_client/request_patch.rb +3 -0
  152. data/lib/datadog/tracing/contrib/sinatra/tracer.rb +4 -0
  153. data/lib/datadog/tracing/contrib/stripe/request.rb +3 -2
  154. data/lib/datadog/tracing/distributed/propagation.rb +7 -0
  155. data/lib/datadog/tracing/metadata/ext.rb +2 -0
  156. data/lib/datadog/tracing/remote.rb +5 -2
  157. data/lib/datadog/tracing/sampling/matcher.rb +6 -1
  158. data/lib/datadog/tracing/sampling/rate_sampler.rb +1 -1
  159. data/lib/datadog/tracing/sampling/rule.rb +2 -0
  160. data/lib/datadog/tracing/sampling/rule_sampler.rb +15 -9
  161. data/lib/datadog/tracing/sampling/span/ext.rb +1 -1
  162. data/lib/datadog/tracing/sampling/span/rule.rb +2 -2
  163. data/lib/datadog/tracing/trace_operation.rb +26 -2
  164. data/lib/datadog/tracing/tracer.rb +29 -22
  165. data/lib/datadog/tracing/transport/http/client.rb +1 -0
  166. data/lib/datadog/tracing/transport/http.rb +4 -0
  167. data/lib/datadog/tracing/transport/io/client.rb +1 -0
  168. data/lib/datadog/tracing/workers/trace_writer.rb +1 -1
  169. data/lib/datadog/tracing/workers.rb +2 -2
  170. data/lib/datadog/tracing/writer.rb +26 -28
  171. data/lib/datadog/version.rb +1 -1
  172. metadata +40 -15
  173. data/lib/datadog/tracing/sampling/rate_limiter.rb +0 -185
@@ -5,6 +5,7 @@
5
5
  #include <errno.h>
6
6
  #include "collectors_stack.h"
7
7
  #include "libdatadog_helpers.h"
8
+ #include "time_helpers.h"
8
9
 
9
10
  #if (defined(HAVE_WORKING_RB_GC_FORCE_RECYCLE) && ! defined(NO_SEEN_OBJ_ID_FLAG))
10
11
  #define CAN_APPLY_GC_FORCE_RECYCLE_BUG_WORKAROUND
@@ -16,6 +17,16 @@
16
17
  // relevant for heap profiles as the great majority should be trivially reclaimed
17
18
  // during the next GC.
18
19
  #define ITERATION_MIN_AGE 1
20
+ // Copied from https://github.com/ruby/ruby/blob/15135030e5808d527325feaaaf04caeb1b44f8b5/gc/default.c#L725C1-L725C27
21
+ // to align with Ruby's GC definition of what constitutes an old object which are only
22
+ // supposed to be reclaimed in major GCs.
23
+ #define OLD_AGE 3
24
+ // Wait at least 2 seconds before asking heap recorder to explicitly update itself. Heap recorder
25
+ // data will only materialize at profile serialization time but updating often helps keep our
26
+ // heap tracking data small since every GC should get rid of a bunch of temporary objects. The
27
+ // more we clean up before profile flush, the less work we'll have to do all-at-once when preparing
28
+ // to flush heap data and holding the GVL which should hopefully help with reducing latency impact.
29
+ #define MIN_TIME_BETWEEN_HEAP_RECORDER_UPDATES_NS SECONDS_AS_NS(2)
19
30
 
20
31
  // A compact representation of a stacktrace frame for a heap allocation.
21
32
  typedef struct {
@@ -144,11 +155,18 @@ struct heap_recorder {
144
155
  // mutation of the data so iteration can occur without acquiring a lock.
145
156
  // NOTE: Contrary to object_records, this table has no ownership of its data.
146
157
  st_table *object_records_snapshot;
147
- // The GC gen/epoch/count in which we prepared the current iteration.
158
+ // Are we currently updating or not?
159
+ bool updating;
160
+ // The GC gen/epoch/count in which we are updating (or last updated if not currently updating).
148
161
  //
149
- // This enables us to calculate the age of iterated objects in the above snapshot by
150
- // comparing it against an object's alloc_gen.
151
- size_t iteration_gen;
162
+ // This enables us to calculate the age of objects considered in the update by comparing it
163
+ // against an object's alloc_gen.
164
+ size_t update_gen;
165
+ // Whether the current update (or last update if not currently updating) is including old
166
+ // objects or not.
167
+ bool update_include_old;
168
+ // When did we do the last update of heap recorder?
169
+ long last_update_ns;
152
170
 
153
171
  // Data for a heap recording that was started but not yet ended
154
172
  recording active_recording;
@@ -165,6 +183,21 @@ struct heap_recorder {
165
183
  size_t objects_skipped;
166
184
  size_t objects_frozen;
167
185
  } stats_last_update;
186
+
187
+ struct stats_lifetime {
188
+ unsigned long updates_successful;
189
+ unsigned long updates_skipped_concurrent;
190
+ unsigned long updates_skipped_gcgen;
191
+ unsigned long updates_skipped_time;
192
+
193
+ double ewma_young_objects_alive;
194
+ double ewma_young_objects_dead;
195
+ double ewma_young_objects_skipped; // Note: Here "young" refers to the young update; objects skipped includes non-young objects
196
+
197
+ double ewma_objects_alive;
198
+ double ewma_objects_dead;
199
+ double ewma_objects_skipped;
200
+ } stats_lifetime;
168
201
  };
169
202
 
170
203
  struct end_heap_allocation_args {
@@ -183,6 +216,8 @@ static int st_object_records_debug(st_data_t key, st_data_t value, st_data_t ext
183
216
  static int update_object_record_entry(st_data_t*, st_data_t*, st_data_t, int);
184
217
  static void commit_recording(heap_recorder*, heap_record*, recording);
185
218
  static VALUE end_heap_allocation_recording(VALUE end_heap_allocation_args);
219
+ static void heap_recorder_update(heap_recorder *heap_recorder, bool full_update);
220
+ static inline double ewma_stat(double previous, double current);
186
221
 
187
222
  // ==========================
188
223
  // Heap Recorder External API
@@ -280,6 +315,9 @@ void heap_recorder_after_fork(heap_recorder *heap_recorder) {
280
315
  if (heap_recorder->object_records_snapshot != NULL) {
281
316
  heap_recorder_finish_iteration(heap_recorder);
282
317
  }
318
+
319
+ // Clear lifetime stats since this is essentially a new heap recorder
320
+ heap_recorder->stats_lifetime = (struct stats_lifetime) {0};
283
321
  }
284
322
 
285
323
  void start_heap_allocation_recording(heap_recorder *heap_recorder, VALUE new_obj, unsigned int weight, ddog_CharSlice *alloc_class) {
@@ -394,23 +432,94 @@ static VALUE end_heap_allocation_recording(VALUE end_heap_allocation_args) {
394
432
  return Qnil;
395
433
  }
396
434
 
397
- void heap_recorder_prepare_iteration(heap_recorder *heap_recorder) {
435
+ void heap_recorder_update_young_objects(heap_recorder *heap_recorder) {
398
436
  if (heap_recorder == NULL) {
399
437
  return;
400
438
  }
401
439
 
402
- heap_recorder->iteration_gen = rb_gc_count();
440
+ heap_recorder_update(heap_recorder, /* full_update: */ false);
441
+ }
442
+
443
+ static void heap_recorder_update(heap_recorder *heap_recorder, bool full_update) {
444
+ if (heap_recorder->updating) {
445
+ if (full_update) rb_raise(rb_eRuntimeError, "BUG: full_update should not be triggered during another update");
446
+
447
+ // If we try to update while another update is still running, short-circuit.
448
+ // NOTE: This runs while holding the GVL. But since updates may be triggered from GC activity, there's still
449
+ // a chance for updates to be attempted concurrently if scheduling gods so determine.
450
+ heap_recorder->stats_lifetime.updates_skipped_concurrent++;
451
+ return;
452
+ }
403
453
 
404
454
  if (heap_recorder->object_records_snapshot != NULL) {
405
- // we could trivially handle this but we raise to highlight and catch unexpected usages.
406
- rb_raise(rb_eRuntimeError, "New heap recorder iteration prepared without the previous one having been finished.");
455
+ // While serialization is happening, it runs without the GVL and uses the object_records_snapshot.
456
+ // Although we iterate on a snapshot of object_records, these records point to other data that has not been
457
+ // snapshotted for efficiency reasons (e.g. heap_records). Since updating may invalidate
458
+ // some of that non-snapshotted data, let's refrain from doing updates during iteration. This also enforces the
459
+ // semantic that iteration will operate as a point-in-time snapshot.
460
+ return;
407
461
  }
408
462
 
463
+ size_t current_gc_gen = rb_gc_count();
464
+ long now_ns = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
465
+
466
+ if (!full_update) {
467
+ if (current_gc_gen == heap_recorder->update_gen) {
468
+ // Are we still in the same GC gen as last update? If so, skip updating since things should not have
469
+ // changed significantly since last time.
470
+ // NOTE: This is mostly a performance decision. I suppose some objects may be cleaned up in intermediate
471
+ // GC steps and sizes may change. But because we have to iterate through all our tracked
472
+ // object records to do an update, let's wait until all steps for a particular GC generation
473
+ // have finished to do so. We may revisit this once we have a better liveness checking mechanism.
474
+ heap_recorder->stats_lifetime.updates_skipped_gcgen++;
475
+ return;
476
+ }
477
+
478
+ if (now_ns > 0 && (now_ns - heap_recorder->last_update_ns) < MIN_TIME_BETWEEN_HEAP_RECORDER_UPDATES_NS) {
479
+ // We did an update not too long ago. Let's skip this one to avoid over-taxing the system.
480
+ heap_recorder->stats_lifetime.updates_skipped_time++;
481
+ return;
482
+ }
483
+ }
484
+
485
+ heap_recorder->updating = true;
409
486
  // Reset last update stats, we'll be building them from scratch during the st_foreach call below
410
- heap_recorder->stats_last_update = (struct stats_last_update) {};
487
+ heap_recorder->stats_last_update = (struct stats_last_update) {0};
488
+
489
+ heap_recorder->update_gen = current_gc_gen;
490
+ heap_recorder->update_include_old = full_update;
411
491
 
412
492
  st_foreach(heap_recorder->object_records, st_object_record_update, (st_data_t) heap_recorder);
413
493
 
494
+ heap_recorder->last_update_ns = now_ns;
495
+ heap_recorder->stats_lifetime.updates_successful++;
496
+
497
+ // Lifetime stats updating
498
+ if (!full_update) {
499
+ heap_recorder->stats_lifetime.ewma_young_objects_alive = ewma_stat(heap_recorder->stats_lifetime.ewma_young_objects_alive, heap_recorder->stats_last_update.objects_alive);
500
+ heap_recorder->stats_lifetime.ewma_young_objects_dead = ewma_stat(heap_recorder->stats_lifetime.ewma_young_objects_dead, heap_recorder->stats_last_update.objects_dead);
501
+ heap_recorder->stats_lifetime.ewma_young_objects_skipped = ewma_stat(heap_recorder->stats_lifetime.ewma_young_objects_skipped, heap_recorder->stats_last_update.objects_skipped);
502
+ } else {
503
+ heap_recorder->stats_lifetime.ewma_objects_alive = ewma_stat(heap_recorder->stats_lifetime.ewma_objects_alive, heap_recorder->stats_last_update.objects_alive);
504
+ heap_recorder->stats_lifetime.ewma_objects_dead = ewma_stat(heap_recorder->stats_lifetime.ewma_objects_dead, heap_recorder->stats_last_update.objects_dead);
505
+ heap_recorder->stats_lifetime.ewma_objects_skipped = ewma_stat(heap_recorder->stats_lifetime.ewma_objects_skipped, heap_recorder->stats_last_update.objects_skipped);
506
+ }
507
+
508
+ heap_recorder->updating = false;
509
+ }
510
+
511
+ void heap_recorder_prepare_iteration(heap_recorder *heap_recorder) {
512
+ if (heap_recorder == NULL) {
513
+ return;
514
+ }
515
+
516
+ if (heap_recorder->object_records_snapshot != NULL) {
517
+ // we could trivially handle this but we raise to highlight and catch unexpected usages.
518
+ rb_raise(rb_eRuntimeError, "New heap recorder iteration prepared without the previous one having been finished.");
519
+ }
520
+
521
+ heap_recorder_update(heap_recorder, /* full_update: */ true);
522
+
414
523
  heap_recorder->object_records_snapshot = st_copy(heap_recorder->object_records);
415
524
  if (heap_recorder->object_records_snapshot == NULL) {
416
525
  rb_raise(rb_eRuntimeError, "Failed to create heap snapshot.");
@@ -474,6 +583,19 @@ VALUE heap_recorder_state_snapshot(heap_recorder *heap_recorder) {
474
583
  ID2SYM(rb_intern("last_update_objects_dead")), /* => */ LONG2NUM(heap_recorder->stats_last_update.objects_dead),
475
584
  ID2SYM(rb_intern("last_update_objects_skipped")), /* => */ LONG2NUM(heap_recorder->stats_last_update.objects_skipped),
476
585
  ID2SYM(rb_intern("last_update_objects_frozen")), /* => */ LONG2NUM(heap_recorder->stats_last_update.objects_frozen),
586
+
587
+ // Lifetime stats
588
+ ID2SYM(rb_intern("lifetime_updates_successful")), /* => */ LONG2NUM(heap_recorder->stats_lifetime.updates_successful),
589
+ ID2SYM(rb_intern("lifetime_updates_skipped_concurrent")), /* => */ LONG2NUM(heap_recorder->stats_lifetime.updates_skipped_concurrent),
590
+ ID2SYM(rb_intern("lifetime_updates_skipped_gcgen")), /* => */ LONG2NUM(heap_recorder->stats_lifetime.updates_skipped_gcgen),
591
+ ID2SYM(rb_intern("lifetime_updates_skipped_time")), /* => */ LONG2NUM(heap_recorder->stats_lifetime.updates_skipped_time),
592
+ ID2SYM(rb_intern("lifetime_ewma_young_objects_alive")), /* => */ DBL2NUM(heap_recorder->stats_lifetime.ewma_young_objects_alive),
593
+ ID2SYM(rb_intern("lifetime_ewma_young_objects_dead")), /* => */ DBL2NUM(heap_recorder->stats_lifetime.ewma_young_objects_dead),
594
+ // Note: Here "young" refers to the young update; objects skipped includes non-young objects
595
+ ID2SYM(rb_intern("lifetime_ewma_young_objects_skipped")), /* => */ DBL2NUM(heap_recorder->stats_lifetime.ewma_young_objects_skipped),
596
+ ID2SYM(rb_intern("lifetime_ewma_objects_alive")), /* => */ DBL2NUM(heap_recorder->stats_lifetime.ewma_objects_alive),
597
+ ID2SYM(rb_intern("lifetime_ewma_objects_dead")), /* => */ DBL2NUM(heap_recorder->stats_lifetime.ewma_objects_dead),
598
+ ID2SYM(rb_intern("lifetime_ewma_objects_skipped")), /* => */ DBL2NUM(heap_recorder->stats_lifetime.ewma_objects_skipped),
477
599
  };
478
600
  VALUE hash = rb_hash_new();
479
601
  for (long unsigned int i = 0; i < VALUE_COUNT(arguments); i += 2) rb_hash_aset(hash, arguments[i], arguments[i+1]);
@@ -503,11 +625,14 @@ void heap_recorder_testonly_assert_hash_matches(ddog_prof_Slice_Location locatio
503
625
 
504
626
  VALUE heap_recorder_testonly_debug(heap_recorder *heap_recorder) {
505
627
  if (heap_recorder == NULL) {
506
- return rb_str_new2("NULL heap_recorder");
628
+ rb_raise(rb_eArgError, "heap_recorder is NULL");
507
629
  }
508
630
 
509
631
  VALUE debug_str = rb_str_new2("object records:\n");
510
632
  st_foreach(heap_recorder->object_records, st_object_records_debug, (st_data_t) debug_str);
633
+
634
+ rb_str_catf(debug_str, "state snapshot: %"PRIsVALUE"\n------\n", heap_recorder_state_snapshot(heap_recorder));
635
+
511
636
  return debug_str;
512
637
  }
513
638
 
@@ -526,13 +651,6 @@ static int st_object_record_entry_free(DDTRACE_UNUSED st_data_t key, st_data_t v
526
651
  return ST_DELETE;
527
652
  }
528
653
 
529
- // Check to see if an object should not be included in a heap recorder iteration.
530
- // This centralizes the checking logic to ensure it's equally applied between
531
- // preparation and iteration codepaths.
532
- static inline bool should_exclude_from_iteration(object_record *obj_record) {
533
- return obj_record->object_data.gen_age < ITERATION_MIN_AGE;
534
- }
535
-
536
654
  static int st_object_record_update(st_data_t key, st_data_t value, st_data_t extra_arg) {
537
655
  long obj_id = (long) key;
538
656
  object_record *record = (object_record*) value;
@@ -540,16 +658,20 @@ static int st_object_record_update(st_data_t key, st_data_t value, st_data_t ext
540
658
 
541
659
  VALUE ref;
542
660
 
543
- size_t iteration_gen = recorder->iteration_gen;
661
+ size_t update_gen = recorder->update_gen;
544
662
  size_t alloc_gen = record->object_data.alloc_gen;
545
663
  // Guard against potential overflows given unsigned types here.
546
- record->object_data.gen_age = alloc_gen < iteration_gen ? iteration_gen - alloc_gen : 0;
664
+ record->object_data.gen_age = alloc_gen < update_gen ? update_gen - alloc_gen : 0;
665
+
666
+ if (record->object_data.gen_age == 0) {
667
+ // Objects that belong to the current GC gen have not had a chance to be cleaned up yet
668
+ // and won't show up in the iteration anyway so no point in checking their liveness/sizes.
669
+ recorder->stats_last_update.objects_skipped++;
670
+ return ST_CONTINUE;
671
+ }
547
672
 
548
- if (should_exclude_from_iteration(record)) {
549
- // If an object won't be included in the current iteration, there's
550
- // no point checking for liveness or updating its size, so exit early.
551
- // NOTE: This means that there should be an equivalent check during actual
552
- // iteration otherwise we'd iterate/expose stale object data.
673
+ if (!recorder->update_include_old && record->object_data.gen_age >= OLD_AGE) {
674
+ // The current update is not including old objects but this record is for an old object, skip its update.
553
675
  recorder->stats_last_update.objects_skipped++;
554
676
  return ST_CONTINUE;
555
677
  }
@@ -598,7 +720,11 @@ static int st_object_record_update(st_data_t key, st_data_t value, st_data_t ext
598
720
 
599
721
  #endif
600
722
 
601
- if (recorder->size_enabled && !record->object_data.is_frozen) {
723
+ if (
724
+ recorder->size_enabled &&
725
+ recorder->update_include_old && // We only update sizes when doing a full update
726
+ !record->object_data.is_frozen
727
+ ) {
602
728
  // if we were asked to update sizes and this object was not already seen as being frozen,
603
729
  // update size again.
604
730
  record->object_data.size = ruby_obj_memsize_of(ref);
@@ -622,22 +748,22 @@ static int st_object_records_iterate(DDTRACE_UNUSED st_data_t key, st_data_t val
622
748
 
623
749
  const heap_recorder *recorder = context->heap_recorder;
624
750
 
625
- if (should_exclude_from_iteration(record)) {
751
+ if (record->object_data.gen_age < ITERATION_MIN_AGE) {
626
752
  // Skip objects that should not be included in iteration
627
- // NOTE: This matches the short-circuiting condition in st_object_record_update
628
- // and prevents iteration over stale objects.
629
753
  return ST_CONTINUE;
630
754
  }
631
755
 
632
756
  ddog_prof_Location *locations = recorder->reusable_locations;
633
757
  for (uint16_t i = 0; i < stack->frames_len; i++) {
634
758
  const heap_frame *frame = &stack->frames[i];
635
- ddog_prof_Location *location = &locations[i];
636
- location->function.name.ptr = frame->name;
637
- location->function.name.len = strlen(frame->name);
638
- location->function.filename.ptr = frame->filename;
639
- location->function.filename.len = strlen(frame->filename);
640
- location->line = frame->line;
759
+ locations[i] = (ddog_prof_Location) {
760
+ .mapping = {.filename = DDOG_CHARSLICE_C(""), .build_id = DDOG_CHARSLICE_C("")},
761
+ .function = {
762
+ .name = {.ptr = frame->name, .len = strlen(frame->name)},
763
+ .filename = {.ptr = frame->filename, .len = strlen(frame->filename)},
764
+ },
765
+ .line = frame->line,
766
+ };
641
767
  }
642
768
 
643
769
  heap_recorder_iteration_data iteration_data;
@@ -782,8 +908,20 @@ static void cleanup_heap_record_if_unused(heap_recorder *heap_recorder, heap_rec
782
908
  }
783
909
 
784
910
  static void on_committed_object_record_cleanup(heap_recorder *heap_recorder, object_record *record) {
911
+ // @ivoanjo: We've seen a segfault crash in the field in this function (October 2024) which we're still trying to investigate.
912
+ // (See PROF-10656 Datadog-internal for details). Just in case, I've sprinkled a bunch of NULL tests in this function for now.
913
+ // Once we figure out the issue we can get rid of them again.
914
+
915
+ if (heap_recorder == NULL) rb_raise(rb_eRuntimeError, "heap_recorder was NULL in on_committed_object_record_cleanup");
916
+ if (heap_recorder->heap_records == NULL) rb_raise(rb_eRuntimeError, "heap_recorder->heap_records was NULL in on_committed_object_record_cleanup");
917
+ if (record == NULL) rb_raise(rb_eRuntimeError, "record was NULL in on_committed_object_record_cleanup");
918
+
785
919
  // Starting with the associated heap record. There will now be one less tracked object pointing to it
786
920
  heap_record *heap_record = record->heap_record;
921
+
922
+ if (heap_record == NULL) rb_raise(rb_eRuntimeError, "heap_record was NULL in on_committed_object_record_cleanup");
923
+ if (heap_record->stack == NULL) rb_raise(rb_eRuntimeError, "heap_record->stack was NULL in on_committed_object_record_cleanup");
924
+
787
925
  heap_record->num_tracked_objects--;
788
926
 
789
927
  // One less object using this heap record, it may have become unused...
@@ -1073,3 +1211,25 @@ st_index_t heap_record_key_hash_st(st_data_t key) {
1073
1211
  return ddog_location_slice_hash(*record_key->location_slice, FNV1_32A_INIT);
1074
1212
  }
1075
1213
  }
1214
+
1215
+ static inline double ewma_stat(double previous, double current) {
1216
+ double alpha = 0.3;
1217
+ return (1 - alpha) * previous + alpha * current;
1218
+ }
1219
+
1220
+ VALUE heap_recorder_testonly_is_object_recorded(heap_recorder *heap_recorder, VALUE obj_id) {
1221
+ if (heap_recorder == NULL) {
1222
+ rb_raise(rb_eArgError, "heap_recorder is NULL");
1223
+ }
1224
+
1225
+ // Check if object records contains an object with this object_id
1226
+ return st_is_member(heap_recorder->object_records, FIX2LONG(obj_id)) ? Qtrue : Qfalse;
1227
+ }
1228
+
1229
+ void heap_recorder_testonly_reset_last_update(heap_recorder *heap_recorder) {
1230
+ if (heap_recorder == NULL) {
1231
+ rb_raise(rb_eArgError, "heap_recorder is NULL");
1232
+ }
1233
+
1234
+ heap_recorder->last_update_ns = 0;
1235
+ }
@@ -118,6 +118,11 @@ void start_heap_allocation_recording(heap_recorder *heap_recorder, VALUE new_obj
118
118
  __attribute__((warn_unused_result))
119
119
  int end_heap_allocation_recording_with_rb_protect(heap_recorder *heap_recorder, ddog_prof_Slice_Location locations);
120
120
 
121
+ // Update the heap recorder, **checking young objects only**. The idea here is to align with GC: most young objects never
122
+ // survive enough GC generations, and thus periodically running this method reduces memory usage (we get rid of
123
+ // these objects quicker) and hopefully reduces tail latency (because there's less objects at serialization time to check).
124
+ void heap_recorder_update_young_objects(heap_recorder *heap_recorder);
125
+
121
126
  // Update the heap recorder to reflect the latest state of the VM and prepare internal structures
122
127
  // for efficient iteration.
123
128
  //
@@ -166,3 +171,9 @@ void heap_recorder_testonly_assert_hash_matches(ddog_prof_Slice_Location locatio
166
171
  // Returns a Ruby string with a representation of internal data helpful to
167
172
  // troubleshoot issues such as unexpected test failures.
168
173
  VALUE heap_recorder_testonly_debug(heap_recorder *heap_recorder);
174
+
175
+ // Check if a given object_id is being tracked or not
176
+ VALUE heap_recorder_testonly_is_object_recorded(heap_recorder *heap_recorder, VALUE obj_id);
177
+
178
+ // Used to ensure that a GC actually triggers an update of the objects
179
+ void heap_recorder_testonly_reset_last_update(heap_recorder *heap_recorder);
@@ -77,6 +77,32 @@ static VALUE _native_validate_exporter(DDTRACE_UNUSED VALUE _self, VALUE exporte
77
77
  return rb_ary_new_from_args(2, ok_symbol, Qnil);
78
78
  }
79
79
 
80
+ static ddog_prof_Endpoint endpoint_from(VALUE exporter_configuration) {
81
+ ENFORCE_TYPE(exporter_configuration, T_ARRAY);
82
+
83
+ VALUE exporter_working_mode = rb_ary_entry(exporter_configuration, 0);
84
+ ENFORCE_TYPE(exporter_working_mode, T_SYMBOL);
85
+ ID working_mode = SYM2ID(exporter_working_mode);
86
+
87
+ ID agentless_id = rb_intern("agentless");
88
+ ID agent_id = rb_intern("agent");
89
+
90
+ if (working_mode != agentless_id && working_mode != agent_id) {
91
+ rb_raise(rb_eArgError, "Failed to initialize transport: Unexpected working mode, expected :agentless or :agent");
92
+ }
93
+
94
+ if (working_mode == agentless_id) {
95
+ VALUE site = rb_ary_entry(exporter_configuration, 1);
96
+ VALUE api_key = rb_ary_entry(exporter_configuration, 2);
97
+
98
+ return ddog_prof_Endpoint_agentless(char_slice_from_ruby_string(site), char_slice_from_ruby_string(api_key));
99
+ } else { // agent_id
100
+ VALUE base_url = rb_ary_entry(exporter_configuration, 1);
101
+
102
+ return ddog_prof_Endpoint_agent(char_slice_from_ruby_string(base_url));
103
+ }
104
+ }
105
+
80
106
  static ddog_prof_Exporter_NewResult create_exporter(VALUE exporter_configuration, VALUE tags_as_array) {
81
107
  ENFORCE_TYPE(exporter_configuration, T_ARRAY);
82
108
  ENFORCE_TYPE(tags_as_array, T_ARRAY);
@@ -115,8 +141,7 @@ static VALUE perform_export(
115
141
  ddog_prof_Exporter_Slice_File files_to_export_unmodified,
116
142
  ddog_Vec_Tag *additional_tags,
117
143
  ddog_CharSlice internal_metadata,
118
- ddog_CharSlice info,
119
- uint64_t timeout_milliseconds
144
+ ddog_CharSlice info
120
145
  ) {
121
146
  ddog_prof_ProfiledEndpointsStats *endpoints_stats = NULL; // Not in use yet
122
147
  ddog_prof_Exporter_Request_BuildResult build_result = ddog_prof_Exporter_Request_build(
@@ -128,8 +153,7 @@ static VALUE perform_export(
128
153
  additional_tags,
129
154
  endpoints_stats,
130
155
  &internal_metadata,
131
- &info,
132
- timeout_milliseconds
156
+ &info
133
157
  );
134
158
 
135
159
  if (build_result.tag == DDOG_PROF_EXPORTER_REQUEST_BUILD_RESULT_ERR) {
@@ -254,6 +278,15 @@ static VALUE _native_do_export(
254
278
  VALUE failure_tuple = handle_exporter_failure(exporter_result);
255
279
  if (!NIL_P(failure_tuple)) return failure_tuple;
256
280
 
281
+ ddog_prof_MaybeError timeout_result = ddog_prof_Exporter_set_timeout(exporter_result.ok, timeout_milliseconds);
282
+ if (timeout_result.tag == DDOG_PROF_OPTION_ERROR_SOME_ERROR) {
283
+ // NOTE: Seems a bit harsh to fail the upload if we can't set a timeout. OTOH, this is only expected to fail
284
+ // if the exporter is not well built. Because such a situation should already be caught above I think it's
285
+ // preferable to leave this here as a virtually unreachable exception rather than ignoring it.
286
+ ddog_prof_Exporter_drop(exporter_result.ok);
287
+ return rb_ary_new_from_args(2, error_symbol, get_error_details_and_drop(&timeout_result.some));
288
+ }
289
+
257
290
  return perform_export(
258
291
  exporter_result.ok,
259
292
  start,
@@ -262,8 +295,7 @@ static VALUE _native_do_export(
262
295
  files_to_export_unmodified,
263
296
  null_additional_tags,
264
297
  internal_metadata,
265
- info,
266
- timeout_milliseconds
298
+ info
267
299
  );
268
300
  }
269
301
 
@@ -9,7 +9,7 @@ module Datadog
9
9
  # Can be set to force rubygems to fail gem installation when profiling extension could not be built
10
10
  ENV_FAIL_INSTALL_IF_MISSING_EXTENSION = "DD_PROFILING_FAIL_INSTALL_IF_MISSING_EXTENSION"
11
11
 
12
- # The MJIT header was introduced on 2.6 and removed on 3.3; for other Rubies we rely on debase-ruby_core_source
12
+ # The MJIT header was introduced on 2.6 and removed on 3.3; for other Rubies we rely on datadog-ruby_core_source
13
13
  CAN_USE_MJIT_HEADER = RUBY_VERSION.start_with?("2.6", "2.7", "3.0.", "3.1.", "3.2.")
14
14
 
15
15
  def self.fail_install_if_missing_extension?
@@ -13,7 +13,7 @@
13
13
  #include RUBY_MJIT_HEADER
14
14
  #else
15
15
  // The MJIT header was introduced on 2.6 and removed on 3.3; for other Rubies we rely on
16
- // the debase-ruby_core_source gem to get access to private VM headers.
16
+ // the datadog-ruby_core_source gem to get access to private VM headers.
17
17
 
18
18
  // We can't do anything about warnings in VM headers, so we just use this technique to suppress them.
19
19
  // See https://nelkinda.com/blog/suppress-warnings-in-gcc-and-clang/#d11e364 for details.
@@ -182,7 +182,7 @@ uint64_t native_thread_id_for(VALUE thread) {
182
182
  #if !defined(NO_THREAD_TID) && defined(RB_THREAD_T_HAS_NATIVE_ID)
183
183
  #ifndef NO_RB_NATIVE_THREAD
184
184
  struct rb_native_thread* native_thread = thread_struct_from_object(thread)->nt;
185
- if (native_thread == NULL) rb_raise(rb_eRuntimeError, "BUG: rb_native_thread* is null. Is this Ruby running with RUBY_MN_THREADS=1?");
185
+ if (native_thread == NULL) return 0;
186
186
  return native_thread->tid;
187
187
  #else
188
188
  return thread_struct_from_object(thread)->tid;
@@ -755,3 +755,54 @@ static inline int ddtrace_imemo_type(VALUE imemo) {
755
755
  return GET_VM()->objspace;
756
756
  }
757
757
  #endif
758
+
759
+ #ifdef USE_GVL_PROFILING_3_2_WORKAROUNDS // Ruby 3.2
760
+ #include "gvl_profiling_helper.h"
761
+
762
+ gvl_profiling_thread thread_from_thread_object(VALUE thread) {
763
+ return (gvl_profiling_thread) {.thread = thread_struct_from_object(thread)};
764
+ }
765
+
766
+ // Hack: In Ruby 3.3+ we attach gvl profiling state to Ruby threads using the
767
+ // rb_internal_thread_specific_* APIs. These APIs did not exist on Ruby 3.2. On Ruby 3.2 we instead store the
768
+ // needed data inside the `rb_thread_t` structure, specifically in `stat_insn_usage` as a Ruby FIXNUM.
769
+ //
770
+ // Why `stat_insn_usage`? We needed some per-thread storage, and while looking at the Ruby VM sources I noticed
771
+ // that `stat_insn_usage` has been in `rb_thread_t` for a long time, but is not used anywhere in the VM
772
+ // code. There's a comment attached to it "/* statistics data for profiler */" but other than marking this
773
+ // field for GC, I could not find any place in the VM commit history or on GitHub where this has ever been used.
774
+ //
775
+ // Thus, since this hack is only for 3.2, which presumably will never see this field either removed or used
776
+ // during its remaining maintenance release period we... kinda take it for our own usage. It's ugly, I know...
777
+ intptr_t gvl_profiling_state_get(gvl_profiling_thread thread) {
778
+ if (thread.thread == NULL) return 0;
779
+
780
+ VALUE current_value = ((rb_thread_t *)thread.thread)->stat_insn_usage;
781
+ intptr_t result = current_value == Qnil ? 0 : FIX2LONG(current_value);
782
+ return result;
783
+ }
784
+
785
+ void gvl_profiling_state_set(gvl_profiling_thread thread, intptr_t value) {
786
+ if (thread.thread == NULL) return;
787
+ ((rb_thread_t *)thread.thread)->stat_insn_usage = LONG2FIX(value);
788
+ }
789
+
790
+ // Because Ruby 3.2 does not give us the current thread when calling the RUBY_INTERNAL_THREAD_EVENT_READY and
791
+ // RUBY_INTERNAL_THREAD_EVENT_RESUMED APIs, we need to figure out this info ourselves.
792
+ //
793
+ // Specifically, this method was created to be called from a RUBY_INTERNAL_THREAD_EVENT_RESUMED callback --
794
+ // when it's triggered, we know the thread the code gets executed on is holding the GVL, so we use this
795
+ // opportunity to initialize our thread-local value.
796
+ gvl_profiling_thread gvl_profiling_state_maybe_initialize(void) {
797
+ gvl_profiling_thread current_thread = gvl_waiting_tls;
798
+
799
+ if (current_thread.thread == NULL) {
800
+ // threads.sched.running is the thread currently holding the GVL, which when this gets executed is the
801
+ // current thread!
802
+ current_thread = (gvl_profiling_thread) {.thread = (void *) rb_current_ractor()->threads.sched.running};
803
+ gvl_waiting_tls = current_thread;
804
+ }
805
+
806
+ return current_thread;
807
+ }
808
+ #endif
@@ -65,3 +65,6 @@ const char *imemo_kind(VALUE imemo);
65
65
  #ifdef NO_POSTPONED_TRIGGER
66
66
  void *objspace_ptr_for_gc_finalize_deferred_workaround(void);
67
67
  #endif
68
+
69
+ #define ENFORCE_THREAD(value) \
70
+ { if (RB_UNLIKELY(!rb_typeddata_is_kind_of(value, RTYPEDDATA_TYPE(rb_thread_current())))) raise_unexpected_type(value, ADD_QUOTES(value), "Thread", __FILE__, __LINE__, __func__); }
@@ -253,7 +253,7 @@ static VALUE _native_enforce_success(DDTRACE_UNUSED VALUE _self, VALUE syserr_er
253
253
 
254
254
  static void *trigger_enforce_success(void *trigger_args) {
255
255
  intptr_t syserr_errno = (intptr_t) trigger_args;
256
- ENFORCE_SUCCESS_NO_GVL(syserr_errno);
256
+ ENFORCE_SUCCESS_NO_GVL((int) syserr_errno);
257
257
  return NULL;
258
258
  }
259
259
 
@@ -219,16 +219,19 @@ static bool ruby_is_obj_with_class(VALUE obj) {
219
219
  return false;
220
220
  }
221
221
 
222
// These two functions are not present in the VM headers, but are public symbols that can be invoked.
int rb_objspace_internal_object_p(VALUE obj);
const char *rb_obj_info(VALUE obj);

// Best-effort, crash-resistant #inspect intended for debug output only.
// Falls through progressively safer representations:
//   * non-objects (per ruby_is_obj_with_class) -> "(Not an object)"
//   * VM-internal objects                      -> "(VM Internal, ...)"
//   * MatchData                                -> "(MatchData, ...)"
//   * objects responding to #inspect / #to_s   -> formatted via rb_sprintf
//   * otherwise                                -> "(Not inspectable)"
VALUE ruby_safe_inspect(VALUE obj) {
  if (!ruby_is_obj_with_class(obj)) return rb_str_new_cstr("(Not an object)");
  if (rb_objspace_internal_object_p(obj)) return rb_sprintf("(VM Internal, %s)", rb_obj_info(obj));
  // @ivoanjo: I saw crashes on Ruby 3.1.4 when trying to #inspect matchdata objects. I'm not entirely sure why this
  // is needed, but since we only use this method for debug purposes I put in this alternative and decided not to
  // dig deeper.
  if (rb_type(obj) == RUBY_T_MATCH) return rb_sprintf("(MatchData, %s)", rb_obj_info(obj));
  if (rb_respond_to(obj, inspect_id)) return rb_sprintf("%+"PRIsVALUE, obj);
  if (rb_respond_to(obj, to_s_id)) return rb_sprintf("%"PRIsVALUE, obj);

  return rb_str_new_cstr("(Not inspectable)");
}