ddtrace 1.17.0 → 1.19.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +85 -2
- data/ext/ddtrace_profiling_native_extension/clock_id_from_pthread.c +3 -0
- data/ext/ddtrace_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +67 -52
- data/ext/ddtrace_profiling_native_extension/collectors_dynamic_sampling_rate.c +22 -14
- data/ext/ddtrace_profiling_native_extension/collectors_dynamic_sampling_rate.h +4 -0
- data/ext/ddtrace_profiling_native_extension/collectors_gc_profiling_helper.c +156 -0
- data/ext/ddtrace_profiling_native_extension/collectors_gc_profiling_helper.h +5 -0
- data/ext/ddtrace_profiling_native_extension/collectors_stack.c +43 -102
- data/ext/ddtrace_profiling_native_extension/collectors_stack.h +10 -3
- data/ext/ddtrace_profiling_native_extension/collectors_thread_context.c +167 -125
- data/ext/ddtrace_profiling_native_extension/collectors_thread_context.h +2 -1
- data/ext/ddtrace_profiling_native_extension/extconf.rb +44 -10
- data/ext/ddtrace_profiling_native_extension/heap_recorder.c +970 -0
- data/ext/ddtrace_profiling_native_extension/heap_recorder.h +155 -0
- data/ext/ddtrace_profiling_native_extension/helpers.h +2 -0
- data/ext/ddtrace_profiling_native_extension/http_transport.c +5 -2
- data/ext/ddtrace_profiling_native_extension/libdatadog_helpers.c +20 -0
- data/ext/ddtrace_profiling_native_extension/libdatadog_helpers.h +11 -0
- data/ext/ddtrace_profiling_native_extension/private_vm_api_access.c +83 -18
- data/ext/ddtrace_profiling_native_extension/private_vm_api_access.h +6 -0
- data/ext/ddtrace_profiling_native_extension/profiling.c +2 -0
- data/ext/ddtrace_profiling_native_extension/ruby_helpers.c +147 -0
- data/ext/ddtrace_profiling_native_extension/ruby_helpers.h +28 -0
- data/ext/ddtrace_profiling_native_extension/stack_recorder.c +330 -13
- data/ext/ddtrace_profiling_native_extension/stack_recorder.h +3 -0
- data/lib/datadog/appsec/component.rb +4 -1
- data/lib/datadog/appsec/configuration/settings.rb +4 -0
- data/lib/datadog/appsec/contrib/devise/patcher/registration_controller_patch.rb +2 -0
- data/lib/datadog/appsec/processor/rule_loader.rb +60 -0
- data/lib/datadog/appsec/remote.rb +12 -9
- data/lib/datadog/core/configuration/settings.rb +139 -22
- data/lib/datadog/core/configuration.rb +4 -0
- data/lib/datadog/core/remote/worker.rb +1 -0
- data/lib/datadog/core/telemetry/collector.rb +10 -0
- data/lib/datadog/core/telemetry/event.rb +2 -1
- data/lib/datadog/core/telemetry/ext.rb +3 -0
- data/lib/datadog/core/telemetry/v1/app_event.rb +8 -1
- data/lib/datadog/core/telemetry/v1/install_signature.rb +38 -0
- data/lib/datadog/core/workers/async.rb +1 -0
- data/lib/datadog/kit/enable_core_dumps.rb +5 -6
- data/lib/datadog/profiling/collectors/cpu_and_wall_time_worker.rb +7 -11
- data/lib/datadog/profiling/collectors/idle_sampling_helper.rb +1 -0
- data/lib/datadog/profiling/component.rb +210 -18
- data/lib/datadog/profiling/scheduler.rb +4 -6
- data/lib/datadog/profiling/stack_recorder.rb +13 -2
- data/lib/datadog/tracing/contrib/mysql2/configuration/settings.rb +4 -0
- data/lib/datadog/tracing/contrib/mysql2/instrumentation.rb +2 -1
- data/lib/datadog/tracing/contrib/pg/configuration/settings.rb +5 -0
- data/lib/datadog/tracing/contrib/pg/instrumentation.rb +24 -0
- data/lib/datadog/tracing/contrib/rails/auto_instrument_railtie.rb +0 -2
- data/lib/datadog/tracing/workers.rb +1 -0
- data/lib/ddtrace/version.rb +1 -1
- metadata +11 -6
data/ext/ddtrace_profiling_native_extension/heap_recorder.h
@@ -0,0 +1,155 @@
+#pragma once
+
+#include <datadog/profiling.h>
+#include <ruby.h>
+
+// A heap recorder keeps track of a collection of live heap objects.
+//
+// All allocations observed by this recorder for which a corresponding free was
+// not yet observed are deemed as alive and can be iterated on to produce a
+// live heap profile.
+//
+// NOTE: All public APIs of heap_recorder support receiving a NULL heap_recorder
+// in which case the behaviour will be a noop.
+//
+// WARN: Unless otherwise stated the heap recorder APIs assume calls are done
+// under the GVL.
+typedef struct heap_recorder heap_recorder;
+
+// Extra data associated with each live object being tracked.
+typedef struct live_object_data {
+  // The weight of this object from a sampling perspective.
+  //
+  // A notion of weight is preserved for each tracked object to allow for an approximate
+  // extrapolation to an unsampled view.
+  //
+  // Example: If we were sampling every 50 objects, then each sampled object
+  // could be seen as being representative of 50 objects.
+  unsigned int weight;
+
+  // Size of this object on last flush/update.
+  size_t size;
+
+  // The class of the object that we're tracking.
+  // NOTE: This is optional and will be set to NULL if not set.
+  char* class;
+
+  // The GC allocation gen in which we saw this object being allocated.
+  //
+  // This enables us to calculate the age of this object in terms of GC executions.
+  size_t alloc_gen;
+
+  // Whether this object was previously seen as being frozen. If this is the case,
+  // we'll skip any further size updates since frozen objects are supposed to be
+  // immutable.
+  bool is_frozen;
+} live_object_data;
+
+// Data that is made available to iterators of heap recorder data for each live object
+// tracked therein.
+typedef struct {
+  ddog_prof_Slice_Location locations;
+  live_object_data object_data;
+} heap_recorder_iteration_data;
+
+// Initialize a new heap recorder.
+heap_recorder* heap_recorder_new(void);
+
+// Free a previously initialized heap recorder.
+void heap_recorder_free(heap_recorder *heap_recorder);
+
+// Sets whether this heap recorder should keep track of sizes or not.
+//
+// If set to true, the heap recorder will attempt to determine the approximate sizes of
+// tracked objects and wield them during iteration.
+// If set to false, sizes returned during iteration should not be used/relied on (they
+// may be 0 or the last determined size before disabling the tracking of sizes).
+//
+// NOTE: Default is true, i.e., it will attempt to determine approximate sizes of tracked
+// objects.
+void heap_recorder_set_size_enabled(heap_recorder *heap_recorder, bool size_enabled);
+
+// Set sample rate used by this heap recorder.
+//
+// Controls how many recordings will be ignored before committing a heap allocation and
+// the weight of the committed heap allocation.
+//
+// A value of 1 will effectively track all objects that are passed through
+// start/end_heap_allocation_recording pairs. A value of 10 will only track every 10th
+// object passed through such calls and its effective weight for the purposes of heap
+// profiling will be multiplied by 10.
+//
+// NOTE: Default is 1, i.e., track all heap allocation recordings.
+//
+// WARN: Non-positive values will lead to an exception being thrown.
+void heap_recorder_set_sample_rate(heap_recorder *heap_recorder, int sample_rate);
+
+// Do any cleanup needed after forking.
+// WARN: Assumes this gets called before profiler is reinitialized on the fork
+void heap_recorder_after_fork(heap_recorder *heap_recorder);
+
+// Start a heap allocation recording on the heap recorder for a new object.
+//
+// This heap allocation recording needs to be ended via ::end_heap_allocation_recording
+// before it will become fully committed and able to be iterated on.
+//
+// @param new_obj
+//   The newly allocated Ruby object/value.
+// @param weight
+//   The sampling weight of this object.
+//
+// WARN: It needs to be paired with a ::end_heap_allocation_recording call.
+void start_heap_allocation_recording(heap_recorder *heap_recorder, VALUE new_obj, unsigned int weight, ddog_CharSlice *alloc_class);
+
+// End a previously started heap allocation recording on the heap recorder.
+//
+// It is at this point that an allocated object will become fully tracked and able to be iterated on.
+//
+// @param locations The stacktrace representing the location of the allocation.
+//
+// WARN: It is illegal to call this without previously having called ::start_heap_allocation_recording.
+void end_heap_allocation_recording(heap_recorder *heap_recorder, ddog_prof_Slice_Location locations);
+
+// Update the heap recorder to reflect the latest state of the VM and prepare internal structures
+// for efficient iteration.
+//
+// WARN: This must be called strictly before iteration. Failing to do so will result in exceptions.
+void heap_recorder_prepare_iteration(heap_recorder *heap_recorder);
+
+// Optimize the heap recorder by cleaning up any data that might have been prepared specifically
+// for the purpose of iterating over the heap recorder data.
+//
+// WARN: This must be called strictly after iteration to ensure proper cleanup and to keep the memory
+// profile of the heap recorder low.
+void heap_recorder_finish_iteration(heap_recorder *heap_recorder);
+
+// Iterate over each live object being tracked by the heap recorder.
+//
+// NOTE: Iteration can be called without holding the Ruby Global VM lock.
+// WARN: This must be called strictly after heap_recorder_prepare_iteration and before
+// heap_recorder_finish_iteration.
+//
+// @param for_each_callback
+//   A callback function that shall be called for each live object being tracked
+//   by the heap recorder. Alongside the iteration_data for each live object,
+//   a second argument will be forwarded with the contents of the optional
+//   for_each_callback_extra_arg. Iteration will continue until the callback
+//   returns false or we run out of objects.
+// @param for_each_callback_extra_arg
+//   Optional (NULL if empty) extra data that should be passed to the
+//   callback function alongside the data for each live tracked object.
+// @return true if iteration ran, false if something prevented it from running.
+bool heap_recorder_for_each_live_object(
+    heap_recorder *heap_recorder,
+    bool (*for_each_callback)(heap_recorder_iteration_data data, void* extra_arg),
+    void *for_each_callback_extra_arg);
+
+// v--- TEST-ONLY APIs ---v
+
+// Assert internal hashing logic is valid for the provided locations and its
+// corresponding internal representations in heap recorder.
+void heap_recorder_testonly_assert_hash_matches(ddog_prof_Slice_Location locations);
+
+// Returns a Ruby string with a representation of internal data helpful to
+// troubleshoot issues such as unexpected test failures.
+VALUE heap_recorder_testonly_debug(heap_recorder *heap_recorder);
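The header above spells out a strict lifecycle: allocations are recorded in start/end pairs under the GVL, and iteration has to be bracketed by the prepare/finish calls. The snippet below is a minimal usage sketch of that sequence and is not code from this release; the callback, the empty locations slice, the sampled object, and the sketch function names are illustrative placeholders.

// Sketch only: assumes heap_recorder.h (and the libdatadog/Ruby headers it includes) are available.
static bool count_live_objects(heap_recorder_iteration_data data, void *extra_arg) {
  // Each sampled object carries its extrapolation weight; summing them estimates total live objects.
  *((unsigned long *) extra_arg) += data.object_data.weight;
  return true; // returning false would stop the iteration early
}

static void heap_recorder_usage_sketch(VALUE some_new_object) {
  heap_recorder *recorder = heap_recorder_new();
  heap_recorder_set_sample_rate(recorder, 10); // keep 1 in 10 recordings, each weighted by 10

  // Record one sampled allocation; must be done as a start/end pair, under the GVL.
  ddog_CharSlice alloc_class = DDOG_CHARSLICE_C("Object");
  start_heap_allocation_recording(recorder, some_new_object, 10, &alloc_class);
  ddog_prof_Slice_Location locations = {.ptr = NULL, .len = 0}; // placeholder for a real stacktrace
  end_heap_allocation_recording(recorder, locations);

  // Iteration must be bracketed by prepare/finish.
  unsigned long live_estimate = 0;
  heap_recorder_prepare_iteration(recorder);
  heap_recorder_for_each_live_object(recorder, count_live_objects, &live_estimate);
  heap_recorder_finish_iteration(recorder);

  heap_recorder_free(recorder);
}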
data/ext/ddtrace_profiling_native_extension/helpers.h
@@ -15,3 +15,5 @@
 // don't like C and I just implemented this as a function.
 inline static uint64_t uint64_max_of(uint64_t a, uint64_t b) { return a > b ? a : b; }
 inline static uint64_t uint64_min_of(uint64_t a, uint64_t b) { return a > b ? b : a; }
+inline static long long_max_of(long a, long b) { return a > b ? a : b; }
+inline static long long_min_of(long a, long b) { return a > b ? b : a; }
data/ext/ddtrace_profiling_native_extension/http_transport.c
@@ -16,7 +16,6 @@ static ID agent_id; // id of :agent in Ruby
 
 static ID log_failure_to_process_tag_id; // id of :log_failure_to_process_tag in Ruby
 
-static VALUE http_transport_class = Qnil;
 static VALUE library_version_string = Qnil;
 
 struct call_exporter_without_gvl_arguments {
@@ -54,7 +53,7 @@ static void interrupt_exporter_call(void *cancel_token);
 static VALUE ddtrace_version(void);
 
 void http_transport_init(VALUE profiling_module) {
-  http_transport_class = rb_define_class_under(profiling_module, "HttpTransport", rb_cObject);
+  VALUE http_transport_class = rb_define_class_under(profiling_module, "HttpTransport", rb_cObject);
 
   rb_define_singleton_method(http_transport_class, "_native_validate_exporter", _native_validate_exporter, 1);
   rb_define_singleton_method(http_transport_class, "_native_do_export", _native_do_export, 12);
@@ -180,6 +179,10 @@ static ddog_Vec_Tag convert_tags(VALUE tags_as_array) {
 }
 
 static VALUE log_failure_to_process_tag(VALUE err_details) {
+  VALUE datadog_module = rb_const_get(rb_cObject, rb_intern("Datadog"));
+  VALUE profiling_module = rb_const_get(datadog_module, rb_intern("Profiling"));
+  VALUE http_transport_class = rb_const_get(profiling_module, rb_intern("HttpTransport"));
+
   return rb_funcall(http_transport_class, log_failure_to_process_tag_id, 1, err_details);
 }
 
data/ext/ddtrace_profiling_native_extension/libdatadog_helpers.c
@@ -40,3 +40,23 @@ ddog_CharSlice ruby_value_type_to_char_slice(enum ruby_value_type type) {
     default: return DDOG_CHARSLICE_C("BUG: Unknown value for ruby_value_type");
   }
 }
+
+size_t read_ddogerr_string_and_drop(ddog_Error *error, char *string, size_t capacity) {
+  if (capacity == 0 || string == NULL) {
+    // short-circuit, we can't write anything
+    ddog_Error_drop(error);
+    return 0;
+  }
+
+  ddog_CharSlice error_msg_slice = ddog_Error_message(error);
+  size_t error_msg_size = error_msg_slice.len;
+  // Account for extra null char for proper cstring
+  if (error_msg_size >= capacity) {
+    // Error message too big, lets truncate it to capacity - 1 to allow for extra null at end
+    error_msg_size = capacity - 1;
+  }
+  strncpy(string, error_msg_slice.ptr, error_msg_size);
+  string[error_msg_size] = '\0';
+  ddog_Error_drop(error);
+  return error_msg_size;
+}
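read_ddogerr_string_and_drop copies a libdatadog error message into a caller-supplied buffer, truncating it to capacity - 1 if needed, null-terminating it, and always dropping the error. A hedged sketch of a caller follows; the surrounding function and the origin of the ddog_Error are illustrative, not from this release.

#include <stdio.h>

// Sketch only: `error` is assumed to have been returned by some failed libdatadog call.
static void log_libdatadog_failure(ddog_Error error) {
  char error_details[256];
  size_t written = read_ddogerr_string_and_drop(&error, error_details, sizeof(error_details));
  // error_details now holds at most 255 characters plus '\0'; the error was already dropped above,
  // so it must not be used again.
  fprintf(stderr, "libdatadog operation failed (%zu chars): %s\n", written, error_details);
}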
data/ext/ddtrace_profiling_native_extension/libdatadog_helpers.h
@@ -24,8 +24,19 @@ inline static VALUE get_error_details_and_drop(ddog_Error *error) {
   return result;
 }
 
+// Utility function to be able to extract an error cstring from a ddog_Error.
+// Returns the amount of characters written to string (which are necessarily
+// bounded by capacity - 1 since the string will be null-terminated).
+size_t read_ddogerr_string_and_drop(ddog_Error *error, char *string, size_t capacity);
+
 // Used for pretty printing this Ruby enum. Returns "T_UNKNOWN_OR_MISSING_RUBY_VALUE_TYPE_ENTRY" for unknown elements.
 // In practice, there's a few types that the profiler will probably never encounter, but I've added all entries of
 // ruby_value_type that Ruby uses so that we can also use this for debugging.
 const char *ruby_value_type_to_string(enum ruby_value_type type);
 ddog_CharSlice ruby_value_type_to_char_slice(enum ruby_value_type type);
+
+// Returns a dynamically allocated string from the provided char slice.
+// WARN: The returned string must be explicitly freed with ruby_xfree.
+inline static char* string_from_char_slice(ddog_CharSlice slice) {
+  return ruby_strndup(slice.ptr, slice.len);
+}
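Because string_from_char_slice allocates through ruby_strndup, ownership of the buffer passes to the caller and it must be released with ruby_xfree, as the WARN above states. A small illustrative sketch of that ownership pattern (the function name is hypothetical):

// Sketch only: duplicate a (not necessarily null-terminated) slice into an owned C string.
static void copy_class_name_sketch(ddog_CharSlice class_name) {
  char *owned_copy = string_from_char_slice(class_name);
  // ... use owned_copy as a regular null-terminated C string ...
  ruby_xfree(owned_copy); // must be freed explicitly, per the warning above
}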
data/ext/ddtrace_profiling_native_extension/private_vm_api_access.c
@@ -58,9 +58,12 @@ static inline rb_thread_t *thread_struct_from_object(VALUE thread) {
 }
 
 rb_nativethread_id_t pthread_id_for(VALUE thread) {
-  // struct rb_native_thread was introduced in Ruby 3.2
+  // struct rb_native_thread was introduced in Ruby 3.2: https://github.com/ruby/ruby/pull/5836
   #ifndef NO_RB_NATIVE_THREAD
-    return thread_struct_from_object(thread)->nt->thread_id;
+    struct rb_native_thread* native_thread = thread_struct_from_object(thread)->nt;
+    // This can be NULL on Ruby 3.3 with MN threads (RUBY_MN_THREADS=1)
+    if (native_thread == NULL) return 0;
+    return native_thread->thread_id;
   #else
     return thread_struct_from_object(thread)->thread_id;
   #endif
@@ -113,15 +116,16 @@ bool is_current_thread_holding_the_gvl(void) {
 
   if (current_owner == NULL) return (current_gvl_owner) {.valid = false};
 
-
-
-
-
-
-
-
-
-
+  #ifndef NO_RB_NATIVE_THREAD
+    struct rb_native_thread* current_owner_native_thread = current_owner->nt;
+
+    // This can be NULL on Ruby 3.3 with MN threads (RUBY_MN_THREADS=1)
+    if (current_owner_native_thread == NULL) return (current_gvl_owner) {.valid = false};
+
+    return (current_gvl_owner) {.valid = true, .owner = current_owner_native_thread->thread_id};
+  #else
+    return (current_gvl_owner) {.valid = true, .owner = current_owner->thread_id};
+  #endif
 }
 #else
 current_gvl_owner gvl_owner(void) {
@@ -182,7 +186,9 @@ uint64_t native_thread_id_for(VALUE thread) {
   // The tid is only available on Ruby >= 3.1 + Linux (and FreeBSD). It's the same as `gettid()` aka the task id as seen in /proc
   #if !defined(NO_THREAD_TID) && defined(RB_THREAD_T_HAS_NATIVE_ID)
     #ifndef NO_RB_NATIVE_THREAD
-      return thread_struct_from_object(thread)->nt->tid;
+      struct rb_native_thread* native_thread = thread_struct_from_object(thread)->nt;
+      if (native_thread == NULL) rb_raise(rb_eRuntimeError, "BUG: rb_native_thread* is null. Is this Ruby running with RUBY_MN_THREADS=1?");
+      return native_thread->tid;
     #else
       return thread_struct_from_object(thread)->tid;
     #endif
@@ -407,6 +413,7 @@ calc_lineno(const rb_iseq_t *iseq, const VALUE *pc)
 // the `VALUE` returned by rb_profile_frames returns `(eval)` instead of the path of the file where the `eval`
 // was called from.
 // * Imported fix from https://github.com/ruby/ruby/pull/7116 to avoid sampling threads that are still being created
+// * Imported fix from https://github.com/ruby/ruby/pull/8415 to avoid potential crash when using YJIT.
 //
 // What is rb_profile_frames?
 // `rb_profile_frames` is a Ruby VM debug API added for use by profilers for sampling the stack trace of a Ruby thread.
@@ -442,12 +449,15 @@ int ddtrace_rb_profile_frames(VALUE thread, int start, int limit, VALUE *buff, i
   // Modified from upstream: Instead of using `GET_EC` to collect info from the current thread,
   // support sampling any thread (including the current) passed as an argument
   rb_thread_t *th = thread_struct_from_object(thread);
-#ifndef USE_THREAD_INSTEAD_OF_EXECUTION_CONTEXT // Modern Rubies
-
-#else // Ruby < 2.5
-
-#endif
+  #ifndef USE_THREAD_INSTEAD_OF_EXECUTION_CONTEXT // Modern Rubies
+    const rb_execution_context_t *ec = th->ec;
+  #else // Ruby < 2.5
+    const rb_thread_t *ec = th;
+  #endif
   const rb_control_frame_t *cfp = ec->cfp, *end_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
+  #ifndef NO_JIT_RETURN
+    const rb_control_frame_t *top = cfp;
+  #endif
   const rb_callable_method_entry_t *cme;
 
   // Avoid sampling dead threads
@@ -461,6 +471,11 @@ int ddtrace_rb_profile_frames(VALUE thread, int start, int limit, VALUE *buff, i
   // it from https://github.com/ruby/ruby/pull/7116 in a "just in case" kind of mindset.
   if (cfp == NULL) return 0;
 
+  // As of this writing, we don't support profiling with MN enabled, and this only happens in that mode, but as we
+  // probably want to experiment with it in the future, I've decided to import https://github.com/ruby/ruby/pull/9310
+  // here.
+  if (ec == NULL) return 0;
+
   // Fix: Skip dummy frame that shows up in main thread.
   //
   // According to a comment in `backtrace_each` (`vm_backtrace.c`), there's two dummy frames that we should ignore
@@ -522,7 +537,20 @@ int ddtrace_rb_profile_frames(VALUE thread, int start, int limit, VALUE *buff, i
         buff[i] = (VALUE)cfp->iseq;
       }
 
-      lines[i] = calc_lineno(cfp->iseq, cfp->pc);
+      // The topmost frame may not have an updated PC because the JIT
+      // may not have set one. The JIT compiler will update the PC
+      // before entering a new function (so that `caller` will work),
+      // so only the topmost frame could possibly have an out of date PC
+      #ifndef NO_JIT_RETURN
+      if (cfp == top && cfp->jit_return) {
+        lines[i] = 0;
+      } else {
+        lines[i] = calc_lineno(cfp->iseq, cfp->pc);
+      }
+      #else // Ruby < 3.1
+      lines[i] = calc_lineno(cfp->iseq, cfp->pc);
+      #endif
+
       is_ruby_frame[i] = true;
       i++;
     }
@@ -811,3 +839,40 @@ VALUE invoke_location_for(VALUE thread, int *line_location) {
   *line_location = NUM2INT(rb_iseq_first_lineno(iseq));
   return rb_iseq_path(iseq);
 }
+
+void self_test_mn_enabled(void) {
+  #ifdef NO_MN_THREADS_AVAILABLE
+    return;
+  #else
+    if (ddtrace_get_ractor()->threads.sched.enable_mn_threads == true) {
+      rb_raise(rb_eRuntimeError, "Ruby VM is running with RUBY_MN_THREADS=1. This is not yet supported");
+    }
+  #endif
+}
+
+// Taken from upstream imemo.h at commit 6ebcf25de2859b5b6402b7e8b181066c32d0e0bf (November 2023, master branch)
+// (See the Ruby project copyright and license above)
+// to enable calling rb_imemo_name
+//
+// Modifications:
+// * Added IMEMO_MASK define
+// * Changed return type to int to avoid having to define `enum imemo_type`
+static inline int ddtrace_imemo_type(VALUE imemo) {
+  // This mask is the same between Ruby 2.5 and 3.3-preview3. Furthermore, the intention of this method is to be used
+  // to call `rb_imemo_name` which correctly handles invalid numbers so even if the mask changes in the future, at most
+  // we'll get incorrect results (and never a VM crash)
+  #define IMEMO_MASK 0x0f
+  return (RBASIC(imemo)->flags >> FL_USHIFT) & IMEMO_MASK;
+}
+
+// Safety: This function assumes the object passed in is of the imemo type. But in the worst case, you'll just get
+// a string that doesn't make any sense.
+#ifndef NO_IMEMO_NAME
+  const char *imemo_kind(VALUE imemo) {
+    return rb_imemo_name(ddtrace_imemo_type(imemo));
+  }
+#else
+  const char *imemo_kind(__attribute__((unused)) VALUE imemo) {
+    return NULL;
+  }
+#endif
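imemo_kind is a best-effort label: per the Safety note it assumes the caller already established the value is of the imemo type, and it returns NULL on Rubies built without rb_imemo_name (the NO_IMEMO_NAME case). A hedged sketch of a caller (the wrapper function is illustrative):

// Sketch only: produce a printable label for a value already known to be of the imemo type.
static const char *imemo_label_sketch(VALUE imemo_object) {
  const char *kind = imemo_kind(imemo_object); // e.g. "iseq"; NULL when rb_imemo_name is unavailable
  return kind != NULL ? kind : "(unknown imemo)";
}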
data/ext/ddtrace_profiling_native_extension/private_vm_api_access.h
@@ -49,3 +49,9 @@ bool ddtrace_rb_ractor_main_p(void);
 // This is what Ruby shows in `Thread#to_s`.
 // The file is returned directly, and the line is recorded onto *line_location.
 VALUE invoke_location_for(VALUE thread, int *line_location);
+
+// Check if RUBY_MN_THREADS is enabled (aka main Ractor is not doing 1:1 threads)
+void self_test_mn_enabled(void);
+
+// Provides more specific information on what kind an imemo is
+const char *imemo_kind(VALUE imemo);
data/ext/ddtrace_profiling_native_extension/profiling.c
@@ -41,6 +41,7 @@ void DDTRACE_EXPORT Init_ddtrace_profiling_native_extension(void) {
   rb_define_singleton_method(native_extension_module, "native_working?", native_working_p, 0);
   rb_funcall(native_extension_module, rb_intern("private_class_method"), 1, ID2SYM(rb_intern("native_working?")));
 
+  ruby_helpers_init();
   collectors_cpu_and_wall_time_worker_init(profiling_module);
   collectors_dynamic_sampling_rate_init(profiling_module);
   collectors_idle_sampling_helper_init(profiling_module);
@@ -68,6 +69,7 @@ void DDTRACE_EXPORT Init_ddtrace_profiling_native_extension(void) {
 
 static VALUE native_working_p(DDTRACE_UNUSED VALUE _self) {
   self_test_clock_id();
+  self_test_mn_enabled();
 
   return Qtrue;
 }
data/ext/ddtrace_profiling_native_extension/ruby_helpers.c
@@ -4,6 +4,22 @@
 #include "ruby_helpers.h"
 #include "private_vm_api_access.h"
 
+// The following global variables are initialized at startup to save expensive lookups later.
+// They are not expected to be mutated outside of init.
+static VALUE module_object_space = Qnil;
+static ID _id2ref_id = Qnil;
+static ID inspect_id = Qnil;
+static ID to_s_id = Qnil;
+
+void ruby_helpers_init(void) {
+  rb_global_variable(&module_object_space);
+
+  module_object_space = rb_const_get(rb_cObject, rb_intern("ObjectSpace"));
+  _id2ref_id = rb_intern("_id2ref");
+  inspect_id = rb_intern("inspect");
+  to_s_id = rb_intern("to_s");
+}
+
 void raise_unexpected_type(
   VALUE value,
   const char *value_name,
@@ -108,3 +124,134 @@ void raise_syserr(
     grab_gvl_and_raise_syserr(syserr_errno, "Failure returned by '%s' at %s:%d:in `%s'", expression, file, line, function_name);
   }
 }
+
+char* ruby_strndup(const char *str, size_t size) {
+  char *dup;
+
+  dup = xmalloc(size + 1);
+  memcpy(dup, str, size);
+  dup[size] = '\0';
+
+  return dup;
+}
+
+static VALUE _id2ref(VALUE obj_id) {
+  // Call ::ObjectSpace._id2ref natively. It will raise if the id is no longer valid
+  return rb_funcall(module_object_space, _id2ref_id, 1, obj_id);
+}
+
+static VALUE _id2ref_failure(DDTRACE_UNUSED VALUE _unused1, DDTRACE_UNUSED VALUE _unused2) {
+  return Qfalse;
+}
+
+// Native wrapper to get an object ref from an id. Returns true on success and
+// writes the ref to the value pointer parameter if !NULL. False if id doesn't
+// reference a valid object (in which case value is not changed).
+bool ruby_ref_from_id(VALUE obj_id, VALUE *value) {
+  // Call ::ObjectSpace._id2ref natively. It will raise if the id is no longer valid
+  // so we need to call it via rb_rescue2
+  // TODO: Benchmark rb_rescue2 vs rb_protect here
+  VALUE result = rb_rescue2(
+    _id2ref,
+    obj_id,
+    _id2ref_failure,
+    Qnil,
+    rb_eRangeError, // rb_eRangeError is the error used to flag invalid ids
+    0 // Required by API to be the last argument
+  );
+
+  if (result == Qfalse) {
+    return false;
+  }
+
+  if (value != NULL) {
+    (*value) = result;
+  }
+
+  return true;
+}
+
+// Not part of public headers but is externed from Ruby
+size_t rb_obj_memsize_of(VALUE obj);
+
+// Wrapper around rb_obj_memsize_of to avoid hitting crashing paths.
+//
+// The crashing paths are due to calls to rb_bug so should hopefully
+// be situations that can't happen. But given that rb_obj_memsize_of
+// isn't fully public (it's externed but not part of public headers)
+// there is a possibility that it is just assumed that whoever calls
+// it, will do proper checking for those cases. We want to be cautious
+// so we'll assume that's the case and will skip over known crashing
+// paths in this wrapper.
+size_t ruby_obj_memsize_of(VALUE obj) {
+  switch (rb_type(obj)) {
+    case T_OBJECT:
+    case T_MODULE:
+    case T_CLASS:
+    case T_ICLASS:
+    case T_STRING:
+    case T_ARRAY:
+    case T_HASH:
+    case T_REGEXP:
+    case T_DATA:
+    case T_MATCH:
+    case T_FILE:
+    case T_RATIONAL:
+    case T_COMPLEX:
+    case T_IMEMO:
+    case T_FLOAT:
+    case T_SYMBOL:
+    case T_BIGNUM:
+    // case T_NODE: -> Crashes the vm in rb_obj_memsize_of
+    case T_STRUCT:
+    case T_ZOMBIE:
+    #ifndef NO_T_MOVED
+    case T_MOVED:
+    #endif
+      return rb_obj_memsize_of(obj);
+    default:
+      // Unsupported, return 0 instead of erroring like rb_obj_memsize_of likes doing
+      return 0;
+  }
+}
+
+// Inspired by rb_class_of but without actually returning classes or potentially doing assertions
+static bool ruby_is_obj_with_class(VALUE obj) {
+  if (!RB_SPECIAL_CONST_P(obj)) {
+    return true;
+  }
+  if (obj == RUBY_Qfalse) {
+    return true;
+  }
+  else if (obj == RUBY_Qnil) {
+    return true;
+  }
+  else if (obj == RUBY_Qtrue) {
+    return true;
+  }
+  else if (RB_FIXNUM_P(obj)) {
+    return true;
+  }
+  else if (RB_STATIC_SYM_P(obj)) {
+    return true;
+  }
+  else if (RB_FLONUM_P(obj)) {
+    return true;
+  }
+
+  return false;
+}
+
+VALUE ruby_safe_inspect(VALUE obj) {
+  if (!ruby_is_obj_with_class(obj)) {
+    return rb_str_new_cstr("(Not an object)");
+  }
+
+  if (rb_respond_to(obj, inspect_id)) {
+    return rb_sprintf("%+"PRIsVALUE, obj);
+  } else if (rb_respond_to(obj, to_s_id)) {
+    return rb_sprintf("%"PRIsVALUE, obj);
+  } else {
+    return rb_str_new_cstr("(Not inspectable)");
+  }
+}
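ruby_ref_from_id wraps ObjectSpace._id2ref in rb_rescue2 so that a stale id simply yields false instead of raising, and ruby_safe_inspect only calls inspect or to_s when the value can answer them. A hedged sketch combining these helpers with ruby_obj_memsize_of follows; the surrounding function and where the stored id comes from are illustrative, not from this release.

// Sketch only: resolve a previously-stored object id and describe the object, if it is still alive.
static VALUE describe_tracked_object_sketch(VALUE stored_obj_id) {
  VALUE object = Qnil;
  if (!ruby_ref_from_id(stored_obj_id, &object)) {
    // The id no longer maps to a live object; the internal _id2ref call raised RangeError.
    return rb_str_new_cstr("(object no longer alive)");
  }
  // Approximate size (0 for types rb_obj_memsize_of cannot handle safely)...
  size_t approximate_size = ruby_obj_memsize_of(object);
  // ...plus a safe textual description that falls back to to_s or a placeholder string.
  VALUE description = ruby_safe_inspect(object);
  return rb_sprintf("%"PRIsVALUE" (~%lu bytes)", description, (unsigned long) approximate_size);
}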
data/ext/ddtrace_profiling_native_extension/ruby_helpers.h
@@ -5,6 +5,10 @@
 
 #include "helpers.h"
 
+// Initialize internal data needed by some ruby helpers. Should be called during start, before any actual
+// usage of ruby helpers.
+void ruby_helpers_init(void);
+
 // Processes any pending interruptions, including exceptions to be raised.
 // If there's an exception to be raised, it raises it. In that case, this function does not return.
 static inline VALUE process_pending_interruptions(DDTRACE_UNUSED VALUE _) {
@@ -87,3 +91,27 @@ NORETURN(void raise_syserr(
   int line,
   const char *function_name
 ));
+
+// Alternative to ruby_strdup that takes a size argument.
+// Similar to C's strndup but slightly less smart as size is expected to
+// be smaller or equal to the real size of str (minus null termination if it
+// exists).
+// A new string will be returned with size+1 bytes and last byte set to '\0'.
+// The returned string must be freed explicitly.
+//
+// WARN: Cannot be used during GC or outside the GVL.
+char* ruby_strndup(const char *str, size_t size);
+
+// Native wrapper to get an object ref from an id. Returns true on success and
+// writes the ref to the value pointer parameter if !NULL. False if id doesn't
+// reference a valid object (in which case value is not changed).
+bool ruby_ref_from_id(size_t id, VALUE *value);
+
+// Native wrapper to get the approximate/estimated current size of the passed
+// object.
+size_t ruby_obj_memsize_of(VALUE obj);
+
+// Safely inspect any ruby object. If the object responds to 'inspect',
+// return a string with the result of that call. Elsif the object responds to
+// 'to_s', return a string with the result of that call. Otherwise, return Qnil.
+VALUE ruby_safe_inspect(VALUE obj);