datadog 2.8.0 → 2.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +36 -1
- data/ext/datadog_profiling_native_extension/clock_id.h +2 -2
- data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +64 -54
- data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c +1 -1
- data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h +1 -1
- data/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c +16 -16
- data/ext/datadog_profiling_native_extension/collectors_stack.c +7 -7
- data/ext/datadog_profiling_native_extension/collectors_thread_context.c +219 -122
- data/ext/datadog_profiling_native_extension/heap_recorder.h +1 -1
- data/ext/datadog_profiling_native_extension/http_transport.c +4 -4
- data/ext/datadog_profiling_native_extension/private_vm_api_access.c +3 -0
- data/ext/datadog_profiling_native_extension/private_vm_api_access.h +3 -1
- data/ext/datadog_profiling_native_extension/profiling.c +10 -8
- data/ext/datadog_profiling_native_extension/ruby_helpers.c +8 -8
- data/ext/datadog_profiling_native_extension/stack_recorder.c +54 -54
- data/ext/datadog_profiling_native_extension/stack_recorder.h +1 -1
- data/ext/datadog_profiling_native_extension/time_helpers.h +1 -1
- data/ext/datadog_profiling_native_extension/unsafe_api_calls_check.c +47 -0
- data/ext/datadog_profiling_native_extension/unsafe_api_calls_check.h +31 -0
- data/ext/libdatadog_api/crashtracker.c +3 -0
- data/lib/datadog/appsec/assets/waf_rules/recommended.json +355 -157
- data/lib/datadog/appsec/assets/waf_rules/strict.json +62 -32
- data/lib/datadog/appsec/context.rb +54 -0
- data/lib/datadog/appsec/contrib/active_record/instrumentation.rb +7 -7
- data/lib/datadog/appsec/contrib/devise/patcher/authenticatable_patch.rb +6 -6
- data/lib/datadog/appsec/contrib/devise/patcher/registration_controller_patch.rb +4 -4
- data/lib/datadog/appsec/contrib/graphql/gateway/watcher.rb +19 -28
- data/lib/datadog/appsec/contrib/graphql/reactive/multiplex.rb +5 -5
- data/lib/datadog/appsec/contrib/rack/gateway/response.rb +3 -3
- data/lib/datadog/appsec/contrib/rack/gateway/watcher.rb +64 -96
- data/lib/datadog/appsec/contrib/rack/reactive/request.rb +10 -10
- data/lib/datadog/appsec/contrib/rack/reactive/request_body.rb +5 -5
- data/lib/datadog/appsec/contrib/rack/reactive/response.rb +6 -6
- data/lib/datadog/appsec/contrib/rack/request_body_middleware.rb +10 -11
- data/lib/datadog/appsec/contrib/rack/request_middleware.rb +43 -49
- data/lib/datadog/appsec/contrib/rails/gateway/watcher.rb +21 -32
- data/lib/datadog/appsec/contrib/rails/patcher.rb +1 -1
- data/lib/datadog/appsec/contrib/rails/reactive/action.rb +6 -6
- data/lib/datadog/appsec/contrib/sinatra/gateway/watcher.rb +41 -63
- data/lib/datadog/appsec/contrib/sinatra/patcher.rb +2 -2
- data/lib/datadog/appsec/contrib/sinatra/reactive/routed.rb +5 -5
- data/lib/datadog/appsec/event.rb +6 -6
- data/lib/datadog/appsec/ext.rb +3 -1
- data/lib/datadog/appsec/monitor/gateway/watcher.rb +22 -32
- data/lib/datadog/appsec/monitor/reactive/set_user.rb +5 -5
- data/lib/datadog/appsec/processor/rule_loader.rb +0 -3
- data/lib/datadog/appsec.rb +3 -3
- data/lib/datadog/auto_instrument.rb +3 -0
- data/lib/datadog/core/configuration/agent_settings_resolver.rb +39 -11
- data/lib/datadog/core/configuration/components.rb +4 -2
- data/lib/datadog/core/configuration.rb +1 -1
- data/lib/datadog/{tracing → core}/contrib/rails/utils.rb +1 -3
- data/lib/datadog/core/crashtracking/component.rb +1 -3
- data/lib/datadog/core/telemetry/event.rb +87 -3
- data/lib/datadog/core/telemetry/logging.rb +2 -2
- data/lib/datadog/core/telemetry/metric.rb +22 -0
- data/lib/datadog/core/telemetry/worker.rb +33 -0
- data/lib/datadog/di/base.rb +115 -0
- data/lib/datadog/di/code_tracker.rb +7 -4
- data/lib/datadog/di/component.rb +17 -11
- data/lib/datadog/di/configuration/settings.rb +11 -1
- data/lib/datadog/di/contrib/railtie.rb +15 -0
- data/lib/datadog/di/contrib.rb +26 -0
- data/lib/datadog/di/error.rb +5 -0
- data/lib/datadog/di/instrumenter.rb +39 -18
- data/lib/datadog/di/{init.rb → preload.rb} +2 -4
- data/lib/datadog/di/probe_manager.rb +4 -4
- data/lib/datadog/di/probe_notification_builder.rb +16 -2
- data/lib/datadog/di/probe_notifier_worker.rb +5 -6
- data/lib/datadog/di/remote.rb +4 -4
- data/lib/datadog/di/transport.rb +2 -4
- data/lib/datadog/di.rb +5 -108
- data/lib/datadog/kit/appsec/events.rb +3 -3
- data/lib/datadog/kit/identity.rb +4 -4
- data/lib/datadog/profiling/component.rb +55 -53
- data/lib/datadog/profiling/http_transport.rb +1 -26
- data/lib/datadog/tracing/contrib/action_cable/integration.rb +5 -2
- data/lib/datadog/tracing/contrib/action_mailer/integration.rb +6 -2
- data/lib/datadog/tracing/contrib/action_pack/integration.rb +5 -2
- data/lib/datadog/tracing/contrib/action_view/integration.rb +5 -2
- data/lib/datadog/tracing/contrib/active_job/integration.rb +5 -2
- data/lib/datadog/tracing/contrib/active_record/integration.rb +6 -2
- data/lib/datadog/tracing/contrib/active_support/cache/events/cache.rb +3 -1
- data/lib/datadog/tracing/contrib/active_support/cache/instrumentation.rb +3 -1
- data/lib/datadog/tracing/contrib/active_support/configuration/settings.rb +10 -0
- data/lib/datadog/tracing/contrib/active_support/integration.rb +5 -2
- data/lib/datadog/tracing/contrib/auto_instrument.rb +2 -2
- data/lib/datadog/tracing/contrib/aws/integration.rb +3 -0
- data/lib/datadog/tracing/contrib/concurrent_ruby/integration.rb +3 -0
- data/lib/datadog/tracing/contrib/httprb/integration.rb +3 -0
- data/lib/datadog/tracing/contrib/kafka/integration.rb +3 -0
- data/lib/datadog/tracing/contrib/mongodb/integration.rb +3 -0
- data/lib/datadog/tracing/contrib/opensearch/integration.rb +3 -0
- data/lib/datadog/tracing/contrib/presto/integration.rb +3 -0
- data/lib/datadog/tracing/contrib/rack/integration.rb +2 -2
- data/lib/datadog/tracing/contrib/rails/framework.rb +2 -2
- data/lib/datadog/tracing/contrib/rails/patcher.rb +1 -1
- data/lib/datadog/tracing/contrib/rest_client/integration.rb +3 -0
- data/lib/datadog/tracing/span.rb +12 -4
- data/lib/datadog/tracing/span_event.rb +123 -3
- data/lib/datadog/tracing/span_operation.rb +6 -0
- data/lib/datadog/tracing/transport/serializable_trace.rb +24 -6
- data/lib/datadog/version.rb +1 -1
- metadata +19 -10
- data/lib/datadog/appsec/reactive/operation.rb +0 -68
- data/lib/datadog/appsec/scope.rb +0 -58
- data/lib/datadog/core/crashtracking/agent_base_url.rb +0 -21
checksums.yaml CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5666f79db7cd7abdb43c961e51978ef2711ec4566ba8090ebafeb074d06dec33
+  data.tar.gz: 907d0787aedf67f9db8ff57a2f16b6216cbc5514bb2a47005026f0e5263e7168
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 22a55675bb273845ec829245812e9f447767878d924083d505ddc8ec6dc97c28770252b3bbb25f0811ab4bf92e44bf2e7439815b61bc14c8d98a8025c7f1db13
+  data.tar.gz: 6a71cfdfbb3bd78949c23e11cbfcc9cea6a582925812862e0ff9a8c8f6ecded1807ee0d52e36c4422cb49181e0138e5e5d566a9895ccf46c0a0cd91fc74cfd64
data/CHANGELOG.md CHANGED

@@ -2,6 +2,28 @@
 
 ## [Unreleased]
 
+## [2.9.0] - 2025-01-15
+
+### Added
+
+* Core: add support for Ruby 3.4 ([#4249][])
+* Integrations: add a new option for `ActiveSupport` to disable adding the `cache_key` as a Span Tag with the `cache_key_enabled` option ([#4022][])
+
+### Changed
+
+* Dynamic instrumentation: move DI preloading to `datadog/di/preload` ([#4288][])
+* Dynamic instrumentation: dd-trace-rb now reports whether dynamic instrumentation is enabled in startup summary report ([#4285][])
+* Dynamic instrumentation: improve loading of DI components ([#4272][], [#4239][])
+* Dynamic instrumentation: logging of internal conditions is now done on debug level ([#4266][])
+* Dynamic instrumentation: report instrumentation error for line probes when the target file is loaded but not in code tracker registry ([#4208][])
+* Profiling: require datadog-ruby_core_source >= 3.3.7 to ensure Ruby 3.4 support ([#4228][])
+
+### Fixed
+
+* Core: fix a crash in crashtracker when agent hostname is an IPv6 address ([#4237][])
+* Profiling: fix allocation profiling + otel tracing causing Ruby crash ([#4240][])
+* Profiling: fix profiling warnings being really hard to silence ([#4232][])
+
 ## [2.8.0] - 2024-12-10
 
 ### Added
@@ -3057,7 +3079,8 @@ Release notes: https://github.com/DataDog/dd-trace-rb/releases/tag/v0.3.1
 Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 
 
-[Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v2.
+[Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v2.9.0...master
+[2.9.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.8.0...v2.9.0
 [2.8.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.7.1...v2.8.0
 [2.7.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.6.0...v2.7.0
 [2.6.0]: https://github.com/DataDog/dd-trace-rb/compare/v2.5.0...v2.6.0
@@ -4499,6 +4522,7 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 [#3997]: https://github.com/DataDog/dd-trace-rb/issues/3997
 [#4014]: https://github.com/DataDog/dd-trace-rb/issues/4014
 [#4020]: https://github.com/DataDog/dd-trace-rb/issues/4020
+[#4022]: https://github.com/DataDog/dd-trace-rb/issues/4022
 [#4024]: https://github.com/DataDog/dd-trace-rb/issues/4024
 [#4027]: https://github.com/DataDog/dd-trace-rb/issues/4027
 [#4033]: https://github.com/DataDog/dd-trace-rb/issues/4033
@@ -4519,6 +4543,17 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 [#4195]: https://github.com/DataDog/dd-trace-rb/issues/4195
 [#4196]: https://github.com/DataDog/dd-trace-rb/issues/4196
 [#4197]: https://github.com/DataDog/dd-trace-rb/issues/4197
+[#4208]: https://github.com/DataDog/dd-trace-rb/issues/4208
+[#4228]: https://github.com/DataDog/dd-trace-rb/issues/4228
+[#4232]: https://github.com/DataDog/dd-trace-rb/issues/4232
+[#4237]: https://github.com/DataDog/dd-trace-rb/issues/4237
+[#4239]: https://github.com/DataDog/dd-trace-rb/issues/4239
+[#4240]: https://github.com/DataDog/dd-trace-rb/issues/4240
+[#4249]: https://github.com/DataDog/dd-trace-rb/issues/4249
+[#4266]: https://github.com/DataDog/dd-trace-rb/issues/4266
+[#4272]: https://github.com/DataDog/dd-trace-rb/issues/4272
+[#4285]: https://github.com/DataDog/dd-trace-rb/issues/4285
+[#4288]: https://github.com/DataDog/dd-trace-rb/issues/4288
 [@AdrianLC]: https://github.com/AdrianLC
 [@Azure7111]: https://github.com/Azure7111
 [@BabyGroot]: https://github.com/BabyGroot
data/ext/datadog_profiling_native_extension/clock_id.h CHANGED

@@ -5,13 +5,13 @@
 #include <ruby.h>
 
 // Contains the operating-system specific identifier needed to fetch CPU-time, and a flag to indicate if we failed to fetch it
-typedef struct
+typedef struct {
   bool valid;
   clockid_t clock_id;
 } thread_cpu_time_id;
 
 // Contains the current cpu time, and a flag to indicate if we failed to fetch it
-typedef struct
+typedef struct {
   bool valid;
   long result_ns;
 } thread_cpu_time;
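For context on what these two structs carry: on POSIX systems a thread's CPU-time clock is identified by a `clockid_t`, resolved once per thread and then read with `clock_gettime`; the `valid` flags record whether either step failed. A sketch of how such structs might be filled using only standard POSIX calls (the helper names here are hypothetical, only the field names mirror the header):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

typedef struct {
  bool valid;
  clockid_t clock_id;
} thread_cpu_time_id;

typedef struct {
  bool valid;
  long result_ns;
} thread_cpu_time;

// Resolve the CPU-time clock for a thread once; it can then be read many times.
static thread_cpu_time_id cpu_time_id_for(pthread_t thread) {
  thread_cpu_time_id result = { .valid = false, .clock_id = 0 };
  result.valid = (pthread_getcpuclockid(thread, &result.clock_id) == 0);
  return result;
}

// Read the current CPU time for a previously-resolved clock id, in nanoseconds.
static thread_cpu_time cpu_time_now(thread_cpu_time_id id) {
  thread_cpu_time result = { .valid = false, .result_ns = 0 };
  struct timespec now;
  if (id.valid && clock_gettime(id.clock_id, &now) == 0) {
    result.valid = true;
    result.result_ns = (long) now.tv_sec * 1000000000L + now.tv_nsec;
  }
  return result;
}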
data/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c CHANGED

@@ -92,7 +92,7 @@ unsigned int MAX_ALLOC_WEIGHT = 10000;
 #endif
 
 // Contains state for a single CpuAndWallTimeWorker instance
-struct
+typedef struct {
   // These are immutable after initialization
 
   bool gc_profiling_enabled;
@@ -187,7 +187,7 @@ struct cpu_and_wall_time_worker_state {
     uint64_t gvl_sampling_time_ns_max;
     uint64_t gvl_sampling_time_ns_total;
   } stats;
-};
+} cpu_and_wall_time_worker_state;
 
 static VALUE _native_new(VALUE klass);
 static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _self);
@@ -195,7 +195,7 @@ static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr);
 static VALUE _native_sampling_loop(VALUE self, VALUE instance);
 static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE worker_thread);
 static VALUE stop(VALUE self_instance, VALUE optional_exception);
-static void stop_state(
+static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception);
 static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext);
 static void *run_sampling_trigger_loop(void *state_ptr);
 static void interrupt_sampling_trigger_loop(void *state_ptr);
@@ -221,14 +221,14 @@ static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance);
 static VALUE _native_stats_reset_not_thread_safe(DDTRACE_UNUSED VALUE self, VALUE instance);
 void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused);
 static void grab_gvl_and_sample(void);
-static void reset_stats_not_thread_safe(
+static void reset_stats_not_thread_safe(cpu_and_wall_time_worker_state *state);
 static void sleep_for(uint64_t time_ns);
 static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self);
 static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *unused2);
-static void disable_tracepoints(
+static void disable_tracepoints(cpu_and_wall_time_worker_state *state);
 static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self);
 static VALUE rescued_sample_allocation(VALUE tracepoint_data);
-static void delayed_error(
+static void delayed_error(cpu_and_wall_time_worker_state *state, const char *error);
 static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg);
 static VALUE _native_hold_signals(DDTRACE_UNUSED VALUE self);
 static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self);
@@ -262,7 +262,7 @@ static VALUE _native_gvl_profiling_hook_active(DDTRACE_UNUSED VALUE self, VALUE
 // This global state is needed because a bunch of functions on this file need to access it from situations
 // (e.g. signal handler) where it's impossible or just awkward to pass it as an argument.
 static VALUE active_sampler_instance = Qnil;
-static
+static cpu_and_wall_time_worker_state *active_sampler_instance_state = NULL;
 
 // See handle_sampling_signal for details on what this does
 #ifdef NO_POSTPONED_TRIGGER
@@ -334,7 +334,7 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
   rb_define_singleton_method(testing_module, "_native_gvl_profiling_hook_active", _native_gvl_profiling_hook_active, 1);
 }
 
-// This structure is used to define a Ruby object that stores a pointer to a
+// This structure is used to define a Ruby object that stores a pointer to a cpu_and_wall_time_worker_state
 // See also https://github.com/ruby/ruby/blob/master/doc/extension.rdoc for how this works
 static const rb_data_type_t cpu_and_wall_time_worker_typed_data = {
   .wrap_struct_name = "Datadog::Profiling::Collectors::CpuAndWallTimeWorker",
@@ -350,7 +350,7 @@ static const rb_data_type_t cpu_and_wall_time_worker_typed_data = {
 static VALUE _native_new(VALUE klass) {
   long now = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);
 
-
+  cpu_and_wall_time_worker_state *state = ruby_xcalloc(1, sizeof(cpu_and_wall_time_worker_state));
 
   // Note: Any exceptions raised from this note until the TypedData_Wrap_Struct call will lead to the state memory
   // being leaked.
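Nearly all of the churn in this file is one mechanical pattern: the worker state moves from a named `struct` to a `typedef`, and every native method recovers it through Ruby's TypedData API — allocate with `ruby_xcalloc`, wrap with `TypedData_Wrap_Struct`, fetch back with `TypedData_Get_Struct`, and mark embedded `VALUE`s so the GC keeps them alive. A minimal self-contained sketch of that lifecycle (a hypothetical `example_state`, not the gem's fields):

#include <ruby.h>
#include <stdbool.h>

typedef struct {
  bool enabled;
  VALUE callback; // Ruby object reference: must be marked so the GC keeps it alive
} example_state;

// GC hooks for the wrapped struct: mark embedded VALUEs, free the C memory.
static void example_state_mark(void *ptr) {
  example_state *state = (example_state *) ptr;
  rb_gc_mark(state->callback);
}

static void example_state_free(void *ptr) {
  ruby_xfree(ptr);
}

static const rb_data_type_t example_typed_data = {
  .wrap_struct_name = "Example::State",
  .function = {
    .dmark = example_state_mark,
    .dfree = example_state_free,
    .dsize = NULL,
  },
  .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

// Allocator: zero the struct first so a GC run before full initialization sees sane fields,
// then set VALUE fields explicitly (Qnil is not guaranteed to be all-zero bits).
static VALUE example_native_new(VALUE klass) {
  example_state *state = ruby_xcalloc(1, sizeof(example_state));
  state->callback = Qnil;
  return TypedData_Wrap_Struct(klass, &example_typed_data, state);
}

// Any native method recovers the struct (and type-checks the receiver) the same way:
static VALUE example_enabled_p(VALUE self) {
  example_state *state;
  TypedData_Get_Struct(self, example_state, &example_typed_data, state);
  return state->enabled ? Qtrue : Qfalse;
}

void Init_example(void) {
  VALUE klass = rb_define_class("ExampleState", rb_cObject);
  rb_define_alloc_func(klass, example_native_new);
  rb_define_method(klass, "enabled?", example_enabled_p, 0);
}

The `typedef` rename is what lets all the call sites below drop the `struct` keyword, which accounts for most of the +/- pairs in the remaining hunks.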
@@ -414,8 +414,8 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel
   ENFORCE_BOOLEAN(gvl_profiling_enabled);
   ENFORCE_BOOLEAN(skip_idle_samples_for_testing)
 
-
-  TypedData_Get_Struct(self_instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   state->gc_profiling_enabled = (gc_profiling_enabled == Qtrue);
   state->no_signals_workaround_enabled = (no_signals_workaround_enabled == Qtrue);
@@ -445,7 +445,7 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel
 
 // Since our state contains references to Ruby objects, we need to tell the Ruby GC about them
 static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr) {
-
+  cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr;
 
   rb_gc_mark(state->thread_context_collector_instance);
   rb_gc_mark(state->idle_sampling_helper_instance);
@@ -457,8 +457,8 @@ static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr) {
 
 // Called in a background thread created in CpuAndWallTimeWorker#start
 static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
-
-  TypedData_Get_Struct(instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   // If we already got a delayed exception registered even before starting, raise before starting
   if (state->failure_exception != Qnil) {
@@ -466,7 +466,7 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
     rb_exc_raise(state->failure_exception);
   }
 
-
+  cpu_and_wall_time_worker_state *old_state = active_sampler_instance_state;
   if (old_state != NULL) {
     if (is_thread_alive(old_state->owner_thread)) {
       rb_raise(
@@ -546,15 +546,15 @@
 }
 
 static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE worker_thread) {
-
-  TypedData_Get_Struct(self_instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   state->stop_thread = worker_thread;
 
   return stop(self_instance, /* optional_exception: */ Qnil);
 }
 
-static void stop_state(
+static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception) {
   atomic_store(&state->should_run, false);
   state->failure_exception = optional_exception;
 
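`stop_state` flips `should_run` with `atomic_store` because the flag is read concurrently by the background trigger loop, and by interrupt callbacks, without any lock. A minimal sketch of that single-flag shutdown idiom in C11 (hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

typedef struct {
  atomic_bool should_run; // Initialize with atomic_init(&state->should_run, true) before starting
} worker_state;

// Worker loop: re-reads the flag on every iteration; no mutex needed for a single flag.
static void run_loop(worker_state *state) {
  while (atomic_load(&state->should_run)) {
    // ... do one unit of work ...
    usleep(10 * 1000); // Sleep between iterations (the real worker paces signals similarly)
  }
}

// Called from another thread (or a VM shutdown callback) to request the loop to exit.
static void request_stop(worker_state *state) {
  atomic_store(&state->should_run, false);
}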
@@ -563,8 +563,8 @@ static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optio
 }
 
 static VALUE stop(VALUE self_instance, VALUE optional_exception) {
-
-  TypedData_Get_Struct(self_instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   stop_state(state, optional_exception);
 
@@ -575,7 +575,7 @@
 // We need to be careful not to change any state that may be observed OR to restore it if we do. For instance, if anything
 // we do here can set `errno`, then we must be careful to restore the old `errno` after the fact.
 static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   // This can potentially happen if the CpuAndWallTimeWorker was stopped while the signal delivery was happening; nothing to do
   if (state == NULL) return;
@@ -650,7 +650,7 @@
 
 // The actual sampling trigger loop always runs **without** the global vm lock.
 static void *run_sampling_trigger_loop(void *state_ptr) {
-
+  cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr;
 
   uint64_t minimum_time_between_signals = MILLIS_AS_NS(10);
 
@@ -709,13 +709,13 @@
 
 // This is called by the Ruby VM when it wants to shut down the background thread
 static void interrupt_sampling_trigger_loop(void *state_ptr) {
-
+  cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr;
 
   atomic_store(&state->should_run, false);
 }
 
 static void sample_from_postponed_job(DDTRACE_UNUSED void *_unused) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do
   if (state == NULL) return;
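These hunks touch the profiler's central control flow: a background thread sends SIGPROF on a timer, the async-signal-safe handler does almost no work, and the actual sampling happens later in a postponed job that the VM runs at a safe point with the GVL held. A compressed sketch of that shape, using the same `rb_postponed_job_register_one` API that appears later in this diff (names and structure here are hypothetical simplifications of what the gem does; the real code adds stats, dynamic sampling, and fallbacks for Rubies without this API):

#include <ruby.h>
#include <ruby/debug.h>
#include <signal.h>

typedef struct {
  VALUE recorder; // Ruby-side object that samples get written to
} sampler_state;

static sampler_state *active_state = NULL; // Global: a signal handler cannot take arguments

static void sample_from_postponed_job(void *_unused);

// Async-signal-safe part: runs on whatever thread received SIGPROF (installed via
// sigaction with SA_SIGINFO). It must not allocate, lock, or call into the VM;
// it only schedules work for a safe point.
static void handle_sampling_signal(int _signal, siginfo_t *_info, void *_ucontext) {
  if (active_state == NULL) return; // Sampler stopped mid-delivery; nothing to do
  rb_postponed_job_register_one(0, sample_from_postponed_job, NULL);
}

// Safe part: the VM calls this with the GVL held, so Ruby APIs are allowed here.
static void sample_from_postponed_job(void *_unused) {
  sampler_state *state = active_state;
  if (state == NULL) return; // Stopped while the job was queued
  // ... take the actual sample, e.g. collect backtraces into state->recorder ...
}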
@@ -735,8 +735,8 @@
 }
 
 static VALUE rescued_sample_from_postponed_job(VALUE self_instance) {
-
-  TypedData_Get_Struct(self_instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   long wall_time_ns_before_sample = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);
 
@@ -791,8 +791,8 @@ static VALUE _native_current_sigprof_signal_handler(DDTRACE_UNUSED VALUE self) {
 }
 
 static VALUE release_gvl_and_run_sampling_trigger_loop(VALUE instance) {
-
-  TypedData_Get_Struct(instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   // Final preparations: Setup signal handler and enable tracepoints. We run these here and not in `_native_sampling_loop`
   // because they may raise exceptions.
@@ -842,7 +842,7 @@
 // This method exists only to enable testing Datadog::Profiling::Collectors::CpuAndWallTimeWorker behavior using RSpec.
 // It SHOULD NOT be used for other purposes.
 static VALUE _native_is_running(DDTRACE_UNUSED VALUE self, VALUE instance) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   return (state != NULL && is_thread_alive(state->owner_thread) && state->self_instance == instance) ? Qtrue : Qfalse;
 }
@@ -875,8 +875,8 @@ static VALUE _native_trigger_sample(DDTRACE_UNUSED VALUE self) {
 // This method exists only to enable testing Datadog::Profiling::Collectors::CpuAndWallTimeWorker behavior using RSpec.
 // It SHOULD NOT be used for other purposes.
 static VALUE _native_gc_tracepoint(DDTRACE_UNUSED VALUE self, VALUE instance) {
-
-  TypedData_Get_Struct(instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   return state->gc_tracepoint;
 }
@@ -902,7 +902,7 @@ static void on_gc_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused) {
   int event = rb_tracearg_event_flag(rb_tracearg_from_tracepoint(tracepoint_data));
   if (event != RUBY_INTERNAL_EVENT_GC_ENTER && event != RUBY_INTERNAL_EVENT_GC_EXIT) return; // Unknown event
 
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   // This should not happen in a normal situation because the tracepoint is always enabled after the instance is set
   // and disabled before it is cleared, but just in case...
@@ -926,7 +926,7 @@ static void on_gc_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused) {
 }
 
 static void after_gc_from_postponed_job(DDTRACE_UNUSED void *_unused) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do
   if (state == NULL) return;
@@ -981,8 +981,8 @@ static VALUE _native_simulate_sample_from_postponed_job(DDTRACE_UNUSED VALUE sel
 // In the future, if we add more other components with tracepoints, we will need to coordinate stopping all such
 // tracepoints before doing the other cleaning steps.
 static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE instance) {
-
-  TypedData_Get_Struct(instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   // Disable all tracepoints, so that there are no more attempts to mutate the profile
   disable_tracepoints(state);
@@ -1000,8 +1000,8 @@ static VALUE _native_is_sigprof_blocked_in_current_thread(DDTRACE_UNUSED VALUE s
 }
 
 static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance) {
-
-  TypedData_Get_Struct(instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   unsigned long total_cpu_samples_attempted = state->stats.cpu_sampled + state->stats.cpu_skipped;
   VALUE effective_cpu_sample_rate =
@@ -1059,14 +1059,14 @@
 }
 
 static VALUE _native_stats_reset_not_thread_safe(DDTRACE_UNUSED VALUE self, VALUE instance) {
-
-  TypedData_Get_Struct(instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
   reset_stats_not_thread_safe(state);
   return Qnil;
 }
 
 void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   // This can potentially happen if the CpuAndWallTimeWorker was stopped while the IdleSamplingHelper was trying to execute this action
   if (state == NULL) return NULL;
@@ -1082,7 +1082,7 @@
 
 static void grab_gvl_and_sample(void) { rb_thread_call_with_gvl(simulate_sampling_signal_delivery, NULL); }
 
-static void reset_stats_not_thread_safe(
+static void reset_stats_not_thread_safe(cpu_and_wall_time_worker_state *state) {
   // NOTE: This is not really thread safe so ongoing sampling operations that are concurrent with a reset can have their stats:
   // * Lost (writes after stats retrieval but before reset).
   // * Included in the previous stats window (writes before stats retrieval and reset).
@@ -1116,7 +1116,7 @@ static void sleep_for(uint64_t time_ns) {
 }
 
 static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state;
 
   bool are_allocations_being_tracked = state != NULL && state->allocation_profiling_enabled && state->allocation_counting_enabled;
 
@@ -1149,7 +1149,7 @@
 // call `rb_tracearg_from_tracepoint(anything)` anywhere during this function or its callees to get the data, so that's
 // why it's not being passed as an argument.
 static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *unused2) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   // This should not happen in a normal situation because the tracepoint is always enabled after the instance is set
   // and disabled before it is cleared, but just in case...
@@ -1171,6 +1171,16 @@ static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *u
     return;
   }
 
+  // If Ruby is in the middle of raising an exception, we don't want to try to sample. This is because if we accidentally
+  // trigger an exception inside the profiler code, bad things will happen (specifically, Ruby will try to kill off the
+  // thread even though we may try to catch the exception).
+  //
+  // Note that "in the middle of raising an exception" means the exception itself has already been allocated.
+  // What's getting allocated now is probably the backtrace objects (@ivoanjo or at least that's what I've observed)
+  if (is_raised_flag_set(rb_thread_current())) {
+    return;
+  }
+
   // Hot path: Dynamic sampling rate is usually enabled and the sampling decision is usually false
   if (RB_LIKELY(state->dynamic_sampling_rate_enabled && !discrete_dynamic_sampler_should_sample(&state->allocation_sampler))) {
     state->stats.allocation_skipped++;
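The new guard sits at the top of `on_newobj_event`, which already opens with other cheap early exits (a NULL `state` check, and a `during_sample` reentrancy check visible in the surrounding hunks). A hypothetical, stripped-down sketch of that defensive-hook shape — not the gem's actual code, which additionally consults a VM-private raised flag via its `is_raised_flag_set` helper:

#include <ruby.h>
#include <stdbool.h>

typedef struct {
  bool during_sample;    // Reentrancy guard: taking a sample itself allocates objects
  unsigned int skipped;
} hook_state;

static hook_state *active_hook_state = NULL;

// Shape of a defensive NEWOBJ-style event hook: bail out cheaply in every situation
// where sampling would be unsafe, and only then do the expensive work.
static void on_newobj(VALUE _unused1, void *_unused2) {
  hook_state *state = active_hook_state;
  if (state == NULL) return;    // Hook fired after shutdown; nothing to do
  if (state->during_sample) {   // Our own sampling code allocated; don't recurse
    state->skipped++;
    return;
  }
  // The real gem also returns early here when an exception is propagating, using its
  // private raised-flag accessor, so the profiler never allocates mid-raise.
  state->during_sample = true;
  // ... take the allocation sample ...
  state->during_sample = false;
}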
@@ -1225,7 +1235,7 @@ static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *u
   state->during_sample = false;
 }
 
-static void disable_tracepoints(
+static void disable_tracepoints(cpu_and_wall_time_worker_state *state) {
   if (state->gc_tracepoint != Qnil) {
     rb_tracepoint_disable(state->gc_tracepoint);
   }
@@ -1254,7 +1264,7 @@ static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self) {
 }
 
 static VALUE rescued_sample_allocation(DDTRACE_UNUSED VALUE unused) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   // This should not happen in a normal situation because on_newobj_event already checked for this, but just in case...
   if (state == NULL) return Qnil;
@@ -1283,7 +1293,7 @@ static VALUE rescued_sample_allocation(DDTRACE_UNUSED VALUE unused) {
   return Qnil;
 }
 
-static void delayed_error(
+static void delayed_error(cpu_and_wall_time_worker_state *state, const char *error) {
   // If we can't raise an immediate exception at the calling site, use the asynchronous flow through the main worker loop.
   stop_state(state, rb_exc_new_cstr(rb_eRuntimeError, error));
 }
@@ -1291,8 +1301,8 @@ static void delayed_error(struct cpu_and_wall_time_worker_state *state, const ch
 static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg) {
   ENFORCE_TYPE(error_msg, T_STRING);
 
-
-  TypedData_Get_Struct(instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   delayed_error(state, rb_string_value_cstr(&error_msg));
 
@@ -1345,7 +1355,7 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
     rb_postponed_job_register_one(0, after_gvl_running_from_postponed_job, NULL);
     #endif
   } else if (result == ON_GVL_RUNNING_DONT_SAMPLE) {
-
+    cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
     if (state == NULL) return; // This should not happen, but just in case...
 
@@ -1358,7 +1368,7 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
 }
 
 static void after_gvl_running_from_postponed_job(DDTRACE_UNUSED void *_unused) {
-
+  cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above
 
   // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do
   if (state == NULL) return;
@@ -1372,8 +1382,8 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
 }
 
 static VALUE rescued_after_gvl_running_from_postponed_job(VALUE self_instance) {
-
-  TypedData_Get_Struct(self_instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   long wall_time_ns_before_sample = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);
   thread_context_collector_sample_after_gvl_running(state->thread_context_collector_instance, rb_thread_current(), wall_time_ns_before_sample);
@@ -1394,8 +1404,8 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) {
 }
 
 static VALUE _native_gvl_profiling_hook_active(DDTRACE_UNUSED VALUE self, VALUE instance) {
-
-  TypedData_Get_Struct(instance,
+  cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
 
   return state->gvl_profiling_hook != NULL ? Qtrue : Qfalse;
 }
data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c CHANGED

@@ -333,7 +333,7 @@ static VALUE _native_should_sample(VALUE self, VALUE now);
 static VALUE _native_after_sample(VALUE self, VALUE now);
 static VALUE _native_state_snapshot(VALUE self);
 
-typedef struct
+typedef struct {
   discrete_dynamic_sampler sampler;
 } sampler_state;
 

data/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h CHANGED

@@ -16,7 +16,7 @@
 // every event and is thus, in theory, susceptible to some pattern
 // biases. In practice, the dynamic readjustment of sampling interval
 // and randomized starting point should help with avoiding heavy biases.
-typedef struct
+typedef struct {
   // --- Config ---
   // Name of this sampler for debug logs.
   const char *debug_name;
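The header comment describes the sampler's strategy: take every Nth event, but keep readjusting N so that sampling overhead stays inside a budget, with a randomized starting point to reduce pattern bias. A deliberately simplified sketch of the readjustment idea (hypothetical: the gem's real sampler works with time budgets and smoothed estimates rather than this crude doubling/halving, and also randomizes where sampling starts):

#include <stdint.h>
#include <stdbool.h>

typedef struct {
  uint64_t events_per_sample;   // Current sampling interval ("sample every Nth event")
  uint64_t events_since_sample; // Events seen since the last sample was taken
  uint64_t target_overhead_ns;  // Budget: sampling time we may spend per window
  uint64_t sampling_time_ns;    // Sampling time actually spent in the current window
} simple_dynamic_sampler;

// Cheap per-event check: most calls just bump a counter and return false.
static bool should_sample(simple_dynamic_sampler *s) {
  s->events_since_sample++;
  if (s->events_since_sample < s->events_per_sample) return false;
  s->events_since_sample = 0;
  return true;
}

// Called at the end of each window: widen the interval when over budget, narrow when under.
static void readjust(simple_dynamic_sampler *s) {
  if (s->sampling_time_ns > s->target_overhead_ns && s->events_per_sample < UINT64_MAX / 2) {
    s->events_per_sample *= 2; // Too expensive: sample less often
  } else if (s->sampling_time_ns < s->target_overhead_ns / 2 && s->events_per_sample > 1) {
    s->events_per_sample /= 2; // Cheap: we can afford more samples
  }
  s->sampling_time_ns = 0;
}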
data/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c CHANGED

@@ -21,15 +21,15 @@
 typedef enum { ACTION_WAIT, ACTION_RUN, ACTION_STOP } action;
 
 // Contains state for a single CpuAndWallTimeWorker instance
-struct
+typedef struct {
   pthread_mutex_t wakeup_mutex;
   pthread_cond_t wakeup;
   action requested_action;
   void (*run_action_function)(void);
-};
+} idle_sampling_loop_state;
 
 static VALUE _native_new(VALUE klass);
-static void reset_state(
+static void reset_state(idle_sampling_loop_state *state);
 static VALUE _native_idle_sampling_loop(DDTRACE_UNUSED VALUE self, VALUE self_instance);
 static VALUE _native_stop(DDTRACE_UNUSED VALUE self, VALUE self_instance);
 static void *run_idle_sampling_loop(void *state_ptr);
@@ -62,7 +62,7 @@ void collectors_idle_sampling_helper_init(VALUE profiling_module) {
   rb_define_singleton_method(testing_module, "_native_idle_sampling_helper_request_action", _native_idle_sampling_helper_request_action, 1);
 }
 
-// This structure is used to define a Ruby object that stores a pointer to a
+// This structure is used to define a Ruby object that stores a pointer to a idle_sampling_loop_state
 // See also https://github.com/ruby/ruby/blob/master/doc/extension.rdoc for how this works
 static const rb_data_type_t idle_sampling_helper_typed_data = {
   .wrap_struct_name = "Datadog::Profiling::Collectors::IdleSamplingHelper",
@@ -76,7 +76,7 @@ static const rb_data_type_t idle_sampling_helper_typed_data = {
 };
 
 static VALUE _native_new(VALUE klass) {
-
+  idle_sampling_loop_state *state = ruby_xcalloc(1, sizeof(idle_sampling_loop_state));
 
   // Note: Any exceptions raised from this note until the TypedData_Wrap_Struct call will lead to the state memory
   // being leaked.
@@ -90,7 +90,7 @@ static VALUE _native_new(VALUE klass) {
   return TypedData_Wrap_Struct(klass, &idle_sampling_helper_typed_data, state);
 }
 
-static void reset_state(
+static void reset_state(idle_sampling_loop_state *state) {
   state->wakeup_mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
   state->wakeup = (pthread_cond_t) PTHREAD_COND_INITIALIZER;
   state->requested_action = ACTION_WAIT;
@@ -101,8 +101,8 @@ static void reset_state(struct idle_sampling_loop_state *state) {
 // a pristine state before recreating the worker thread (this includes resetting the mutex in case it was left
 // locked halfway through the VM forking)
 static VALUE _native_reset(DDTRACE_UNUSED VALUE self, VALUE self_instance) {
-
-  TypedData_Get_Struct(self_instance,
+  idle_sampling_loop_state *state;
+  TypedData_Get_Struct(self_instance, idle_sampling_loop_state, &idle_sampling_helper_typed_data, state);
 
   reset_state(state);
 
@@ -110,8 +110,8 @@ static VALUE _native_reset(DDTRACE_UNUSED VALUE self, VALUE self_instance) {
 }
 
 static VALUE _native_idle_sampling_loop(DDTRACE_UNUSED VALUE self, VALUE self_instance) {
-
-  TypedData_Get_Struct(self_instance,
+  idle_sampling_loop_state *state;
+  TypedData_Get_Struct(self_instance, idle_sampling_loop_state, &idle_sampling_helper_typed_data, state);
 
   // Release GVL and run the loop waiting for requests
   rb_thread_call_without_gvl(run_idle_sampling_loop, state, interrupt_idle_sampling_loop, state);
@@ -120,7 +120,7 @@ static VALUE _native_idle_sampling_loop(DDTRACE_UNUSED VALUE self, VALUE self_in
 }
 
 static void *run_idle_sampling_loop(void *state_ptr) {
-
+  idle_sampling_loop_state *state = (idle_sampling_loop_state *) state_ptr;
   int error = 0;
 
   while (true) {
@@ -164,7 +164,7 @@ static void *run_idle_sampling_loop(void *state_ptr) {
 }
 
 static void interrupt_idle_sampling_loop(void *state_ptr) {
-
+  idle_sampling_loop_state *state = (idle_sampling_loop_state *) state_ptr;
   int error = 0;
 
   // Note about the error handling in this situation: Something bad happening at this stage is really really awkward to
@@ -189,8 +189,8 @@ static void interrupt_idle_sampling_loop(void *state_ptr) {
 }
 
 static VALUE _native_stop(DDTRACE_UNUSED VALUE self, VALUE self_instance) {
-
-  TypedData_Get_Struct(self_instance,
+  idle_sampling_loop_state *state;
+  TypedData_Get_Struct(self_instance, idle_sampling_loop_state, &idle_sampling_helper_typed_data, state);
 
   ENFORCE_SUCCESS_GVL(pthread_mutex_lock(&state->wakeup_mutex));
   state->requested_action = ACTION_STOP;
@@ -204,12 +204,12 @@ static VALUE _native_stop(DDTRACE_UNUSED VALUE self, VALUE self_instance) {
 
 // Assumption: Function gets called without the global VM lock
 void idle_sampling_helper_request_action(VALUE self_instance, void (*run_action_function)(void)) {
-
+  idle_sampling_loop_state *state;
   if (!rb_typeddata_is_kind_of(self_instance, &idle_sampling_helper_typed_data)) {
     grab_gvl_and_raise(rb_eTypeError, "Wrong argument for idle_sampling_helper_request_action");
   }
   // This should never fail the the above check passes
-  TypedData_Get_Struct(self_instance,
+  TypedData_Get_Struct(self_instance, idle_sampling_loop_state, &idle_sampling_helper_typed_data, state);
 
   ENFORCE_SUCCESS_NO_GVL(pthread_mutex_lock(&state->wakeup_mutex));
   if (state->requested_action == ACTION_WAIT) {