ddtrace 1.19.0 → 1.20.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +33 -1
- data/ext/ddtrace_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +38 -23
- data/ext/ddtrace_profiling_native_extension/collectors_discrete_dynamic_sampler.c +349 -0
- data/ext/ddtrace_profiling_native_extension/collectors_discrete_dynamic_sampler.h +89 -0
- data/ext/ddtrace_profiling_native_extension/extconf.rb +3 -0
- data/ext/ddtrace_profiling_native_extension/helpers.h +4 -0
- data/ext/ddtrace_profiling_native_extension/profiling.c +16 -0
- data/ext/ddtrace_profiling_native_extension/time_helpers.h +2 -0
- data/lib/datadog/appsec/contrib/rack/request_middleware.rb +2 -1
- data/lib/datadog/core/configuration/settings.rb +22 -7
- data/lib/datadog/core/environment/class_count.rb +6 -6
- data/lib/datadog/core/remote/component.rb +25 -12
- data/lib/datadog/core/remote/ext.rb +1 -0
- data/lib/datadog/core/remote/tie/tracing.rb +39 -0
- data/lib/datadog/core/remote/tie.rb +27 -0
- data/lib/datadog/opentelemetry/sdk/propagator.rb +3 -2
- data/lib/datadog/opentelemetry.rb +3 -0
- data/lib/datadog/profiling/collectors/cpu_and_wall_time_worker.rb +0 -2
- data/lib/datadog/profiling/component.rb +3 -17
- data/lib/datadog/tracing/configuration/ext.rb +0 -1
- data/lib/datadog/tracing/configuration/settings.rb +2 -1
- data/lib/datadog/tracing/contrib/action_cable/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/action_cable/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/action_mailer/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/action_mailer/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/action_pack/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/action_pack/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/action_view/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/action_view/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/active_job/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/active_job/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/active_model_serializers/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/active_model_serializers/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/active_record/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/active_record/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/active_support/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/active_support/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/analytics.rb +0 -1
- data/lib/datadog/tracing/contrib/aws/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/aws/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/dalli/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/dalli/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/delayed_job/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/delayed_job/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/elasticsearch/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/elasticsearch/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/ethon/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/ethon/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/excon/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/excon/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/faraday/configuration/settings.rb +7 -0
- data/lib/datadog/tracing/contrib/faraday/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/faraday/middleware.rb +1 -1
- data/lib/datadog/tracing/contrib/grape/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/grape/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/graphql/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/graphql/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/grpc/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/grpc/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/http/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/http/distributed/fetcher.rb +2 -2
- data/lib/datadog/tracing/contrib/http/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/httpclient/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/httpclient/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/httprb/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/httprb/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/kafka/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/kafka/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/mongodb/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/mongodb/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/mysql2/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/mysql2/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/opensearch/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/opensearch/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/pg/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/pg/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/presto/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/presto/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/qless/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/qless/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/que/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/que/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/racecar/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/racecar/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/rack/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/rack/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/rack/middlewares.rb +9 -2
- data/lib/datadog/tracing/contrib/rails/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/rails/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/rake/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/rake/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/redis/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/redis/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/redis/instrumentation.rb +2 -2
- data/lib/datadog/tracing/contrib/redis/patcher.rb +34 -21
- data/lib/datadog/tracing/contrib/resque/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/resque/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/rest_client/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/rest_client/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/roda/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/roda/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/sequel/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/sequel/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/shoryuken/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/shoryuken/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/sidekiq/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/sidekiq/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/sinatra/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/sinatra/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/sneakers/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/sneakers/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/stripe/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/stripe/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/sucker_punch/configuration/settings.rb +1 -0
- data/lib/datadog/tracing/contrib/sucker_punch/ext.rb +1 -0
- data/lib/datadog/tracing/contrib/trilogy/configuration/settings.rb +58 -0
- data/lib/datadog/tracing/contrib/trilogy/ext.rb +27 -0
- data/lib/datadog/tracing/contrib/trilogy/instrumentation.rb +94 -0
- data/lib/datadog/tracing/contrib/trilogy/integration.rb +43 -0
- data/lib/datadog/tracing/contrib/trilogy/patcher.rb +31 -0
- data/lib/datadog/tracing/contrib.rb +1 -0
- data/lib/datadog/tracing.rb +8 -2
- data/lib/ddtrace/version.rb +1 -1
- metadata +14 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 1baa733b2ad3335ab214de620e849f1eab598effb15d24f336aa20147e6e34be
|
4
|
+
data.tar.gz: 684bbf501475346f6e2a32b5a9284d59e193344e23d63b912624de77ac8bbbad
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: a10dcafb4ff9e9655e06e149ae83a872a0b614f44d69db15ed120f6aa6990f858bb94897d7b8fe30c1b41fe74dc71f054bdf7f0387ded0c0b66058d0d9a15492
|
7
|
+
data.tar.gz: 87be0dc85c2fc044a0f95096d147dbc79dda101babad85bfdfd29a37c42cd9cb70180fdedae00ff95321fe6eec0e4d61a6aaab852c9c6d3f10fc39ff7f06d87c
|
data/CHANGELOG.md
CHANGED
@@ -2,6 +2,27 @@
|
|
2
2
|
|
3
3
|
## [Unreleased]
|
4
4
|
|
5
|
+
## [1.20.0] - 2024-02-05
|
6
|
+
|
7
|
+
### Added
|
8
|
+
|
9
|
+
* Tracing: Add `Trilogy` instrumentation ([#3274][])
|
10
|
+
* Rack: Add remote configuration boot tags ([#3315][])
|
11
|
+
* Faraday: Add `on_error` option ([#3431][])
|
12
|
+
* Profiling: Add dynamic allocation sampling ([#3395][])
|
13
|
+
|
14
|
+
### Changed
|
15
|
+
|
16
|
+
* Bump `datadog-ci` dependency to 0.7.0 ([#3408][])
|
17
|
+
* Improve performance of gathering ClassCount metric ([#3386][])
|
18
|
+
|
19
|
+
### Fixed
|
20
|
+
|
21
|
+
* OpenTelemetry: Fix internal loading ([#3400][])
|
22
|
+
* Core: Fix logger deadlock ([#3426][])
|
23
|
+
* Rack: Fix missing active trace ([#3420][])
|
24
|
+
* Redis: Fix instance configuration ([#3278][])
|
25
|
+
|
5
26
|
## [1.19.0] - 2024-01-10
|
6
27
|
|
7
28
|
### Highlights
|
@@ -2707,7 +2728,8 @@ Release notes: https://github.com/DataDog/dd-trace-rb/releases/tag/v0.3.1
|
|
2707
2728
|
Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
|
2708
2729
|
|
2709
2730
|
|
2710
|
-
[Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v1.
|
2731
|
+
[Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v1.20.0...master
|
2732
|
+
[1.20.0]: https://github.com/DataDog/dd-trace-rb/compare/v1.19.0...v1.20.0
|
2711
2733
|
[1.19.0]: https://github.com/DataDog/dd-trace-rb/compare/v1.18.0...v1.19.0
|
2712
2734
|
[1.18.0]: https://github.com/DataDog/dd-trace-rb/compare/v1.17.0...v1.18.0
|
2713
2735
|
[1.17.0]: https://github.com/DataDog/dd-trace-rb/compare/v1.16.2...v1.17.0
|
@@ -3936,6 +3958,8 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
|
|
3936
3958
|
[#3270]: https://github.com/DataDog/dd-trace-rb/issues/3270
|
3937
3959
|
[#3271]: https://github.com/DataDog/dd-trace-rb/issues/3271
|
3938
3960
|
[#3273]: https://github.com/DataDog/dd-trace-rb/issues/3273
|
3961
|
+
[#3274]: https://github.com/DataDog/dd-trace-rb/issues/3274
|
3962
|
+
[#3278]: https://github.com/DataDog/dd-trace-rb/issues/3278
|
3939
3963
|
[#3279]: https://github.com/DataDog/dd-trace-rb/issues/3279
|
3940
3964
|
[#3280]: https://github.com/DataDog/dd-trace-rb/issues/3280
|
3941
3965
|
[#3281]: https://github.com/DataDog/dd-trace-rb/issues/3281
|
@@ -3948,6 +3972,7 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
|
|
3948
3972
|
[#3308]: https://github.com/DataDog/dd-trace-rb/issues/3308
|
3949
3973
|
[#3310]: https://github.com/DataDog/dd-trace-rb/issues/3310
|
3950
3974
|
[#3313]: https://github.com/DataDog/dd-trace-rb/issues/3313
|
3975
|
+
[#3315]: https://github.com/DataDog/dd-trace-rb/issues/3315
|
3951
3976
|
[#3316]: https://github.com/DataDog/dd-trace-rb/issues/3316
|
3952
3977
|
[#3317]: https://github.com/DataDog/dd-trace-rb/issues/3317
|
3953
3978
|
[#3320]: https://github.com/DataDog/dd-trace-rb/issues/3320
|
@@ -3965,6 +3990,13 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
|
|
3965
3990
|
[#3366]: https://github.com/DataDog/dd-trace-rb/issues/3366
|
3966
3991
|
[#3373]: https://github.com/DataDog/dd-trace-rb/issues/3373
|
3967
3992
|
[#3374]: https://github.com/DataDog/dd-trace-rb/issues/3374
|
3993
|
+
[#3386]: https://github.com/DataDog/dd-trace-rb/issues/3386
|
3994
|
+
[#3395]: https://github.com/DataDog/dd-trace-rb/issues/3395
|
3995
|
+
[#3400]: https://github.com/DataDog/dd-trace-rb/issues/3400
|
3996
|
+
[#3408]: https://github.com/DataDog/dd-trace-rb/issues/3408
|
3997
|
+
[#3420]: https://github.com/DataDog/dd-trace-rb/issues/3420
|
3998
|
+
[#3426]: https://github.com/DataDog/dd-trace-rb/issues/3426
|
3999
|
+
[#3431]: https://github.com/DataDog/dd-trace-rb/issues/3431
|
3968
4000
|
[@AdrianLC]: https://github.com/AdrianLC
|
3969
4001
|
[@Azure7111]: https://github.com/Azure7111
|
3970
4002
|
[@BabyGroot]: https://github.com/BabyGroot
|
@@ -12,10 +12,14 @@
|
|
12
12
|
#include "collectors_thread_context.h"
|
13
13
|
#include "collectors_dynamic_sampling_rate.h"
|
14
14
|
#include "collectors_idle_sampling_helper.h"
|
15
|
+
#include "collectors_discrete_dynamic_sampler.h"
|
15
16
|
#include "private_vm_api_access.h"
|
16
17
|
#include "setup_signal_handler.h"
|
17
18
|
#include "time_helpers.h"
|
18
19
|
|
20
|
+
// Maximum allowed value for an allocation weight. Attempts to use higher values will result in clamping.
|
21
|
+
unsigned int MAX_ALLOC_WEIGHT = 65535;
|
22
|
+
|
19
23
|
// Used to trigger the execution of Collectors::ThreadState, which implements all of the sampling logic
|
20
24
|
// itself; this class only implements the "when to do it" part.
|
21
25
|
//
|
@@ -89,13 +93,13 @@ struct cpu_and_wall_time_worker_state {
|
|
89
93
|
bool gc_profiling_enabled;
|
90
94
|
bool no_signals_workaround_enabled;
|
91
95
|
bool dynamic_sampling_rate_enabled;
|
92
|
-
int allocation_sample_every;
|
93
96
|
bool allocation_profiling_enabled;
|
94
97
|
VALUE self_instance;
|
95
98
|
VALUE thread_context_collector_instance;
|
96
99
|
VALUE idle_sampling_helper_instance;
|
97
100
|
VALUE owner_thread;
|
98
|
-
dynamic_sampling_rate_state
|
101
|
+
dynamic_sampling_rate_state cpu_dynamic_sampling_rate;
|
102
|
+
discrete_dynamic_sampler allocation_sampler;
|
99
103
|
VALUE gc_tracepoint; // Used to get gc start/finish information
|
100
104
|
VALUE object_allocation_tracepoint; // Used to get allocation counts and allocation profiling
|
101
105
|
|
@@ -159,7 +163,6 @@ static VALUE _native_initialize(
|
|
159
163
|
VALUE no_signals_workaround_enabled,
|
160
164
|
VALUE dynamic_sampling_rate_enabled,
|
161
165
|
VALUE dynamic_sampling_rate_overhead_target_percentage,
|
162
|
-
VALUE allocation_sample_every,
|
163
166
|
VALUE allocation_profiling_enabled
|
164
167
|
);
|
165
168
|
static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr);
|
@@ -244,7 +247,7 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
|
|
244
247
|
// https://bugs.ruby-lang.org/issues/18007 for a discussion around this.
|
245
248
|
rb_define_alloc_func(collectors_cpu_and_wall_time_worker_class, _native_new);
|
246
249
|
|
247
|
-
rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_initialize", _native_initialize,
|
250
|
+
rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_initialize", _native_initialize, 8);
|
248
251
|
rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_sampling_loop", _native_sampling_loop, 1);
|
249
252
|
rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_stop", _native_stop, 2);
|
250
253
|
rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_reset_after_fork", _native_reset_after_fork, 1);
|
@@ -284,12 +287,12 @@ static VALUE _native_new(VALUE klass) {
|
|
284
287
|
state->gc_profiling_enabled = false;
|
285
288
|
state->no_signals_workaround_enabled = false;
|
286
289
|
state->dynamic_sampling_rate_enabled = true;
|
287
|
-
state->allocation_sample_every = 0;
|
288
290
|
state->allocation_profiling_enabled = false;
|
289
291
|
state->thread_context_collector_instance = Qnil;
|
290
292
|
state->idle_sampling_helper_instance = Qnil;
|
291
293
|
state->owner_thread = Qnil;
|
292
|
-
dynamic_sampling_rate_init(&state->
|
294
|
+
dynamic_sampling_rate_init(&state->cpu_dynamic_sampling_rate);
|
295
|
+
discrete_dynamic_sampler_init(&state->allocation_sampler, "allocation");
|
293
296
|
state->gc_tracepoint = Qnil;
|
294
297
|
state->object_allocation_tracepoint = Qnil;
|
295
298
|
|
@@ -313,13 +316,11 @@ static VALUE _native_initialize(
|
|
313
316
|
VALUE no_signals_workaround_enabled,
|
314
317
|
VALUE dynamic_sampling_rate_enabled,
|
315
318
|
VALUE dynamic_sampling_rate_overhead_target_percentage,
|
316
|
-
VALUE allocation_sample_every,
|
317
319
|
VALUE allocation_profiling_enabled
|
318
320
|
) {
|
319
321
|
ENFORCE_BOOLEAN(gc_profiling_enabled);
|
320
322
|
ENFORCE_BOOLEAN(no_signals_workaround_enabled);
|
321
323
|
ENFORCE_BOOLEAN(dynamic_sampling_rate_enabled);
|
322
|
-
ENFORCE_TYPE(allocation_sample_every, T_FIXNUM);
|
323
324
|
ENFORCE_TYPE(dynamic_sampling_rate_overhead_target_percentage, T_FLOAT);
|
324
325
|
ENFORCE_BOOLEAN(allocation_profiling_enabled);
|
325
326
|
|
@@ -329,12 +330,16 @@ static VALUE _native_initialize(
|
|
329
330
|
state->gc_profiling_enabled = (gc_profiling_enabled == Qtrue);
|
330
331
|
state->no_signals_workaround_enabled = (no_signals_workaround_enabled == Qtrue);
|
331
332
|
state->dynamic_sampling_rate_enabled = (dynamic_sampling_rate_enabled == Qtrue);
|
332
|
-
dynamic_sampling_rate_set_overhead_target_percentage(&state->dynamic_sampling_rate, NUM2DBL(dynamic_sampling_rate_overhead_target_percentage));
|
333
|
-
state->allocation_sample_every = NUM2INT(allocation_sample_every);
|
334
333
|
state->allocation_profiling_enabled = (allocation_profiling_enabled == Qtrue);
|
335
334
|
|
336
|
-
|
337
|
-
|
335
|
+
double total_overhead_target_percentage = NUM2DBL(dynamic_sampling_rate_overhead_target_percentage);
|
336
|
+
if (!state->allocation_profiling_enabled) {
|
337
|
+
dynamic_sampling_rate_set_overhead_target_percentage(&state->cpu_dynamic_sampling_rate, total_overhead_target_percentage);
|
338
|
+
} else {
|
339
|
+
// TODO: May be nice to offer customization here? Distribute available "overhead" margin with a bias towards one or the other
|
340
|
+
// sampler.
|
341
|
+
dynamic_sampling_rate_set_overhead_target_percentage(&state->cpu_dynamic_sampling_rate, total_overhead_target_percentage / 2);
|
342
|
+
discrete_dynamic_sampler_set_overhead_target_percentage(&state->allocation_sampler, total_overhead_target_percentage / 2);
|
338
343
|
}
|
339
344
|
|
340
345
|
state->thread_context_collector_instance = enforce_thread_context_collector_instance(thread_context_collector_instance);
|
@@ -387,7 +392,8 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
|
|
387
392
|
if (state->stop_thread == rb_thread_current()) return Qnil;
|
388
393
|
|
389
394
|
// Reset the dynamic sampling rate state, if any (reminder: the monotonic clock reference may change after a fork)
|
390
|
-
dynamic_sampling_rate_reset(&state->
|
395
|
+
dynamic_sampling_rate_reset(&state->cpu_dynamic_sampling_rate);
|
396
|
+
discrete_dynamic_sampler_reset(&state->allocation_sampler);
|
391
397
|
|
392
398
|
// This write to a global is thread-safe BECAUSE we're still holding on to the global VM lock at this point
|
393
399
|
active_sampler_instance_state = state;
|
@@ -560,7 +566,7 @@ static void *run_sampling_trigger_loop(void *state_ptr) {
|
|
560
566
|
// Note that we deliberately should NOT combine this sleep_for with the one above because the result of
|
561
567
|
// `dynamic_sampling_rate_get_sleep` may have changed while the above sleep was ongoing.
|
562
568
|
uint64_t extra_sleep =
|
563
|
-
dynamic_sampling_rate_get_sleep(&state->
|
569
|
+
dynamic_sampling_rate_get_sleep(&state->cpu_dynamic_sampling_rate, monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE));
|
564
570
|
if (state->dynamic_sampling_rate_enabled && extra_sleep > 0) sleep_for(extra_sleep);
|
565
571
|
}
|
566
572
|
|
@@ -600,7 +606,7 @@ static VALUE rescued_sample_from_postponed_job(VALUE self_instance) {
|
|
600
606
|
|
601
607
|
long wall_time_ns_before_sample = monotonic_wall_time_now_ns(RAISE_ON_FAILURE);
|
602
608
|
|
603
|
-
if (state->dynamic_sampling_rate_enabled && !dynamic_sampling_rate_should_sample(&state->
|
609
|
+
if (state->dynamic_sampling_rate_enabled && !dynamic_sampling_rate_should_sample(&state->cpu_dynamic_sampling_rate, wall_time_ns_before_sample)) {
|
604
610
|
state->stats.skipped_sample_because_of_dynamic_sampling_rate++;
|
605
611
|
return Qnil;
|
606
612
|
}
|
@@ -620,7 +626,7 @@ static VALUE rescued_sample_from_postponed_job(VALUE self_instance) {
|
|
620
626
|
state->stats.sampling_time_ns_max = uint64_max_of(sampling_time_ns, state->stats.sampling_time_ns_max);
|
621
627
|
state->stats.sampling_time_ns_total += sampling_time_ns;
|
622
628
|
|
623
|
-
dynamic_sampling_rate_after_sample(&state->
|
629
|
+
dynamic_sampling_rate_after_sample(&state->cpu_dynamic_sampling_rate, wall_time_ns_after_sample, sampling_time_ns);
|
624
630
|
|
625
631
|
// Return a dummy VALUE because we're called from rb_rescue2 which requires it
|
626
632
|
return Qnil;
|
@@ -931,18 +937,20 @@ static void on_newobj_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused)
|
|
931
937
|
return;
|
932
938
|
}
|
933
939
|
|
940
|
+
if (state->dynamic_sampling_rate_enabled && !discrete_dynamic_sampler_should_sample(&state->allocation_sampler)) {
|
941
|
+
return;
|
942
|
+
}
|
943
|
+
|
934
944
|
// @ivoanjo: Strictly speaking, this is not needed because Ruby should not call the same tracepoint while a previous
|
935
945
|
// invocation is still pending, (e.g. it wouldn't call `on_newobj_event` while it's already running), but I decided
|
936
946
|
// to keep this here for consistency -- every call to the thread context (other than the special gc calls which are
|
937
947
|
// defined as not being able to allocate) sets this.
|
938
948
|
state->during_sample = true;
|
939
949
|
|
940
|
-
//
|
941
|
-
|
942
|
-
|
943
|
-
|
944
|
-
safely_call(rescued_sample_allocation, tracepoint_data, state->self_instance);
|
945
|
-
}
|
950
|
+
// Rescue against any exceptions that happen during sampling
|
951
|
+
safely_call(rescued_sample_allocation, tracepoint_data, state->self_instance);
|
952
|
+
|
953
|
+
discrete_dynamic_sampler_after_sample(&state->allocation_sampler);
|
946
954
|
|
947
955
|
state->during_sample = false;
|
948
956
|
}
|
@@ -974,7 +982,14 @@ static VALUE rescued_sample_allocation(VALUE tracepoint_data) {
|
|
974
982
|
rb_trace_arg_t *data = rb_tracearg_from_tracepoint(tracepoint_data);
|
975
983
|
VALUE new_object = rb_tracearg_object(data);
|
976
984
|
|
977
|
-
|
985
|
+
unsigned long allocations_since_last_sample = state->dynamic_sampling_rate_enabled ?
|
986
|
+
// if we're doing dynamic sampling, ask the sampler how many events since last sample
|
987
|
+
discrete_dynamic_sampler_events_since_last_sample(&state->allocation_sampler) :
|
988
|
+
// if we aren't, then we're sampling every event
|
989
|
+
1;
|
990
|
+
// TODO: Signal in the profile that clamping happened?
|
991
|
+
unsigned int weight = allocations_since_last_sample > MAX_ALLOC_WEIGHT ? MAX_ALLOC_WEIGHT : (unsigned int) allocations_since_last_sample;
|
992
|
+
thread_context_collector_sample_allocation(state->thread_context_collector_instance, weight, new_object);
|
978
993
|
|
979
994
|
// Return a dummy VALUE because we're called from rb_rescue2 which requires it
|
980
995
|
return Qnil;
|
@@ -0,0 +1,349 @@
|
|
1
|
+
#include "collectors_discrete_dynamic_sampler.h"
|
2
|
+
|
3
|
+
#include <ruby.h>
|
4
|
+
#include "helpers.h"
|
5
|
+
#include "time_helpers.h"
|
6
|
+
#include "ruby_helpers.h"
|
7
|
+
|
8
|
+
#define BASE_OVERHEAD_PCT 1.0
|
9
|
+
#define BASE_SAMPLING_INTERVAL 50
|
10
|
+
|
11
|
+
#define ADJUSTMENT_WINDOW_NS SECONDS_AS_NS(1)
|
12
|
+
|
13
|
+
#define EMA_SMOOTHING_FACTOR 0.6
|
14
|
+
#define EXP_MOVING_AVERAGE(last, avg, first) first ? last : (1-EMA_SMOOTHING_FACTOR) * avg + EMA_SMOOTHING_FACTOR * last
|
15
|
+
|
16
|
+
void discrete_dynamic_sampler_init(discrete_dynamic_sampler *sampler, const char *debug_name) {
|
17
|
+
sampler->debug_name = debug_name;
|
18
|
+
discrete_dynamic_sampler_set_overhead_target_percentage(sampler, BASE_OVERHEAD_PCT);
|
19
|
+
}
|
20
|
+
|
21
|
+
static void _discrete_dynamic_sampler_reset(discrete_dynamic_sampler *sampler, long now_ns) {
|
22
|
+
const char *debug_name = sampler->debug_name;
|
23
|
+
double target_overhead = sampler->target_overhead;
|
24
|
+
(*sampler) = (discrete_dynamic_sampler) {
|
25
|
+
.debug_name = debug_name,
|
26
|
+
.target_overhead = target_overhead,
|
27
|
+
// Act as if a reset is a readjustment (it kinda is!) and wait for a full adjustment window
|
28
|
+
// to compute stats. Otherwise, we'd readjust on the next event that comes and thus be operating
|
29
|
+
// with very incomplete information
|
30
|
+
.last_readjust_time_ns = now_ns,
|
31
|
+
// This fake readjustment will use a hardcoded sampling interval
|
32
|
+
.sampling_interval = BASE_SAMPLING_INTERVAL,
|
33
|
+
.sampling_probability = 1.0 / BASE_SAMPLING_INTERVAL,
|
34
|
+
// But we want to make sure we sample at least once in the next window so that our first
|
35
|
+
// real readjustment has some notion of how heavy sampling is. Therefore, we'll make it so that
|
36
|
+
// the next event is automatically sampled by artificially locating it in the interval threshold.
|
37
|
+
.events_since_last_sample = BASE_SAMPLING_INTERVAL - 1,
|
38
|
+
};
|
39
|
+
}
|
40
|
+
|
41
|
+
void discrete_dynamic_sampler_reset(discrete_dynamic_sampler *sampler) {
|
42
|
+
long now = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
|
43
|
+
_discrete_dynamic_sampler_reset(sampler, now);
|
44
|
+
}
|
45
|
+
|
46
|
+
static void _discrete_dynamic_sampler_set_overhead_target_percentage(discrete_dynamic_sampler *sampler, double target_overhead, long now_ns) {
|
47
|
+
if (target_overhead <= 0 || target_overhead > 100) {
|
48
|
+
rb_raise(rb_eArgError, "Target overhead must be a double between ]0,100] was %f", target_overhead);
|
49
|
+
}
|
50
|
+
sampler->target_overhead = target_overhead;
|
51
|
+
_discrete_dynamic_sampler_reset(sampler, now_ns);
|
52
|
+
}
|
53
|
+
|
54
|
+
void discrete_dynamic_sampler_set_overhead_target_percentage(discrete_dynamic_sampler *sampler, double target_overhead) {
|
55
|
+
long now = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
|
56
|
+
_discrete_dynamic_sampler_set_overhead_target_percentage(sampler, target_overhead, now);
|
57
|
+
}
|
58
|
+
|
59
|
+
static void maybe_readjust(discrete_dynamic_sampler *sampler, long now);
|
60
|
+
|
61
|
+
static bool _discrete_dynamic_sampler_should_sample(discrete_dynamic_sampler *sampler, long now_ns) {
|
62
|
+
// For efficiency reasons we don't do true random sampling but rather systematic
|
63
|
+
// sampling following a sample interval/skip. This can be biased and hide patterns
|
64
|
+
// but the dynamic interval and rather indeterministic pattern of allocations in
|
65
|
+
// most real applications should help reduce the bias impact.
|
66
|
+
sampler->events_since_last_sample++;
|
67
|
+
sampler->events_since_last_readjustment++;
|
68
|
+
bool should_sample = sampler->sampling_interval > 0 && sampler->events_since_last_sample >= sampler->sampling_interval;
|
69
|
+
|
70
|
+
if (should_sample) {
|
71
|
+
sampler->sample_start_time_ns = now_ns;
|
72
|
+
} else {
|
73
|
+
// check if we should readjust our sampler after this event, even if we didn't sample it
|
74
|
+
maybe_readjust(sampler, now_ns);
|
75
|
+
}
|
76
|
+
|
77
|
+
return should_sample;
|
78
|
+
}
|
79
|
+
|
80
|
+
bool discrete_dynamic_sampler_should_sample(discrete_dynamic_sampler *sampler) {
|
81
|
+
long now = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
|
82
|
+
return _discrete_dynamic_sampler_should_sample(sampler, now);
|
83
|
+
}
|
84
|
+
|
85
|
+
static long _discrete_dynamic_sampler_after_sample(discrete_dynamic_sampler *sampler, long now_ns) {
|
86
|
+
long last_sampling_time_ns = sampler->sample_start_time_ns == 0 ? 0 : long_max_of(0, now_ns - sampler->sample_start_time_ns);
|
87
|
+
sampler->samples_since_last_readjustment++;
|
88
|
+
sampler->sampling_time_since_last_readjustment_ns += last_sampling_time_ns;
|
89
|
+
sampler->events_since_last_sample = 0;
|
90
|
+
|
91
|
+
// check if we should readjust our sampler after this sample
|
92
|
+
maybe_readjust(sampler, now_ns);
|
93
|
+
|
94
|
+
return last_sampling_time_ns;
|
95
|
+
}
|
96
|
+
|
97
|
+
long discrete_dynamic_sampler_after_sample(discrete_dynamic_sampler *sampler) {
|
98
|
+
long now = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE);
|
99
|
+
return _discrete_dynamic_sampler_after_sample(sampler, now);
|
100
|
+
}
|
101
|
+
|
102
|
+
double discrete_dynamic_sampler_probability(discrete_dynamic_sampler *sampler) {
|
103
|
+
return sampler->sampling_probability * 100.;
|
104
|
+
}
|
105
|
+
|
106
|
+
size_t discrete_dynamic_sampler_events_since_last_sample(discrete_dynamic_sampler *sampler) {
|
107
|
+
return sampler->events_since_last_sample;
|
108
|
+
}
|
109
|
+
|
110
|
+
static void maybe_readjust(discrete_dynamic_sampler *sampler, long now) {
|
111
|
+
long window_time_ns = sampler->last_readjust_time_ns == 0 ? ADJUSTMENT_WINDOW_NS : now - sampler->last_readjust_time_ns;
|
112
|
+
|
113
|
+
if (window_time_ns < ADJUSTMENT_WINDOW_NS) {
|
114
|
+
// not enough time has passed to perform a readjustment
|
115
|
+
return;
|
116
|
+
}
|
117
|
+
|
118
|
+
// If we got this far, lets recalculate our sampling params based on new observations
|
119
|
+
bool first_readjustment = !sampler->has_completed_full_adjustment_window;
|
120
|
+
|
121
|
+
// Update our running average of events/sec with latest observation
|
122
|
+
sampler->events_per_ns = EXP_MOVING_AVERAGE(
|
123
|
+
(double) sampler->events_since_last_readjustment / window_time_ns,
|
124
|
+
sampler->events_per_ns,
|
125
|
+
first_readjustment
|
126
|
+
);
|
127
|
+
|
128
|
+
// Update our running average of sampling time for a specific event
|
129
|
+
long sampling_window_time_ns = sampler->sampling_time_since_last_readjustment_ns;
|
130
|
+
long sampling_overshoot_time_ns = -1;
|
131
|
+
if (sampler->samples_since_last_readjustment > 0) {
|
132
|
+
// We can only update sampling-related stats if we actually sampled on the last window...
|
133
|
+
|
134
|
+
// Lets update our average sampling time per event
|
135
|
+
long avg_sampling_time_in_window_ns = sampler->samples_since_last_readjustment == 0 ? 0 : sampling_window_time_ns / sampler->samples_since_last_readjustment;
|
136
|
+
sampler->sampling_time_ns = EXP_MOVING_AVERAGE(
|
137
|
+
avg_sampling_time_in_window_ns,
|
138
|
+
sampler->sampling_time_ns,
|
139
|
+
first_readjustment
|
140
|
+
);
|
141
|
+
}
|
142
|
+
|
143
|
+
// Are we meeting our target in practice? If we're consistently overshooting our estimate due to non-uniform allocation patterns lets
|
144
|
+
// adjust our overhead target.
|
145
|
+
// NOTE: Updating this even when no samples occur is a conscious choice which enables us to cooldown extreme adjustments over time.
|
146
|
+
// If we didn't do this, whenever a big spike caused target_overhead_adjustment to equal target_overhead, we'd get stuck
|
147
|
+
// in a "probability = 0" state.
|
148
|
+
long reference_target_sampling_time_ns = window_time_ns * (sampler->target_overhead / 100.);
|
149
|
+
// Overshoot by definition is always >= 0. < 0 would be undershooting!
|
150
|
+
sampling_overshoot_time_ns = long_max_of(0, sampler->sampling_time_since_last_readjustment_ns - reference_target_sampling_time_ns);
|
151
|
+
// Our overhead adjustment should always be between [-target_overhead, 0]. Higher adjustments would lead to negative overhead targets
|
152
|
+
// which don't make much sense.
|
153
|
+
double last_target_overhead_adjustment = -double_min_of(sampler->target_overhead, sampling_overshoot_time_ns * 100. / window_time_ns);
|
154
|
+
sampler->target_overhead_adjustment = EXP_MOVING_AVERAGE(
|
155
|
+
last_target_overhead_adjustment,
|
156
|
+
sampler->target_overhead_adjustment,
|
157
|
+
first_readjustment
|
158
|
+
);
|
159
|
+
|
160
|
+
// Apply our overhead adjustment to figure out our real targets for this readjustment.
|
161
|
+
double target_overhead = double_max_of(0, sampler->target_overhead + sampler->target_overhead_adjustment);
|
162
|
+
long target_sampling_time_ns = window_time_ns * (target_overhead / 100.);
|
163
|
+
|
164
|
+
// Recalculate target sampling probability so that the following 2 hold:
|
165
|
+
// * window_time_ns = working_window_time_ns + sampling_window_time_ns
|
166
|
+
// │ │ │
|
167
|
+
// │ │ └ how much time is spent sampling
|
168
|
+
// │ └── how much time is spent doing actual app stuff
|
169
|
+
// └── total (wall) time in this adjustment window
|
170
|
+
// * sampling_window_time_ns <= window_time_ns * target_overhead / 100
|
171
|
+
//
|
172
|
+
// Note that
|
173
|
+
//
|
174
|
+
// sampling_window_time_ns = samples_in_window * sampling_time_ns =
|
175
|
+
// ┌─ assuming no events will be emitted during sampling
|
176
|
+
// │
|
177
|
+
// = events_per_ns * working_window_time_ns * sampling_probability * sampling_time_ns
|
178
|
+
//
|
179
|
+
// Re-ordering for sampling_probability and solving for the upper-bound of sampling_window_time_ns:
|
180
|
+
//
|
181
|
+
// sampling_window_time_ns = window_time_ns * target_overhead / 100
|
182
|
+
// sampling_probability = window_time_ns * target_overhead / 100 / (events_per_ns * working_window_time_ns * sampling_time_ns) =
|
183
|
+
//
|
184
|
+
// Which you can intuitively understand as:
|
185
|
+
//
|
186
|
+
// sampling_probability = max_allowed_time_for_sampling_ns / time_to_sample_all_events_ns
|
187
|
+
//
|
188
|
+
// As a quick sanity check:
|
189
|
+
// * If app is eventing very little or we're sampling very fast, so that time_to_sample_all_events_ns < max_allowed_time_for_sampling_ns
|
190
|
+
// then probability will be > 1 (but we should clamp to 1 since probabilities higher than 1 don't make sense).
|
191
|
+
// * If app is eventing a lot or our sampling overhead is big, then as time_to_sample_all_events_ns grows, sampling_probability will
|
192
|
+
// tend to 0.
|
193
|
+
long working_window_time_ns = long_max_of(0, window_time_ns - sampling_window_time_ns);
|
194
|
+
double max_allowed_time_for_sampling_ns = target_sampling_time_ns;
|
195
|
+
long time_to_sample_all_events_ns = sampler->events_per_ns * working_window_time_ns * sampler->sampling_time_ns;
|
196
|
+
if (max_allowed_time_for_sampling_ns == 0) {
|
197
|
+
// if we aren't allowed any sampling time at all, probability has to be 0
|
198
|
+
sampler->sampling_probability = 0;
|
199
|
+
} else {
|
200
|
+
// otherwise apply the formula described above (protecting against div by 0)
|
201
|
+
sampler->sampling_probability = time_to_sample_all_events_ns == 0 ? 1. :
|
202
|
+
double_min_of(1., max_allowed_time_for_sampling_ns / time_to_sample_all_events_ns);
|
203
|
+
}
|
204
|
+
|
205
|
+
// Doing true random selection would involve "tossing a coin" on every allocation. Lets do systematic sampling instead so that our
|
206
|
+
// sampling decision can rely solely on a sampling skip/interval (i.e. more efficient).
|
207
|
+
//
|
208
|
+
// sampling_interval = events / samples =
|
209
|
+
// = event_rate * working_window_time_ns / (event_rate * working_window_time_ns * sampling_probability)
|
210
|
+
// = 1 / sampling_probability
|
211
|
+
//
|
212
|
+
// NOTE: The sampling interval has to be an integer since we're dealing with discrete events here. This means that there'll be
|
213
|
+
// a loss of precision (and thus control) when adjusting between probabilities that lead to non-integer granularity
|
214
|
+
// changes (e.g. probabilities in the range of ]50%, 100%[ which map to intervals in the range of ]1, 2[). Our approach
|
215
|
+
// when the sampling interval is a non-integer is to ceil it (i.e. we'll always choose to sample less often).
|
216
|
+
// NOTE: Overhead target adjustments or very big sampling times can in theory bring probability so close to 0 as to effectively
|
217
|
+
// round down to full 0. This means we have to be careful to handle div-by-0 as well as resulting double intervals that
|
218
|
+
// are so big they don't fit into the sampling_interval. In both cases lets just disable sampling until next readjustment
|
219
|
+
// by setting interval to 0.
|
220
|
+
double sampling_interval = sampler->sampling_probability == 0 ? 0 : ceil(1.0 / sampler->sampling_probability);
|
221
|
+
sampler->sampling_interval = sampling_interval > ULONG_MAX ? 0 : sampling_interval;
|
222
|
+
|
223
|
+
#ifdef DD_DEBUG
|
224
|
+
double allocs_in_60s = sampler->events_per_ns * 1e9 * 60;
|
225
|
+
double samples_in_60s = allocs_in_60s * sampler->sampling_probability;
|
226
|
+
double expected_total_sampling_time_in_60s =
|
227
|
+
samples_in_60s * sampler->sampling_time_ns / 1e9;
|
228
|
+
double real_total_sampling_time_in_60s = sampling_window_time_ns / 1e9 * 60 / (window_time_ns / 1e9);
|
229
|
+
|
230
|
+
fprintf(stderr, "[dds.%s] readjusting...\n", sampler->debug_name);
|
231
|
+
fprintf(stderr, "samples_since_last_readjustment=%ld\n", sampler->samples_since_last_readjustment);
|
232
|
+
fprintf(stderr, "window_time=%ld\n", window_time_ns);
|
233
|
+
fprintf(stderr, "events_per_sec=%f\n", sampler->events_per_ns * 1e9);
|
234
|
+
fprintf(stderr, "sampling_time=%ld\n", sampler->sampling_time_ns);
|
235
|
+
fprintf(stderr, "sampling_window_time=%ld\n", sampling_window_time_ns);
|
236
|
+
fprintf(stderr, "sampling_target_time=%ld\n", reference_target_sampling_time_ns);
|
237
|
+
fprintf(stderr, "sampling_overshoot_time=%ld\n", sampling_overshoot_time_ns);
|
238
|
+
fprintf(stderr, "working_window_time=%ld\n", working_window_time_ns);
|
239
|
+
fprintf(stderr, "sampling_interval=%zu\n", sampler->sampling_interval);
|
240
|
+
fprintf(stderr, "sampling_probability=%f\n", sampler->sampling_probability);
|
241
|
+
fprintf(stderr, "expected allocs in 60s=%f\n", allocs_in_60s);
|
242
|
+
fprintf(stderr, "expected samples in 60s=%f\n", samples_in_60s);
|
243
|
+
fprintf(stderr, "expected sampling time in 60s=%f (previous real=%f)\n", expected_total_sampling_time_in_60s, real_total_sampling_time_in_60s);
|
244
|
+
fprintf(stderr, "target_overhead=%f\n", sampler->target_overhead);
|
245
|
+
fprintf(stderr, "target_overhead_adjustment=%f\n", sampler->target_overhead_adjustment);
|
246
|
+
fprintf(stderr, "target_sampling_time=%ld\n", target_sampling_time_ns);
|
247
|
+
fprintf(stderr, "expected max overhead in 60s=%f\n", target_overhead / 100.0 * 60);
|
248
|
+
fprintf(stderr, "-------\n");
|
249
|
+
#endif
|
250
|
+
|
251
|
+
sampler->events_since_last_readjustment = 0;
|
252
|
+
sampler->samples_since_last_readjustment = 0;
|
253
|
+
sampler->sampling_time_since_last_readjustment_ns = 0;
|
254
|
+
sampler->last_readjust_time_ns = now;
|
255
|
+
sampler->has_completed_full_adjustment_window = true;
|
256
|
+
}
|
257
|
+
|
258
|
+
// ---
|
259
|
+
// Below here is boilerplate to expose the above code to Ruby so that we can test it with RSpec as usual.
|
260
|
+
|
261
|
+
static VALUE _native_new(VALUE klass);
|
262
|
+
static VALUE _native_reset(VALUE self, VALUE now);
|
263
|
+
static VALUE _native_set_overhead_target_percentage(VALUE self, VALUE target_overhead, VALUE now);
|
264
|
+
static VALUE _native_should_sample(VALUE self, VALUE now);
|
265
|
+
static VALUE _native_after_sample(VALUE self, VALUE now);
|
266
|
+
static VALUE _native_probability(VALUE self);
|
267
|
+
|
268
|
+
// Wrapper struct holding a discrete_dynamic_sampler so it can be attached to a
// Ruby TypedData object (see sampler_typed_data below) for testing from RSpec.
typedef struct sampler_state {
  discrete_dynamic_sampler sampler;
} sampler_state;
|
271
|
+
|
272
|
+
// Registers the DiscreteDynamicSampler testing harness with the Ruby VM.
//
// The Sampler class lives under a `Testing` submodule on purpose: these
// bindings exist solely so the sampler logic above can be exercised from
// RSpec as usual, not for use in production code paths.
void collectors_discrete_dynamic_sampler_init(VALUE profiling_module) {
  VALUE collectors = rb_define_module_under(profiling_module, "Collectors");
  VALUE discrete_sampler = rb_define_module_under(collectors, "DiscreteDynamicSampler");
  VALUE testing = rb_define_module_under(discrete_sampler, "Testing");
  VALUE testing_sampler_class = rb_define_class_under(testing, "Sampler", rb_cObject);

  // Instances are allocated natively so they carry a sampler_state payload.
  rb_define_alloc_func(testing_sampler_class, _native_new);

  rb_define_method(testing_sampler_class, "_native_reset", _native_reset, 1);
  rb_define_method(testing_sampler_class, "_native_set_overhead_target_percentage", _native_set_overhead_target_percentage, 2);
  rb_define_method(testing_sampler_class, "_native_should_sample", _native_should_sample, 1);
  rb_define_method(testing_sampler_class, "_native_after_sample", _native_after_sample, 1);
  rb_define_method(testing_sampler_class, "_native_probability", _native_probability, 0);
}
|
286
|
+
|
287
|
+
// TypedData descriptor for Testing::Sampler instances.
static const rb_data_type_t sampler_typed_data = {
  .wrap_struct_name = "Datadog::Profiling::DiscreteDynamicSampler::Testing::Sampler",
  .function = {
    // state is a single flat allocation with no owned pointers inside, so the
    // default xfree-based finalizer is enough.
    .dfree = RUBY_DEFAULT_FREE,
    .dsize = NULL,
  },
  .flags = RUBY_TYPED_FREE_IMMEDIATELY
};
|
295
|
+
|
296
|
+
// Allocator for Testing::Sampler: creates a zeroed native state, initializes
// the embedded sampler, and wraps it in a Ruby object of the given class.
static VALUE _native_new(VALUE klass) {
  sampler_state *new_state = ruby_xcalloc(1, sizeof(sampler_state));

  discrete_dynamic_sampler_init(&new_state->sampler, "test sampler");

  return TypedData_Wrap_Struct(klass, &sampler_typed_data, new_state);
}
|
303
|
+
|
304
|
+
// Testing shim: resets the wrapped sampler's state as-of the given timestamp
// (nanoseconds, must be a Fixnum). Always returns true.
static VALUE _native_reset(VALUE self, VALUE now_ns) {
  ENFORCE_TYPE(now_ns, T_FIXNUM);

  long now = NUM2LONG(now_ns);

  sampler_state *self_state;
  TypedData_Get_Struct(self, sampler_state, &sampler_typed_data, self_state);

  _discrete_dynamic_sampler_reset(&self_state->sampler, now);
  return Qtrue;
}
|
313
|
+
|
314
|
+
// Testing shim: updates the sampler's overhead target (a Float percentage),
// using the given timestamp (nanoseconds, Fixnum) as "now". Returns nil.
static VALUE _native_set_overhead_target_percentage(VALUE self, VALUE target_overhead, VALUE now_ns) {
  ENFORCE_TYPE(target_overhead, T_FLOAT);
  ENFORCE_TYPE(now_ns, T_FIXNUM);

  double new_target = NUM2DBL(target_overhead);
  long now = NUM2LONG(now_ns);

  sampler_state *self_state;
  TypedData_Get_Struct(self, sampler_state, &sampler_typed_data, self_state);

  _discrete_dynamic_sampler_set_overhead_target_percentage(&self_state->sampler, new_target, now);

  return Qnil;
}
|
325
|
+
|
326
|
+
// Testing shim: asks the sampler whether the current event should be sampled,
// using the given timestamp (nanoseconds, Fixnum) as "now".
//
// Returns true when the caller should take a sample, false otherwise.
//
// NOTE: Marked static to match its forward declaration above and its sibling
// _native_* definitions; the identifier already had internal linkage (the
// earlier static declaration wins per C linkage rules), so this only makes
// the definition consistent.
static VALUE _native_should_sample(VALUE self, VALUE now_ns) {
  ENFORCE_TYPE(now_ns, T_FIXNUM);

  sampler_state *state;
  TypedData_Get_Struct(self, sampler_state, &sampler_typed_data, state);

  return _discrete_dynamic_sampler_should_sample(&state->sampler, NUM2LONG(now_ns)) ? Qtrue : Qfalse;
}
|
334
|
+
|
335
|
+
// Testing shim: records that a sample just finished, using the given timestamp
// (nanoseconds, Fixnum) as "now".
//
// Returns the sampling time (a long, as an Integer) reported by the sampler.
//
// NOTE: Marked static to match its forward declaration above and its sibling
// _native_* definitions (the prior static declaration already gave it internal
// linkage; this just makes the definition consistent).
static VALUE _native_after_sample(VALUE self, VALUE now_ns) {
  ENFORCE_TYPE(now_ns, T_FIXNUM);

  sampler_state *state;
  TypedData_Get_Struct(self, sampler_state, &sampler_typed_data, state);

  return LONG2NUM(_discrete_dynamic_sampler_after_sample(&state->sampler, NUM2LONG(now_ns)));
}
|
343
|
+
|
344
|
+
// Testing shim: exposes the sampler's current sampling probability as a Float.
//
// NOTE: Marked static to match its forward declaration above and its sibling
// _native_* definitions (the prior static declaration already gave it internal
// linkage; this just makes the definition consistent).
static VALUE _native_probability(VALUE self) {
  sampler_state *state;
  TypedData_Get_Struct(self, sampler_state, &sampler_typed_data, state);

  return DBL2NUM(discrete_dynamic_sampler_probability(&state->sampler));
}
|