ddtrace 1.6.1 → 1.7.0
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +40 -2
- data/ext/ddtrace_profiling_loader/extconf.rb +1 -1
- data/ext/ddtrace_profiling_native_extension/collectors_cpu_and_wall_time.c +66 -6
- data/ext/ddtrace_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +51 -54
- data/ext/ddtrace_profiling_native_extension/collectors_stack.c +11 -13
- data/ext/ddtrace_profiling_native_extension/extconf.rb +1 -1
- data/ext/ddtrace_profiling_native_extension/private_vm_api_access.c +3 -2
- data/ext/ddtrace_profiling_native_extension/setup_signal_handler.c +96 -0
- data/ext/ddtrace_profiling_native_extension/setup_signal_handler.h +7 -0
- data/ext/ddtrace_profiling_native_extension/stack_recorder.c +70 -18
- data/ext/ddtrace_profiling_native_extension/stack_recorder.h +1 -0
- data/lib/datadog/appsec/assets/blocked.html +98 -3
- data/lib/datadog/appsec/assets/blocked.json +1 -0
- data/lib/datadog/appsec/assets/blocked.text +5 -0
- data/lib/datadog/appsec/assets/waf_rules/recommended.json +35 -46
- data/lib/datadog/appsec/assets/waf_rules/risky.json +1 -1
- data/lib/datadog/appsec/assets/waf_rules/strict.json +46 -1
- data/lib/datadog/appsec/assets.rb +2 -2
- data/lib/datadog/appsec/configuration/settings.rb +6 -0
- data/lib/datadog/appsec/configuration.rb +4 -0
- data/lib/datadog/appsec/contrib/rack/reactive/request.rb +4 -8
- data/lib/datadog/appsec/contrib/rack/request.rb +17 -0
- data/lib/datadog/appsec/contrib/rack/request_body_middleware.rb +2 -2
- data/lib/datadog/appsec/contrib/rack/request_middleware.rb +2 -2
- data/lib/datadog/appsec/contrib/rails/patcher.rb +3 -6
- data/lib/datadog/appsec/contrib/sinatra/ext.rb +1 -0
- data/lib/datadog/appsec/contrib/sinatra/gateway/watcher.rb +1 -1
- data/lib/datadog/appsec/contrib/sinatra/patcher.rb +11 -8
- data/lib/datadog/appsec/extensions.rb +10 -0
- data/lib/datadog/appsec/processor.rb +18 -0
- data/lib/datadog/appsec/response.rb +54 -0
- data/lib/datadog/core/runtime/ext.rb +1 -1
- data/lib/datadog/opentracer/distributed_headers.rb +5 -7
- data/lib/datadog/opentracer/rack_propagator.rb +0 -3
- data/lib/datadog/opentracer/text_map_propagator.rb +5 -7
- data/lib/datadog/profiling/collectors/cpu_and_wall_time.rb +10 -4
- data/lib/datadog/profiling/collectors/cpu_and_wall_time_worker.rb +4 -0
- data/lib/datadog/profiling/collectors/old_stack.rb +7 -0
- data/lib/datadog/profiling/exporter.rb +5 -0
- data/lib/datadog/profiling/old_recorder.rb +8 -0
- data/lib/datadog/profiling/profiler.rb +7 -0
- data/lib/datadog/profiling/scheduler.rb +4 -7
- data/lib/datadog/profiling/stack_recorder.rb +22 -0
- data/lib/datadog/profiling/tasks/setup.rb +0 -7
- data/lib/datadog/tracing/contrib/delayed_job/plugin.rb +4 -0
- data/lib/datadog/tracing/contrib/grpc/datadog_interceptor/client.rb +2 -1
- data/lib/datadog/tracing/contrib/grpc/datadog_interceptor/server.rb +6 -12
- data/lib/datadog/tracing/contrib/grpc/distributed/fetcher.rb +27 -0
- data/lib/datadog/tracing/contrib/grpc/distributed/propagation.rb +38 -0
- data/lib/datadog/tracing/contrib/grpc/patcher.rb +0 -2
- data/lib/datadog/tracing/contrib/http/distributed/fetcher.rb +32 -0
- data/lib/datadog/tracing/contrib/http/distributed/propagation.rb +33 -0
- data/lib/datadog/tracing/contrib/kafka/consumer_event.rb +1 -0
- data/lib/datadog/tracing/contrib/kafka/events/produce_operation/send_messages.rb +1 -0
- data/lib/datadog/tracing/contrib/kafka/events/producer/deliver_messages.rb +1 -0
- data/lib/datadog/tracing/contrib/mongodb/subscribers.rb +2 -0
- data/lib/datadog/tracing/contrib/que/tracer.rb +2 -0
- data/lib/datadog/tracing/contrib/racecar/events/batch.rb +4 -1
- data/lib/datadog/tracing/contrib/racecar/events/message.rb +4 -1
- data/lib/datadog/tracing/contrib/rack/middlewares.rb +2 -0
- data/lib/datadog/tracing/contrib/redis/instrumentation.rb +2 -0
- data/lib/datadog/tracing/contrib/resque/resque_job.rb +2 -0
- data/lib/datadog/tracing/contrib/shoryuken/tracer.rb +2 -0
- data/lib/datadog/tracing/contrib/sidekiq/client_tracer.rb +5 -0
- data/lib/datadog/tracing/contrib/sidekiq/server_tracer.rb +5 -0
- data/lib/datadog/tracing/contrib/sneakers/tracer.rb +2 -0
- data/lib/datadog/tracing/distributed/b3.rb +66 -0
- data/lib/datadog/tracing/distributed/b3_single.rb +66 -0
- data/lib/datadog/tracing/distributed/datadog.rb +153 -0
- data/lib/datadog/tracing/distributed/datadog_tags_codec.rb +1 -0
- data/lib/datadog/tracing/distributed/fetcher.rb +30 -0
- data/lib/datadog/tracing/distributed/headers/ext.rb +18 -16
- data/lib/datadog/tracing/distributed/helpers.rb +7 -6
- data/lib/datadog/tracing/distributed/propagation.rb +127 -0
- data/lib/datadog/tracing/propagation/http.rb +3 -106
- data/lib/datadog/tracing/trace_segment.rb +1 -1
- data/lib/ddtrace/transport/trace_formatter.rb +2 -5
- data/lib/ddtrace/version.rb +2 -2
- metadata +19 -14
- data/lib/datadog/tracing/distributed/headers/b3.rb +0 -55
- data/lib/datadog/tracing/distributed/headers/b3_single.rb +0 -67
- data/lib/datadog/tracing/distributed/headers/datadog.rb +0 -144
- data/lib/datadog/tracing/distributed/headers/parser.rb +0 -37
- data/lib/datadog/tracing/distributed/metadata/b3.rb +0 -55
- data/lib/datadog/tracing/distributed/metadata/b3_single.rb +0 -66
- data/lib/datadog/tracing/distributed/metadata/datadog.rb +0 -73
- data/lib/datadog/tracing/distributed/metadata/parser.rb +0 -34
- data/lib/datadog/tracing/propagation/grpc.rb +0 -98
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 38409b6f228216767057d98b4acb4cc0effeecd34e6b98e202dde946aa4aec99
+  data.tar.gz: 6a5b909f6a73516c965c1125b7c70fe212972bf89ac829e0cabc356d82a353a0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 73d2114938c7a420a6846042c3172f4b08b5a9fb5b68efc6aa155441fa80d6221201ff1a09afbcbd8ccb12655590622326510cf17995443cd1e5cadca0fedbb3
+  data.tar.gz: 6c8005ac09cb122bbcf32ee569799f61ada81f8e7a735b2b754e0b0f2969a77980b95a1d8871440efe8e617a3c53c77b4f286dceb98be00dd446684ed5bec43b
data/CHANGELOG.md
CHANGED

@@ -2,6 +2,26 @@
 
 ## [Unreleased]
 
+## [1.7.0] - 2022-11-29
+
+### Added
+* Integrations: Support que 2 ([#2382][]) ([@danhodge][])
+* Tracing: Unified tagging `span.kind` as `server` and `client` ([#2365][])
+* Tracing: Adds `span.kind` tag for `kafka`, `sidekiq`, `racecar`, `que`, `shoryuken`, `sneakers`, and `resque` ([#2420][], [#2419][], [#2413][], [#2394][])
+* Tracing: Adds `span.kind` with values `producer` and `consumer` for `delayed_job` ([#2393][])
+* Tracing: Adds `span.kind` as `client` for `redis` ([#2392][])
+* Appsec: Pass HTTP client IP to WAF ([#2316][])
+* Unified tagging `process_id` ([#2276][])
+
+### Changed
+* Allow `debase-ruby_core_source` 0.10.18 to be used ([#2435][])
+* Update AppSec ruleset to v1.4.2 ([#2390][])
+* Refactored clearing of profile data after Ruby app forks ([#2362][], [#2367][])
+* Tracing: Move distributed propagation to Contrib ([#2352][])
+
+### Fixed
+* Fix ddtrace installation issue when users have CI=true ([#2378][])
+
 ## [1.6.1] - 2022-11-16
 
 ### Changed

@@ -2198,7 +2218,8 @@ Release notes: https://github.com/DataDog/dd-trace-rb/releases/tag/v0.3.1
 
 Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 
-[Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v1.
+[Unreleased]: https://github.com/DataDog/dd-trace-rb/compare/v1.7.0...master
+[1.7.0]: https://github.com/DataDog/dd-trace-rb/compare/v1.6.1...v1.7.0
 [1.6.1]: https://github.com/DataDog/dd-trace-rb/compare/v1.6.0...v1.6.1
 [1.6.0]: https://github.com/DataDog/dd-trace-rb/compare/v1.5.2...v1.6.0
 [1.5.2]: https://github.com/DataDog/dd-trace-rb/compare/v1.5.1...v1.5.2

@@ -3102,6 +3123,7 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 [#2260]: https://github.com/DataDog/dd-trace-rb/issues/2260
 [#2265]: https://github.com/DataDog/dd-trace-rb/issues/2265
 [#2267]: https://github.com/DataDog/dd-trace-rb/issues/2267
+[#2276]: https://github.com/DataDog/dd-trace-rb/issues/2276
 [#2279]: https://github.com/DataDog/dd-trace-rb/issues/2279
 [#2283]: https://github.com/DataDog/dd-trace-rb/issues/2283
 [#2289]: https://github.com/DataDog/dd-trace-rb/issues/2289

@@ -3114,6 +3136,7 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 [#2310]: https://github.com/DataDog/dd-trace-rb/issues/2310
 [#2311]: https://github.com/DataDog/dd-trace-rb/issues/2311
 [#2313]: https://github.com/DataDog/dd-trace-rb/issues/2313
+[#2316]: https://github.com/DataDog/dd-trace-rb/issues/2316
 [#2317]: https://github.com/DataDog/dd-trace-rb/issues/2317
 [#2318]: https://github.com/DataDog/dd-trace-rb/issues/2318
 [#2319]: https://github.com/DataDog/dd-trace-rb/issues/2319

@@ -3124,7 +3147,21 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 [#2331]: https://github.com/DataDog/dd-trace-rb/issues/2331
 [#2335]: https://github.com/DataDog/dd-trace-rb/issues/2335
 [#2339]: https://github.com/DataDog/dd-trace-rb/issues/2339
+[#2352]: https://github.com/DataDog/dd-trace-rb/issues/2352
+[#2362]: https://github.com/DataDog/dd-trace-rb/issues/2362
 [#2363]: https://github.com/DataDog/dd-trace-rb/issues/2363
+[#2365]: https://github.com/DataDog/dd-trace-rb/issues/2365
+[#2367]: https://github.com/DataDog/dd-trace-rb/issues/2367
+[#2378]: https://github.com/DataDog/dd-trace-rb/issues/2378
+[#2382]: https://github.com/DataDog/dd-trace-rb/issues/2382
+[#2390]: https://github.com/DataDog/dd-trace-rb/issues/2390
+[#2392]: https://github.com/DataDog/dd-trace-rb/issues/2392
+[#2393]: https://github.com/DataDog/dd-trace-rb/issues/2393
+[#2394]: https://github.com/DataDog/dd-trace-rb/issues/2394
+[#2413]: https://github.com/DataDog/dd-trace-rb/issues/2413
+[#2419]: https://github.com/DataDog/dd-trace-rb/issues/2419
+[#2420]: https://github.com/DataDog/dd-trace-rb/issues/2420
+[#2435]: https://github.com/DataDog/dd-trace-rb/issues/2435
 [@AdrianLC]: https://github.com/AdrianLC
 [@Azure7111]: https://github.com/Azure7111
 [@BabyGroot]: https://github.com/BabyGroot

@@ -3170,6 +3207,7 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 [@components]: https://github.com/components
 [@cswatt]: https://github.com/cswatt
 [@cwoodcox]: https://github.com/cwoodcox
+[@danhodge]: https://github.com/danhodge
 [@dasch]: https://github.com/dasch
 [@dim]: https://github.com/dim
 [@dirk]: https://github.com/dirk

@@ -3268,4 +3306,4 @@ Git diff: https://github.com/DataDog/dd-trace-rb/compare/v0.3.0...v0.3.1
 [@y-yagi]: https://github.com/y-yagi
 [@yujideveloper]: https://github.com/yujideveloper
 [@yukimurasawa]: https://github.com/yukimurasawa
-[@zachmccormick]: https://github.com/zachmccormick
+[@zachmccormick]: https://github.com/zachmccormick
data/ext/ddtrace_profiling_loader/extconf.rb
CHANGED

@@ -26,7 +26,7 @@ end
 
 # Because we can't control what compiler versions our customers use, shipping with -Werror by default is a no-go.
 # But we can enable it in CI, so that we quickly spot any new warnings that just got introduced.
-add_compiler_flag '-Werror' if ENV['
+add_compiler_flag '-Werror' if ENV['DDTRACE_CI'] == 'true'
 
 # Older gcc releases may not default to C99 and we need to ask for this. This is also used:
 # * by upstream Ruby -- search for gnu99 in the codebase
data/ext/ddtrace_profiling_native_extension/collectors_cpu_and_wall_time.c
CHANGED

@@ -66,10 +66,12 @@
 #define IS_NOT_WALL_TIME false
 #define MISSING_TRACER_CONTEXT_KEY 0
 
-static ID at_active_trace_id; // id of :@active_trace in Ruby
-static ID at_root_span_id; // id of :@root_span in Ruby
 static ID at_active_span_id; // id of :@active_span in Ruby
+static ID at_active_trace_id; // id of :@active_trace in Ruby
 static ID at_id_id; // id of :@id in Ruby
+static ID at_resource_id; // id of :@resource in Ruby
+static ID at_root_span_id; // id of :@root_span in Ruby
+static ID at_type_id; // id of :@type in Ruby
 
 // Contains state for a single CpuAndWallTime instance
 struct cpu_and_wall_time_collector_state {

@@ -91,7 +93,7 @@ struct cpu_and_wall_time_collector_state {
   // is not (just) a stat.
   unsigned int sample_count;
 
-  struct {
+  struct stats {
     // Track how many garbage collection samples we've taken.
     unsigned int gc_samples;
     // See cpu_and_wall_time_collector_on_gc_start for details

@@ -129,6 +131,7 @@ struct trace_identifiers {
   ddog_CharSlice span_id;
   char local_root_span_id_buffer[MAXIMUM_LENGTH_64_BIT_IDENTIFIER];
   char span_id_buffer[MAXIMUM_LENGTH_64_BIT_IDENTIFIER];
+  VALUE trace_endpoint;
 };
 
 static void cpu_and_wall_time_collector_typed_data_mark(void *state_ptr);

@@ -165,6 +168,8 @@ static long wall_time_now_ns(bool raise_on_failure);
 static long thread_id_for(VALUE thread);
 static VALUE _native_stats(VALUE self, VALUE collector_instance);
 static void trace_identifiers_for(struct cpu_and_wall_time_collector_state *state, VALUE thread, struct trace_identifiers *trace_identifiers_result);
+static bool is_type_web(VALUE root_span_type);
+static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE collector_instance);
 
 void collectors_cpu_and_wall_time_init(VALUE profiling_module) {
   VALUE collectors_module = rb_define_module_under(profiling_module, "Collectors");

@@ -184,6 +189,7 @@ void collectors_cpu_and_wall_time_init(VALUE profiling_module) {
 
   rb_define_singleton_method(collectors_cpu_and_wall_time_class, "_native_initialize", _native_initialize, 4);
   rb_define_singleton_method(collectors_cpu_and_wall_time_class, "_native_inspect", _native_inspect, 1);
+  rb_define_singleton_method(collectors_cpu_and_wall_time_class, "_native_reset_after_fork", _native_reset_after_fork, 1);
   rb_define_singleton_method(testing_module, "_native_sample", _native_sample, 1);
   rb_define_singleton_method(testing_module, "_native_on_gc_start", _native_on_gc_start, 1);
   rb_define_singleton_method(testing_module, "_native_on_gc_finish", _native_on_gc_finish, 1);

@@ -192,10 +198,12 @@ void collectors_cpu_and_wall_time_init(VALUE profiling_module) {
   rb_define_singleton_method(testing_module, "_native_per_thread_context", _native_per_thread_context, 1);
   rb_define_singleton_method(testing_module, "_native_stats", _native_stats, 1);
 
-  at_active_trace_id = rb_intern_const("@active_trace");
-  at_root_span_id = rb_intern_const("@root_span");
   at_active_span_id = rb_intern_const("@active_span");
+  at_active_trace_id = rb_intern_const("@active_trace");
   at_id_id = rb_intern_const("@id");
+  at_resource_id = rb_intern_const("@resource");
+  at_root_span_id = rb_intern_const("@root_span");
+  at_type_id = rb_intern_const("@type");
 }
 
 // This structure is used to define a Ruby object that stores a pointer to a struct cpu_and_wall_time_collector_state
@@ -570,12 +578,29 @@ static void trigger_sample_for_thread(
     };
   }
 
-  struct trace_identifiers trace_identifiers_result = {.valid = false};
+  struct trace_identifiers trace_identifiers_result = {.valid = false, .trace_endpoint = Qnil};
   trace_identifiers_for(state, thread, &trace_identifiers_result);
 
   if (trace_identifiers_result.valid) {
     labels[label_pos++] = (ddog_Label) {.key = DDOG_CHARSLICE_C("local root span id"), .str = trace_identifiers_result.local_root_span_id};
     labels[label_pos++] = (ddog_Label) {.key = DDOG_CHARSLICE_C("span id"), .str = trace_identifiers_result.span_id};
+
+    if (trace_identifiers_result.trace_endpoint != Qnil) {
+      // The endpoint gets recorded in a different way because it is mutable in the tracer and can change during a
+      // trace.
+      //
+      // Instead of each sample for the same local_root_span_id getting a potentially-different endpoint,
+      // `record_endpoint` (via libdatadog) keeps a list of local_root_span_id values and their most-recently-seen
+      // endpoint values, and at serialization time the most-recently-seen endpoint is applied to all relevant samples.
+      //
+      // This is why the endpoint is not directly added in this function to the labels array, although it will later
+      // show up in the array in the output pprof.
+      record_endpoint(
+        state->recorder_instance,
+        trace_identifiers_result.local_root_span_id,
+        char_slice_from_ruby_string(trace_identifiers_result.trace_endpoint)
+      );
+    }
   }
 
 // The number of times `label_pos++` shows up in this function needs to match `max_label_count`. To avoid "oops I
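The comment in the hunk above describes an aggregation pattern: rather than attaching the (mutable) endpoint to every sample as a label, the recorder keeps only the most-recently-seen endpoint per local root span id and applies it to all matching samples at serialization time. The sketch below illustrates that idea in plain C with a tiny fixed-size table; the real bookkeeping happens inside libdatadog, so the names and storage here are illustrative only.

```c
#include <stdio.h>
#include <string.h>

// Illustrative only: a tiny "latest endpoint per local root span id" table.
#define MAX_TRACKED_SPANS 64

struct endpoint_entry {
  unsigned long long local_root_span_id;
  char endpoint[128];
};

static struct endpoint_entry tracked[MAX_TRACKED_SPANS];
static int tracked_count = 0;

// Called every time a sample carries an endpoint: keeps only the latest value.
static void record_endpoint(unsigned long long local_root_span_id, const char *endpoint) {
  for (int i = 0; i < tracked_count; i++) {
    if (tracked[i].local_root_span_id == local_root_span_id) {
      snprintf(tracked[i].endpoint, sizeof(tracked[i].endpoint), "%s", endpoint);
      return;
    }
  }
  if (tracked_count < MAX_TRACKED_SPANS) {
    tracked[tracked_count].local_root_span_id = local_root_span_id;
    snprintf(tracked[tracked_count].endpoint, sizeof(tracked[tracked_count].endpoint), "%s", endpoint);
    tracked_count++;
  }
}

// "Serialization time": every sample for a given local root span id gets the same,
// most-recently-seen endpoint, even if earlier samples saw an older value.
static const char *endpoint_for(unsigned long long local_root_span_id) {
  for (int i = 0; i < tracked_count; i++) {
    if (tracked[i].local_root_span_id == local_root_span_id) return tracked[i].endpoint;
  }
  return "";
}

int main(void) {
  record_endpoint(42, "GET 200");              // resource seen early in the request
  record_endpoint(42, "UsersController#show"); // resource after routing; overwrites the previous value
  printf("endpoint for span 42: %s\n", endpoint_for(42)); // -> UsersController#show
  return 0;
}
```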
@@ -862,4 +887,39 @@ static void trace_identifiers_for(struct cpu_and_wall_time_collector_state *stat
   };
 
   trace_identifiers_result->valid = true;
+
+  VALUE root_span_type = rb_ivar_get(root_span, at_type_id /* @type */);
+  if (root_span_type == Qnil || !is_type_web(root_span_type)) return;
+
+  VALUE trace_resource = rb_ivar_get(active_trace, at_resource_id /* @resource */);
+  if (RB_TYPE_P(trace_resource, T_STRING)) {
+    trace_identifiers_result->trace_endpoint = trace_resource;
+  } else if (trace_resource == Qnil) {
+    // Fall back to resource from span, if any
+    trace_identifiers_result->trace_endpoint = rb_ivar_get(root_span, at_resource_id /* @resource */);
+  }
+}
+
+static bool is_type_web(VALUE root_span_type) {
+  ENFORCE_TYPE(root_span_type, T_STRING);
+
+  return RSTRING_LEN(root_span_type) == strlen("web") &&
+    (memcmp("web", StringValuePtr(root_span_type), strlen("web")) == 0);
+}
+
+// After the Ruby VM forks, this method gets called in the child process to clean up any leftover state from the parent.
+//
+// Assumption: This method gets called BEFORE restarting profiling -- e.g. there are no components attempting to
+// trigger samples at the same time.
+static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE collector_instance) {
+  struct cpu_and_wall_time_collector_state *state;
+  TypedData_Get_Struct(collector_instance, struct cpu_and_wall_time_collector_state, &cpu_and_wall_time_collector_typed_data, state);
+
+  st_clear(state->hash_map_per_thread_context);
+
+  state->stats = (struct stats) {}; // Resets all stats back to zero
+
+  rb_funcall(state->recorder_instance, rb_intern("reset_after_fork"), 0);
+
+  return Qtrue;
 }
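Two small C idioms from this file's changes are worth a standalone look: comparing a counted (not necessarily NUL-terminated) string against a literal with a length check plus `memcmp`, as `is_type_web` does, and resetting a whole stats struct in a single assignment from a compound literal. A minimal sketch, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

// Counted-string compare against a literal, mirroring `is_type_web`: check the length
// first so memcmp never reads past either buffer.
static bool slice_equals_literal(const char *ptr, size_t len, const char *literal) {
  return len == strlen(literal) && memcmp(ptr, literal, len) == 0;
}

// Hypothetical stand-in for the collector's stats block.
struct stats {
  unsigned int gc_samples;
  unsigned int missed_samples;
};

int main(void) {
  const char type[] = {'w', 'e', 'b'}; // length known separately, no NUL terminator
  printf("is web? %d\n", slice_equals_literal(type, sizeof(type), "web")); // -> 1

  struct stats stats = {.gc_samples = 10, .missed_samples = 2};
  // One assignment resets every member back to zero; the hunk above uses the
  // empty-brace `(struct stats) {}` form, a GNU/C23 spelling of the same thing.
  stats = (struct stats) {0};
  printf("gc_samples after reset: %u\n", stats.gc_samples); // -> 0
  return 0;
}
```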
data/ext/ddtrace_profiling_native_extension/collectors_cpu_and_wall_time_worker.c
CHANGED

@@ -8,6 +8,7 @@
 #include "ruby_helpers.h"
 #include "collectors_cpu_and_wall_time.h"
 #include "private_vm_api_access.h"
+#include "setup_signal_handler.h"
 
 // Used to trigger the periodic execution of Collectors::CpuAndWallTime, which implements all of the sampling logic
 // itself; this class only implements the "doing it periodically" part.

@@ -29,7 +30,7 @@
 // internals, we may be able to figure out a way of overcoming it. But it's definitely going to be hard so for now
 // we're considering it as a given.
 //
-// ### Flow for triggering samples
+// ### Flow for triggering CPU/Wall-time samples
 //
 // The flow for triggering samples is as follows:
 //

@@ -56,6 +57,16 @@
 // 4. The Ruby VM calls our `sample_from_postponed_job` from a thread holding the global VM lock. A sample is recorded by
 // calling `cpu_and_wall_time_collector_sample`.
 //
+// ### TracePoints and Forking
+//
+// When the Ruby VM forks, the CPU/Wall-time profiling stops naturally because it's triggered by a background thread
+// that doesn't get automatically restarted by the VM on the child process. (The profiler does trigger its restart at
+// some point -- see `Profiling::Tasks::Setup` for details).
+//
+// But this doesn't apply to any `TracePoint`s this class may use, which will continue to be active. Thus, we need to
+// always remember consider this case of -- the worker thread may not be alive but the `TracePoint`s can continue to
+// trigger samples.
+//
 // ---
 
 // Contains state for a single CpuAndWallTimeWorker instance
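The new "TracePoints and Forking" comment leans on standard POSIX fork semantics: only the thread that calls `fork()` exists in the child, so a sampling thread started in the parent silently disappears after fork, while other process-wide state carries over. A minimal C illustration of that thread behavior, independent of Ruby:

```c
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile int worker_started = 0;

// Stand-in for the profiler's background "trigger sampling periodically" thread.
static void *worker_loop(void *unused) {
  (void) unused;
  worker_started = 1;
  for (;;) pause(); // pretend to do periodic work forever
  return NULL;
}

int main(void) {
  pthread_t worker;
  pthread_create(&worker, NULL, worker_loop, NULL);
  while (!worker_started) sched_yield(); // wait until the worker is running

  pid_t child = fork();
  if (child == 0) {
    // Only the forking thread exists in the child: the worker thread is simply gone,
    // and anything that depends on it must be explicitly restarted here.
    printf("child: no worker thread; profiling must be restarted explicitly\n");
    _exit(0);
  }

  waitpid(child, NULL, 0);
  printf("parent: worker thread still running\n");
  return 0;
}
```

Build with `cc -pthread`; the child's printout stands in for whatever restart logic a real program would run there.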
@@ -86,9 +97,6 @@ static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr);
 static VALUE _native_sampling_loop(VALUE self, VALUE instance);
 static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance);
 static VALUE stop(VALUE self_instance, VALUE optional_exception);
-static void install_sigprof_signal_handler(void (*signal_handler_function)(int, siginfo_t *, void *));
-static void remove_sigprof_signal_handler(void);
-static void block_sigprof_signal_handler_from_running_in_current_thread(void);
 static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext);
 static void *run_sampling_trigger_loop(void *state_ptr);
 static void interrupt_sampling_trigger_loop(void *state_ptr);

@@ -107,6 +115,7 @@ static void after_gc_from_postponed_job(DDTRACE_UNUSED void *_unused);
 static void safely_call(VALUE (*function_to_call_safely)(VALUE), VALUE function_to_call_safely_arg, VALUE instance);
 static VALUE _native_simulate_handle_sampling_signal(DDTRACE_UNUSED VALUE self);
 static VALUE _native_simulate_sample_from_postponed_job(DDTRACE_UNUSED VALUE self);
+static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE instance);
 
 // Global state -- be very careful when accessing or modifying it
 

@@ -139,6 +148,7 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) {
   rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_initialize", _native_initialize, 3);
   rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_sampling_loop", _native_sampling_loop, 1);
   rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_stop", _native_stop, 1);
+  rb_define_singleton_method(collectors_cpu_and_wall_time_worker_class, "_native_reset_after_fork", _native_reset_after_fork, 1);
   rb_define_singleton_method(testing_module, "_native_current_sigprof_signal_handler", _native_current_sigprof_signal_handler, 0);
   rb_define_singleton_method(testing_module, "_native_is_running?", _native_is_running, 1);
   rb_define_singleton_method(testing_module, "_native_install_testing_signal_handler", _native_install_testing_signal_handler, 0);

@@ -235,7 +245,7 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
 
   block_sigprof_signal_handler_from_running_in_current_thread(); // We want to interrupt the thread with the global VM lock, never this one
 
-  install_sigprof_signal_handler(handle_sampling_signal);
+  install_sigprof_signal_handler(handle_sampling_signal, "handle_sampling_signal");
   if (state->gc_profiling_enabled) rb_tracepoint_enable(state->gc_tracepoint);
 
   // Release GVL, get to the actual work!

@@ -245,7 +255,18 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) {
   // The sample trigger loop finished (either cleanly or with an error); let's clean up
 
   rb_tracepoint_disable(state->gc_tracepoint);
-
+
+  // Why replace and not use remove the signal handler? We do this because when a process receives a SIGPROF without
+  // having an explicit signal handler set up, the process will instantly terminate with a confusing
+  // "Profiling timer expired" message left behind. (This message doesn't come from us -- it's the default message for
+  // an unhandled SIGPROF. Pretty confusing UNIX/POSIX behavior...)
+  //
+  // Unfortunately, because signal delivery is asynchronous, there's no way to guarantee that there are no pending
+  // profiler-sent signals by the time we get here and want to clean up.
+  // @ivoanjo: I suspect this will never happen, but the cost of getting it wrong is really high (VM terminates) so this
+  // is a just-in-case situation.
+  replace_sigprof_signal_handler_with_empty_handler(handle_sampling_signal);
+
   active_sampler_instance = Qnil;
   active_sampler_owner_thread = Qnil;
 
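The cleanup comment above hinges on a POSIX detail: the default disposition for SIGPROF terminates the process (leaving behind the confusing "Profiling timer expired" message), so a stray, late profiler signal arriving after the handler was reset to `SIG_DFL` would kill the VM. Installing a do-nothing handler instead makes stragglers harmless. A standalone sketch of that reasoning, outside of Ruby:

```c
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>

// Do-nothing handler: a stray SIGPROF delivered while this is installed gets swallowed,
// whereas under the default disposition (SIG_DFL) it terminates the whole process.
static void empty_signal_handler(int signal, siginfo_t *info, void *ucontext) {
  (void) signal; (void) info; (void) ucontext;
}

int main(void) {
  struct sigaction config = {
    .sa_flags = SA_RESTART | SA_SIGINFO,
    .sa_sigaction = empty_signal_handler,
  };
  sigemptyset(&config.sa_mask);
  if (sigaction(SIGPROF, &config, NULL) != 0) {
    perror("sigaction");
    return 1;
  }

  raise(SIGPROF); // simulates a leftover profiler-sent signal arriving during cleanup
  printf("still alive: SIGPROF was handled by the empty handler\n");

  // Had SIGPROF been reset to SIG_DFL before this point, the raise() above would have
  // terminated the process instead.
  return 0;
}
```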
@@ -274,53 +295,6 @@ static VALUE stop(VALUE self_instance, VALUE optional_exception) {
   return Qtrue;
 }
 
-static void install_sigprof_signal_handler(void (*signal_handler_function)(int, siginfo_t *, void *)) {
-  struct sigaction existing_signal_handler_config = {.sa_sigaction = NULL};
-  struct sigaction signal_handler_config = {
-    .sa_flags = SA_RESTART | SA_SIGINFO,
-    .sa_sigaction = signal_handler_function
-  };
-  sigemptyset(&signal_handler_config.sa_mask);
-
-  if (sigaction(SIGPROF, &signal_handler_config, &existing_signal_handler_config) != 0) {
-    rb_sys_fail("Could not start CpuAndWallTimeWorker: Could not install signal handler");
-  }
-
-  // In some corner cases (e.g. after a fork), our signal handler may still be around, and that's ok
-  if (existing_signal_handler_config.sa_sigaction == handle_sampling_signal) return;
-
-  if (existing_signal_handler_config.sa_handler != NULL || existing_signal_handler_config.sa_sigaction != NULL) {
-    // A previous signal handler already existed. Currently we don't support this situation, so let's just back out
-    // of the installation.
-
-    if (sigaction(SIGPROF, &existing_signal_handler_config, NULL) != 0) {
-      rb_sys_fail(
-        "Could not start CpuAndWallTimeWorker: Could not re-install pre-existing SIGPROF signal handler. " \
-        "This may break the component had installed it."
-      );
-    }
-
-    rb_raise(rb_eRuntimeError, "Could not start CpuAndWallTimeWorker: There's a pre-existing SIGPROF signal handler");
-  }
-}
-
-static void remove_sigprof_signal_handler(void) {
-  struct sigaction signal_handler_config = {
-    .sa_handler = SIG_DFL, // Reset back to default
-    .sa_flags = SA_RESTART // TODO: Unclear if this is actually needed/does anything at all
-  };
-  sigemptyset(&signal_handler_config.sa_mask);
-
-  if (sigaction(SIGPROF, &signal_handler_config, NULL) != 0) rb_sys_fail("Failure while removing the signal handler");
-}
-
-static void block_sigprof_signal_handler_from_running_in_current_thread(void) {
-  sigset_t signals_to_block;
-  sigemptyset(&signals_to_block);
-  sigaddset(&signals_to_block, SIGPROF);
-  pthread_sigmask(SIG_BLOCK, &signals_to_block, NULL);
-}
-
 // NOTE: Remember that this will run in the thread and within the scope of user code, including user C code.
 // We need to be careful not to change any state that may be observed OR to restore it if we do. For instance, if anything
 // we do here can set `errno`, then we must be careful to restore the old `errno` after the fact.

@@ -403,6 +377,8 @@ static VALUE _native_current_sigprof_signal_handler(DDTRACE_UNUSED VALUE self) {
 
   if (existing_signal_handler_config.sa_sigaction == handle_sampling_signal) {
     return ID2SYM(rb_intern("profiling"));
+  } else if (existing_signal_handler_config.sa_sigaction == empty_signal_handler) {
+    return ID2SYM(rb_intern("empty"));
   } else if (existing_signal_handler_config.sa_sigaction != NULL) {
     return ID2SYM(rb_intern("other"));
   } else {

@@ -437,7 +413,7 @@ static void testing_signal_handler(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED si
 // This method exists only to enable testing Datadog::Profiling::Collectors::CpuAndWallTimeWorker behavior using RSpec.
 // It SHOULD NOT be used for other purposes.
 static VALUE _native_install_testing_signal_handler(DDTRACE_UNUSED VALUE self) {
-  install_sigprof_signal_handler(testing_signal_handler);
+  install_sigprof_signal_handler(testing_signal_handler, "testing_signal_handler");
   return Qtrue;
 }
 

@@ -567,3 +543,24 @@ static VALUE _native_simulate_sample_from_postponed_job(DDTRACE_UNUSED VALUE sel
   sample_from_postponed_job(NULL);
   return Qtrue;
 }
+
+// After the Ruby VM forks, this method gets called in the child process to clean up any leftover state from the parent.
+//
+// Assumption: This method gets called BEFORE restarting profiling. Note that profiling-related tracepoints may still
+// be active, so we make sure to disable them before calling into anything else, so that there are no components
+// attempting to trigger samples at the same time as the reset is done.
+//
+// In the future, if we add more other components with tracepoints, we will need to coordinate stopping all such
+// tracepoints before doing the other cleaning steps.
+static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE instance) {
+  struct cpu_and_wall_time_worker_state *state;
+  TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state);
+
+  // Disable all tracepoints, so that there are no more attempts to mutate the profile
+  rb_tracepoint_disable(state->gc_tracepoint);
+
+  // Remove all state from the `Collectors::CpuAndWallTime` and connected downstream components
+  rb_funcall(state->cpu_and_wall_time_collector_instance, rb_intern("reset_after_fork"), 0);
+
+  return Qtrue;
+}
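The `_native_reset_after_fork` entry points above are invoked from Ruby when the gem re-initializes profiling in a child process; the native side only needs to disable the GC TracePoint and clear the collector state. In a plain C program, the analogous "discard profiler state inherited from the parent" hook is commonly registered with `pthread_atfork`, as in this sketch (the state struct and names are illustrative, not the gem's):

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

// Illustrative stand-in for per-process profiler state that a child inherits on fork().
struct profiler_state {
  unsigned int sample_count;
  unsigned int gc_samples;
};

static struct profiler_state state;

// Runs only in the child, right after fork(): samples taken by the parent do not
// describe the child process, so start from a clean slate.
static void reset_after_fork_in_child(void) {
  memset(&state, 0, sizeof(state));
}

int main(void) {
  pthread_atfork(NULL, NULL, reset_after_fork_in_child);

  state.sample_count = 1000; // pretend the parent has been profiling for a while

  pid_t child = fork();
  if (child == 0) {
    printf("child sample_count: %u\n", state.sample_count); // -> 0
    _exit(0);
  }
  waitpid(child, NULL, 0);
  printf("parent sample_count: %u\n", state.sample_count); // -> 1000
  return 0;
}
```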
data/ext/ddtrace_profiling_native_extension/collectors_stack.c
CHANGED

@@ -144,11 +144,10 @@ void sample_thread(
 
   // Samples thread into recorder, including as a top frame in the stack a frame named "Garbage Collection"
   if (type == SAMPLE_IN_GC) {
+    ddog_CharSlice function_name = DDOG_CHARSLICE_C("");
+    ddog_CharSlice function_filename = DDOG_CHARSLICE_C("Garbage Collection");
     buffer->lines[0] = (ddog_Line) {
-      .function = (ddog_Function) {
-        .name = DDOG_CHARSLICE_C(""),
-        .filename = DDOG_CHARSLICE_C("Garbage Collection")
-      },
+      .function = (ddog_Function) {.name = function_name, .filename = function_filename},
       .line = 0
     };
     // To avoid changing sample_thread_internal, we just prepare a new buffer struct that uses the same underlying storage as the

@@ -300,11 +299,10 @@ static void maybe_add_placeholder_frames_omitted(VALUE thread, sampling_buffer*
 
   // Important note: `frames_omitted_message` MUST have a lifetime that is at least as long as the call to
   // `record_sample`. So be careful where it gets allocated. (We do have tests for this, at least!)
+  ddog_CharSlice function_name = DDOG_CHARSLICE_C("");
+  ddog_CharSlice function_filename = {.ptr = frames_omitted_message, .len = strlen(frames_omitted_message)};
   buffer->lines[buffer->max_frames - 1] = (ddog_Line) {
-    .function = (ddog_Function) {
-      .name = DDOG_CHARSLICE_C(""),
-      .filename = ((ddog_CharSlice) {.ptr = frames_omitted_message, .len = strlen(frames_omitted_message)})
-    },
+    .function = (ddog_Function) {.name = function_name, .filename = function_filename},
     .line = 0,
   };
 }

@@ -337,11 +335,10 @@ static void record_placeholder_stack_in_native_code(
   sampling_buffer *record_buffer,
   int extra_frames_in_record_buffer
 ) {
+  ddog_CharSlice function_name = DDOG_CHARSLICE_C("");
+  ddog_CharSlice function_filename = DDOG_CHARSLICE_C("In native code");
   buffer->lines[0] = (ddog_Line) {
-    .function = (ddog_Function) {
-      .name = DDOG_CHARSLICE_C(""),
-      .filename = DDOG_CHARSLICE_C("In native code")
-    },
+    .function = (ddog_Function) {.name = function_name, .filename = function_filename},
     .line = 0
   };
 

@@ -373,7 +370,8 @@ sampling_buffer *sampling_buffer_new(unsigned int max_frames) {
   // Currently we have a 1-to-1 correspondence between lines and locations, so we just initialize the locations once
   // here and then only mutate the contents of the lines.
   for (unsigned int i = 0; i < max_frames; i++) {
-
+    ddog_Slice_line lines = (ddog_Slice_line) {.ptr = &buffer->lines[i], .len = 1};
+    buffer->locations[i] = (ddog_Location) {.lines = lines};
   }
 
   return buffer;
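These three hunks apply the same mechanical refactor: the nested compound literals for `ddog_Function` are pulled out into named `ddog_CharSlice` locals, which shortens the initializers and makes the lifetime of the pointed-to strings easier to see (the `frames_omitted_message` note above is precisely a lifetime concern, since the slices only borrow their buffers). A small standalone sketch of the pattern using hypothetical slice/line types:

```c
#include <stdio.h>
#include <string.h>

// Hypothetical stand-ins for libdatadog's slice/function/line types.
typedef struct { const char *ptr; size_t len; } char_slice;
typedef struct { char_slice name; char_slice filename; } function_info;
typedef struct { function_info function; int line; } line_info;

// Convenience macro for slices over string literals (mirrors the DDOG_CHARSLICE_C idea).
#define SLICE_FROM_LITERAL(literal) ((char_slice) {.ptr = literal, .len = sizeof(literal) - 1})

int main(void) {
  const char *frames_omitted_message = "3 frames omitted";

  // Named locals instead of nested compound literals: the slices only borrow the
  // strings, so whatever they point at must outlive every use of `placeholder`.
  char_slice function_name = SLICE_FROM_LITERAL("");
  char_slice function_filename = {.ptr = frames_omitted_message, .len = strlen(frames_omitted_message)};

  line_info placeholder = {
    .function = (function_info) {.name = function_name, .filename = function_filename},
    .line = 0,
  };

  printf("placeholder frame: %.*s\n", (int) placeholder.function.filename.len, placeholder.function.filename.ptr);
  return 0;
}
```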
data/ext/ddtrace_profiling_native_extension/extconf.rb
CHANGED

@@ -79,7 +79,7 @@ end
 
 # Because we can't control what compiler versions our customers use, shipping with -Werror by default is a no-go.
 # But we can enable it in CI, so that we quickly spot any new warnings that just got introduced.
-add_compiler_flag '-Werror' if ENV['
+add_compiler_flag '-Werror' if ENV['DDTRACE_CI'] == 'true'
 
 # Older gcc releases may not default to C99 and we need to ask for this. This is also used:
 # * by upstream Ruby -- search for gnu99 in the codebase
data/ext/ddtrace_profiling_native_extension/private_vm_api_access.c
CHANGED

@@ -14,10 +14,11 @@
 #else
 // On older Rubies, use a copy of the VM internal headers shipped in the debase-ruby_core_source gem
 
-// We can't do anything about warnings in VM headers, so we just use this technique to
+// We can't do anything about warnings in VM headers, so we just use this technique to suppress them.
 // See https://nelkinda.com/blog/suppress-warnings-in-gcc-and-clang/#d11e364 for details.
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Wattributes"
 #include <vm_core.h>
 #pragma GCC diagnostic pop
 #include <iseq.h>

@@ -35,7 +36,7 @@
 // if the argument passed in is not actually a `Thread` instance.
 static inline rb_thread_t *thread_struct_from_object(VALUE thread) {
   static const rb_data_type_t *thread_data_type = NULL;
-  if (thread_data_type == NULL) thread_data_type = RTYPEDDATA_TYPE(rb_thread_current());
+  if (UNLIKELY(thread_data_type == NULL)) thread_data_type = RTYPEDDATA_TYPE(rb_thread_current());
 
   return (rb_thread_t *) rb_check_typeddata(thread, thread_data_type);
 }
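`UNLIKELY` wraps the GCC/Clang `__builtin_expect` hint so the one-time initialization branch is treated as the cold path; the gem defines the macro in its own helpers, so the definition below is just the common form of the idiom. The sketch also shows the same `#pragma GCC diagnostic` push/ignored/pop technique used above to silence a warning coming from code we don't control:

```c
#include <stdio.h>

// Common definitions of the branch-prediction hint macros (the gem ships its own
// versions in its helpers; these are just the usual forms).
#define LIKELY(expression)   __builtin_expect(!!(expression), 1)
#define UNLIKELY(expression) __builtin_expect(!!(expression), 0)

// Same technique as the hunk above: we can't fix warnings inside code we don't own,
// so we temporarily silence a specific diagnostic just around it.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
static int third_party_style_function(int used, int unused) { return used * 2; }
#pragma GCC diagnostic pop

// Mirrors thread_struct_from_object: a static cache that is NULL exactly once, so the
// initialization branch is marked as unlikely.
static const char *cached_name(void) {
  static const char *name = NULL;
  if (UNLIKELY(name == NULL)) name = "expensive lookup result";
  return name;
}

int main(void) {
  printf("%s / %d\n", cached_name(), third_party_style_function(21, 0));
  return 0;
}
```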
data/ext/ddtrace_profiling_native_extension/setup_signal_handler.c
ADDED

@@ -0,0 +1,96 @@
+#include <ruby.h>
+#include <signal.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include "helpers.h"
+#include "setup_signal_handler.h"
+
+static void install_sigprof_signal_handler_internal(
+  void (*signal_handler_function)(int, siginfo_t *, void *),
+  const char *handler_pretty_name,
+  void (*signal_handler_to_replace)(int, siginfo_t *, void *)
+);
+
+void empty_signal_handler(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext) { }
+
+void install_sigprof_signal_handler(void (*signal_handler_function)(int, siginfo_t *, void *), const char *handler_pretty_name) {
+  install_sigprof_signal_handler_internal(signal_handler_function, handler_pretty_name, NULL);
+}
+
+void replace_sigprof_signal_handler_with_empty_handler(void (*expected_existing_handler)(int, siginfo_t *, void *)) {
+  install_sigprof_signal_handler_internal(empty_signal_handler, "empty_signal_handler", expected_existing_handler);
+}
+
+static void install_sigprof_signal_handler_internal(
+  void (*signal_handler_function)(int, siginfo_t *, void *),
+  const char *handler_pretty_name,
+  void (*signal_handler_to_replace)(int, siginfo_t *, void *)
+) {
+  struct sigaction existing_signal_handler_config = {.sa_sigaction = NULL};
+  struct sigaction signal_handler_config = {
+    .sa_flags = SA_RESTART | SA_SIGINFO,
+    .sa_sigaction = signal_handler_function
+  };
+  sigemptyset(&signal_handler_config.sa_mask);
+
+  if (sigaction(SIGPROF, &signal_handler_config, &existing_signal_handler_config) != 0) {
+    rb_exc_raise(rb_syserr_new_str(errno, rb_sprintf("Could not install profiling signal handler (%s)", handler_pretty_name)));
+  }
+
+  // Because signal handler functions are global, let's check if we're not stepping on someone else's toes.
+
+  // If the existing signal handler was our empty one, that's ok as well
+  if (existing_signal_handler_config.sa_sigaction == empty_signal_handler ||
+    // In some corner cases (e.g. after a fork), our signal handler may still be around, and that's ok
+    existing_signal_handler_config.sa_sigaction == signal_handler_function ||
+    // Are we replacing a known handler with another one?
+    (signal_handler_to_replace != NULL && existing_signal_handler_config.sa_sigaction == signal_handler_to_replace)
+  ) { return; }
+
+  if (existing_signal_handler_config.sa_handler != NULL || existing_signal_handler_config.sa_sigaction != NULL) {
+    // An unexpected/unknown signal handler already existed. Currently we don't support this situation, so let's just back out
+    // of the installation.
+
+    if (sigaction(SIGPROF, &existing_signal_handler_config, NULL) != 0) {
+      rb_exc_raise(
+        rb_syserr_new_str(
+          errno,
+          rb_sprintf(
+            "Failed to install profiling signal handler (%s): " \
+            "While installing a SIGPROF signal handler, the profiler detected that another software/library/gem had " \
+            "previously installed a different SIGPROF signal handler. " \
+            "The profiler tried to restore the previous SIGPROF signal handler, but this failed. " \
+            "The other software/library/gem may have been left in a broken state. ",
+            handler_pretty_name
+          )
+        )
+      );
+    }
+
+    rb_raise(
+      rb_eRuntimeError,
+      "Could not install profiling signal handler (%s): There's a pre-existing SIGPROF signal handler",
+      handler_pretty_name
+    );
+  }
+}
+
+// Note: Be careful when using this; you probably want to use `replace_sigprof_signal_handler_with_empty_handler` instead.
+// (See comments on `collectors_cpu_and_wall_time_worker.c` for details)
+void remove_sigprof_signal_handler(void) {
+  struct sigaction signal_handler_config = {
+    .sa_handler = SIG_DFL, // Reset back to default
+    .sa_flags = SA_RESTART // TODO: Unclear if this is actually needed/does anything at all
+  };
+  sigemptyset(&signal_handler_config.sa_mask);
+
+  if (sigaction(SIGPROF, &signal_handler_config, NULL) != 0) rb_sys_fail("Failure while removing the signal handler");
+}
+
+void block_sigprof_signal_handler_from_running_in_current_thread(void) {
+  sigset_t signals_to_block;
+  sigemptyset(&signals_to_block);
+  sigaddset(&signals_to_block, SIGPROF);
+  pthread_sigmask(SIG_BLOCK, &signals_to_block, NULL);
+}
data/ext/ddtrace_profiling_native_extension/setup_signal_handler.h
ADDED

@@ -0,0 +1,7 @@
+#pragma once
+
+void empty_signal_handler(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext);
+void install_sigprof_signal_handler(void (*signal_handler_function)(int, siginfo_t *, void *), const char *handler_pretty_name);
+void replace_sigprof_signal_handler_with_empty_handler(void (*expected_existing_handler)(int, siginfo_t *, void *));
+void remove_sigprof_signal_handler(void);
+void block_sigprof_signal_handler_from_running_in_current_thread(void);
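Putting this new API together: the worker thread blocks SIGPROF for itself (`block_sigprof_signal_handler_from_running_in_current_thread`) and periodically interrupts the thread it wants to sample, whose handler then does the recording. The standalone sketch below reproduces that trigger loop without Ruby; the 10 ms interval, the counter, and sampling the main thread directly are illustrative choices, not the gem's actual behavior:

```c
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

static pthread_t thread_to_sample;
static volatile sig_atomic_t samples_taken = 0;

// Runs on the sampled thread whenever SIGPROF is delivered to it.
static void handle_sampling_signal(int signal, siginfo_t *info, void *ucontext) {
  (void) signal; (void) info; (void) ucontext;
  samples_taken++; // a real profiler would capture a stack here, async-signal-safely
}

// Trigger loop: this thread never runs the handler itself; it only decides *when* to
// sample and interrupts the target thread.
static void *run_sampling_trigger_loop(void *unused) {
  (void) unused;

  // Keep SIGPROF from ever being handled on this thread.
  sigset_t signals_to_block;
  sigemptyset(&signals_to_block);
  sigaddset(&signals_to_block, SIGPROF);
  pthread_sigmask(SIG_BLOCK, &signals_to_block, NULL);

  for (int i = 0; i < 100; i++) {
    struct timespec interval = {.tv_sec = 0, .tv_nsec = 10 * 1000 * 1000}; // 10 ms
    nanosleep(&interval, NULL);
    pthread_kill(thread_to_sample, SIGPROF);
  }
  return NULL;
}

int main(void) {
  struct sigaction config = {.sa_flags = SA_RESTART | SA_SIGINFO, .sa_sigaction = handle_sampling_signal};
  sigemptyset(&config.sa_mask);
  sigaction(SIGPROF, &config, NULL);

  thread_to_sample = pthread_self();

  pthread_t trigger_thread;
  pthread_create(&trigger_thread, NULL, run_sampling_trigger_loop, NULL);
  pthread_join(trigger_thread, NULL); // signals keep arriving on this thread meanwhile

  printf("samples taken: %d\n", (int) samples_taken);
  return 0;
}
```

Build with `cc -pthread`. Splitting "when to sample" from "how to sample" this way is what lets the gem keep the signal handler tiny and the scheduling logic in an ordinary thread.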