pf2 0.1.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +29 -2
- data/Cargo.lock +650 -0
- data/Cargo.toml +3 -0
- data/README.md +110 -13
- data/Rakefile +8 -0
- data/crates/backtrace-sys2/.gitignore +1 -0
- data/crates/backtrace-sys2/Cargo.toml +9 -0
- data/crates/backtrace-sys2/build.rs +48 -0
- data/crates/backtrace-sys2/src/lib.rs +5 -0
- data/crates/backtrace-sys2/src/libbacktrace/.gitignore +15 -0
- data/crates/backtrace-sys2/src/libbacktrace/Isaac.Newton-Opticks.txt +9286 -0
- data/crates/backtrace-sys2/src/libbacktrace/LICENSE +29 -0
- data/crates/backtrace-sys2/src/libbacktrace/Makefile.am +623 -0
- data/crates/backtrace-sys2/src/libbacktrace/Makefile.in +2666 -0
- data/crates/backtrace-sys2/src/libbacktrace/README.md +36 -0
- data/crates/backtrace-sys2/src/libbacktrace/aclocal.m4 +864 -0
- data/crates/backtrace-sys2/src/libbacktrace/alloc.c +167 -0
- data/crates/backtrace-sys2/src/libbacktrace/allocfail.c +136 -0
- data/crates/backtrace-sys2/src/libbacktrace/allocfail.sh +104 -0
- data/crates/backtrace-sys2/src/libbacktrace/atomic.c +113 -0
- data/crates/backtrace-sys2/src/libbacktrace/backtrace-supported.h.in +66 -0
- data/crates/backtrace-sys2/src/libbacktrace/backtrace.c +129 -0
- data/crates/backtrace-sys2/src/libbacktrace/backtrace.h +189 -0
- data/crates/backtrace-sys2/src/libbacktrace/btest.c +501 -0
- data/crates/backtrace-sys2/src/libbacktrace/compile +348 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/enable.m4 +38 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/lead-dot.m4 +31 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/libtool.m4 +7436 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/ltoptions.m4 +369 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/ltsugar.m4 +123 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/ltversion.m4 +23 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/lt~obsolete.m4 +98 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/multi.m4 +68 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/override.m4 +117 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/unwind_ipinfo.m4 +37 -0
- data/crates/backtrace-sys2/src/libbacktrace/config/warnings.m4 +227 -0
- data/crates/backtrace-sys2/src/libbacktrace/config.guess +1700 -0
- data/crates/backtrace-sys2/src/libbacktrace/config.h.in +182 -0
- data/crates/backtrace-sys2/src/libbacktrace/config.sub +1885 -0
- data/crates/backtrace-sys2/src/libbacktrace/configure +15740 -0
- data/crates/backtrace-sys2/src/libbacktrace/configure.ac +613 -0
- data/crates/backtrace-sys2/src/libbacktrace/dwarf.c +4402 -0
- data/crates/backtrace-sys2/src/libbacktrace/edtest.c +120 -0
- data/crates/backtrace-sys2/src/libbacktrace/edtest2.c +43 -0
- data/crates/backtrace-sys2/src/libbacktrace/elf.c +7443 -0
- data/crates/backtrace-sys2/src/libbacktrace/fileline.c +407 -0
- data/crates/backtrace-sys2/src/libbacktrace/filenames.h +52 -0
- data/crates/backtrace-sys2/src/libbacktrace/filetype.awk +13 -0
- data/crates/backtrace-sys2/src/libbacktrace/install-debuginfo-for-buildid.sh.in +65 -0
- data/crates/backtrace-sys2/src/libbacktrace/install-sh +501 -0
- data/crates/backtrace-sys2/src/libbacktrace/instrumented_alloc.c +114 -0
- data/crates/backtrace-sys2/src/libbacktrace/internal.h +389 -0
- data/crates/backtrace-sys2/src/libbacktrace/libtool.m4 +7436 -0
- data/crates/backtrace-sys2/src/libbacktrace/ltmain.sh +8636 -0
- data/crates/backtrace-sys2/src/libbacktrace/ltoptions.m4 +369 -0
- data/crates/backtrace-sys2/src/libbacktrace/ltsugar.m4 +123 -0
- data/crates/backtrace-sys2/src/libbacktrace/ltversion.m4 +23 -0
- data/crates/backtrace-sys2/src/libbacktrace/lt~obsolete.m4 +98 -0
- data/crates/backtrace-sys2/src/libbacktrace/macho.c +1355 -0
- data/crates/backtrace-sys2/src/libbacktrace/missing +215 -0
- data/crates/backtrace-sys2/src/libbacktrace/mmap.c +331 -0
- data/crates/backtrace-sys2/src/libbacktrace/mmapio.c +110 -0
- data/crates/backtrace-sys2/src/libbacktrace/move-if-change +83 -0
- data/crates/backtrace-sys2/src/libbacktrace/mtest.c +410 -0
- data/crates/backtrace-sys2/src/libbacktrace/nounwind.c +66 -0
- data/crates/backtrace-sys2/src/libbacktrace/pecoff.c +957 -0
- data/crates/backtrace-sys2/src/libbacktrace/posix.c +104 -0
- data/crates/backtrace-sys2/src/libbacktrace/print.c +92 -0
- data/crates/backtrace-sys2/src/libbacktrace/read.c +110 -0
- data/crates/backtrace-sys2/src/libbacktrace/simple.c +108 -0
- data/crates/backtrace-sys2/src/libbacktrace/sort.c +108 -0
- data/crates/backtrace-sys2/src/libbacktrace/state.c +72 -0
- data/crates/backtrace-sys2/src/libbacktrace/stest.c +137 -0
- data/crates/backtrace-sys2/src/libbacktrace/test-driver +148 -0
- data/crates/backtrace-sys2/src/libbacktrace/test_format.c +55 -0
- data/crates/backtrace-sys2/src/libbacktrace/testlib.c +234 -0
- data/crates/backtrace-sys2/src/libbacktrace/testlib.h +110 -0
- data/crates/backtrace-sys2/src/libbacktrace/ttest.c +161 -0
- data/crates/backtrace-sys2/src/libbacktrace/unittest.c +92 -0
- data/crates/backtrace-sys2/src/libbacktrace/unknown.c +65 -0
- data/crates/backtrace-sys2/src/libbacktrace/xcoff.c +1606 -0
- data/crates/backtrace-sys2/src/libbacktrace/xztest.c +508 -0
- data/crates/backtrace-sys2/src/libbacktrace/zstdtest.c +523 -0
- data/crates/backtrace-sys2/src/libbacktrace/ztest.c +541 -0
- data/ext/pf2/Cargo.toml +25 -0
- data/ext/pf2/build.rs +3 -0
- data/ext/pf2/extconf.rb +6 -1
- data/ext/pf2/src/backtrace.rs +126 -0
- data/ext/pf2/src/lib.rs +15 -0
- data/ext/pf2/src/profile.rs +65 -0
- data/ext/pf2/src/profile_serializer.rs +204 -0
- data/ext/pf2/src/ringbuffer.rs +152 -0
- data/ext/pf2/src/ruby_init.rs +74 -0
- data/ext/pf2/src/sample.rs +66 -0
- data/ext/pf2/src/siginfo_t.c +5 -0
- data/ext/pf2/src/signal_scheduler/configuration.rs +31 -0
- data/ext/pf2/src/signal_scheduler/timer_installer.rs +199 -0
- data/ext/pf2/src/signal_scheduler.rs +311 -0
- data/ext/pf2/src/timer_thread_scheduler.rs +319 -0
- data/ext/pf2/src/util.rs +30 -0
- data/lib/pf2/cli.rb +1 -1
- data/lib/pf2/reporter.rb +48 -16
- data/lib/pf2/version.rb +1 -1
- data/lib/pf2.rb +20 -5
- metadata +128 -5
- data/ext/pf2/pf2.c +0 -246
data/ext/pf2/src/lib.rs
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
extern crate serde;
|
|
2
|
+
#[macro_use]
|
|
3
|
+
extern crate serde_derive;
|
|
4
|
+
|
|
5
|
+
mod ruby_init;
|
|
6
|
+
|
|
7
|
+
mod backtrace;
|
|
8
|
+
mod profile;
|
|
9
|
+
mod profile_serializer;
|
|
10
|
+
mod ringbuffer;
|
|
11
|
+
mod sample;
|
|
12
|
+
#[cfg(target_os = "linux")]
|
|
13
|
+
mod signal_scheduler;
|
|
14
|
+
mod timer_thread_scheduler;
|
|
15
|
+
mod util;
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
use std::time::Instant;
|
|
2
|
+
use std::{collections::HashSet, ptr::null_mut};
|
|
3
|
+
|
|
4
|
+
use rb_sys::*;
|
|
5
|
+
|
|
6
|
+
use backtrace_sys2::backtrace_create_state;
|
|
7
|
+
|
|
8
|
+
use super::backtrace::{Backtrace, BacktraceState};
|
|
9
|
+
use super::ringbuffer::Ringbuffer;
|
|
10
|
+
use super::sample::Sample;
|
|
11
|
+
|
|
12
|
+
// Capacity large enough to hold 1 second worth of samples for 16 threads
// 16 threads * 20 samples per second * 1 second = 320
const DEFAULT_RINGBUFFER_CAPACITY: usize = 320;

/// In-memory state of a profiling session: the finalized samples, plus a
/// temporary ring buffer that freshly-captured samples are pushed into
/// before being flushed into `samples`.
#[derive(Debug)]
pub struct Profile {
    pub start_timestamp: Instant,
    pub samples: Vec<Sample>,
    pub temporary_sample_buffer: Ringbuffer,
    pub backtrace_state: BacktraceState,
    // Every Ruby VALUE observed in flushed samples (thread objects and
    // frames); dmark() marks these so GC does not collect them.
    known_values: HashSet<VALUE>,
}

impl Profile {
    /// Creates an empty profile and initializes the libbacktrace state
    /// used for native (C-level) stack symbolication.
    pub fn new() -> Self {
        // SAFETY: backtrace_create_state is given a null filename (use the
        // current executable, per libbacktrace's documented behavior) and
        // threaded=1; errors are routed to backtrace_error_callback.
        let backtrace_state = unsafe {
            let ptr = backtrace_create_state(
                null_mut(),
                1,
                Some(Backtrace::backtrace_error_callback),
                null_mut(),
            );
            BacktraceState::new(ptr)
        };

        Self {
            start_timestamp: Instant::now(),
            samples: vec![],
            temporary_sample_buffer: Ringbuffer::new(DEFAULT_RINGBUFFER_CAPACITY),
            backtrace_state,
            known_values: HashSet::new(),
        }
    }

    /// Drains the temporary ring buffer into `samples`, recording every
    /// Ruby VALUE each sample contains so GC marking can reach it later.
    pub fn flush_temporary_sample_buffer(&mut self) {
        while let Some(sample) = self.temporary_sample_buffer.pop() {
            self.known_values.insert(sample.ruby_thread);
            for frame in sample.frames.iter() {
                // A 0 entry marks the end of the captured frames.
                if frame == &0 {
                    break;
                }
                self.known_values.insert(*frame);
            }
            self.samples.push(sample);
        }
    }

    /// GC mark hook: keeps all known VALUEs alive, including those still
    /// sitting in the temporary (not-yet-flushed) buffer.
    pub unsafe fn dmark(&self) {
        for value in self.known_values.iter() {
            rb_gc_mark(*value);
        }
        self.temporary_sample_buffer.dmark();
    }
}
|
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
use std::collections::HashMap;
|
|
2
|
+
use std::ffi::{c_char, CStr};
|
|
3
|
+
use std::hash::Hasher;
|
|
4
|
+
|
|
5
|
+
use rb_sys::*;
|
|
6
|
+
|
|
7
|
+
use crate::backtrace::Backtrace;
|
|
8
|
+
use crate::profile::Profile;
|
|
9
|
+
|
|
10
|
+
/// Serializes a `Profile` into the JSON structure consumed by the reporter.
#[derive(Debug, Deserialize, Serialize)]
pub struct ProfileSerializer {
    threads: HashMap<ThreadId, ThreadProfile>,
}

// The Ruby Thread object's VALUE is used directly as the thread identifier.
type ThreadId = VALUE;

/// Per-thread output: a prefix tree of observed stacks, the table of
/// distinct frames, and the recorded samples (leaf references).
#[derive(Debug, Deserialize, Serialize)]
struct ThreadProfile {
    thread_id: ThreadId,
    stack_tree: StackTreeNode,
    #[serde(rename = "frames")]
    frame_table: HashMap<FrameTableId, FrameTableEntry>,
    samples: Vec<ProfileSample>,
}

impl ThreadProfile {
    fn new(thread_id: ThreadId) -> ThreadProfile {
        ThreadProfile {
            thread_id,
            // The root node
            stack_tree: StackTreeNode {
                children: HashMap::new(),
                node_id: 0,
                frame_id: 0,
            },
            frame_table: HashMap::new(),
            samples: vec![],
        }
    }
}

type StackTreeNodeId = i32;

// Arbitrary value which is used inside StackTreeNode.
// This VALUE should not be dereferenced as a pointer; we're merely using its pointer as a unique value.
// (Probably should be reconsidered)
type FrameTableId = VALUE;

/// One node in the per-thread stack prefix tree.
#[derive(Debug, Deserialize, Serialize)]
struct StackTreeNode {
    // TODO: Maybe a Vec<StackTreeNode> is enough?
    // There's no particular meaning in using FrameTableId as key
    children: HashMap<FrameTableId, StackTreeNode>,
    // An arbitrary ID (no particular meaning)
    node_id: StackTreeNodeId,
    // The frame_table entry this node corresponds to.
    frame_id: FrameTableId,
}

/// Metadata for one distinct stack frame (Ruby or native).
#[derive(Debug, Deserialize, Serialize)]
struct FrameTableEntry {
    id: FrameTableId,
    entry_type: FrameTableEntryType,
    full_label: String,
}

#[derive(Debug, Deserialize, Serialize)]
enum FrameTableEntryType {
    Ruby,
    Native,
}

// Represents a leaf: one recorded sample, identified by the deepest
// stack-tree node of its stack.
#[derive(Debug, Deserialize, Serialize)]
struct ProfileSample {
    elapsed_ns: u128,
    stack_tree_id: StackTreeNodeId,
}
|
|
79
|
+
|
|
80
|
+
impl ProfileSerializer {
|
|
81
|
+
pub fn serialize(profile: &Profile) -> String {
|
|
82
|
+
let mut sequence = 1;
|
|
83
|
+
|
|
84
|
+
let mut serializer = ProfileSerializer {
|
|
85
|
+
threads: HashMap::new(),
|
|
86
|
+
};
|
|
87
|
+
|
|
88
|
+
unsafe {
|
|
89
|
+
// Process each sample
|
|
90
|
+
for sample in profile.samples.iter() {
|
|
91
|
+
let mut merged_stack: Vec<FrameTableEntry> = vec![];
|
|
92
|
+
|
|
93
|
+
// Process C-level stack
|
|
94
|
+
|
|
95
|
+
// A vec to keep the "programmer's" C stack trace.
|
|
96
|
+
// A single PC may be mapped to multiple inlined frames,
|
|
97
|
+
// so we keep the expanded stack frame in this Vec.
|
|
98
|
+
let mut c_stack: Vec<String> = vec![];
|
|
99
|
+
for i in 0..sample.c_backtrace_pcs[0] {
|
|
100
|
+
let pc = sample.c_backtrace_pcs[i + 1];
|
|
101
|
+
Backtrace::backtrace_syminfo(
|
|
102
|
+
&profile.backtrace_state,
|
|
103
|
+
pc,
|
|
104
|
+
|_pc: usize, symname: *const c_char, _symval: usize, _symsize: usize| {
|
|
105
|
+
if symname.is_null() {
|
|
106
|
+
c_stack.push("(no symbol information)".to_owned());
|
|
107
|
+
} else {
|
|
108
|
+
c_stack.push(CStr::from_ptr(symname).to_str().unwrap().to_owned());
|
|
109
|
+
}
|
|
110
|
+
},
|
|
111
|
+
Some(Backtrace::backtrace_error_callback),
|
|
112
|
+
);
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
// Strip the C stack trace:
|
|
116
|
+
// - Remove Pf2-related frames which are always captured
|
|
117
|
+
// - Remove frames below rb_vm_exec
|
|
118
|
+
let mut reached_ruby = false;
|
|
119
|
+
c_stack.retain(|frame| {
|
|
120
|
+
if reached_ruby {
|
|
121
|
+
return false;
|
|
122
|
+
}
|
|
123
|
+
if frame.contains("pf2") {
|
|
124
|
+
return false;
|
|
125
|
+
}
|
|
126
|
+
if frame.contains("rb_vm_exec") || frame.contains("vm_call_cfunc_with_frame") {
|
|
127
|
+
reached_ruby = true;
|
|
128
|
+
return false;
|
|
129
|
+
}
|
|
130
|
+
true
|
|
131
|
+
});
|
|
132
|
+
|
|
133
|
+
for frame in c_stack.iter() {
|
|
134
|
+
merged_stack.push(FrameTableEntry {
|
|
135
|
+
id: calculate_id_for_c_frame(frame),
|
|
136
|
+
entry_type: FrameTableEntryType::Native,
|
|
137
|
+
full_label: frame.to_string(),
|
|
138
|
+
});
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
// Process Ruby-level stack
|
|
142
|
+
|
|
143
|
+
let ruby_stack_depth = sample.line_count;
|
|
144
|
+
for i in 0..ruby_stack_depth {
|
|
145
|
+
let frame: VALUE = sample.frames[i as usize];
|
|
146
|
+
merged_stack.push(FrameTableEntry {
|
|
147
|
+
id: frame,
|
|
148
|
+
entry_type: FrameTableEntryType::Ruby,
|
|
149
|
+
full_label: CStr::from_ptr(rb_string_value_cstr(
|
|
150
|
+
&mut rb_profile_frame_full_label(frame),
|
|
151
|
+
))
|
|
152
|
+
.to_str()
|
|
153
|
+
.unwrap()
|
|
154
|
+
.to_owned(),
|
|
155
|
+
});
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
// Find the Thread profile for this sample
|
|
159
|
+
let thread_serializer = serializer
|
|
160
|
+
.threads
|
|
161
|
+
.entry(sample.ruby_thread)
|
|
162
|
+
.or_insert(ThreadProfile::new(sample.ruby_thread));
|
|
163
|
+
|
|
164
|
+
// Stack frames, shallow to deep
|
|
165
|
+
let mut stack_tree = &mut thread_serializer.stack_tree;
|
|
166
|
+
|
|
167
|
+
while let Some(frame_table_entry) = merged_stack.pop() {
|
|
168
|
+
stack_tree = stack_tree.children.entry(frame_table_entry.id).or_insert({
|
|
169
|
+
let node = StackTreeNode {
|
|
170
|
+
children: HashMap::new(),
|
|
171
|
+
node_id: sequence,
|
|
172
|
+
frame_id: frame_table_entry.id,
|
|
173
|
+
};
|
|
174
|
+
sequence += 1;
|
|
175
|
+
node
|
|
176
|
+
});
|
|
177
|
+
|
|
178
|
+
if merged_stack.is_empty() {
|
|
179
|
+
// This is the leaf node, record a Sample
|
|
180
|
+
let elapsed_ns = (sample.timestamp - profile.start_timestamp).as_nanos();
|
|
181
|
+
thread_serializer.samples.push(ProfileSample {
|
|
182
|
+
elapsed_ns,
|
|
183
|
+
stack_tree_id: stack_tree.node_id,
|
|
184
|
+
});
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
// Register frame metadata to frame table, if not registered yet
|
|
188
|
+
thread_serializer
|
|
189
|
+
.frame_table
|
|
190
|
+
.entry(frame_table_entry.id)
|
|
191
|
+
.or_insert(frame_table_entry);
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
serde_json::to_string(&serializer).unwrap()
|
|
197
|
+
}
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
fn calculate_id_for_c_frame<T: std::hash::Hash>(t: &T) -> FrameTableId {
|
|
201
|
+
let mut s = std::collections::hash_map::DefaultHasher::new();
|
|
202
|
+
t.hash(&mut s);
|
|
203
|
+
s.finish()
|
|
204
|
+
}
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
use crate::sample::Sample;
|
|
2
|
+
|
|
3
|
+
#[derive(Debug)]
|
|
4
|
+
pub struct Ringbuffer {
|
|
5
|
+
capacity: usize,
|
|
6
|
+
buffer: Vec<Option<Sample>>,
|
|
7
|
+
read_index: usize,
|
|
8
|
+
write_index: usize,
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
#[derive(Debug, PartialEq)]
|
|
12
|
+
pub enum RingbufferError {
|
|
13
|
+
Full,
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
impl Ringbuffer {
|
|
17
|
+
pub fn new(capacity: usize) -> Self {
|
|
18
|
+
Self {
|
|
19
|
+
capacity,
|
|
20
|
+
buffer: std::iter::repeat_with(|| None)
|
|
21
|
+
.take(capacity + 1)
|
|
22
|
+
.collect::<Vec<_>>(),
|
|
23
|
+
read_index: 0,
|
|
24
|
+
write_index: 0,
|
|
25
|
+
}
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
// async-signal-safe
|
|
29
|
+
pub fn push(&mut self, sample: Sample) -> Result<(), RingbufferError> {
|
|
30
|
+
let next = (self.write_index + 1) % (self.capacity + 1);
|
|
31
|
+
if next == self.read_index {
|
|
32
|
+
return Err(RingbufferError::Full);
|
|
33
|
+
}
|
|
34
|
+
self.buffer[self.write_index] = Some(sample);
|
|
35
|
+
self.write_index = next;
|
|
36
|
+
Ok(())
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
pub fn pop(&mut self) -> Option<Sample> {
|
|
40
|
+
if self.read_index == self.write_index {
|
|
41
|
+
return None;
|
|
42
|
+
}
|
|
43
|
+
let sample = self.buffer[self.read_index].take();
|
|
44
|
+
self.read_index = (self.read_index + 1) % (self.capacity + 1);
|
|
45
|
+
sample
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
// This will call rb_gc_mark() for capacity * Sample::MAX_STACK_DEPTH * 2 times, which is a lot!
|
|
49
|
+
pub fn dmark(&self) {
|
|
50
|
+
for sample in self.buffer.iter().flatten() {
|
|
51
|
+
unsafe {
|
|
52
|
+
sample.dmark();
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Instant;

    // Basic FIFO behaviour: pops return in push order, and an exhausted
    // buffer yields None again.
    #[test]
    fn test_ringbuffer() {
        let mut ringbuffer = Ringbuffer::new(2);
        assert_eq!(ringbuffer.pop(), None);

        let sample1 = Sample {
            ruby_thread: 1,
            timestamp: Instant::now(),
            line_count: 0,
            frames: [0; 500],
            linenos: [0; 500],
            c_backtrace_pcs: [0; 1001],
        };
        let sample2 = Sample {
            ruby_thread: 2,
            timestamp: Instant::now(),
            line_count: 0,
            frames: [0; 500],
            linenos: [0; 500],
            c_backtrace_pcs: [0; 1001],
        };

        ringbuffer.push(sample1).unwrap();
        ringbuffer.push(sample2).unwrap();

        // ruby_thread doubles as a marker to tell the samples apart.
        assert_eq!(ringbuffer.pop().unwrap().ruby_thread, 1);
        assert_eq!(ringbuffer.pop().unwrap().ruby_thread, 2);
        assert_eq!(ringbuffer.pop(), None);
    }

    // A buffer of capacity 1 rejects a second push with Full.
    #[test]
    fn test_ringbuffer_full() {
        let mut ringbuffer = Ringbuffer::new(1);
        let sample1 = Sample {
            ruby_thread: 1,
            timestamp: Instant::now(),
            line_count: 0,
            frames: [0; 500],
            linenos: [0; 500],
            c_backtrace_pcs: [0; 1001],
        };
        let sample2 = Sample {
            ruby_thread: 2,
            timestamp: Instant::now(),
            line_count: 0,
            frames: [0; 500],
            linenos: [0; 500],
            c_backtrace_pcs: [0; 1001],
        };

        ringbuffer.push(sample1).unwrap();
        assert_eq!(ringbuffer.push(sample2), Err(RingbufferError::Full));
    }

    // Alternating push/pop cycles the indices past the end of the backing
    // storage, exercising the wrap-around arithmetic.
    #[test]
    fn test_ringbuffer_write_a_lot() {
        let mut ringbuffer = Ringbuffer::new(2);
        let sample1 = Sample {
            ruby_thread: 1,
            timestamp: Instant::now(),
            line_count: 0,
            frames: [0; 500],
            linenos: [0; 500],
            c_backtrace_pcs: [0; 1001],
        };
        let sample2 = Sample {
            ruby_thread: 2,
            timestamp: Instant::now(),
            line_count: 0,
            frames: [0; 500],
            linenos: [0; 500],
            c_backtrace_pcs: [0; 1001],
        };
        let sample3 = Sample {
            ruby_thread: 3,
            timestamp: Instant::now(),
            line_count: 0,
            frames: [0; 500],
            linenos: [0; 500],
            c_backtrace_pcs: [0; 1001],
        };

        ringbuffer.push(sample1).unwrap();
        ringbuffer.pop().unwrap();
        ringbuffer.push(sample2).unwrap();
        ringbuffer.pop().unwrap();
        ringbuffer.push(sample3).unwrap();
        assert_eq!(ringbuffer.pop().unwrap().ruby_thread, 3);
    }
}
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
#![deny(unsafe_op_in_unsafe_fn)]
|
|
2
|
+
|
|
3
|
+
use rb_sys::*;
|
|
4
|
+
|
|
5
|
+
#[cfg(target_os = "linux")]
|
|
6
|
+
use crate::signal_scheduler::SignalScheduler;
|
|
7
|
+
use crate::timer_thread_scheduler::TimerThreadScheduler;
|
|
8
|
+
use crate::util::*;
|
|
9
|
+
|
|
10
|
+
/// Ruby extension entry point, invoked by Ruby when the native library is
/// required. Defines the `Pf2` module and registers the scheduler classes.
#[allow(non_snake_case)]
#[no_mangle]
extern "C" fn Init_pf2() {
    // With the "debug" feature, initialize env_logger (compact output:
    // no timestamps, no module paths).
    #[cfg(feature = "debug")]
    {
        env_logger::builder()
            .format_timestamp(None)
            .format_module_path(false)
            .init();
    }

    unsafe {
        let rb_mPf2: VALUE = rb_define_module(cstr!("Pf2"));

        // SignalScheduler is only compiled on Linux (see the cfg on the
        // module declaration in lib.rs).
        #[cfg(target_os = "linux")]
        {
            let rb_mPf2_SignalScheduler =
                rb_define_class_under(rb_mPf2, cstr!("SignalScheduler"), rb_cObject);
            rb_define_alloc_func(rb_mPf2_SignalScheduler, Some(SignalScheduler::rb_alloc));
            // initialize(*args) — arity -1 lets Ruby pass a variable argument list.
            rb_define_method(
                rb_mPf2_SignalScheduler,
                cstr!("initialize"),
                Some(to_ruby_cfunc_with_args(SignalScheduler::rb_initialize)),
                -1,
            );
            rb_define_method(
                rb_mPf2_SignalScheduler,
                cstr!("start"),
                Some(to_ruby_cfunc_with_no_args(SignalScheduler::rb_start)),
                0,
            );
            rb_define_method(
                rb_mPf2_SignalScheduler,
                cstr!("stop"),
                Some(to_ruby_cfunc_with_no_args(SignalScheduler::rb_stop)),
                0,
            );
        }

        // TimerThreadScheduler is available on all platforms.
        let rb_mPf2_TimerThreadScheduler =
            rb_define_class_under(rb_mPf2, cstr!("TimerThreadScheduler"), rb_cObject);
        rb_define_alloc_func(
            rb_mPf2_TimerThreadScheduler,
            Some(TimerThreadScheduler::rb_alloc),
        );
        rb_define_method(
            rb_mPf2_TimerThreadScheduler,
            cstr!("initialize"),
            Some(to_ruby_cfunc_with_args(TimerThreadScheduler::rb_initialize)),
            -1,
        );
        rb_define_method(
            rb_mPf2_TimerThreadScheduler,
            cstr!("start"),
            Some(to_ruby_cfunc_with_no_args(TimerThreadScheduler::rb_start)),
            0,
        );
        rb_define_method(
            rb_mPf2_TimerThreadScheduler,
            cstr!("stop"),
            Some(to_ruby_cfunc_with_no_args(TimerThreadScheduler::rb_stop)),
            0,
        );
    }
}
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
use std::time::Instant;
|
|
2
|
+
|
|
3
|
+
use rb_sys::*;
|
|
4
|
+
|
|
5
|
+
use crate::backtrace::{Backtrace, BacktraceState};
|
|
6
|
+
|
|
7
|
+
const MAX_STACK_DEPTH: usize = 500;
|
|
8
|
+
const MAX_C_STACK_DEPTH: usize = 1000;
|
|
9
|
+
|
|
10
|
+
#[derive(Debug, PartialEq)]
|
|
11
|
+
pub struct Sample {
|
|
12
|
+
pub ruby_thread: VALUE,
|
|
13
|
+
pub timestamp: Instant,
|
|
14
|
+
pub line_count: i32,
|
|
15
|
+
pub frames: [VALUE; MAX_STACK_DEPTH],
|
|
16
|
+
pub linenos: [i32; MAX_STACK_DEPTH],
|
|
17
|
+
pub c_backtrace_pcs: [usize; MAX_C_STACK_DEPTH + 1],
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
impl Sample {
|
|
21
|
+
// Nearly async-signal-safe
|
|
22
|
+
// (rb_profile_thread_frames isn't defined as a-s-s)
|
|
23
|
+
pub fn capture(ruby_thread: VALUE, backtrace_state: &BacktraceState) -> Self {
|
|
24
|
+
let mut c_backtrace_pcs = [0; MAX_C_STACK_DEPTH + 1];
|
|
25
|
+
|
|
26
|
+
Backtrace::backtrace_simple(
|
|
27
|
+
backtrace_state,
|
|
28
|
+
0,
|
|
29
|
+
|pc: usize| -> i32 {
|
|
30
|
+
if c_backtrace_pcs[0] >= MAX_C_STACK_DEPTH {
|
|
31
|
+
return 1;
|
|
32
|
+
}
|
|
33
|
+
c_backtrace_pcs[0] += 1;
|
|
34
|
+
c_backtrace_pcs[c_backtrace_pcs[0]] = pc;
|
|
35
|
+
0
|
|
36
|
+
},
|
|
37
|
+
Some(Backtrace::backtrace_error_callback),
|
|
38
|
+
);
|
|
39
|
+
|
|
40
|
+
let mut sample = Sample {
|
|
41
|
+
ruby_thread,
|
|
42
|
+
timestamp: Instant::now(),
|
|
43
|
+
line_count: 0,
|
|
44
|
+
frames: [0; MAX_STACK_DEPTH],
|
|
45
|
+
linenos: [0; MAX_STACK_DEPTH],
|
|
46
|
+
c_backtrace_pcs,
|
|
47
|
+
};
|
|
48
|
+
unsafe {
|
|
49
|
+
sample.line_count = rb_profile_thread_frames(
|
|
50
|
+
ruby_thread,
|
|
51
|
+
0,
|
|
52
|
+
2000,
|
|
53
|
+
sample.frames.as_mut_ptr(),
|
|
54
|
+
sample.linenos.as_mut_ptr(),
|
|
55
|
+
);
|
|
56
|
+
};
|
|
57
|
+
sample
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
pub unsafe fn dmark(&self) {
|
|
61
|
+
rb_gc_mark(self.ruby_thread);
|
|
62
|
+
for frame in self.frames.iter() {
|
|
63
|
+
rb_gc_mark(*frame);
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
use std::collections::HashSet;
|
|
2
|
+
use std::str::FromStr;
|
|
3
|
+
use std::time::Duration;
|
|
4
|
+
|
|
5
|
+
use rb_sys::VALUE;
|
|
6
|
+
|
|
7
|
+
/// User-supplied profiler settings shared by the schedulers.
#[derive(Clone, Debug)]
pub struct Configuration {
    // Sampling interval.
    pub interval: Duration,
    // Whether samples are taken on CPU time or wall-clock time.
    pub time_mode: TimeMode,
    // Ruby Thread objects (VALUEs) selected for profiling.
    pub target_ruby_threads: HashSet<VALUE>,
    // Whether threads created after profiling starts are also profiled.
    pub track_new_threads: bool,
}
|
|
14
|
+
|
|
15
|
+
/// The clock against which sampling intervals are measured.
#[derive(Clone, Debug)]
pub enum TimeMode {
    CpuTime,
    WallTime,
}

impl FromStr for TimeMode {
    type Err = ();

    /// Parses `"cpu"` or `"wall"`; any other string is an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "cpu" {
            Ok(Self::CpuTime)
        } else if s == "wall" {
            Ok(Self::WallTime)
        } else {
            Err(())
        }
    }
}
|