pf2 0.3.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +31 -0
- data/Cargo.lock +2 -2
- data/README.md +16 -3
- data/ext/pf2/src/backtrace.rs +1 -0
- data/ext/pf2/src/lib.rs +4 -0
- data/ext/pf2/src/profile_serializer.rs +77 -33
- data/ext/pf2/src/ruby_init.rs +9 -40
- data/ext/pf2/src/ruby_internal_apis.rs +70 -0
- data/ext/pf2/src/scheduler.rs +10 -0
- data/ext/pf2/src/session/configuration.rs +106 -0
- data/ext/pf2/src/session/new_thread_watcher.rs +80 -0
- data/ext/pf2/src/session/ruby_object.rs +90 -0
- data/ext/pf2/src/session.rs +227 -0
- data/ext/pf2/src/signal_scheduler.rs +105 -221
- data/ext/pf2/src/timer_thread_scheduler.rs +92 -240
- data/lib/pf2/cli.rb +69 -7
- data/lib/pf2/reporter.rb +105 -4
- data/lib/pf2/serve.rb +60 -0
- data/lib/pf2/session.rb +7 -0
- data/lib/pf2/version.rb +1 -1
- data/lib/pf2.rb +7 -14
- metadata +24 -8
- data/ext/pf2/src/signal_scheduler/configuration.rs +0 -31
- data/ext/pf2/src/signal_scheduler/timer_installer.rs +0 -199
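
For orientation before the per-file diffs: 0.5.0 routes profiling through the new session code paths (ext/pf2/src/session.rs, lib/pf2/session.rb), and the new lib/pf2/serve.rb shown below drives the public API roughly as in this hedged sketch. `do_some_work` is a placeholder for the code being profiled; only the calls that serve.rb itself makes (Pf2.start with no arguments, Pf2.stop returning a JSON string, Pf2::Reporter#emit) are taken from this release.

    require 'json'
    require 'pf2'

    Pf2.start                        # begin sampling (serve.rb calls it with no arguments)
    do_some_work                     # placeholder for the code being profiled
    json = Pf2.stop                  # returns the serialized profile as a JSON string
    profile = JSON.parse(json, symbolize_names: true, max_nesting: false)
    report = Pf2::Reporter.new(profile).emit   # Firefox Profiler-compatible structure
    File.write('report.json', JSON.generate(report))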
data/ext/pf2/src/timer_thread_scheduler.rs
CHANGED
@@ -1,124 +1,39 @@
 #![deny(unsafe_op_in_unsafe_fn)]
 
-use std::ffi::{
+use std::ffi::{c_void, CString};
 use std::mem::ManuallyDrop;
-use std::ptr::null_mut;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
 use std::thread;
-use std::time::Duration;
 
 use rb_sys::*;
 
 use crate::profile::Profile;
 use crate::profile_serializer::ProfileSerializer;
 use crate::sample::Sample;
+use crate::scheduler::Scheduler;
+use crate::session::configuration::{self, Configuration};
 use crate::util::*;
 
 #[derive(Clone, Debug)]
 pub struct TimerThreadScheduler {
-
-
-    profile: Option<Arc<RwLock<Profile>>>,
+    configuration: Arc<Configuration>,
+    profile: Arc<RwLock<Profile>>,
     stop_requested: Arc<AtomicBool>,
 }
 
 #[derive(Debug)]
 struct PostponedJobArgs {
-
+    configuration: Arc<Configuration>,
     profile: Arc<RwLock<Profile>>,
 }
 
-impl TimerThreadScheduler {
-    fn
-
-            ruby_threads: Arc::new(RwLock::new(vec![])),
-            interval: None,
-            profile: None,
-            stop_requested: Arc::new(AtomicBool::new(false)),
-        }
-    }
-
-    fn initialize(&mut self, argc: c_int, argv: *const VALUE, _rbself: VALUE) -> VALUE {
-        // Parse arguments
-        let kwargs: VALUE = Qnil.into();
-        unsafe {
-            rb_scan_args(argc, argv, cstr!(":"), &kwargs);
-        };
-        let mut kwargs_values: [VALUE; 3] = [Qnil.into(); 3];
-        unsafe {
-            rb_get_kwargs(
-                kwargs,
-                [
-                    rb_intern(cstr!("interval_ms")),
-                    rb_intern(cstr!("threads")),
-                    rb_intern(cstr!("time_mode")),
-                ]
-                .as_mut_ptr(),
-                0,
-                3,
-                kwargs_values.as_mut_ptr(),
-            );
-        };
-        let interval: Duration = if kwargs_values[0] != Qundef as VALUE {
-            let interval_ms = unsafe { rb_num2long(kwargs_values[0]) };
-            Duration::from_millis(interval_ms.try_into().unwrap_or_else(|_| {
-                eprintln!(
-                    "[Pf2] Warning: Specified interval ({}) is not valid. Using default value (49ms).",
-                    interval_ms
-                );
-                49
-            }))
-        } else {
-            Duration::from_millis(49)
-        };
-        let threads: VALUE = if kwargs_values[1] != Qundef as VALUE {
-            kwargs_values[1]
-        } else {
-            unsafe { rb_funcall(rb_cThread, rb_intern(cstr!("list")), 0) }
-        };
-        if kwargs_values[2] != Qundef as VALUE {
-            let specified_mode = unsafe {
-                let mut str = rb_funcall(kwargs_values[2], rb_intern(cstr!("to_s")), 0);
-                let ptr = rb_string_value_ptr(&mut str);
-                CStr::from_ptr(ptr).to_str().unwrap()
-            };
-            if specified_mode != "wall" {
-                // Raise an ArgumentError
-                unsafe {
-                    rb_raise(
-                        rb_eArgError,
-                        cstr!("TimerThreadScheduler only supports :wall mode."),
-                    )
-                }
-            }
-        }
-
-        let mut target_ruby_threads = Vec::new();
-        unsafe {
-            for i in 0..RARRAY_LEN(threads) {
-                let ruby_thread: VALUE = rb_ary_entry(threads, i);
-                target_ruby_threads.push(ruby_thread);
-            }
-        }
-
-        self.interval = Some(Arc::new(interval));
-        self.ruby_threads = Arc::new(RwLock::new(target_ruby_threads.into_iter().collect()));
-
-        Qnil.into()
-    }
-
-    fn start(&mut self, _rbself: VALUE) -> VALUE {
-        // Create Profile
-        let profile = Arc::new(RwLock::new(Profile::new()));
-        self.start_profile_buffer_flusher_thread(&profile);
-
-        // Start monitoring thread
-        let stop_requested = Arc::clone(&self.stop_requested);
-        let interval = Arc::clone(self.interval.as_ref().unwrap());
+impl Scheduler for TimerThreadScheduler {
+    fn start(&self) -> VALUE {
+        // Register the Postponed Job which does the actual work of collecting samples
         let postponed_job_args: Box<PostponedJobArgs> = Box::new(PostponedJobArgs {
-
-            profile: Arc::clone(&profile),
+            configuration: Arc::clone(&self.configuration),
+            profile: Arc::clone(&self.profile),
         });
         let postponed_job_handle: rb_postponed_job_handle_t = unsafe {
             rb_postponed_job_preregister(
@@ -127,18 +42,79 @@ impl TimerThreadScheduler {
                 Box::into_raw(postponed_job_args) as *mut c_void, // FIXME: leak
             )
         };
+
+        // Start a timer thread that periodically triggers postponed jobs based on configuration
+        let configuration = Arc::clone(&self.configuration);
+        let stop_requested = Arc::clone(&self.stop_requested);
         thread::spawn(move || {
-            Self::thread_main_loop(
+            Self::thread_main_loop(configuration, stop_requested, postponed_job_handle)
         });
 
-        self.profile = Some(profile);
-
         Qtrue.into()
     }
 
+    fn stop(&self) -> VALUE {
+        // Stop the collector thread
+        self.stop_requested.store(true, Ordering::Relaxed);
+
+        // Finalize
+        match self.profile.try_write() {
+            Ok(mut profile) => {
+                profile.flush_temporary_sample_buffer();
+            }
+            Err(_) => {
+                println!("[pf2 ERROR] stop: Failed to acquire profile lock.");
+                return Qfalse.into();
+            }
+        }
+
+        let profile = self.profile.try_read().unwrap();
+        log::debug!("Number of samples: {}", profile.samples.len());
+
+        let serialized = ProfileSerializer::serialize(&profile);
+        let serialized = CString::new(serialized).unwrap();
+        unsafe { rb_str_new_cstr(serialized.as_ptr()) }
+    }
+
+    fn on_new_thread(&self, _thread: VALUE) {
+        todo!();
+    }
+
+    fn dmark(&self) {
+        match self.profile.read() {
+            Ok(profile) => unsafe {
+                profile.dmark();
+            },
+            Err(_) => {
+                panic!("[pf2 FATAL] dmark: Failed to acquire profile lock.");
+            }
+        }
+    }
+
+    fn dfree(&self) {
+        // No-op
+    }
+
+    fn dsize(&self) -> size_t {
+        // FIXME: Report something better
+        std::mem::size_of::<TimerThreadScheduler>() as size_t
+    }
+}
+
+impl TimerThreadScheduler {
+    pub fn new(configuration: &Configuration, profile: Arc<RwLock<Profile>>) -> Self {
+        Self {
+            configuration: Arc::new(configuration.clone()),
+            profile,
+            stop_requested: Arc::new(AtomicBool::new(false)),
+        }
+
+        // cstr!("TimerThreadScheduler only supports :wall mode."),
+    }
+
     fn thread_main_loop(
+        configuration: Arc<Configuration>,
         stop_requested: Arc<AtomicBool>,
-        interval: Arc<Duration>,
         postponed_job_handle: rb_postponed_job_handle_t,
     ) {
         loop {
@@ -146,37 +122,11 @@ impl TimerThreadScheduler {
                 break;
             }
             unsafe {
+                log::trace!("Triggering postponed job");
                 rb_postponed_job_trigger(postponed_job_handle);
             }
 
-            thread::sleep(
-        }
-    }
-
-    fn stop(&self, _rbself: VALUE) -> VALUE {
-        // Stop the collector thread
-        self.stop_requested.store(true, Ordering::Relaxed);
-
-        if let Some(profile) = &self.profile {
-            // Finalize
-            match profile.try_write() {
-                Ok(mut profile) => {
-                    profile.flush_temporary_sample_buffer();
-                }
-                Err(_) => {
-                    println!("[pf2 ERROR] stop: Failed to acquire profile lock.");
-                    return Qfalse.into();
-                }
-            }
-
-            let profile = profile.try_read().unwrap();
-            log::debug!("Number of samples: {}", profile.samples.len());
-
-            let serialized = ProfileSerializer::serialize(&profile);
-            let serialized = CString::new(serialized).unwrap();
-            unsafe { rb_str_new_cstr(serialized.as_ptr()) }
-        } else {
-            panic!("stop() called before start()");
+            thread::sleep(configuration.interval);
         }
     }
 
@@ -196,124 +146,26 @@ impl TimerThreadScheduler {
         };
 
         // Collect stack information from specified Ruby Threads
-
-
-
-
-
-
-
-
-
-                    log::debug!("Temporary sample buffer full. Dropping sample.");
-                }
-            }
-        unsafe {
-            rb_gc_enable();
-        }
-    }
-
-    fn start_profile_buffer_flusher_thread(&self, profile: &Arc<RwLock<Profile>>) {
-        let profile = Arc::clone(profile);
-        thread::spawn(move || loop {
-            log::trace!("Flushing temporary sample buffer");
-            match profile.try_write() {
-                Ok(mut profile) => {
-                    profile.flush_temporary_sample_buffer();
-                }
-                Err(_) => {
-                    log::debug!("flusher: Failed to acquire profile lock");
-                }
-            }
-            thread::sleep(Duration::from_millis(500));
-        });
-    }
-
-    // Ruby Methods
-
-    pub unsafe extern "C" fn rb_initialize(
-        argc: c_int,
-        argv: *const VALUE,
-        rbself: VALUE,
-    ) -> VALUE {
-        let mut collector = Self::get_struct_from(rbself);
-        collector.initialize(argc, argv, rbself)
-    }
-
-    // SampleCollector.start
-    pub unsafe extern "C" fn rb_start(rbself: VALUE) -> VALUE {
-        let mut collector = Self::get_struct_from(rbself);
-        collector.start(rbself)
-    }
-
-    // SampleCollector.stop
-    pub unsafe extern "C" fn rb_stop(rbself: VALUE) -> VALUE {
-        let collector = Self::get_struct_from(rbself);
-        collector.stop(rbself)
-    }
-
-    // Functions for TypedData
-
-    fn get_struct_from(obj: VALUE) -> ManuallyDrop<Box<Self>> {
-        unsafe {
-            let ptr = rb_check_typeddata(obj, &RBDATA);
-            ManuallyDrop::new(Box::from_raw(ptr as *mut TimerThreadScheduler))
-        }
-    }
-
-    #[allow(non_snake_case)]
-    pub unsafe extern "C" fn rb_alloc(_rbself: VALUE) -> VALUE {
-        let collector = TimerThreadScheduler::new();
-
-        unsafe {
-            let rb_mPf2: VALUE = rb_define_module(cstr!("Pf2"));
-            let rb_cTimerThreadScheduler =
-                rb_define_class_under(rb_mPf2, cstr!("TimerThreadScheduler"), rb_cObject);
-
-            rb_data_typed_object_wrap(
-                rb_cTimerThreadScheduler,
-                Box::into_raw(Box::new(collector)) as *mut _ as *mut c_void,
-                &RBDATA,
-            )
-        }
-    }
-
-    unsafe extern "C" fn dmark(ptr: *mut c_void) {
-        unsafe {
-            let collector = ManuallyDrop::new(Box::from_raw(ptr as *mut TimerThreadScheduler));
-            if let Some(profile) = &collector.profile {
-                match profile.read() {
-                    Ok(profile) => {
-                        profile.dmark();
+        match &args.configuration.target_ruby_threads {
+            configuration::Threads::All => todo!(),
+            configuration::Threads::Targeted(threads) => {
+                for ruby_thread in threads.iter() {
+                    // Check if the thread is still alive
+                    if unsafe { rb_funcall(*ruby_thread, rb_intern(cstr!("status")), 0) }
+                        == Qfalse as u64
+                    {
+                        continue;
                     }
-
-
+
+                    let sample = Sample::capture(*ruby_thread, &profile.backtrace_state);
+                    if profile.temporary_sample_buffer.push(sample).is_err() {
+                        log::debug!("Temporary sample buffer full. Dropping sample.");
                     }
                 }
             }
         }
-        }
-    unsafe extern "C" fn dfree(ptr: *mut c_void) {
         unsafe {
-
+            rb_gc_enable();
         }
     }
-    unsafe extern "C" fn dsize(_: *const c_void) -> size_t {
-        // FIXME: Report something better
-        std::mem::size_of::<TimerThreadScheduler>() as size_t
-    }
 }
-
-static mut RBDATA: rb_data_type_t = rb_data_type_t {
-    wrap_struct_name: cstr!("TimerThreadScheduler"),
-    function: rb_data_type_struct__bindgen_ty_1 {
-        dmark: Some(TimerThreadScheduler::dmark),
-        dfree: Some(TimerThreadScheduler::dfree),
-        dsize: Some(TimerThreadScheduler::dsize),
-        dcompact: None,
-        reserved: [null_mut(); 1],
-    },
-    parent: null_mut(),
-    data: null_mut(),
-    flags: 0,
-};
data/lib/pf2/cli.rb
CHANGED
@@ -10,24 +10,51 @@ module Pf2
     end
 
     def run(argv)
+      argv = argv.dup
+      program_name = File.basename($PROGRAM_NAME)
+
+      subcommand = argv.shift
+      case subcommand
+      when 'report'
+        subcommand_report(argv)
+      when 'serve'
+        subcommand_serve(argv)
+      when 'version'
+        puts VERSION
+        return 0
+      when '--help'
+        STDERR.puts <<~__EOS__
+          Usage: #{program_name} COMMAND [options]
+
+          Commands:
+            report    Generate a report from a profile
+            serve     Start an HTTP server alongside a target process
+            version   Show version information
+        __EOS__
+
+        return 1
+      else
+        STDERR.puts "#{program_name}: Unknown subcommand '#{subcommand}'."
+        STDERR.puts "See '#{program_name} --help'"
+        return 1
+      end
+    end
+
+    def subcommand_report(argv)
       options = {}
       option_parser = OptionParser.new do |opts|
-        opts.
-          puts Pf2::VERSION
-          exit
-        end
-
+        opts.banner = "Usage: pf2 report [options] COMMAND"
         opts.on('-h', '--help', 'Prints this help') do
           puts opts
+          return 0
         end
-
         opts.on('-o', '--output FILE', 'Output file') do |path|
           options[:output_file] = path
         end
       end
       option_parser.parse!(argv)
 
-      profile = JSON.parse(File.read(
+      profile = JSON.parse(File.read(argv[0]), symbolize_names: true, max_nesting: false)
       report = JSON.generate(Pf2::Reporter.new(profile).emit)
 
       if options[:output_file]
@@ -38,5 +65,40 @@ module Pf2
 
       return 0
     end
+
+    def subcommand_serve(argv)
+      options = {}
+      option_parser = OptionParser.new do |opts|
+        opts.banner = "Usage: pf2 serve [options] COMMAND"
+        opts.on('-h', '--help', 'Prints this help') do
+          puts opts
+          return 0
+        end
+        opts.on('-b', '--bind ADDRESS', 'Address to bind') do |host|
+          options[:serve_host] = host
+        end
+        opts.on('-p', '--port PORT', '') do |port|
+          options[:serve_port] = port
+        end
+      end
+      option_parser.parse!(argv)
+
+      if argv.size == 0
+        # No subcommand was specified
+        STDERR.puts option_parser.help
+        return 1
+      end
+
+      # Inject the profiler (pf2/serve) into the target process via RUBYOPT (-r).
+      # This will have no effect if the target process is not Ruby.
+      env = {
+        'RUBYOPT' => '-rpf2/serve'
+      }
+      env['PF2_SERVE_HOST'] = options[:serve_host] if options[:serve_host]
+      env['PF2_SERVE_PORT'] = options[:serve_port] if options[:serve_port]
+      exec(env, *argv) # never returns if succesful
+
+      return 1
+    end
   end
 end
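
In practice the serve subcommand just wraps an arbitrary command line. A hedged illustration of what subcommand_serve reduces to (the target command "rackup" is only an example, not something the gem mandates):

    # Roughly equivalent to `pf2 serve -p 51502 rackup`:
    env = { 'RUBYOPT' => '-rpf2/serve', 'PF2_SERVE_PORT' => '51502' }
    exec(env, 'rackup')  # target boots with pf2/serve auto-required; non-Ruby targets ignore RUBYOPT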
data/lib/pf2/reporter.rb
CHANGED
@@ -71,6 +71,8 @@ module Pf2
     end
 
     def emit
+      x = weave_native_stack(@thread[:stack_tree])
+      @thread[:stack_tree] = x
       func_table = build_func_table
       frame_table = build_frame_table
       stack_table = build_stack_table(func_table, frame_table)
@@ -147,13 +149,13 @@ module Pf2
       }
 
       @thread[:frames].each.with_index do |(id, frame), i|
-        ret[:address] <<
+        ret[:address] << frame[:address].to_s
         ret[:category] << 1
         ret[:subcategory] << 1
         ret[:func] << i # TODO
         ret[:inner_window_id] << nil
         ret[:implementation] << nil
-        ret[:line] <<
+        ret[:line] << frame[:callsite_lineno]
         ret[:column] << nil
         ret[:optimizations] << nil
         ret[:inline_depth] << 0
@@ -184,8 +186,8 @@ module Pf2
         ret[:is_js] << !native
         ret[:relevant_for_js] << false
         ret[:resource] << -1
-        ret[:file_name] <<
-        ret[:line_number] <<
+        ret[:file_name] << string_id(frame[:file_name])
+        ret[:line_number] << frame[:function_first_lineno]
         ret[:column_number] << nil
 
         @func_id_map[id] = i
@@ -195,6 +197,105 @@ module Pf2
       ret
     end
 
+    # "Weave" the native stack into the Ruby stack.
+    #
+    # Strategy:
+    # - Split the stack into Ruby and Native parts
+    # - Start from the root of the Native stack
+    # - Dig in to the native stack until we hit a rb_vm_exec(), which marks a call into Ruby code
+    # - Switch to Ruby stack. Keep digging until we hit a Cfunc call, then switch back to Native stack
+    # - Repeat until we consume the entire stack
+    def weave_native_stack(stack_tree)
+      collected_paths = []
+      tree_to_array_of_paths(stack_tree, @thread[:frames], [], collected_paths)
+      collected_paths = collected_paths.map do |path|
+        next if path.size == 0
+
+        new_path = []
+        new_path << path.shift # root
+
+        # Split the stack into Ruby and Native parts
+        native_path, ruby_path = path.partition do |frame|
+          frame_id = frame[:frame_id]
+          @thread[:frames][frame_id][:entry_type] == 'Native'
+        end
+
+        mode = :native
+
+        loop do
+          break if ruby_path.size == 0 && native_path.size == 0
+
+          case mode
+          when :ruby
+            if ruby_path.size == 0
+              mode = :native
+              next
+            end
+
+            next_node = ruby_path[0]
+            new_path << ruby_path.shift
+            next_node_frame = @thread[:frames][next_node[:frame_id]]
+            if native_path.size > 0
+              # Search the remainder of the native stack for the same address
+              # Note: This isn't a very efficient way for the job... but it still works
+              ruby_addr = next_node_frame[:address]
+              native_path[0..].each do |native_node|
+                native_addr = @thread[:frames][native_node[:frame_id]][:address]
+                if ruby_addr && native_addr && ruby_addr == native_addr
+                  # A match has been found. Switch to native mode
+                  mode = :native
+                  break
+                end
+              end
+            end
+          when :native
+            if native_path.size == 0
+              mode = :ruby
+              next
+            end
+
+            # Dig until we meet a rb_vm_exec
+            next_node = native_path[0]
+            new_path << native_path.shift
+            if @thread[:frames][next_node[:frame_id]][:full_label] =~ /vm_exec_core/ # VM_EXEC in vm_exec.h
+              mode = :ruby
+            end
+          end
+        end
+
+        new_path
+      end
+
+      # reconstruct stack_tree
+      new_stack_tree = array_of_paths_to_tree(collected_paths)
+      new_stack_tree
+    end
+
+    def tree_to_array_of_paths(stack_tree, frames, path, collected_paths)
+      new_path = path + [{ frame_id: stack_tree[:frame_id], node_id: stack_tree[:node_id] }]
+      if stack_tree[:children].empty?
+        collected_paths << new_path
+      else
+        stack_tree[:children].each do |frame_id, child|
+          tree_to_array_of_paths(child, frames, new_path, collected_paths)
+        end
+      end
+    end
+
+    def array_of_paths_to_tree(paths)
+      new_stack_tree = { children: {}, node_id: 0, frame_id: 0 }
+      paths.each do |path|
+        current = new_stack_tree
+        path[1..].each do |frame|
+          frame_id = frame[:frame_id]
+          node_id = frame[:node_id]
+          current[:children][frame_id] ||= { children: {}, node_id: node_id, frame_id: frame_id }
+          current = current[:children][frame_id]
+        end
+      end
+      new_stack_tree
+    end
+
     def build_stack_table(func_table, frame_table)
       ret = {
         frame: [],
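
A note on the data shapes involved, as a hedged illustration with invented ids (not taken from the gem): each stack_tree node carries node_id, frame_id, and a children hash keyed by frame_id; tree_to_array_of_paths flattens the tree into root-to-leaf paths, and array_of_paths_to_tree rebuilds an equivalent tree from those paths after weave_native_stack has reordered each path.

    # Hypothetical tree with two frames below the root (ids are made up).
    stack_tree = {
      node_id: 0, frame_id: 0,
      children: {
        10 => {
          node_id: 1, frame_id: 10,
          children: {
            11 => { node_id: 2, frame_id: 11, children: {} },
          },
        },
      },
    }

    # tree_to_array_of_paths collects a single root-to-leaf path here:
    #   [{ frame_id: 0, node_id: 0 }, { frame_id: 10, node_id: 1 }, { frame_id: 11, node_id: 2 }]
    # and array_of_paths_to_tree rebuilds an equivalent nested hash from such paths,
    # which is how weave_native_stack reorders frames without losing tree structure.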
data/lib/pf2/serve.rb
ADDED
@@ -0,0 +1,60 @@
+require 'json'
+require 'logger'
+require 'uri'
+require 'webrick'
+
+require_relative '../pf2'
+require_relative './reporter'
+
+module Pf2
+  class Serve
+    CONFIG = {
+      Host: ENV.fetch('PF2_SERVE_HOST', 'localhost'),
+      Port: ENV.fetch('PF2_SERVE_PORT', '51502').to_i, # 1502 = 0xF2 (as in "Pf2")
+      Logger: Logger.new(nil),
+      AccessLog: [],
+    }
+
+    def self.start
+
+      # Ignore Bundler as in `bundle exec`.
+      if File.basename($PROGRAM_NAME) == 'bundle' && ARGV.first == 'exec'
+        return
+      end
+
+      server = WEBrick::HTTPServer.new(CONFIG)
+      server.mount_proc('/profile') do |req, res|
+        profile = Pf2.stop
+        profile = JSON.parse(profile, symbolize_names: true, max_nesting: false)
+        res.header['Content-Type'] = 'application/json'
+        res.header['Access-Control-Allow-Origin'] = '*'
+        res.body = JSON.generate(Pf2::Reporter.new((profile)).emit)
+        Pf2.start
+      end
+
+      Pf2.start
+
+      Thread.new do
+        hostport = "#{server.config[:Host]}:#{server.config[:Port]}"
+        # Print host:port to trigger VS Code's auto port-forwarding feature
+        STDERR.puts "[Pf2] Listening on #{hostport}."
+        STDERR.puts "[Pf2] Open https://profiler.firefox.com/from-url/#{URI.encode_www_form_component("http://#{hostport}/profile")} for visualization."
+        STDERR.puts ""
+        server.start
+      end
+    end
+
+    def self.at_exit
+      STDERR.puts ""
+      STDERR.puts "[Pf2] Script execution complete (Pf2 server is still listening). Hit Ctrl-C to quit."
+
+      # Allow the user to download the profile after the target program exits
+      sleep
+    end
+  end
+end
+
+Pf2::Serve.start
+at_exit do
+  Pf2::Serve.at_exit
+end
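
Once a target process is running with pf2/serve loaded (for example via the serve subcommand above), the profile can be pulled over plain HTTP. A minimal client sketch, assuming the default localhost:51502 from CONFIG above:

    require 'net/http'

    # GET /profile makes the target stop the current session, serialize it through
    # Pf2::Reporter (Firefox Profiler format), restart profiling, and return the JSON.
    res = Net::HTTP.get_response(URI('http://localhost:51502/profile'))
    File.write('report.json', res.body)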