ruby-prof 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGES +523 -0
- data/LICENSE +25 -0
- data/README.rdoc +5 -0
- data/Rakefile +110 -0
- data/bin/ruby-prof +380 -0
- data/bin/ruby-prof-check-trace +45 -0
- data/ext/ruby_prof/extconf.rb +36 -0
- data/ext/ruby_prof/rp_allocation.c +292 -0
- data/ext/ruby_prof/rp_allocation.h +31 -0
- data/ext/ruby_prof/rp_call_info.c +283 -0
- data/ext/ruby_prof/rp_call_info.h +35 -0
- data/ext/ruby_prof/rp_measure_allocations.c +52 -0
- data/ext/ruby_prof/rp_measure_memory.c +42 -0
- data/ext/ruby_prof/rp_measure_process_time.c +63 -0
- data/ext/ruby_prof/rp_measure_wall_time.c +62 -0
- data/ext/ruby_prof/rp_measurement.c +236 -0
- data/ext/ruby_prof/rp_measurement.h +49 -0
- data/ext/ruby_prof/rp_method.c +642 -0
- data/ext/ruby_prof/rp_method.h +70 -0
- data/ext/ruby_prof/rp_profile.c +881 -0
- data/ext/ruby_prof/rp_profile.h +36 -0
- data/ext/ruby_prof/rp_stack.c +196 -0
- data/ext/ruby_prof/rp_stack.h +56 -0
- data/ext/ruby_prof/rp_thread.c +338 -0
- data/ext/ruby_prof/rp_thread.h +36 -0
- data/ext/ruby_prof/ruby_prof.c +48 -0
- data/ext/ruby_prof/ruby_prof.h +17 -0
- data/ext/ruby_prof/vc/ruby_prof.sln +31 -0
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +143 -0
- data/lib/ruby-prof.rb +53 -0
- data/lib/ruby-prof/assets/call_stack_printer.css.html +117 -0
- data/lib/ruby-prof/assets/call_stack_printer.js.html +385 -0
- data/lib/ruby-prof/assets/call_stack_printer.png +0 -0
- data/lib/ruby-prof/assets/graph_printer.html.erb +356 -0
- data/lib/ruby-prof/call_info.rb +57 -0
- data/lib/ruby-prof/call_info_visitor.rb +38 -0
- data/lib/ruby-prof/compatibility.rb +109 -0
- data/lib/ruby-prof/exclude_common_methods.rb +198 -0
- data/lib/ruby-prof/measurement.rb +14 -0
- data/lib/ruby-prof/method_info.rb +90 -0
- data/lib/ruby-prof/printers/abstract_printer.rb +118 -0
- data/lib/ruby-prof/printers/call_info_printer.rb +51 -0
- data/lib/ruby-prof/printers/call_stack_printer.rb +269 -0
- data/lib/ruby-prof/printers/call_tree_printer.rb +151 -0
- data/lib/ruby-prof/printers/dot_printer.rb +132 -0
- data/lib/ruby-prof/printers/flat_printer.rb +52 -0
- data/lib/ruby-prof/printers/graph_html_printer.rb +64 -0
- data/lib/ruby-prof/printers/graph_printer.rb +114 -0
- data/lib/ruby-prof/printers/multi_printer.rb +127 -0
- data/lib/ruby-prof/profile.rb +33 -0
- data/lib/ruby-prof/rack.rb +171 -0
- data/lib/ruby-prof/task.rb +147 -0
- data/lib/ruby-prof/thread.rb +35 -0
- data/lib/ruby-prof/version.rb +3 -0
- data/lib/unprof.rb +10 -0
- data/ruby-prof.gemspec +58 -0
- data/test/abstract_printer_test.rb +26 -0
- data/test/alias_test.rb +129 -0
- data/test/basic_test.rb +129 -0
- data/test/call_info_visitor_test.rb +31 -0
- data/test/duplicate_names_test.rb +32 -0
- data/test/dynamic_method_test.rb +53 -0
- data/test/enumerable_test.rb +21 -0
- data/test/exceptions_test.rb +24 -0
- data/test/exclude_methods_test.rb +146 -0
- data/test/exclude_threads_test.rb +53 -0
- data/test/line_number_test.rb +161 -0
- data/test/marshal_test.rb +119 -0
- data/test/measure_allocations.rb +30 -0
- data/test/measure_allocations_test.rb +385 -0
- data/test/measure_allocations_trace_test.rb +385 -0
- data/test/measure_memory_trace_test.rb +756 -0
- data/test/measure_process_time_test.rb +849 -0
- data/test/measure_times.rb +54 -0
- data/test/measure_wall_time_test.rb +459 -0
- data/test/multi_printer_test.rb +71 -0
- data/test/no_method_class_test.rb +15 -0
- data/test/parser_timings.rb +24 -0
- data/test/pause_resume_test.rb +166 -0
- data/test/prime.rb +56 -0
- data/test/printer_call_tree_test.rb +31 -0
- data/test/printer_flat_test.rb +68 -0
- data/test/printer_graph_html_test.rb +60 -0
- data/test/printer_graph_test.rb +41 -0
- data/test/printers_test.rb +141 -0
- data/test/printing_recursive_graph_test.rb +81 -0
- data/test/rack_test.rb +157 -0
- data/test/recursive_test.rb +210 -0
- data/test/singleton_test.rb +38 -0
- data/test/stack_printer_test.rb +64 -0
- data/test/start_stop_test.rb +109 -0
- data/test/test_helper.rb +24 -0
- data/test/thread_test.rb +144 -0
- data/test/unique_call_path_test.rb +190 -0
- data/test/yarv_test.rb +56 -0
- metadata +189 -0
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
   Please see the LICENSE file for copyright and distribution information */

#ifndef __RP_PROFILE_H__
#define __RP_PROFILE_H__

#include "ruby_prof.h"
#include "rp_measurement.h"
#include "rp_thread.h"

extern VALUE cProfile;

/* State for one profiling run (wrapped by the Ruby RubyProf::Profile object). */
typedef struct
{
    VALUE running; /* Qtrue while the profile is collecting data */
    VALUE paused;  /* Qtrue while the profile is paused */

    prof_measurer_t* measurer; /* presumably the selected clock implementation — confirm in rp_measurement.h */
    VALUE threads;             /* Ruby-visible collection of thread results */

    VALUE tracepoints; /* presumably the TracePoint objects driving event hooks — confirm in rp_profile.c */

    st_table* threads_tbl;           /* fiber VALUE -> thread_data_t* (see rp_thread.c) */
    st_table* exclude_threads_tbl;   /* threads whose events are not traced */
    st_table* include_threads_tbl;   /* when set, only these threads are traced */
    st_table* exclude_methods_tbl;   /* methods filtered out of the results */
    thread_data_t* last_thread_data; /* thread seen by the previous event (context-switch tracking) */
    double measurement_at_pause_resume; /* clock value captured when pause/resume was requested */
    bool allow_exceptions;              /* whether exceptions raised while profiling propagate */
} prof_profile_t;

void rp_init_profile(void);
prof_profile_t* prof_get_profile(VALUE self);

#endif //__RP_PROFILE_H__
|
|
@@ -0,0 +1,196 @@
|
|
|
1
|
+
/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
|
|
2
|
+
Please see the LICENSE file for copyright and distribution information */
|
|
3
|
+
|
|
4
|
+
#include "rp_stack.h"
|
|
5
|
+
|
|
6
|
+
#define INITIAL_STACK_SIZE 32
|
|
7
|
+
|
|
8
|
+
void
|
|
9
|
+
prof_frame_pause(prof_frame_t *frame, double current_measurement)
|
|
10
|
+
{
|
|
11
|
+
if (frame && prof_frame_is_unpaused(frame))
|
|
12
|
+
frame->pause_time = current_measurement;
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
void
|
|
16
|
+
prof_frame_unpause(prof_frame_t *frame, double current_measurement)
|
|
17
|
+
{
|
|
18
|
+
if (frame && prof_frame_is_paused(frame))
|
|
19
|
+
{
|
|
20
|
+
frame->dead_time += (current_measurement - frame->pause_time);
|
|
21
|
+
frame->pause_time = -1;
|
|
22
|
+
}
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
/* Creates a stack of prof_frame_t to keep track
|
|
26
|
+
of timings for active methods. */
|
|
27
|
+
prof_stack_t *
|
|
28
|
+
prof_stack_create()
|
|
29
|
+
{
|
|
30
|
+
prof_stack_t *stack = ALLOC(prof_stack_t);
|
|
31
|
+
stack->start = ZALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
|
|
32
|
+
stack->ptr = stack->start;
|
|
33
|
+
stack->end = stack->start + INITIAL_STACK_SIZE;
|
|
34
|
+
|
|
35
|
+
return stack;
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
void
|
|
39
|
+
prof_stack_free(prof_stack_t *stack)
|
|
40
|
+
{
|
|
41
|
+
xfree(stack->start);
|
|
42
|
+
xfree(stack);
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
prof_frame_t *
|
|
46
|
+
prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
|
|
47
|
+
{
|
|
48
|
+
prof_frame_t *result;
|
|
49
|
+
prof_frame_t* parent_frame;
|
|
50
|
+
|
|
51
|
+
/* Is there space on the stack? If not, double
|
|
52
|
+
its size. */
|
|
53
|
+
if (stack->ptr == stack->end - 1)
|
|
54
|
+
{
|
|
55
|
+
size_t len = stack->ptr - stack->start;
|
|
56
|
+
size_t new_capacity = (stack->end - stack->start) * 2;
|
|
57
|
+
REALLOC_N(stack->start, prof_frame_t, new_capacity);
|
|
58
|
+
|
|
59
|
+
/* Memory just got moved, reset pointers */
|
|
60
|
+
stack->ptr = stack->start + len;
|
|
61
|
+
stack->end = stack->start + new_capacity;
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
parent_frame = stack->ptr;
|
|
65
|
+
stack->ptr++;
|
|
66
|
+
|
|
67
|
+
result = stack->ptr;
|
|
68
|
+
result->call_info = call_info;
|
|
69
|
+
result->call_info->depth = (int)(stack->ptr - stack->start); // shortening of 64 bit into 32;
|
|
70
|
+
result->passes = 0;
|
|
71
|
+
|
|
72
|
+
result->start_time = measurement;
|
|
73
|
+
result->pause_time = -1; // init as not paused.
|
|
74
|
+
result->switch_time = 0;
|
|
75
|
+
result->wait_time = 0;
|
|
76
|
+
result->child_time = 0;
|
|
77
|
+
result->dead_time = 0;
|
|
78
|
+
result->source_file = Qnil;
|
|
79
|
+
result->source_line = 0;
|
|
80
|
+
|
|
81
|
+
call_info->measurement->called++;
|
|
82
|
+
call_info->visits++;
|
|
83
|
+
|
|
84
|
+
if (call_info->method->visits > 0)
|
|
85
|
+
{
|
|
86
|
+
call_info->method->recursive = true;
|
|
87
|
+
}
|
|
88
|
+
call_info->method->measurement->called++;
|
|
89
|
+
call_info->method->visits++;
|
|
90
|
+
|
|
91
|
+
// Unpause the parent frame, if it exists.
|
|
92
|
+
// If currently paused then:
|
|
93
|
+
// 1) The child frame will begin paused.
|
|
94
|
+
// 2) The parent will inherit the child's dead time.
|
|
95
|
+
prof_frame_unpause(parent_frame, measurement);
|
|
96
|
+
|
|
97
|
+
if (paused)
|
|
98
|
+
{
|
|
99
|
+
prof_frame_pause(result, measurement);
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
// Return the result
|
|
103
|
+
return result;
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
prof_frame_t *
|
|
107
|
+
prof_stack_pop(prof_stack_t *stack, double measurement)
|
|
108
|
+
{
|
|
109
|
+
prof_frame_t *frame;
|
|
110
|
+
prof_frame_t *parent_frame;
|
|
111
|
+
prof_call_info_t *call_info;
|
|
112
|
+
|
|
113
|
+
double total_time;
|
|
114
|
+
double self_time;
|
|
115
|
+
|
|
116
|
+
if (stack->ptr == stack->start)
|
|
117
|
+
return NULL;
|
|
118
|
+
|
|
119
|
+
frame = stack->ptr;
|
|
120
|
+
|
|
121
|
+
/* Match passes until we reach the frame itself. */
|
|
122
|
+
if (prof_frame_is_pass(frame))
|
|
123
|
+
{
|
|
124
|
+
frame->passes--;
|
|
125
|
+
/* Additional frames can be consumed. See pop_frames(). */
|
|
126
|
+
return frame;
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
/* Consume this frame. */
|
|
130
|
+
stack->ptr--;
|
|
131
|
+
|
|
132
|
+
parent_frame = stack->ptr;
|
|
133
|
+
|
|
134
|
+
/* Calculate the total time this method took */
|
|
135
|
+
prof_frame_unpause(frame, measurement);
|
|
136
|
+
|
|
137
|
+
total_time = measurement - frame->start_time - frame->dead_time;
|
|
138
|
+
self_time = total_time - frame->child_time - frame->wait_time;
|
|
139
|
+
|
|
140
|
+
/* Update information about the current method */
|
|
141
|
+
call_info = frame->call_info;
|
|
142
|
+
|
|
143
|
+
// Update method measurement
|
|
144
|
+
call_info->method->measurement->self_time += self_time;
|
|
145
|
+
call_info->method->measurement->wait_time += frame->wait_time;
|
|
146
|
+
if (call_info->method->visits == 1)
|
|
147
|
+
call_info->method->measurement->total_time += total_time;
|
|
148
|
+
|
|
149
|
+
call_info->method->visits--;
|
|
150
|
+
|
|
151
|
+
// Update method measurement
|
|
152
|
+
call_info->measurement->self_time += self_time;
|
|
153
|
+
call_info->measurement->wait_time += frame->wait_time;
|
|
154
|
+
if (call_info->visits == 1)
|
|
155
|
+
call_info->measurement->total_time += total_time;
|
|
156
|
+
|
|
157
|
+
call_info->visits--;
|
|
158
|
+
|
|
159
|
+
if (parent_frame)
|
|
160
|
+
{
|
|
161
|
+
parent_frame->child_time += total_time;
|
|
162
|
+
parent_frame->dead_time += frame->dead_time;
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
return frame;
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
prof_frame_t *
|
|
169
|
+
prof_stack_pass(prof_stack_t *stack)
|
|
170
|
+
{
|
|
171
|
+
prof_frame_t* frame = stack->ptr;
|
|
172
|
+
if (frame)
|
|
173
|
+
{
|
|
174
|
+
frame->passes++;
|
|
175
|
+
}
|
|
176
|
+
return frame;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
prof_method_t*
|
|
180
|
+
prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line)
|
|
181
|
+
{
|
|
182
|
+
prof_frame_t* frame = stack->ptr;
|
|
183
|
+
while (frame >= stack->start)
|
|
184
|
+
{
|
|
185
|
+
if (!frame->call_info)
|
|
186
|
+
return NULL;
|
|
187
|
+
|
|
188
|
+
if (rb_str_equal(source_file, frame->call_info->method->source_file) &&
|
|
189
|
+
source_line >= frame->call_info->method->source_line)
|
|
190
|
+
{
|
|
191
|
+
return frame->call_info->method;
|
|
192
|
+
}
|
|
193
|
+
frame--;
|
|
194
|
+
}
|
|
195
|
+
return NULL;
|
|
196
|
+
}
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
   Please see the LICENSE file for copyright and distribution information */

#ifndef __RP_STACK__
#define __RP_STACK__

#include "ruby_prof.h"
#include "rp_call_info.h"

/* Temporary object that maintains profiling information
   for active methods. They are created and destroyed
   as the program moves up and down its stack. */
typedef struct
{
    /* Caching prof_method_t values significantly
       increases performance. */
    prof_call_info_t *call_info;

    VALUE source_file;
    unsigned int source_line;
    unsigned int passes; /* Count of "pass" frames, _after_ this one. */

    double start_time;  /* Measurement when the frame was pushed */
    double switch_time; /* Time at switch to different thread */
    double wait_time;   /* Time spent waiting on other threads */
    double child_time;  /* Time consumed by callees */
    double pause_time;  /* Time pause() was initiated; negative means not paused */
    double dead_time;   /* Time to ignore (i.e. total amount of time between pause/resume blocks) */
} prof_frame_t;

#define prof_frame_is_real(f) ((f)->passes == 0)
#define prof_frame_is_pass(f) ((f)->passes > 0)

/* Fixed: argument now parenthesized so the macros expand correctly when
   given an expression argument (e.g. prof_frame_is_paused(p + 1)). */
#define prof_frame_is_paused(f) ((f)->pause_time >= 0)
#define prof_frame_is_unpaused(f) ((f)->pause_time < 0)

void prof_frame_pause(prof_frame_t*, double current_measurement);
void prof_frame_unpause(prof_frame_t*, double current_measurement);

/* Current stack of active methods.*/
typedef struct
{
    prof_frame_t *start; /* Base of the frame buffer */
    prof_frame_t *end;   /* One past the last allocated slot */
    prof_frame_t *ptr;   /* Top (most recently pushed) frame */
} prof_stack_t;

prof_stack_t *prof_stack_create(void);
void prof_stack_free(prof_stack_t *stack);

prof_frame_t *prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused);
prof_frame_t *prof_stack_pop(prof_stack_t *stack, double measurement);
prof_frame_t *prof_stack_pass(prof_stack_t *stack);
prof_method_t *prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line);

#endif //__RP_STACK__
|
|
@@ -0,0 +1,338 @@
|
|
|
1
|
+
/* Copyright (C) 2005-2013 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
|
|
2
|
+
Please see the LICENSE file for copyright and distribution information */
|
|
3
|
+
|
|
4
|
+
/* Document-class: RubyProf::Thread
|
|
5
|
+
|
|
6
|
+
The Thread class contains profile results for a single fiber (note a Ruby thread can run multiple fibers).
|
|
7
|
+
You cannot create an instance of RubyProf::Thread, instead you access it from a RubyProf::Profile object.
|
|
8
|
+
|
|
9
|
+
profile = RubyProf::Profile.profile do
|
|
10
|
+
...
|
|
11
|
+
end
|
|
12
|
+
|
|
13
|
+
profile.threads.each do |thread|
|
|
14
|
+
thread.root_methods.sort.each do |method|
|
|
15
|
+
puts method.total_time
|
|
16
|
+
end
|
|
17
|
+
end
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
#include "rp_thread.h"
|
|
21
|
+
#include "rp_profile.h"
|
|
22
|
+
|
|
23
|
+
VALUE cRpThread;
|
|
24
|
+
|
|
25
|
+
/* ====== thread_data_t ====== */
|
|
26
|
+
thread_data_t*
|
|
27
|
+
thread_data_create(void)
|
|
28
|
+
{
|
|
29
|
+
thread_data_t* result = ALLOC(thread_data_t);
|
|
30
|
+
result->stack = prof_stack_create();
|
|
31
|
+
result->method_table = method_table_create();
|
|
32
|
+
result->object = Qnil;
|
|
33
|
+
result->methods = Qnil;
|
|
34
|
+
result->fiber_id = Qnil;
|
|
35
|
+
result->thread_id = Qnil;
|
|
36
|
+
result->trace = true;
|
|
37
|
+
result->fiber = Qnil;
|
|
38
|
+
return result;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
static void
|
|
42
|
+
prof_thread_free(thread_data_t* thread_data)
|
|
43
|
+
{
|
|
44
|
+
method_table_free(thread_data->method_table);
|
|
45
|
+
prof_stack_free(thread_data->stack);
|
|
46
|
+
|
|
47
|
+
xfree(thread_data);
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
static int
|
|
51
|
+
mark_methods(st_data_t key, st_data_t value, st_data_t result)
|
|
52
|
+
{
|
|
53
|
+
prof_method_t *method = (prof_method_t *) value;
|
|
54
|
+
prof_method_mark(method);
|
|
55
|
+
return ST_CONTINUE;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
size_t
|
|
59
|
+
prof_thread_size(const void *data)
|
|
60
|
+
{
|
|
61
|
+
return sizeof(prof_call_info_t);
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
void
|
|
65
|
+
prof_thread_mark(void *data)
|
|
66
|
+
{
|
|
67
|
+
thread_data_t *thread = (thread_data_t*)data;
|
|
68
|
+
|
|
69
|
+
if (thread->object != Qnil)
|
|
70
|
+
rb_gc_mark(thread->object);
|
|
71
|
+
|
|
72
|
+
if (thread->methods != Qnil)
|
|
73
|
+
rb_gc_mark(thread->methods);
|
|
74
|
+
|
|
75
|
+
if (thread->fiber_id != Qnil)
|
|
76
|
+
rb_gc_mark(thread->fiber_id);
|
|
77
|
+
|
|
78
|
+
if (thread->thread_id != Qnil)
|
|
79
|
+
rb_gc_mark(thread->thread_id);
|
|
80
|
+
|
|
81
|
+
st_foreach(thread->method_table, mark_methods, 0);
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
static const rb_data_type_t thread_type =
|
|
85
|
+
{
|
|
86
|
+
.wrap_struct_name = "ThreadInfo",
|
|
87
|
+
.function =
|
|
88
|
+
{
|
|
89
|
+
.dmark = prof_thread_mark,
|
|
90
|
+
.dfree = NULL, /* The profile class frees its thread table which frees each underlying thread_data instance */
|
|
91
|
+
.dsize = prof_thread_size,
|
|
92
|
+
},
|
|
93
|
+
.data = NULL,
|
|
94
|
+
.flags = RUBY_TYPED_FREE_IMMEDIATELY
|
|
95
|
+
};
|
|
96
|
+
|
|
97
|
+
VALUE
|
|
98
|
+
prof_thread_wrap(thread_data_t *thread)
|
|
99
|
+
{
|
|
100
|
+
if (thread->object == Qnil)
|
|
101
|
+
{
|
|
102
|
+
thread->object = TypedData_Wrap_Struct(cRpThread, &thread_type, thread);
|
|
103
|
+
}
|
|
104
|
+
return thread->object;
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
static VALUE
|
|
108
|
+
prof_thread_allocate(VALUE klass)
|
|
109
|
+
{
|
|
110
|
+
thread_data_t* thread_data = thread_data_create();
|
|
111
|
+
thread_data->object = prof_thread_wrap(thread_data);
|
|
112
|
+
return thread_data->object;
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
static thread_data_t*
|
|
116
|
+
prof_get_thread(VALUE self)
|
|
117
|
+
{
|
|
118
|
+
/* Can't use Data_Get_Struct because that triggers the event hook
|
|
119
|
+
ending up in endless recursion. */
|
|
120
|
+
thread_data_t* result = DATA_PTR(self);
|
|
121
|
+
if (!result)
|
|
122
|
+
rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
|
|
123
|
+
|
|
124
|
+
return result;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
/* ====== Thread Table ====== */
|
|
128
|
+
/* The thread table is hash keyed on ruby thread_id that stores instances
|
|
129
|
+
of thread_data_t. */
|
|
130
|
+
|
|
131
|
+
st_table *
|
|
132
|
+
threads_table_create()
|
|
133
|
+
{
|
|
134
|
+
return st_init_numtable();
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
static int
|
|
138
|
+
thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
|
|
139
|
+
{
|
|
140
|
+
prof_thread_free((thread_data_t*)value);
|
|
141
|
+
return ST_CONTINUE;
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
void
|
|
145
|
+
threads_table_free(st_table *table)
|
|
146
|
+
{
|
|
147
|
+
st_foreach(table, thread_table_free_iterator, 0);
|
|
148
|
+
st_free_table(table);
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
thread_data_t *
|
|
152
|
+
threads_table_lookup(void *prof, VALUE fiber)
|
|
153
|
+
{
|
|
154
|
+
prof_profile_t *profile = prof;
|
|
155
|
+
thread_data_t* result = NULL;
|
|
156
|
+
st_data_t val;
|
|
157
|
+
|
|
158
|
+
if (st_lookup(profile->threads_tbl, fiber, &val))
|
|
159
|
+
{
|
|
160
|
+
result = (thread_data_t*)val;
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
return result;
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
thread_data_t*
|
|
167
|
+
threads_table_insert(void *prof, VALUE fiber)
|
|
168
|
+
{
|
|
169
|
+
prof_profile_t *profile = prof;
|
|
170
|
+
thread_data_t *result = thread_data_create();
|
|
171
|
+
VALUE thread = rb_thread_current();
|
|
172
|
+
|
|
173
|
+
result->fiber = fiber;
|
|
174
|
+
result->fiber_id = rb_obj_id(fiber);
|
|
175
|
+
result->thread_id = rb_obj_id(thread);
|
|
176
|
+
st_insert(profile->threads_tbl, (st_data_t)fiber, (st_data_t)result);
|
|
177
|
+
|
|
178
|
+
// Are we tracing this thread?
|
|
179
|
+
if (profile->include_threads_tbl && !st_lookup(profile->include_threads_tbl, thread, 0))
|
|
180
|
+
{
|
|
181
|
+
result->trace= false;
|
|
182
|
+
}
|
|
183
|
+
else if (profile->exclude_threads_tbl && st_lookup(profile->exclude_threads_tbl, thread, 0))
|
|
184
|
+
{
|
|
185
|
+
result->trace = false;
|
|
186
|
+
}
|
|
187
|
+
else
|
|
188
|
+
{
|
|
189
|
+
result->trace = true;
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
return result;
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
void
|
|
196
|
+
switch_thread(void *prof, thread_data_t *thread_data, double measurement)
|
|
197
|
+
{
|
|
198
|
+
prof_profile_t *profile = prof;
|
|
199
|
+
|
|
200
|
+
/* Get current frame for this thread */
|
|
201
|
+
prof_frame_t* frame = thread_data->stack->ptr;
|
|
202
|
+
frame->wait_time += measurement - frame->switch_time;
|
|
203
|
+
frame->switch_time = measurement;
|
|
204
|
+
|
|
205
|
+
/* Save on the last thread the time of the context switch
|
|
206
|
+
and reset this thread's last context switch to 0.*/
|
|
207
|
+
if (profile->last_thread_data)
|
|
208
|
+
{
|
|
209
|
+
prof_frame_t* last_frame = profile->last_thread_data->stack->ptr;
|
|
210
|
+
if (last_frame)
|
|
211
|
+
last_frame->switch_time = measurement;
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
profile->last_thread_data = thread_data;
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
/* st_foreach callback: pauses the top frame of one profiled thread at the
   clock value captured when the profile was paused. */
int pause_thread(st_data_t key, st_data_t value, st_data_t data)
{
    thread_data_t* thread_data = (thread_data_t*)value;
    prof_profile_t* profile = (prof_profile_t*)data;

    prof_frame_pause(thread_data->stack->ptr, profile->measurement_at_pause_resume);

    return ST_CONTINUE;
}
|
|
227
|
+
|
|
228
|
+
/* st_foreach callback: unpauses the top frame of one profiled thread at the
   clock value captured when the profile was resumed. */
int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
{
    thread_data_t* thread_data = (thread_data_t*)value;
    prof_profile_t* profile = (prof_profile_t*)data;

    prof_frame_unpause(thread_data->stack->ptr, profile->measurement_at_pause_resume);

    return ST_CONTINUE;
}
|
|
238
|
+
|
|
239
|
+
static int
|
|
240
|
+
collect_methods(st_data_t key, st_data_t value, st_data_t result)
|
|
241
|
+
{
|
|
242
|
+
/* Called for each method stored in a thread's method table.
|
|
243
|
+
We want to store the method info information into an array.*/
|
|
244
|
+
VALUE methods = (VALUE) result;
|
|
245
|
+
prof_method_t *method = (prof_method_t *) value;
|
|
246
|
+
|
|
247
|
+
if (!method->excluded)
|
|
248
|
+
{
|
|
249
|
+
rb_ary_push(methods, prof_method_wrap(method));
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
return ST_CONTINUE;
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
/* call-seq:
|
|
256
|
+
id -> number
|
|
257
|
+
|
|
258
|
+
Returns the thread id of this thread. */
|
|
259
|
+
static VALUE
|
|
260
|
+
prof_thread_id(VALUE self)
|
|
261
|
+
{
|
|
262
|
+
thread_data_t* thread = prof_get_thread(self);
|
|
263
|
+
return thread->thread_id;
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
/* call-seq:
|
|
267
|
+
fiber_id -> number
|
|
268
|
+
|
|
269
|
+
Returns the fiber id of this thread. */
|
|
270
|
+
static VALUE
|
|
271
|
+
prof_fiber_id(VALUE self)
|
|
272
|
+
{
|
|
273
|
+
thread_data_t* thread = prof_get_thread(self);
|
|
274
|
+
return thread->fiber_id;
|
|
275
|
+
}
|
|
276
|
+
|
|
277
|
+
/* call-seq:
|
|
278
|
+
methods -> [RubyProf::MethodInfo]
|
|
279
|
+
|
|
280
|
+
Returns an array of methods that were called from this
|
|
281
|
+
thread during program execution. */
|
|
282
|
+
static VALUE
|
|
283
|
+
prof_thread_methods(VALUE self)
|
|
284
|
+
{
|
|
285
|
+
thread_data_t* thread = prof_get_thread(self);
|
|
286
|
+
if (thread->methods == Qnil)
|
|
287
|
+
{
|
|
288
|
+
thread->methods = rb_ary_new();
|
|
289
|
+
st_foreach(thread->method_table, collect_methods, thread->methods);
|
|
290
|
+
}
|
|
291
|
+
return thread->methods;
|
|
292
|
+
}
|
|
293
|
+
|
|
294
|
+
/* :nodoc: */
|
|
295
|
+
static VALUE
|
|
296
|
+
prof_thread_dump(VALUE self)
|
|
297
|
+
{
|
|
298
|
+
thread_data_t* thread_data = DATA_PTR(self);
|
|
299
|
+
|
|
300
|
+
VALUE result = rb_hash_new();
|
|
301
|
+
rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
|
|
302
|
+
rb_hash_aset(result, ID2SYM(rb_intern("methods")), prof_thread_methods(self));
|
|
303
|
+
|
|
304
|
+
return result;
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
/* :nodoc: */
|
|
308
|
+
static VALUE
|
|
309
|
+
prof_thread_load(VALUE self, VALUE data)
|
|
310
|
+
{
|
|
311
|
+
thread_data_t* thread_data = DATA_PTR(self);
|
|
312
|
+
thread_data->object = self;
|
|
313
|
+
|
|
314
|
+
thread_data->fiber_id = rb_hash_aref(data, ID2SYM(rb_intern("fiber_id")));
|
|
315
|
+
VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
|
|
316
|
+
|
|
317
|
+
for (int i = 0; i < rb_array_len(methods); i++)
|
|
318
|
+
{
|
|
319
|
+
VALUE method = rb_ary_entry(methods, i);
|
|
320
|
+
prof_method_t *method_data = DATA_PTR(method);
|
|
321
|
+
method_table_insert(thread_data->method_table, method_data->key, method_data);
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
return data;
|
|
325
|
+
}
|
|
326
|
+
|
|
327
|
+
void rp_init_thread(void)
|
|
328
|
+
{
|
|
329
|
+
cRpThread = rb_define_class_under(mProf, "Thread", rb_cData);
|
|
330
|
+
rb_undef_method(CLASS_OF(cRpThread), "new");
|
|
331
|
+
rb_define_alloc_func(cRpThread, prof_thread_allocate);
|
|
332
|
+
|
|
333
|
+
rb_define_method(cRpThread, "id", prof_thread_id, 0);
|
|
334
|
+
rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
|
|
335
|
+
rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
|
|
336
|
+
rb_define_method(cRpThread, "_dump_data", prof_thread_dump, 0);
|
|
337
|
+
rb_define_method(cRpThread, "_load_data", prof_thread_load, 1);
|
|
338
|
+
}
|