ruby-prof 1.1.0-x64-mingw32 → 1.4.2-x64-mingw32
- checksums.yaml +4 -4
- data/CHANGES +48 -1
- data/Rakefile +2 -14
- data/bin/ruby-prof +100 -152
- data/ext/ruby_prof/extconf.rb +8 -28
- data/ext/ruby_prof/rp_aggregate_call_tree.c +59 -0
- data/ext/ruby_prof/rp_aggregate_call_tree.h +13 -0
- data/ext/ruby_prof/rp_allocation.c +67 -59
- data/ext/ruby_prof/rp_allocation.h +3 -3
- data/ext/ruby_prof/rp_call_tree.c +369 -0
- data/ext/ruby_prof/rp_call_tree.h +43 -0
- data/ext/ruby_prof/rp_call_trees.c +288 -0
- data/ext/ruby_prof/rp_call_trees.h +28 -0
- data/ext/ruby_prof/rp_measure_allocations.c +12 -14
- data/ext/ruby_prof/rp_measure_process_time.c +12 -14
- data/ext/ruby_prof/rp_measure_wall_time.c +17 -15
- data/ext/ruby_prof/rp_measurement.c +47 -40
- data/ext/ruby_prof/rp_measurement.h +7 -7
- data/ext/ruby_prof/rp_method.c +116 -255
- data/ext/ruby_prof/rp_method.h +31 -39
- data/ext/ruby_prof/rp_profile.c +316 -303
- data/ext/ruby_prof/rp_profile.h +1 -3
- data/ext/ruby_prof/rp_stack.c +122 -106
- data/ext/ruby_prof/rp_stack.h +17 -20
- data/ext/ruby_prof/rp_thread.c +136 -111
- data/ext/ruby_prof/rp_thread.h +12 -9
- data/ext/ruby_prof/ruby_prof.c +27 -23
- data/ext/ruby_prof/ruby_prof.h +9 -0
- data/ext/ruby_prof/vc/ruby_prof.sln +8 -0
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +22 -7
- data/lib/2.7/ruby_prof.so +0 -0
- data/lib/ruby-prof.rb +5 -5
- data/lib/ruby-prof/assets/call_stack_printer.html.erb +4 -7
- data/lib/ruby-prof/assets/graph_printer.html.erb +5 -6
- data/lib/ruby-prof/{call_info.rb → call_tree.rb} +6 -6
- data/lib/ruby-prof/call_tree_visitor.rb +36 -0
- data/lib/ruby-prof/compatibility.rb +0 -10
- data/lib/ruby-prof/measurement.rb +5 -2
- data/lib/ruby-prof/method_info.rb +3 -15
- data/lib/ruby-prof/printers/abstract_printer.rb +12 -2
- data/lib/ruby-prof/printers/call_info_printer.rb +12 -10
- data/lib/ruby-prof/printers/call_stack_printer.rb +20 -22
- data/lib/ruby-prof/printers/call_tree_printer.rb +1 -1
- data/lib/ruby-prof/printers/dot_printer.rb +3 -3
- data/lib/ruby-prof/printers/flat_printer.rb +3 -2
- data/lib/ruby-prof/printers/graph_printer.rb +4 -5
- data/lib/ruby-prof/printers/multi_printer.rb +2 -2
- data/lib/ruby-prof/profile.rb +8 -4
- data/lib/ruby-prof/rack.rb +51 -127
- data/lib/ruby-prof/thread.rb +3 -18
- data/lib/ruby-prof/version.rb +1 -1
- data/ruby-prof.gemspec +7 -0
- data/test/alias_test.rb +42 -45
- data/test/basic_test.rb +0 -86
- data/test/{call_info_visitor_test.rb → call_tree_visitor_test.rb} +6 -5
- data/test/call_trees_test.rb +66 -0
- data/test/exclude_methods_test.rb +17 -12
- data/test/fiber_test.rb +95 -39
- data/test/gc_test.rb +36 -42
- data/test/inverse_call_tree_test.rb +175 -0
- data/test/line_number_test.rb +67 -70
- data/test/marshal_test.rb +7 -13
- data/test/measure_allocations_test.rb +224 -234
- data/test/measure_allocations_trace_test.rb +224 -234
- data/test/measure_memory_trace_test.rb +814 -469
- data/test/measure_process_time_test.rb +0 -64
- data/test/measure_times.rb +2 -0
- data/test/measure_wall_time_test.rb +34 -58
- data/test/pause_resume_test.rb +19 -10
- data/test/prime.rb +1 -3
- data/test/prime_script.rb +6 -0
- data/test/printer_call_stack_test.rb +0 -1
- data/test/printer_call_tree_test.rb +0 -1
- data/test/printer_flat_test.rb +61 -30
- data/test/printer_graph_html_test.rb +0 -1
- data/test/printer_graph_test.rb +3 -4
- data/test/printers_test.rb +2 -2
- data/test/printing_recursive_graph_test.rb +1 -1
- data/test/profile_test.rb +16 -0
- data/test/rack_test.rb +0 -64
- data/test/recursive_test.rb +50 -54
- data/test/start_stop_test.rb +19 -19
- data/test/test_helper.rb +6 -17
- data/test/thread_test.rb +11 -11
- data/test/unique_call_path_test.rb +25 -95
- metadata +22 -11
- data/ext/ruby_prof/rp_call_info.c +0 -271
- data/ext/ruby_prof/rp_call_info.h +0 -35
- data/lib/2.6.5/ruby_prof.so +0 -0
- data/lib/ruby-prof/call_info_visitor.rb +0 -38
- data/test/parser_timings.rb +0 -24
data/ext/ruby_prof/rp_profile.h
CHANGED
@@ -10,13 +10,12 @@
 
 extern VALUE cProfile;
 
-typedef struct
+typedef struct prof_profile_t
 {
     VALUE running;
     VALUE paused;
 
     prof_measurer_t* measurer;
-    VALUE threads;
 
     VALUE tracepoints;
 
@@ -27,7 +26,6 @@ typedef struct
     thread_data_t* last_thread_data;
     double measurement_at_pause_resume;
     bool allow_exceptions;
-    bool merge_fibers;
 } prof_profile_t;
 
 void rp_init_profile(void);
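Note: naming the struct tag (typedef struct prof_profile_t) lets other translation units forward-declare the type without including rp_profile.h. A minimal sketch of that C idiom, with hypothetical consumer.h/consumer_inspect names (illustration only, not gem code):

    /* consumer.h: a forward declaration suffices for pointer parameters */
    struct prof_profile_t;
    void consumer_inspect(struct prof_profile_t* profile);

    /* consumer.c: include the full definition only where members are accessed */
    #include "rp_profile.h"
    void consumer_inspect(struct prof_profile_t* profile)
    {
        if (profile->measurer) { /* member access needs the complete type */ }
    }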
data/ext/ruby_prof/rp_stack.c
CHANGED
@@ -3,31 +3,12 @@
 
 #include "rp_stack.h"
 
-#define INITIAL_STACK_SIZE
+#define INITIAL_STACK_SIZE 16
 
-void
-prof_frame_pause(prof_frame_t *frame, double current_measurement)
+// Creates a stack of prof_frame_t to keep track of timings for active methods.
+prof_stack_t* prof_stack_create()
 {
-    if (frame && prof_frame_is_unpaused(frame))
-        frame->pause_time = current_measurement;
-}
-
-void
-prof_frame_unpause(prof_frame_t *frame, double current_measurement)
-{
-    if (frame && prof_frame_is_paused(frame))
-    {
-        frame->dead_time += (current_measurement - frame->pause_time);
-        frame->pause_time = -1;
-    }
-}
-
-/* Creates a stack of prof_frame_t to keep track
-   of timings for active methods. */
-prof_stack_t *
-prof_stack_create()
-{
-    prof_stack_t *stack = ALLOC(prof_stack_t);
+    prof_stack_t* stack = ALLOC(prof_stack_t);
     stack->start = ZALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
     stack->ptr = stack->start;
     stack->end = stack->start + INITIAL_STACK_SIZE;
@@ -35,22 +16,32 @@ prof_stack_create()
     return stack;
 }
 
-void
-prof_stack_free(prof_stack_t *stack)
+void prof_stack_free(prof_stack_t* stack)
 {
     xfree(stack->start);
     xfree(stack);
 }
 
-prof_frame_t *
-prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
+prof_frame_t* prof_stack_parent(prof_stack_t* stack)
 {
-    prof_frame_t *result;
-    prof_frame_t* parent_frame;
+    if (stack->ptr == stack->start || stack->ptr - 1 == stack->start)
+        return NULL;
+    else
+        return stack->ptr - 2;
+}
 
-    /* Is there space on the stack?  If not, double
-       its size. */
-    if (stack->ptr == stack->end)
+prof_frame_t* prof_stack_last(prof_stack_t* stack)
+{
+    if (stack->ptr == stack->start)
+        return NULL;
+    else
+        return stack->ptr - 1;
+}
+
+void prof_stack_verify_size(prof_stack_t* stack)
+{
+    // Is there space on the stack? If not, double its size.
+    if (stack->ptr == stack->end)
     {
         size_t len = stack->ptr - stack->start;
         size_t new_capacity = (stack->end - stack->start) * 2;
@@ -60,14 +51,53 @@ prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
         stack->ptr = stack->start + len;
         stack->end = stack->start + new_capacity;
     }
+}
 
-    result = stack->ptr;
+prof_frame_t* prof_stack_push(prof_stack_t* stack)
+{
+    prof_stack_verify_size(stack);
+
+    prof_frame_t* result = stack->ptr;
     stack->ptr++;
+    return result;
+}
+
+prof_frame_t* prof_stack_pop(prof_stack_t* stack)
+{
+    prof_frame_t* result = prof_stack_last(stack);
+    if (result)
+        stack->ptr--;
+
+    return result;
+}
 
-
-
-
-
+// ---------------- Frame Methods ----------------------------
+void prof_frame_pause(prof_frame_t* frame, double current_measurement)
+{
+    if (frame && prof_frame_is_unpaused(frame))
+        frame->pause_time = current_measurement;
+}
+
+void prof_frame_unpause(prof_frame_t* frame, double current_measurement)
+{
+    if (prof_frame_is_paused(frame))
+    {
+        frame->dead_time += (current_measurement - frame->pause_time);
+        frame->pause_time = -1;
+    }
+}
+
+prof_frame_t* prof_frame_current(prof_stack_t* stack)
+{
+    return prof_stack_last(stack);
+}
+
+prof_frame_t* prof_frame_push(prof_stack_t* stack, prof_call_tree_t* call_tree, double measurement, bool paused)
+{
+    prof_frame_t* result = prof_stack_push(stack);
+    prof_frame_t* parent_frame = prof_stack_parent(stack);
+
+    result->call_tree = call_tree;
 
     result->start_time = measurement;
     result->pause_time = -1; // init as not paused.
@@ -78,21 +108,22 @@ prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
     result->source_file = Qnil;
     result->source_line = 0;
 
-    call_info->measurement->called++;
-    call_info->visits++;
+    call_tree->measurement->called++;
+    call_tree->visits++;
 
-    if (call_info->method->visits > 0)
+    if (call_tree->method->visits > 0)
     {
-        call_info->method->recursive = true;
+        call_tree->method->recursive = true;
     }
-    call_info->method->measurement->called++;
-    call_info->method->visits++;
+    call_tree->method->measurement->called++;
+    call_tree->method->visits++;
 
     // Unpause the parent frame, if it exists.
     // If currently paused then:
     // 1) The child frame will begin paused.
     // 2) The parent will inherit the child's dead time.
-    prof_frame_unpause(parent_frame, measurement);
+    if (parent_frame)
+        prof_frame_unpause(parent_frame, measurement);
 
     if (paused)
     {
@@ -103,92 +134,77 @@ prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
     return result;
 }
 
-prof_frame_t *
-prof_stack_pop(prof_stack_t *stack, double measurement)
+prof_frame_t* prof_frame_unshift(prof_stack_t* stack, prof_call_tree_t* parent_call_tree, prof_call_tree_t* call_tree, double measurement)
 {
-
-
-    prof_call_info_t *call_info;
-
-    double total_time;
-    double self_time;
+    if (prof_stack_last(stack))
+        rb_raise(rb_eRuntimeError, "Stack unshift can only be called with an empty stack");
 
-
-
-
-    frame = stack->ptr;
-
-    /* Match passes until we reach the frame itself. */
-    if (prof_frame_is_pass(frame))
-    {
-        frame->passes--;
-        /* Additional frames can be consumed. See pop_frames(). */
-        return frame;
-    }
+    parent_call_tree->measurement->total_time = call_tree->measurement->total_time;
+    parent_call_tree->measurement->self_time = 0;
+    parent_call_tree->measurement->wait_time = call_tree->measurement->wait_time;
 
-
-
+    parent_call_tree->method->measurement->total_time += call_tree->measurement->total_time;
+    parent_call_tree->method->measurement->wait_time += call_tree->measurement->wait_time;
 
-
+    return prof_frame_push(stack, parent_call_tree, measurement, false);
+}
 
-
-
-
-    total_time = measurement - frame->start_time - frame->dead_time;
-    self_time = total_time - frame->child_time - frame->wait_time;
+prof_frame_t* prof_frame_pop(prof_stack_t* stack, double measurement)
+{
+    prof_frame_t* frame = prof_stack_pop(stack);
 
-
-
+    if (!frame)
+        return NULL;
 
-
-
-    call_info->method->measurement->wait_time += frame->wait_time;
-    if (call_info->method->visits == 1)
-        call_info->method->measurement->total_time += total_time;
+    /* Calculate the total time this method took */
+    prof_frame_unpause(frame, measurement);
 
-
+    double total_time = measurement - frame->start_time - frame->dead_time;
+    double self_time = total_time - frame->child_time - frame->wait_time;
 
-
-
-    call_info->measurement->wait_time += frame->wait_time;
-    if (call_info->visits == 1)
-        call_info->measurement->total_time += total_time;
+    /* Update information about the current method */
+    prof_call_tree_t* call_tree = frame->call_tree;
 
-
+    // Update method measurement
+    call_tree->method->measurement->self_time += self_time;
+    call_tree->method->measurement->wait_time += frame->wait_time;
+    if (call_tree->method->visits == 1)
+        call_tree->method->measurement->total_time += total_time;
 
-    if (parent_frame)
-    {
-        parent_frame->child_time += total_time;
-        parent_frame->dead_time += frame->dead_time;
-    }
+    call_tree->method->visits--;
 
-
-
+    // Update method measurement
+    call_tree->measurement->self_time += self_time;
+    call_tree->measurement->wait_time += frame->wait_time;
+    if (call_tree->visits == 1)
+        call_tree->measurement->total_time += total_time;
 
-
-
-
-
-    if (frame)
+    call_tree->visits--;
+
+    prof_frame_t* parent_frame = prof_stack_last(stack);
+    if (parent_frame)
     {
-
+        parent_frame->child_time += total_time;
+        parent_frame->dead_time += frame->dead_time;
     }
+
+    frame->source_file = Qnil;
+
     return frame;
 }
 
-prof_method_t*
-prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line)
+prof_method_t* prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line)
 {
-    prof_frame_t* frame = stack->ptr - 1;
+    prof_frame_t* frame = prof_stack_last(stack);
     while (frame >= stack->start)
     {
-        if (!frame->call_info)
+        if (!frame->call_tree)
             return NULL;
 
-        if (rb_str_equal(source_file, frame->call_info->method->source_file) &&
-            source_line >= frame->call_info->method->source_line)
+        if (rb_str_equal(source_file, frame->call_tree->method->source_file) &&
+            source_line >= frame->call_tree->method->source_line)
         {
-            return frame->call_info->method;
+            return frame->call_tree->method;
         }
         frame--;
    }
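Note: the rewrite factors stack growth out of the push path into prof_stack_verify_size. Below is a self-contained sketch of the same pointer-based doubling scheme in plain C (hypothetical names, no Ruby headers; realloc error handling omitted for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int depth; } frame_t;

    typedef struct
    {
        frame_t* start; // base of the allocation
        frame_t* end;   // one past the last slot
        frame_t* ptr;   // next free slot
    } frame_stack_t;

    // Double the capacity when full. realloc may move the block,
    // so ptr and end must be re-derived from the new start.
    static void stack_verify_size(frame_stack_t* stack)
    {
        if (stack->ptr == stack->end)
        {
            size_t len = stack->ptr - stack->start;
            size_t new_capacity = (stack->end - stack->start) * 2;
            stack->start = realloc(stack->start, new_capacity * sizeof(frame_t));
            stack->ptr = stack->start + len;
            stack->end = stack->start + new_capacity;
        }
    }

    static frame_t* stack_push(frame_stack_t* stack)
    {
        stack_verify_size(stack);
        return stack->ptr++;
    }

    int main(void)
    {
        frame_stack_t stack;
        stack.start = calloc(16, sizeof(frame_t));
        stack.ptr = stack.start;
        stack.end = stack.start + 16;

        for (int i = 0; i < 100; i++) // forces repeated doubling: 16 -> 32 -> 64 -> 128
            stack_push(&stack)->depth = i;

        printf("capacity: %zu\n", (size_t)(stack.end - stack.start)); // prints 128
        free(stack.start);
        return 0;
    }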
data/ext/ruby_prof/rp_stack.h
CHANGED
@@ -5,20 +5,19 @@
 #define __RP_STACK__
 
 #include "ruby_prof.h"
-#include "rp_call_info.h"
+#include "rp_call_tree.h"
 
-/* Temporary object that maintains profiling information
-
-
-typedef struct
+/* Temporary object that maintains profiling information
+   for active methods. They are created and destroyed
+   as the program moves up and down its stack. */
+typedef struct prof_frame_t
 {
     /* Caching prof_method_t values significantly
        increases performance. */
-    prof_call_info_t *call_info;
+    prof_call_tree_t* call_tree;
 
     VALUE source_file;
     unsigned int source_line;
-    unsigned int passes; /* Count of "pass" frames, _after_ this one. */
 
     double start_time;
     double switch_time; /* Time at switch to different thread */
@@ -28,9 +27,6 @@ typedef struct
     double dead_time; // Time to ignore (i.e. total amount of time between pause/resume blocks)
 } prof_frame_t;
 
-#define prof_frame_is_real(f) ((f)->passes == 0)
-#define prof_frame_is_pass(f) ((f)->passes > 0)
-
 #define prof_frame_is_paused(f) (f->pause_time >= 0)
 #define prof_frame_is_unpaused(f) (f->pause_time < 0)
 
@@ -38,19 +34,20 @@ void prof_frame_pause(prof_frame_t*, double current_measurement);
 void prof_frame_unpause(prof_frame_t*, double current_measurement);
 
 /* Current stack of active methods.*/
-typedef struct
+typedef struct prof_stack_t
 {
-    prof_frame_t *start;
-    prof_frame_t *end;
-    prof_frame_t *ptr;
+    prof_frame_t* start;
+    prof_frame_t* end;
+    prof_frame_t* ptr;
 } prof_stack_t;
 
-prof_stack_t *prof_stack_create();
-void prof_stack_free(prof_stack_t *stack);
+prof_stack_t* prof_stack_create(void);
+void prof_stack_free(prof_stack_t* stack);
 
-prof_frame_t *prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused);
-prof_frame_t *prof_stack_pop(prof_stack_t *stack, double measurement);
-prof_frame_t
-
+prof_frame_t* prof_frame_current(prof_stack_t* stack);
+prof_frame_t* prof_frame_push(prof_stack_t* stack, prof_call_tree_t* call_tree, double measurement, bool paused);
+prof_frame_t* prof_frame_unshift(prof_stack_t* stack, prof_call_tree_t* parent_call_tree, prof_call_tree_t* call_tree, double measurement);
+prof_frame_t* prof_frame_pop(prof_stack_t* stack, double measurement);
+prof_method_t* prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line);
 
 #endif //__RP_STACK__
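Note: prof_frame_is_paused/prof_frame_is_unpaused encode the pause state in pause_time (>= 0 while paused, -1 while running), and dead_time accumulates paused intervals so prof_frame_pop can subtract them from elapsed time. A standalone illustration of that bookkeeping (simplified sketch, not gem code):

    #include <stdio.h>

    typedef struct
    {
        double start_time;
        double pause_time; // >= 0 while paused, -1 while running
        double dead_time;  // accumulated paused time to exclude
    } frame_t;

    #define frame_is_paused(f)   ((f)->pause_time >= 0)
    #define frame_is_unpaused(f) ((f)->pause_time < 0)

    static void frame_pause(frame_t* frame, double now)
    {
        if (frame_is_unpaused(frame))
            frame->pause_time = now;
    }

    static void frame_unpause(frame_t* frame, double now)
    {
        if (frame_is_paused(frame))
        {
            frame->dead_time += now - frame->pause_time;
            frame->pause_time = -1;
        }
    }

    int main(void)
    {
        frame_t frame = { 0.0, -1, 0.0 };
        frame_pause(&frame, 2.0);   // paused at t=2
        frame_unpause(&frame, 5.0); // resumed at t=5: 3s of dead time

        // Same formula as prof_frame_pop: elapsed time minus dead time.
        double total_time = 10.0 - frame.start_time - frame.dead_time;
        printf("total_time = %.1f\n", total_time); // prints 7.0
        return 0;
    }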
data/ext/ruby_prof/rp_thread.c
CHANGED
@@ -14,21 +14,20 @@ You cannot create an instance of RubyProf::Thread, instead you access it from a
     thread.root_methods.sort.each do |method|
       puts method.total_time
     end
-  end
-*/
+  end */
 
 #include "rp_thread.h"
 #include "rp_profile.h"
 
 VALUE cRpThread;
 
-
-thread_data_t*
-thread_data_create(void)
+// ====== thread_data_t ======
+thread_data_t* thread_data_create(void)
 {
     thread_data_t* result = ALLOC(thread_data_t);
     result->stack = prof_stack_create();
     result->method_table = method_table_create();
+    result->call_tree = NULL;
     result->object = Qnil;
     result->methods = Qnil;
     result->fiber_id = Qnil;
@@ -38,123 +37,141 @@ thread_data_create(void)
     return result;
 }
 
-static void
-prof_thread_free(thread_data_t* thread_data)
+static int mark_methods(st_data_t key, st_data_t value, st_data_t result)
 {
-    /* Has this thread object been accessed by Ruby?  If
-       yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
-    if (thread_data->object != Qnil)
-    {
-        RDATA(thread_data->object)->dmark = NULL;
-        RDATA(thread_data->object)->dfree = NULL;
-        RDATA(thread_data->object)->data = NULL;
-        thread_data->object = Qnil;
-    }
-
-    method_table_free(thread_data->method_table);
-    prof_stack_free(thread_data->stack);
-
-
-    xfree(thread_data);
-}
-
-static int
-mark_methods(st_data_t key, st_data_t value, st_data_t result)
-{
-    prof_method_t *method = (prof_method_t *) value;
+    prof_method_t* method = (prof_method_t*)value;
     prof_method_mark(method);
     return ST_CONTINUE;
 }
 
-size_t
-prof_thread_size(const void *data)
+size_t prof_thread_size(const void* data)
 {
-    return sizeof(thread_data_t);
+    return sizeof(thread_data_t);
 }
 
-void
-prof_thread_mark(void *data)
+void prof_thread_mark(void* data)
 {
-
-
+    if (!data)
+        return;
+
+    thread_data_t* thread = (thread_data_t*)data;
+
     if (thread->object != Qnil)
         rb_gc_mark(thread->object);
-
+
+    rb_gc_mark(thread->fiber);
+
     if (thread->methods != Qnil)
         rb_gc_mark(thread->methods);
-
+
     if (thread->fiber_id != Qnil)
         rb_gc_mark(thread->fiber_id);
-
+
     if (thread->thread_id != Qnil)
         rb_gc_mark(thread->thread_id);
-
-
+
+    if (thread->call_tree)
+        prof_call_tree_mark(thread->call_tree);
+
+    rb_st_foreach(thread->method_table, mark_methods, 0);
 }
 
+void prof_thread_ruby_gc_free(void* data)
+{
+    if (data)
+    {
+        thread_data_t* thread_data = (thread_data_t*)data;
+        thread_data->object = Qnil;
+    }
+}
+
+static void prof_thread_free(thread_data_t* thread_data)
+{
+    /* Has this method object been accessed by Ruby? If
+       yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
+    if (thread_data->object != Qnil)
+    {
+        RTYPEDDATA(thread_data->object)->data = NULL;
+        thread_data->object = Qnil;
+    }
+
+    method_table_free(thread_data->method_table);
+
+    if (thread_data->call_tree)
+        prof_call_tree_free(thread_data->call_tree);
+
+    prof_stack_free(thread_data->stack);
+
+    xfree(thread_data);
+}
 
-VALUE
-prof_thread_wrap(thread_data_t* thread)
+static const rb_data_type_t thread_type =
+{
+    .wrap_struct_name = "ThreadInfo",
+    .function =
+    {
+        .dmark = prof_thread_mark,
+        .dfree = prof_thread_ruby_gc_free,
+        .dsize = prof_thread_size,
+    },
+    .data = NULL,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY
+};
+
+VALUE prof_thread_wrap(thread_data_t* thread)
 {
     if (thread->object == Qnil)
     {
-        thread->object =
+        thread->object = TypedData_Wrap_Struct(cRpThread, &thread_type, thread);
     }
     return thread->object;
 }
 
-static VALUE
-prof_thread_allocate(VALUE klass)
+static VALUE prof_thread_allocate(VALUE klass)
 {
     thread_data_t* thread_data = thread_data_create();
     thread_data->object = prof_thread_wrap(thread_data);
     return thread_data->object;
 }
 
-thread_data_t*
-prof_get_thread(VALUE self)
+thread_data_t* prof_get_thread(VALUE self)
 {
     /* Can't use Data_Get_Struct because that triggers the event hook
        ending up in endless recursion. */
-    thread_data_t* result =
+    thread_data_t* result = RTYPEDDATA_DATA(self);
     if (!result)
         rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
 
     return result;
 }
 
-/* The thread table is hash keyed on ruby
-   fiber_id that stores instances
-   of thread_data_t. */
+// ====== Thread Table ======
+// The thread table is hash keyed on ruby fiber_id that stores instances of thread_data_t.
 
-st_table *
-threads_table_create()
+st_table* threads_table_create()
 {
-    return st_init_numtable();
+    return rb_st_init_numtable();
 }
 
-static int
-thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
+static int thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
 {
     prof_thread_free((thread_data_t*)value);
     return ST_CONTINUE;
 }
 
-void
-threads_table_free(st_table *table)
+void threads_table_free(st_table* table)
 {
-    st_foreach(table, thread_table_free_iterator, 0);
-    st_free_table(table);
+    rb_st_foreach(table, thread_table_free_iterator, 0);
+    rb_st_free_table(table);
 }
 
-thread_data_t *
-threads_table_lookup(void *prof, VALUE fiber)
+thread_data_t* threads_table_lookup(void* prof, VALUE fiber)
 {
-    prof_profile_t* profile = prof;
+    prof_profile_t* profile = prof;
     thread_data_t* result = NULL;
     st_data_t val;
 
-    if (st_lookup(profile->threads_tbl, fiber, &val))
+    if (rb_st_lookup(profile->threads_tbl, fiber, &val))
     {
         result = (thread_data_t*)val;
     }
@@ -162,24 +179,23 @@ threads_table_lookup(void *prof, VALUE fiber)
     return result;
 }
 
-thread_data_t*
-threads_table_insert(void *prof, VALUE fiber)
+thread_data_t* threads_table_insert(void* prof, VALUE fiber)
 {
-    prof_profile_t* profile = prof;
-    thread_data_t* result = thread_data_create();
+    prof_profile_t* profile = prof;
+    thread_data_t* result = thread_data_create();
     VALUE thread = rb_thread_current();
 
     result->fiber = fiber;
     result->fiber_id = rb_obj_id(fiber);
     result->thread_id = rb_obj_id(thread);
-    st_insert(profile->threads_tbl, (st_data_t)fiber, (st_data_t)result);
+    rb_st_insert(profile->threads_tbl, (st_data_t)fiber, (st_data_t)result);
 
     // Are we tracing this thread?
-    if (profile->include_threads_tbl && !st_lookup(profile->include_threads_tbl, thread, 0))
+    if (profile->include_threads_tbl && !rb_st_lookup(profile->include_threads_tbl, thread, 0))
     {
-        result->trace= false;
+        result->trace = false;
     }
-    else if (profile->exclude_threads_tbl && st_lookup(profile->exclude_threads_tbl, thread, 0))
+    else if (profile->exclude_threads_tbl && rb_st_lookup(profile->exclude_threads_tbl, thread, 0))
     {
         result->trace = false;
     }
@@ -187,25 +203,28 @@ threads_table_insert(void *prof, VALUE fiber)
     {
         result->trace = true;
    }
-
+
     return result;
 }
 
-void
-switch_thread(void *prof, thread_data_t *thread_data, double measurement)
+// ====== Profiling Methods ======
+void switch_thread(void* prof, thread_data_t* thread_data, double measurement)
 {
-    prof_profile_t* profile = prof;
+    prof_profile_t* profile = prof;
 
     /* Get current frame for this thread */
-    prof_frame_t* frame = thread_data->stack
-
-
-
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
+    if (frame)
+    {
+        frame->wait_time += measurement - frame->switch_time;
+        frame->switch_time = 0;
+    }
+
     /* Save on the last thread the time of the context switch
        and reset this thread's last context switch to 0.*/
     if (profile->last_thread_data)
     {
-        prof_frame_t* last_frame = profile->last_thread_data->stack
+        prof_frame_t* last_frame = prof_frame_current(profile->last_thread_data->stack);
         if (last_frame)
             last_frame->switch_time = measurement;
     }
@@ -215,10 +234,10 @@ switch_thread(void *prof, thread_data_t *thread_data, double measurement)
 
 int pause_thread(st_data_t key, st_data_t value, st_data_t data)
 {
-    thread_data_t* thread_data = (thread_data_t *) value;
+    thread_data_t* thread_data = (thread_data_t*)value;
     prof_profile_t* profile = (prof_profile_t*)data;
 
-    prof_frame_t* frame = thread_data->stack
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
     prof_frame_pause(frame, profile->measurement_at_pause_resume);
 
     return ST_CONTINUE;
@@ -226,37 +245,33 @@ int pause_thread(st_data_t key, st_data_t value, st_data_t data)
 
 int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
 {
-    thread_data_t* thread_data = (thread_data_t *) value;
+    thread_data_t* thread_data = (thread_data_t*)value;
     prof_profile_t* profile = (prof_profile_t*)data;
 
-    prof_frame_t* frame = thread_data->stack
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
     prof_frame_unpause(frame, profile->measurement_at_pause_resume);
 
     return ST_CONTINUE;
 }
 
-static int
-collect_methods(st_data_t key, st_data_t value, st_data_t result)
+// ====== Helper Methods ======
+static int collect_methods(st_data_t key, st_data_t value, st_data_t result)
 {
     /* Called for each method stored in a thread's method table.
       We want to store the method info information into an array.*/
-    VALUE methods = (VALUE)result;
-    prof_method_t *method = (prof_method_t *) value;
-
-    if (!method->excluded)
-    {
-        rb_ary_push(methods, prof_method_wrap(method));
-    }
+    VALUE methods = (VALUE)result;
+    prof_method_t* method = (prof_method_t*)value;
+    rb_ary_push(methods, prof_method_wrap(method));
 
     return ST_CONTINUE;
 }
 
+// ====== RubyProf::Thread ======
 /* call-seq:
    id -> number
 
Returns the thread id of this thread. */
-static VALUE
-prof_thread_id(VALUE self)
+static VALUE prof_thread_id(VALUE self)
 {
     thread_data_t* thread = prof_get_thread(self);
     return thread->thread_id;
@@ -266,57 +281,66 @@ prof_thread_id(VALUE self)
   fiber_id -> number
 
Returns the fiber id of this thread. */
-static VALUE
-prof_fiber_id(VALUE self)
+static VALUE prof_fiber_id(VALUE self)
 {
     thread_data_t* thread = prof_get_thread(self);
     return thread->fiber_id;
 }
 
+/* call-seq:
+   call_tree -> CallTree
+
+Returns the root of the call tree. */
+static VALUE prof_call_tree(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    return prof_call_tree_wrap(thread->call_tree);
+}
+
 /* call-seq:
   methods -> [RubyProf::MethodInfo]
 
Returns an array of methods that were called from this
   thread during program execution. */
-static VALUE
-prof_thread_methods(VALUE self)
+static VALUE prof_thread_methods(VALUE self)
 {
     thread_data_t* thread = prof_get_thread(self);
     if (thread->methods == Qnil)
     {
         thread->methods = rb_ary_new();
-        st_foreach(thread->method_table, collect_methods, thread->methods);
+        rb_st_foreach(thread->method_table, collect_methods, thread->methods);
     }
     return thread->methods;
 }
 
 /* :nodoc: */
-static VALUE
-prof_thread_dump(VALUE self)
+static VALUE prof_thread_dump(VALUE self)
 {
-    thread_data_t* thread_data =
+    thread_data_t* thread_data = RTYPEDDATA_DATA(self);
 
     VALUE result = rb_hash_new();
     rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
     rb_hash_aset(result, ID2SYM(rb_intern("methods")), prof_thread_methods(self));
+    rb_hash_aset(result, ID2SYM(rb_intern("call_tree")), prof_call_tree(self));
 
     return result;
 }
 
 /* :nodoc: */
-static VALUE
-prof_thread_load(VALUE self, VALUE data)
+static VALUE prof_thread_load(VALUE self, VALUE data)
 {
-    thread_data_t* thread_data =
-
+    thread_data_t* thread_data = RTYPEDDATA_DATA(self);
+
+    VALUE call_tree = rb_hash_aref(data, ID2SYM(rb_intern("call_tree")));
+    thread_data->call_tree = prof_get_call_tree(call_tree);
 
     thread_data->fiber_id = rb_hash_aref(data, ID2SYM(rb_intern("fiber_id")));
-    VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
 
+    VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
     for (int i = 0; i < rb_array_len(methods); i++)
     {
         VALUE method = rb_ary_entry(methods, i);
-        prof_method_t
+        prof_method_t* method_data = RTYPEDDATA_DATA(method);
         method_table_insert(thread_data->method_table, method_data->key, method_data);
     }
 
@@ -325,11 +349,12 @@ prof_thread_load(VALUE self, VALUE data)
 
 void rp_init_thread(void)
 {
-    cRpThread = rb_define_class_under(mProf, "Thread",
+    cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
     rb_undef_method(CLASS_OF(cRpThread), "new");
     rb_define_alloc_func(cRpThread, prof_thread_allocate);
 
     rb_define_method(cRpThread, "id", prof_thread_id, 0);
+    rb_define_method(cRpThread, "call_tree", prof_call_tree, 0);
     rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
     rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
     rb_define_method(cRpThread, "_dump_data", prof_thread_dump, 0);
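Note: the old RDATA(...)->dmark/dfree/data juggling is replaced by Ruby's typed-data C API: one static rb_data_type_t describes the mark/free/size callbacks, and TypedData_Wrap_Struct/RTYPEDDATA_DATA handle wrapping and unwrapping. A minimal standalone extension using the same pattern (hypothetical Thing class, illustration only, not ruby-prof code):

    #include <ruby.h>

    typedef struct { long counter; } thing_t;

    static void thing_free(void* data) { xfree(data); }
    static size_t thing_size(const void* data) { return sizeof(thing_t); }

    // One static descriptor replaces the per-object dmark/dfree fields.
    static const rb_data_type_t thing_type =
    {
        .wrap_struct_name = "Thing",
        .function =
        {
            .dmark = NULL, // nothing to mark: the struct holds no VALUEs
            .dfree = thing_free,
            .dsize = thing_size,
        },
        .data = NULL,
        .flags = RUBY_TYPED_FREE_IMMEDIATELY
    };

    static VALUE thing_allocate(VALUE klass)
    {
        thing_t* thing = ALLOC(thing_t);
        thing->counter = 0;
        return TypedData_Wrap_Struct(klass, &thing_type, thing);
    }

    static VALUE thing_increment(VALUE self)
    {
        thing_t* thing;
        TypedData_Get_Struct(self, thing_t, &thing_type, thing); // type-checked unwrap
        return LONG2NUM(++thing->counter);
    }

    void Init_thing(void)
    {
        VALUE cThing = rb_define_class("Thing", rb_cObject);
        rb_define_alloc_func(cThing, thing_allocate);
        rb_define_method(cThing, "increment", thing_increment, 0);
    }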