ruby-prof 0.18.0 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGES +23 -0
- data/LICENSE +2 -2
- data/README.rdoc +1 -483
- data/Rakefile +3 -6
- data/bin/ruby-prof +65 -30
- data/ext/ruby_prof/extconf.rb +6 -38
- data/ext/ruby_prof/rp_allocation.c +292 -0
- data/ext/ruby_prof/rp_allocation.h +31 -0
- data/ext/ruby_prof/rp_call_info.c +137 -279
- data/ext/ruby_prof/rp_call_info.h +16 -34
- data/ext/ruby_prof/rp_measure_allocations.c +25 -49
- data/ext/ruby_prof/rp_measure_memory.c +21 -56
- data/ext/ruby_prof/rp_measure_process_time.c +28 -36
- data/ext/ruby_prof/rp_measure_wall_time.c +36 -19
- data/ext/ruby_prof/rp_measurement.c +236 -0
- data/ext/ruby_prof/rp_measurement.h +49 -0
- data/ext/ruby_prof/rp_method.c +395 -383
- data/ext/ruby_prof/rp_method.h +34 -39
- data/ext/ruby_prof/rp_profile.c +881 -0
- data/ext/ruby_prof/rp_profile.h +36 -0
- data/ext/ruby_prof/rp_stack.c +103 -80
- data/ext/ruby_prof/rp_stack.h +5 -12
- data/ext/ruby_prof/rp_thread.c +149 -88
- data/ext/ruby_prof/rp_thread.h +15 -6
- data/ext/ruby_prof/ruby_prof.c +11 -757
- data/ext/ruby_prof/ruby_prof.h +4 -47
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +10 -8
- data/lib/ruby-prof.rb +2 -17
- data/lib/ruby-prof/assets/graph_printer.html.erb +356 -0
- data/lib/ruby-prof/call_info.rb +35 -93
- data/lib/ruby-prof/call_info_visitor.rb +19 -21
- data/lib/ruby-prof/compatibility.rb +37 -107
- data/lib/ruby-prof/exclude_common_methods.rb +198 -0
- data/lib/ruby-prof/measurement.rb +14 -0
- data/lib/ruby-prof/method_info.rb +52 -83
- data/lib/ruby-prof/printers/abstract_printer.rb +66 -52
- data/lib/ruby-prof/printers/call_info_printer.rb +13 -3
- data/lib/ruby-prof/printers/call_stack_printer.rb +32 -28
- data/lib/ruby-prof/printers/call_tree_printer.rb +20 -12
- data/lib/ruby-prof/printers/dot_printer.rb +5 -5
- data/lib/ruby-prof/printers/flat_printer.rb +6 -24
- data/lib/ruby-prof/printers/graph_html_printer.rb +7 -192
- data/lib/ruby-prof/printers/graph_printer.rb +13 -15
- data/lib/ruby-prof/printers/multi_printer.rb +66 -23
- data/lib/ruby-prof/profile.rb +10 -3
- data/lib/ruby-prof/rack.rb +0 -3
- data/lib/ruby-prof/thread.rb +12 -12
- data/lib/ruby-prof/version.rb +1 -1
- data/ruby-prof.gemspec +2 -2
- data/test/abstract_printer_test.rb +0 -27
- data/test/alias_test.rb +129 -0
- data/test/basic_test.rb +41 -40
- data/test/call_info_visitor_test.rb +3 -3
- data/test/dynamic_method_test.rb +0 -2
- data/test/line_number_test.rb +120 -39
- data/test/marshal_test.rb +119 -0
- data/test/measure_allocations.rb +30 -0
- data/test/measure_allocations_test.rb +371 -12
- data/test/measure_allocations_trace_test.rb +385 -0
- data/test/measure_memory_trace_test.rb +756 -0
- data/test/measure_process_time_test.rb +821 -33
- data/test/measure_times.rb +54 -0
- data/test/measure_wall_time_test.rb +349 -145
- data/test/multi_printer_test.rb +1 -34
- data/test/parser_timings.rb +24 -0
- data/test/pause_resume_test.rb +5 -5
- data/test/prime.rb +2 -0
- data/test/printer_call_tree_test.rb +31 -0
- data/test/printer_flat_test.rb +68 -0
- data/test/printer_graph_html_test.rb +60 -0
- data/test/printer_graph_test.rb +41 -0
- data/test/printers_test.rb +32 -166
- data/test/printing_recursive_graph_test.rb +26 -72
- data/test/recursive_test.rb +72 -77
- data/test/stack_printer_test.rb +2 -15
- data/test/start_stop_test.rb +22 -25
- data/test/test_helper.rb +5 -248
- data/test/thread_test.rb +11 -54
- data/test/unique_call_path_test.rb +16 -28
- data/test/yarv_test.rb +1 -0
- metadata +24 -34
- data/examples/flat.txt +0 -50
- data/examples/graph.dot +0 -84
- data/examples/graph.html +0 -823
- data/examples/graph.txt +0 -139
- data/examples/multi.flat.txt +0 -23
- data/examples/multi.graph.html +0 -760
- data/examples/multi.grind.dat +0 -114
- data/examples/multi.stack.html +0 -547
- data/examples/stack.html +0 -547
- data/ext/ruby_prof/rp_measure.c +0 -40
- data/ext/ruby_prof/rp_measure.h +0 -45
- data/ext/ruby_prof/rp_measure_cpu_time.c +0 -136
- data/ext/ruby_prof/rp_measure_gc_runs.c +0 -73
- data/ext/ruby_prof/rp_measure_gc_time.c +0 -60
- data/lib/ruby-prof/aggregate_call_info.rb +0 -76
- data/lib/ruby-prof/printers/flat_printer_with_line_numbers.rb +0 -83
- data/lib/ruby-prof/profile/exclude_common_methods.rb +0 -207
- data/lib/ruby-prof/profile/legacy_method_elimination.rb +0 -50
- data/test/aggregate_test.rb +0 -136
- data/test/block_test.rb +0 -74
- data/test/call_info_test.rb +0 -78
- data/test/fiber_test.rb +0 -79
- data/test/issue137_test.rb +0 -63
- data/test/measure_cpu_time_test.rb +0 -212
- data/test/measure_gc_runs_test.rb +0 -32
- data/test/measure_gc_time_test.rb +0 -36
- data/test/measure_memory_test.rb +0 -33
- data/test/method_elimination_test.rb +0 -84
- data/test/module_test.rb +0 -45
- data/test/stack_test.rb +0 -138
@@ -0,0 +1,36 @@
+/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
+   Please see the LICENSE file for copyright and distribution information */
+
+#ifndef __RP_PROFILE_H__
+#define __RP_PROFILE_H__
+
+#include "ruby_prof.h"
+#include "rp_measurement.h"
+#include "rp_thread.h"
+
+extern VALUE cProfile;
+
+typedef struct
+{
+    VALUE running;
+    VALUE paused;
+
+    prof_measurer_t* measurer;
+    VALUE threads;
+
+    VALUE tracepoints;
+
+    st_table* threads_tbl;
+    st_table* exclude_threads_tbl;
+    st_table* include_threads_tbl;
+    st_table* exclude_methods_tbl;
+    thread_data_t* last_thread_data;
+    double measurement_at_pause_resume;
+    bool allow_exceptions;
+} prof_profile_t;
+
+void rp_init_profile(void);
+prof_profile_t* prof_get_profile(VALUE self);
+
+
+#endif //__RP_PROFILE_H__
data/ext/ruby_prof/rp_stack.c
CHANGED
@@ -3,7 +3,7 @@
 
 #include "rp_stack.h"
 
-#define INITIAL_STACK_SIZE
+#define INITIAL_STACK_SIZE 32
 
 void
 prof_frame_pause(prof_frame_t *frame, double current_measurement)
@@ -15,20 +15,20 @@ prof_frame_pause(prof_frame_t *frame, double current_measurement)
 void
 prof_frame_unpause(prof_frame_t *frame, double current_measurement)
 {
-    if (frame && prof_frame_is_paused(frame))
+    if (frame && prof_frame_is_paused(frame))
+    {
         frame->dead_time += (current_measurement - frame->pause_time);
         frame->pause_time = -1;
     }
 }
 
-
 /* Creates a stack of prof_frame_t to keep track
    of timings for active methods. */
 prof_stack_t *
 prof_stack_create()
 {
     prof_stack_t *stack = ALLOC(prof_stack_t);
-    stack->start =
+    stack->start = ZALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
     stack->ptr = stack->start;
     stack->end = stack->start + INITIAL_STACK_SIZE;
 
@@ -45,61 +45,62 @@ prof_stack_free(prof_stack_t *stack)
 prof_frame_t *
 prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
 {
-
-
-    prof_method_t *method;
+    prof_frame_t *result;
+    prof_frame_t* parent_frame;
 
-
+    /* Is there space on the stack? If not, double
        its size. */
-
-
-
-
-
-
-
-
-
-
-    parent_frame = prof_stack_peek(stack);
-
-    // Reserve the next available frame pointer.
-    result = stack->ptr++;
-
-    result->call_info = call_info;
-    result->call_info->depth = (int)(stack->ptr - stack->start); // shortening of 64 bit into 32;
-    result->passes = 0;
-
-    result->start_time = measurement;
-    result->pause_time = -1; // init as not paused.
-    result->switch_time = 0;
-    result->wait_time = 0;
-    result->child_time = 0;
-    result->dead_time = 0;
-
-    method = call_info->target;
-
-    /* If the method was visited previously, it's recursive. */
-    if (method->visits > 0)
-    {
-        method->recursive = 1;
-        call_info->recursive = 1;
-    }
-    /* Enter the method. */
-    method->visits++;
-
-    // Unpause the parent frame, if it exists.
-    // If currently paused then:
-    // 1) The child frame will begin paused.
-    // 2) The parent will inherit the child's dead time.
-    prof_frame_unpause(parent_frame, measurement);
+    if (stack->ptr == stack->end - 1)
+    {
+        size_t len = stack->ptr - stack->start;
+        size_t new_capacity = (stack->end - stack->start) * 2;
+        REALLOC_N(stack->start, prof_frame_t, new_capacity);
+
+        /* Memory just got moved, reset pointers */
+        stack->ptr = stack->start + len;
+        stack->end = stack->start + new_capacity;
+    }
 
-
-
-
+    parent_frame = stack->ptr;
+    stack->ptr++;
+
+    result = stack->ptr;
+    result->call_info = call_info;
+    result->call_info->depth = (int)(stack->ptr - stack->start); // shortening of 64 bit into 32;
+    result->passes = 0;
+
+    result->start_time = measurement;
+    result->pause_time = -1; // init as not paused.
+    result->switch_time = 0;
+    result->wait_time = 0;
+    result->child_time = 0;
+    result->dead_time = 0;
+    result->source_file = Qnil;
+    result->source_line = 0;
+
+    call_info->measurement->called++;
+    call_info->visits++;
+
+    if (call_info->method->visits > 0)
+    {
+        call_info->method->recursive = true;
+    }
+    call_info->method->measurement->called++;
+    call_info->method->visits++;
+
+    // Unpause the parent frame, if it exists.
+    // If currently paused then:
+    // 1) The child frame will begin paused.
+    // 2) The parent will inherit the child's dead time.
+    prof_frame_unpause(parent_frame, measurement);
+
+    if (paused)
+    {
+        prof_frame_pause(result, measurement);
+    }
 
-
-
+    // Return the result
+    return result;
 }
 
 prof_frame_t *
@@ -108,23 +109,18 @@ prof_stack_pop(prof_stack_t *stack, double measurement)
     prof_frame_t *frame;
     prof_frame_t *parent_frame;
     prof_call_info_t *call_info;
-    prof_method_t *method;
 
     double total_time;
     double self_time;
 
-
+    if (stack->ptr == stack->start)
+        return NULL;
 
-
-       RubProf.start is called from a method that exits. And it can happen if an
-       exception is raised in code that is being profiled and the stack unwinds
-       (RubyProf is not notified of that by the ruby runtime. */
-    if (!frame) {
-        return NULL;
-    }
+    frame = stack->ptr;
 
     /* Match passes until we reach the frame itself. */
-    if (prof_frame_is_pass(frame))
+    if (prof_frame_is_pass(frame))
+    {
         frame->passes--;
         /* Additional frames can be consumed. See pop_frames(). */
         return frame;
@@ -133,30 +129,37 @@ prof_stack_pop(prof_stack_t *stack, double measurement)
     /* Consume this frame. */
     stack->ptr--;
 
+    parent_frame = stack->ptr;
+
     /* Calculate the total time this method took */
     prof_frame_unpause(frame, measurement);
+
     total_time = measurement - frame->start_time - frame->dead_time;
     self_time = total_time - frame->child_time - frame->wait_time;
 
     /* Update information about the current method */
     call_info = frame->call_info;
-    method = call_info->target;
 
-
-    call_info->
-    call_info->
-    call_info->
+    // Update method measurement
+    call_info->method->measurement->self_time += self_time;
+    call_info->method->measurement->wait_time += frame->wait_time;
+    if (call_info->method->visits == 1)
+        call_info->method->measurement->total_time += total_time;
 
-
-
+    call_info->method->visits--;
+
+    // Update method measurement
+    call_info->measurement->self_time += self_time;
+    call_info->measurement->wait_time += frame->wait_time;
+    if (call_info->visits == 1)
+        call_info->measurement->total_time += total_time;
+
+    call_info->visits--;
 
-    parent_frame = prof_stack_peek(stack);
     if (parent_frame)
     {
         parent_frame->child_time += total_time;
         parent_frame->dead_time += frame->dead_time;
-
-        call_info->line = parent_frame->line;
     }
 
     return frame;
@@ -165,9 +168,29 @@ prof_stack_pop(prof_stack_t *stack, double measurement)
 prof_frame_t *
 prof_stack_pass(prof_stack_t *stack)
 {
-
-
-
-
-
+    prof_frame_t* frame = stack->ptr;
+    if (frame)
+    {
+        frame->passes++;
+    }
+    return frame;
+}
+
+prof_method_t*
+prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line)
+{
+    prof_frame_t* frame = stack->ptr;
+    while (frame >= stack->start)
+    {
+        if (!frame->call_info)
+            return NULL;
+
+        if (rb_str_equal(source_file, frame->call_info->method->source_file) &&
+            source_line >= frame->call_info->method->source_line)
+        {
+            return frame->call_info->method;
+        }
+        frame--;
+    }
+    return NULL;
 }
data/ext/ruby_prof/rp_stack.h
CHANGED
@@ -4,12 +4,9 @@
 #ifndef __RP_STACK__
 #define __RP_STACK__
 
-#include
-
-#include "rp_measure.h"
+#include "ruby_prof.h"
 #include "rp_call_info.h"
 
-
 /* Temporary object that maintains profiling information
    for active methods. They are created and destroyed
    as the program moves up and down its stack. */
@@ -19,7 +16,8 @@ typedef struct
        increases performance. */
     prof_call_info_t *call_info;
 
-
+    VALUE source_file;
+    unsigned int source_line;
     unsigned int passes; /* Count of "pass" frames, _after_ this one. */
 
     double start_time;
@@ -47,17 +45,12 @@ typedef struct
     prof_frame_t *ptr;
 } prof_stack_t;
 
-prof_stack_t *prof_stack_create();
+prof_stack_t *prof_stack_create(void);
 void prof_stack_free(prof_stack_t *stack);
 
 prof_frame_t *prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused);
 prof_frame_t *prof_stack_pop(prof_stack_t *stack, double measurement);
 prof_frame_t *prof_stack_pass(prof_stack_t *stack);
-
-static inline prof_frame_t *
-prof_stack_peek(prof_stack_t *stack) {
-    return stack->ptr != stack->start ? stack->ptr - 1 : NULL;
-}
-
+prof_method_t *prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line);
 
 #endif //__RP_STACK__
data/ext/ruby_prof/rp_thread.c
CHANGED
@@ -1,51 +1,49 @@
 /* Copyright (C) 2005-2013 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
    Please see the LICENSE file for copyright and distribution information */
 
-
+/* Document-class: RubyProf::Thread
+
+The Thread class contains profile results for a single fiber (note a Ruby thread can run multiple fibers).
+You cannot create an instance of RubyProf::Thread, instead you access it from a RubyProf::Profile object.
+
+  profile = RubyProf::Profile.profile do
+    ...
+  end
+
+  profile.threads.each do |thread|
+    thread.root_methods.sort.each do |method|
+      puts method.total_time
+    end
+  end
+*/
+
+#include "rp_thread.h"
+#include "rp_profile.h"
 
 VALUE cRpThread;
 
 /* ====== thread_data_t ====== */
 thread_data_t*
-thread_data_create()
+thread_data_create(void)
 {
     thread_data_t* result = ALLOC(thread_data_t);
     result->stack = prof_stack_create();
     result->method_table = method_table_create();
     result->object = Qnil;
     result->methods = Qnil;
+    result->fiber_id = Qnil;
+    result->thread_id = Qnil;
+    result->trace = true;
+    result->fiber = Qnil;
     return result;
 }
 
-/* The underlying c structures are freed when the parent profile is freed.
-   However, on shutdown the Ruby GC frees objects in any will-nilly order.
-   That means the ruby thread object wrapping the c thread struct may
-   be freed before the parent profile. Thus we add in a free function
-   for the garbage collector so that if it does get called will nil
-   out our Ruby object reference.*/
-static void
-thread_data_ruby_gc_free(thread_data_t* thread_data)
-{
-    /* Has this thread object been accessed by Ruby? If
-       yes clean it up so to avoid a segmentation fault. */
-    if (thread_data->object != Qnil)
-    {
-        RDATA(thread_data->object)->data = NULL;
-        RDATA(thread_data->object)->dfree = NULL;
-        RDATA(thread_data->object)->dmark = NULL;
-    }
-    thread_data->object = Qnil;
-}
-
 static void
-
+prof_thread_free(thread_data_t* thread_data)
 {
-    thread_data_ruby_gc_free(thread_data);
     method_table_free(thread_data->method_table);
     prof_stack_free(thread_data->stack);
 
-    thread_data->thread_id = Qnil;
-
     xfree(thread_data);
 }
 
@@ -57,33 +55,63 @@ mark_methods(st_data_t key, st_data_t value, st_data_t result)
     return ST_CONTINUE;
 }
 
+size_t
+prof_thread_size(const void *data)
+{
+    return sizeof(prof_call_info_t);
+}
+
 void
-prof_thread_mark(
+prof_thread_mark(void *data)
 {
+    thread_data_t *thread = (thread_data_t*)data;
+
     if (thread->object != Qnil)
         rb_gc_mark(thread->object);
-
+
     if (thread->methods != Qnil)
         rb_gc_mark(thread->methods);
-
-    if (thread->thread_id != Qnil)
-        rb_gc_mark(thread->thread_id);
-
+
     if (thread->fiber_id != Qnil)
         rb_gc_mark(thread->fiber_id);
-
+
+    if (thread->thread_id != Qnil)
+        rb_gc_mark(thread->thread_id);
+
     st_foreach(thread->method_table, mark_methods, 0);
 }
 
+static const rb_data_type_t thread_type =
+{
+    .wrap_struct_name = "ThreadInfo",
+    .function =
+    {
+        .dmark = prof_thread_mark,
+        .dfree = NULL, /* The profile class frees its thread table which frees each underlying thread_data instance */
+        .dsize = prof_thread_size,
+    },
+    .data = NULL,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY
+};
+
 VALUE
 prof_thread_wrap(thread_data_t *thread)
 {
-    if (thread->object == Qnil)
-
+    if (thread->object == Qnil)
+    {
+        thread->object = TypedData_Wrap_Struct(cRpThread, &thread_type, thread);
     }
     return thread->object;
 }
 
+static VALUE
+prof_thread_allocate(VALUE klass)
+{
+    thread_data_t* thread_data = thread_data_create();
+    thread_data->object = prof_thread_wrap(thread_data);
+    return thread_data->object;
+}
+
 static thread_data_t*
 prof_get_thread(VALUE self)
 {
@@ -109,7 +137,7 @@ threads_table_create()
 static int
 thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
 {
-
+    prof_thread_free((thread_data_t*)value);
     return ST_CONTINUE;
 }
 
@@ -120,73 +148,70 @@ threads_table_free(st_table *table)
     st_free_table(table);
 }
 
-
-
+thread_data_t *
+threads_table_lookup(void *prof, VALUE fiber)
 {
-
-
+    prof_profile_t *profile = prof;
+    thread_data_t* result = NULL;
+    st_data_t val;
+
+    if (st_lookup(profile->threads_tbl, fiber, &val))
+    {
+        result = (thread_data_t*)val;
+    }
+
+    return result;
 }
 
-thread_data_t
-
+thread_data_t*
+threads_table_insert(void *prof, VALUE fiber)
 {
-
-
+    prof_profile_t *profile = prof;
+    thread_data_t *result = thread_data_create();
+    VALUE thread = rb_thread_current();
 
-
-
-
-
-
-
-    if (st_lookup(profile->
+    result->fiber = fiber;
+    result->fiber_id = rb_obj_id(fiber);
+    result->thread_id = rb_obj_id(thread);
+    st_insert(profile->threads_tbl, (st_data_t)fiber, (st_data_t)result);
+
+    // Are we tracing this thread?
+    if (profile->include_threads_tbl && !st_lookup(profile->include_threads_tbl, thread, 0))
+    {
+        result->trace= false;
+    }
+    else if (profile->exclude_threads_tbl && st_lookup(profile->exclude_threads_tbl, thread, 0))
     {
-
+        result->trace = false;
     }
     else
    {
-        result =
-        result->thread_id = thread_id;
-        /* We set fiber id to 0 in the merge fiber case. Real fibers never have id 0,
-           so we can identify them later during printing.
-        */
-        result->fiber_id = profile->merge_fibers ? INT2FIX(0) : fiber_id;
-        /* Insert the table */
-        threads_table_insert(profile, key, result);
+        result->trace = true;
     }
+
     return result;
 }
 
-
-switch_thread(void*
+void
+switch_thread(void *prof, thread_data_t *thread_data, double measurement)
 {
-    prof_profile_t*
-    double measurement = profile->measurer->measure();
-
-    /* Get new thread information. */
-    thread_data_t *thread_data = threads_table_lookup(profile, thread_id, fiber_id);
+    prof_profile_t *profile = prof;
 
     /* Get current frame for this thread */
-    prof_frame_t
-
-
-
-    {
-        frame->wait_time += measurement - frame->switch_time;
-        frame->switch_time = measurement;
-    }
-
+    prof_frame_t* frame = thread_data->stack->ptr;
+    frame->wait_time += measurement - frame->switch_time;
+    frame->switch_time = measurement;
+
     /* Save on the last thread the time of the context switch
        and reset this thread's last context switch to 0.*/
     if (profile->last_thread_data)
     {
-
-
-
+        prof_frame_t* last_frame = profile->last_thread_data->stack->ptr;
+        if (last_frame)
+            last_frame->switch_time = measurement;
     }
 
     profile->last_thread_data = thread_data;
-    return thread_data;
 }
 
 int pause_thread(st_data_t key, st_data_t value, st_data_t data)
@@ -194,7 +219,7 @@ int pause_thread(st_data_t key, st_data_t value, st_data_t data)
     thread_data_t* thread_data = (thread_data_t *) value;
     prof_profile_t* profile = (prof_profile_t*)data;
 
-    prof_frame_t* frame =
+    prof_frame_t* frame = thread_data->stack->ptr;
     prof_frame_pause(frame, profile->measurement_at_pause_resume);
 
     return ST_CONTINUE;
@@ -205,7 +230,7 @@ int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
     thread_data_t* thread_data = (thread_data_t *) value;
     prof_profile_t* profile = (prof_profile_t*)data;
 
-    prof_frame_t* frame =
+    prof_frame_t* frame = thread_data->stack->ptr;
     prof_frame_unpause(frame, profile->measurement_at_pause_resume);
 
     return ST_CONTINUE;
@@ -219,18 +244,18 @@ collect_methods(st_data_t key, st_data_t value, st_data_t result)
     VALUE methods = (VALUE) result;
     prof_method_t *method = (prof_method_t *) value;
 
-    if (!method->excluded)
+    if (!method->excluded)
+    {
         rb_ary_push(methods, prof_method_wrap(method));
     }
 
     return ST_CONTINUE;
 }
 
-
 /* call-seq:
    id -> number
 
-Returns the id of this thread. */
+Returns the thread id of this thread. */
 static VALUE
 prof_thread_id(VALUE self)
 {
@@ -250,7 +275,7 @@ prof_fiber_id(VALUE self)
 }
 
 /* call-seq:
-   methods ->
+   methods -> [RubyProf::MethodInfo]
 
 Returns an array of methods that were called from this
    thread during program execution. */
@@ -266,12 +291,48 @@ prof_thread_methods(VALUE self)
     return thread->methods;
 }
 
-
+/* :nodoc: */
+static VALUE
+prof_thread_dump(VALUE self)
+{
+    thread_data_t* thread_data = DATA_PTR(self);
+
+    VALUE result = rb_hash_new();
+    rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
+    rb_hash_aset(result, ID2SYM(rb_intern("methods")), prof_thread_methods(self));
+
+    return result;
+}
+
+/* :nodoc: */
+static VALUE
+prof_thread_load(VALUE self, VALUE data)
+{
+    thread_data_t* thread_data = DATA_PTR(self);
+    thread_data->object = self;
+
+    thread_data->fiber_id = rb_hash_aref(data, ID2SYM(rb_intern("fiber_id")));
+    VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
+
+    for (int i = 0; i < rb_array_len(methods); i++)
+    {
+        VALUE method = rb_ary_entry(methods, i);
+        prof_method_t *method_data = DATA_PTR(method);
+        method_table_insert(thread_data->method_table, method_data->key, method_data);
+    }
+
+    return data;
+}
+
+void rp_init_thread(void)
 {
-    cRpThread = rb_define_class_under(mProf, "Thread",
+    cRpThread = rb_define_class_under(mProf, "Thread", rb_cData);
     rb_undef_method(CLASS_OF(cRpThread), "new");
+    rb_define_alloc_func(cRpThread, prof_thread_allocate);
 
     rb_define_method(cRpThread, "id", prof_thread_id, 0);
     rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
     rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
+    rb_define_method(cRpThread, "_dump_data", prof_thread_dump, 0);
+    rb_define_method(cRpThread, "_load_data", prof_thread_load, 1);
 }