ruby-prof 0.11.0.rc1-x86-mingw32 → 0.11.0.rc2-x86-mingw32
- data/CHANGES +20 -5
- data/README.rdoc +10 -3
- data/ext/ruby_prof/rp_call_info.c +108 -79
- data/ext/ruby_prof/rp_call_info.h +1 -0
- data/ext/ruby_prof/rp_measure_cpu_time.c +111 -111
- data/ext/ruby_prof/rp_measure_gc_runs.c +1 -1
- data/ext/ruby_prof/rp_measure_memory.c +1 -1
- data/ext/ruby_prof/rp_measure_process_time.c +71 -71
- data/ext/ruby_prof/rp_measure_wall_time.c +1 -1
- data/ext/ruby_prof/rp_method.c +143 -73
- data/ext/ruby_prof/rp_method.h +7 -4
- data/ext/ruby_prof/rp_stack.c +16 -1
- data/ext/ruby_prof/rp_stack.h +4 -1
- data/ext/ruby_prof/rp_thread.c +165 -35
- data/ext/ruby_prof/rp_thread.h +8 -2
- data/ext/ruby_prof/ruby_prof.c +164 -171
- data/ext/ruby_prof/ruby_prof.h +53 -54
- data/ext/ruby_prof/vc/ruby_prof.sln +26 -0
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +109 -0
- data/ext/ruby_prof/vc/ruby_prof_18.vcxproj +105 -0
- data/lib/1.8/ruby_prof.so +0 -0
- data/lib/1.9/ruby_prof.so +0 -0
- data/lib/ruby-prof/aggregate_call_info.rb +9 -7
- data/lib/ruby-prof/call_info.rb +2 -27
- data/lib/ruby-prof/call_info_visitor.rb +42 -0
- data/lib/ruby-prof/{empty.png → images/empty.png} +0 -0
- data/lib/ruby-prof/{minus.png → images/minus.png} +0 -0
- data/lib/ruby-prof/{plus.png → images/plus.png} +0 -0
- data/lib/ruby-prof/method_info.rb +13 -15
- data/lib/ruby-prof/{abstract_printer.rb → printers/abstract_printer.rb} +36 -2
- data/lib/ruby-prof/printers/call_info_printer.rb +40 -0
- data/lib/ruby-prof/{call_stack_printer.rb → printers/call_stack_printer.rb} +11 -16
- data/lib/ruby-prof/{call_tree_printer.rb → printers/call_tree_printer.rb} +4 -4
- data/lib/ruby-prof/{dot_printer.rb → printers/dot_printer.rb} +11 -31
- data/lib/ruby-prof/{flat_printer.rb → printers/flat_printer.rb} +26 -35
- data/lib/ruby-prof/{flat_printer_with_line_numbers.rb → printers/flat_printer_with_line_numbers.rb} +14 -25
- data/lib/ruby-prof/printers/graph_html_printer.rb +248 -0
- data/lib/ruby-prof/{graph_printer.rb → printers/graph_printer.rb} +31 -73
- data/lib/ruby-prof/{multi_printer.rb → printers/multi_printer.rb} +0 -0
- data/lib/ruby-prof/profile.rb +27 -22
- data/lib/ruby-prof/rack.rb +22 -12
- data/ruby-prof.gemspec +58 -0
- data/test/aggregate_test.rb +6 -6
- data/test/call_info_visitor_test.rb +31 -0
- data/test/duplicate_names_test.rb +1 -1
- data/test/dynamic_method_test.rb +1 -1
- data/test/enumerable_test.rb +1 -1
- data/test/exclude_threads_test.rb +2 -2
- data/test/gc_test.rb +35 -0
- data/test/line_number_test.rb +2 -2
- data/test/measure_cpu_time_test.rb +5 -5
- data/test/measure_process_time_test.rb +5 -5
- data/test/measure_wall_time_test.rb +5 -5
- data/test/method_elimination_test.rb +3 -3
- data/test/module_test.rb +1 -1
- data/test/no_method_class_test.rb +1 -1
- data/test/printers_test.rb +16 -8
- data/test/recursive_test.rb +115 -91
- data/test/stack_test.rb +1 -1
- data/test/start_stop_test.rb +13 -13
- data/test/summarize_test.rb +48 -0
- data/test/test_suite.rb +1 -0
- data/test/thread_test.rb +16 -12
- data/test/unique_call_path_test.rb +10 -10
- metadata +65 -85
- data/lib/1.9/ruby_prof.exp +0 -0
- data/lib/1.9/ruby_prof.ilk +0 -0
- data/lib/1.9/ruby_prof.lib +0 -0
- data/lib/1.9/ruby_prof.pdb +0 -0
- data/lib/ruby-prof.rb +0 -70
- data/lib/ruby-prof/graph_html_printer.rb +0 -286
- data/lib/ruby-prof/symbol_to_proc.rb +0 -10
- data/lib/ruby_prof.exp +0 -0
- data/lib/ruby_prof.ilk +0 -0
- data/lib/ruby_prof.lib +0 -0
- data/lib/ruby_prof.pdb +0 -0
- data/lib/ruby_prof.so +0 -0
- data/lib/unprof.rb +0 -10
- data/test/do_nothing.rb +0 -0
data/ext/ruby_prof/rp_stack.c
CHANGED
@@ -29,6 +29,8 @@ stack_free(prof_stack_t *stack)
 prof_frame_t *
 stack_push(prof_stack_t *stack)
 {
+  prof_frame_t* result = NULL;
+
   /* Is there space on the stack?  If not, double
      its size. */
   if (stack->ptr == stack->end )
@@ -36,10 +38,23 @@ stack_push(prof_stack_t *stack)
     size_t len = stack->ptr - stack->start;
     size_t new_capacity = (stack->end - stack->start) * 2;
     REALLOC_N(stack->start, prof_frame_t, new_capacity);
+    /* Memory just got moved, reset pointers */
     stack->ptr = stack->start + len;
     stack->end = stack->start + new_capacity;
   }
-
+
+  // Setup returned stack pointer to be valid
+  result = stack->ptr;
+  result->child_time = 0;
+  result->switch_time = 0;
+  result->wait_time = 0;
+  result->depth = (stack->ptr - stack->start);
+
+  // Increment the stack ptr for next time
+  stack->ptr++;
+
+  // Return the result
+  return result;
 }
 
 prof_frame_t *
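Note that stack_push now grows the array, zeroes the new frame's timing fields and depth, and returns a pointer to it, so callers only fill in what they know (call_info, start_time, line). A minimal standalone toy of the same technique, not ruby-prof code (toy_frame_t/toy_stack_t and push are made-up names):

/* Toy sketch: push() grows the array when full, zeroes the new slot
   and returns it, so callers never see stale data after a realloc. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { double start_time, wait_time, child_time; int depth; } toy_frame_t;
typedef struct { toy_frame_t *start, *ptr, *end; } toy_stack_t;

static toy_frame_t *push(toy_stack_t *s)
{
    if (s->ptr == s->end) {                    /* full: double capacity */
        size_t len = s->ptr - s->start;
        size_t cap = (s->end - s->start) * 2;
        s->start = realloc(s->start, cap * sizeof(toy_frame_t));
        s->ptr = s->start + len;               /* memory moved, fix pointers */
        s->end = s->start + cap;
    }
    toy_frame_t *result = s->ptr++;
    memset(result, 0, sizeof(*result));        /* fresh, zeroed frame */
    result->depth = (int)(result - s->start);  /* depth == position on stack */
    return result;
}

int main(void)
{
    toy_stack_t s;
    s.start = malloc(2 * sizeof(toy_frame_t));
    s.ptr = s.start;
    s.end = s.start + 2;
    for (int i = 0; i < 5; i++)
        printf("pushed frame at depth %d\n", push(&s)->depth);
    free(s.start);
    return 0;
}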
data/ext/ruby_prof/rp_stack.h
CHANGED
@@ -11,15 +11,18 @@
 
 
 /* Temporary object that maintains profiling information
-   for active methods */
+   for active methods. They are created and destroyed
+   as the program moves up and down its stack. */
 typedef struct
 {
     /* Caching prof_method_t values significantly
        increases performance. */
     prof_call_info_t *call_info;
     double start_time;
+    double switch_time;  /* Time at switch to different thread */
     double wait_time;
     double child_time;
+    int depth;
     unsigned int line;
 } prof_frame_t;
 
data/ext/ruby_prof/rp_thread.c
CHANGED
@@ -3,25 +3,96 @@
 
 #include "ruby_prof.h"
 
+VALUE cRpThread;
+
 /* ======   thread_data_t  ====== */
 thread_data_t*
-thread_data_create(
+thread_data_create()
 {
     thread_data_t* result = ALLOC(thread_data_t);
     result->stack = stack_create();
     result->method_table = method_table_create();
-
+    result->top = NULL;
+    result->object = Qnil;
+    result->methods = Qnil;
     return result;
 }
 
-
+/* The underlying c structures are freed when the parent profile is freed.
+   However, on shutdown the Ruby GC frees objects in any will-nilly order.
+   That means the ruby thread object wrapping the c thread struct may
+   be freed before the parent profile.  Thus we add in a free function
+   for the garbage collector so that if it does get called will nil
+   out our Ruby object reference.*/
+static void
+thread_data_ruby_gc_free(thread_data_t* thread_data)
+{
+    /* Has this thread object been accessed by Ruby?  If
+       yes clean it up so to avoid a segmentation fault. */
+    if (thread_data->object != Qnil)
+    {
+        RDATA(thread_data->object)->data = NULL;
+        RDATA(thread_data->object)->dfree = NULL;
+        RDATA(thread_data->object)->dmark = NULL;
+    }
+    thread_data->object = Qnil;
+}
+
+static void
 thread_data_free(thread_data_t* thread_data)
 {
+    thread_data_ruby_gc_free(thread_data);
+    thread_data->top = NULL;
     method_table_free(thread_data->method_table);
     stack_free(thread_data->stack);
-
+
+    thread_data->thread_id = Qnil;
+
+    xfree(thread_data);
 }
 
+static int
+mark_methods(st_data_t key, st_data_t value, st_data_t result)
+{
+    prof_method_t *method = (prof_method_t *) value;
+    prof_method_mark(method);
+    return ST_CONTINUE;
+}
+
+void
+prof_thread_mark(thread_data_t *thread)
+{
+    if (thread->object != Qnil)
+        rb_gc_mark(thread->object);
+
+    if (thread->methods != Qnil)
+        rb_gc_mark(thread->methods);
+
+    prof_method_mark(thread->top);
+    st_foreach(thread->method_table, mark_methods, 0);
+}
+
+VALUE
+prof_thread_wrap(thread_data_t *thread)
+{
+    if (thread->object == Qnil)
+    {
+        thread->object = Data_Wrap_Struct(cRpThread, prof_thread_mark, thread_data_ruby_gc_free, thread);
+    }
+    return thread->object;
+}
+
+static thread_data_t*
+prof_get_thread(VALUE self)
+{
+    /* Can't use Data_Get_Struct because that triggers the event hook
+       ending up in endless recursion. */
+    thread_data_t* result = DATA_PTR(self);
+    if (!result)
+        rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
+
+    return result;
+}
 
 /* ======   Thread Table  ====== */
 /* The thread table is hash keyed on ruby thread_id that stores instances
@@ -33,6 +104,20 @@ threads_table_create()
     return st_init_numtable();
 }
 
+static int
+thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
+{
+    thread_data_free((thread_data_t*)value);
+    return ST_CONTINUE;
+}
+
+void
+threads_table_free(st_table *table)
+{
+    st_foreach(table, thread_table_free_iterator, 0);
+    st_free_table(table);
+}
+
 size_t
 threads_table_insert(prof_profile_t* profile, VALUE thread, thread_data_t *thread_data)
 {
@@ -53,7 +138,7 @@ threads_table_lookup(prof_profile_t* profile, VALUE thread_id)
     }
     else
     {
-        result = thread_data_create(
+        result = thread_data_create();
         result->thread_id = thread_id;
 
         /* Insert the table */
@@ -62,52 +147,97 @@ threads_table_lookup(prof_profile_t* profile, VALUE thread_id)
     return result;
 }
 
-int
-free_thread_data(st_data_t key, st_data_t value, st_data_t dummy)
-{
-    thread_data_free((thread_data_t*)value);
-    return ST_CONTINUE;
-}
-
-void
-threads_table_free(st_table *table)
-{
-    st_foreach(table, free_thread_data, 0);
-    st_free_table(table);
-}
-
 thread_data_t *
 switch_thread(void* prof, VALUE thread_id)
 {
-
-
-    double wait_time = 0;
+    prof_profile_t* profile = (prof_profile_t*)prof;
+    double measurement = profile->measurer->measure();
 
     /* Get new thread information. */
     thread_data_t *thread_data = threads_table_lookup(profile, thread_id);
 
-    /*
-
-
-    thread_data->last_switch = profile->measurement; // XXXX a test that fails if this is 0
-
-    /* Get the frame at the top of the stack.  This may represent
-       the current method (EVENT_LINE, EVENT_RETURN) or the
-       previous method (EVENT_CALL).*/
-    frame = stack_peek(thread_data->stack);
+    /* Get current frame for this thread */
+    prof_frame_t *frame = stack_peek(thread_data->stack);
 
+    /* Update the time this thread waited for another thread */
     if (frame)
-
-
+    {
+        frame->wait_time += measurement - frame->switch_time;
+        frame->switch_time = measurement;
     }
 
     /* Save on the last thread the time of the context switch
       and reset this thread's last context switch to 0.*/
     if (profile->last_thread_data)
-
-
+    {
+        prof_frame_t *last_frame = stack_peek(profile->last_thread_data->stack);
+        if (last_frame)
+            last_frame->switch_time = measurement;
     }
 
     profile->last_thread_data = thread_data;
     return thread_data;
 }
+
+static int
+collect_methods(st_data_t key, st_data_t value, st_data_t result)
+{
+    /* Called for each method stored in a thread's method table.
+       We want to store the method info information into an array.*/
+    VALUE methods = (VALUE) result;
+    prof_method_t *method = (prof_method_t *) value;
+    rb_ary_push(methods, prof_method_wrap(method));
+
+    return ST_CONTINUE;
+}
+
+
+/* call-seq:
+   id -> number
+
+   Returns the id of this thread. */
+static VALUE
+prof_thread_id(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    return thread->thread_id;
+}
+
+/* call-seq:
+   methods -> Array of MethodInfo
+
+   Returns an array of methods that were called from this
+   thread during program execution. */
+static VALUE
+prof_thread_methods(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    if (thread->methods == Qnil)
+    {
+        thread->methods = rb_ary_new();
+        st_foreach(thread->method_table, collect_methods, thread->methods);
+    }
+    return thread->methods;
+}
+
+/* call-seq:
+   method -> MethodInfo
+
+   Returns the top level method for this thread (ie, the starting
+   method). */
+static VALUE
+prof_thread_top_method(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    return prof_method_wrap(thread->top);
+}
+
+void rp_init_thread()
+{
+    cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
+    rb_undef_method(CLASS_OF(cRpThread), "new");
+
+    rb_define_method(cRpThread, "id", prof_thread_id, 0);
+    rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
+    rb_define_method(cRpThread, "top_method", prof_thread_top_method, 0);
+}
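The new wait-time accounting in switch_thread is easy to miss: switching away from a thread stamps switch_time on its top frame, and switching back converts the elapsed interval into wait_time. A minimal standalone illustration of that bookkeeping with toy types (not ruby-prof code; toy_frame_t and toy_switch are made-up names):

/* Toy sketch of the context-switch bookkeeping in switch_thread(). */
#include <stdio.h>

typedef struct { double wait_time, switch_time; } toy_frame_t;

/* 'to' is the frame of the thread being switched TO, 'from' the frame
   of the thread being switched AWAY from; either may be NULL. */
static void toy_switch(toy_frame_t *to, toy_frame_t *from, double now)
{
    if (to) {
        to->wait_time += now - to->switch_time;  /* time spent waiting */
        to->switch_time = now;
    }
    if (from)
        from->switch_time = now;                 /* start its waiting clock */
}

int main(void)
{
    toy_frame_t a = {0, 0};
    toy_switch(NULL, &a, 1.0);   /* switch away from A at t=1.0 */
    toy_switch(&a, NULL, 1.6);   /* switch back to A at t=1.6   */
    printf("A waited %.1fs\n", a.wait_time);   /* prints: A waited 0.6s */
    return 0;
}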
data/ext/ruby_prof/rp_thread.h
CHANGED
@@ -7,14 +7,20 @@
 /* Profiling information for a thread. */
 typedef struct
 {
+    VALUE object;               /* Cache to wrapped object */
+    VALUE methods;              /* Array of RubyProf::MethodInfo */
     VALUE thread_id;            /* Thread id */
     st_table* method_table;     /* Methods called in the thread */
-    prof_stack_t* stack;        /*
-
+    prof_stack_t* stack;        /* Stack of frames */
+    prof_method_t* top;         /* The top method called in this thread */
 } thread_data_t;
 
+void rp_init_thread();
 st_table * threads_table_create();
 thread_data_t* switch_thread(void* prof, VALUE thread_id);
 void threads_table_free(st_table *table);
+VALUE prof_thread_wrap(thread_data_t *thread);
+void prof_thread_mark(thread_data_t *thread);
+
 
 #endif //__RP_THREAD__
data/ext/ruby_prof/ruby_prof.c
CHANGED
@@ -24,7 +24,6 @@
 */
 
 #include "ruby_prof.h"
-#include <stdio.h>
 #include <assert.h>
 
 VALUE mProf;
@@ -44,14 +43,6 @@ prof_get_profile(VALUE self)
     return (prof_profile_t*)RDATA(self)->data;
 }
 
-void
-method_key(prof_method_key_t* key, VALUE klass, ID mid)
-{
-    key->klass = klass;
-    key->mid = mid;
-    key->key = (klass << 4) + (mid << 2);
-}
-
 /* support tracing ruby events from ruby-prof. useful for getting at
    what actually happens inside the ruby interpreter (and ruby-prof).
    set environment variable RUBY_PROF_TRACE to filename you want to
@@ -91,81 +82,85 @@ get_event_name(rb_event_flag_t event)
     }
 }
 
+static prof_method_t*
+create_method(rb_event_flag_t event, VALUE klass, ID mid, const char* source_file, int line)
+{
+    /* Line numbers are not accurate for c method calls */
+    if (event == RUBY_EVENT_C_CALL)
+    {
+        line = 0;
+        source_file = NULL;
+    }
+
+    return prof_method_create(klass, mid, source_file, line);
+}
+
 
 static prof_method_t*
 #ifdef RUBY_VM
-get_method(rb_event_flag_t event, VALUE klass, ID mid,
+get_method(rb_event_flag_t event, VALUE klass, ID mid, thread_data_t* thread_data)
 # else
-get_method(rb_event_flag_t event, NODE *node, VALUE klass, ID mid,
+get_method(rb_event_flag_t event, NODE *node, VALUE klass, ID mid, thread_data_t* thread_data)
 #endif
 {
     prof_method_key_t key;
     prof_method_t *method = NULL;
 
     method_key(&key, klass, mid);
-    method = method_table_lookup(method_table, &key);
+    method = method_table_lookup(thread_data->method_table, &key);
 
     if (!method)
     {
-
+        const char* source_file = rb_sourcefile();
        int line = rb_sourceline();
 
-
-
-        {
-            line = 0;
-            source_file = NULL;
-        }
+        method = create_method(event, klass, mid, source_file, line);
+        method_table_insert(thread_data->method_table, method->key, method);
 
-
-
+        /* Is this the first method added to the thread? */
+        if (!thread_data->top)
+            thread_data->top = method;
     }
     return method;
 }
 
-static void
-update_result(double total_time,
-              prof_frame_t *parent_frame,
-              prof_frame_t *frame)
-{
-    double self_time = total_time - frame->child_time - frame->wait_time;
-    prof_call_info_t *call_info = frame->call_info;
-
-    /* Update information about the current method */
-    call_info->called++;
-    call_info->total_time += total_time;
-    call_info->self_time += self_time;
-    call_info->wait_time += frame->wait_time;
-
-    /* Note where the current method was called from */
-    if (parent_frame)
-        call_info->line = parent_frame->line;
-}
-
 static prof_frame_t*
 pop_frame(prof_profile_t* profile, thread_data_t *thread_data)
 {
     prof_frame_t *frame = NULL;
     prof_frame_t* parent_frame = NULL;
+    prof_call_info_t *call_info;
+    double measurement = profile->measurer->measure();
     double total_time;
+    double self_time;
 
     frame = stack_pop(thread_data->stack); // only time it's called
+
     /* Frame can be null.  This can happen if RubProf.start is called from
        a method that exits.  And it can happen if an exception is raised
        in code that is being profiled and the stack unwinds (RubyProf is
       not notified of that by the ruby runtime. */
-    if (frame == NULL)
+    if (frame == NULL)
+        return NULL;
 
     /* Calculate the total time this method took */
-    total_time =
+    total_time = measurement - frame->start_time;
+    self_time = total_time - frame->child_time - frame->wait_time;
+
+    /* Update information about the current method */
+    call_info = frame->call_info;
+    call_info->called++;
+    call_info->total_time += total_time;
+    call_info->self_time += self_time;
+    call_info->wait_time += frame->wait_time;
 
     parent_frame = stack_peek(thread_data->stack);
    if (parent_frame)
    {
-
+        parent_frame->child_time += total_time;
+        call_info->line = parent_frame->line;
    }
 
-    update_result(total_time, parent_frame, frame); // only time it's called
    return frame;
 }
 
@@ -194,6 +189,42 @@ prof_pop_threads(prof_profile_t* profile)
     st_foreach(profile->threads_tbl, pop_frames, (st_data_t) profile);
 }
 
+/* ===========  Profiling ================= */
+#ifdef RUBY_VM
+static void
+prof_trace(prof_profile_t* profile, rb_event_flag_t event, ID mid, VALUE klass, double measurement)
+#else
+static void
+prof_trace(prof_profile_t* profile, rb_event_flag_t event, NODE *node, ID mid, VALUE klass, double measurement)
+#endif
+{
+    static VALUE last_thread_id = Qnil;
+
+    VALUE thread = rb_thread_current();
+    VALUE thread_id = rb_obj_id(thread);
+    const char* class_name = NULL;
+    const char* method_name = rb_id2name(mid);
+    const char* source_file = rb_sourcefile();
+    unsigned int source_line = rb_sourceline();
+
+    const char* event_name = get_event_name(event);
+
+    if (klass != 0)
+        klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);
+
+    class_name = rb_class2name(klass);
+
+    if (last_thread_id != thread_id)
+    {
+        fprintf(trace_file, "\n");
+    }
+
+    fprintf(trace_file, "%2u:%2ums %-8s %s:%2d %s#%s\n",
+            (unsigned int) thread_id, (unsigned int) measurement, event_name, source_file, source_line, class_name, method_name);
+    fflush(trace_file);
+    last_thread_id = thread_id;
+}
+
 #ifdef RUBY_VM
 static void
 prof_event_hook(rb_event_flag_t event, VALUE data, VALUE self, ID mid, VALUE klass)
@@ -207,11 +238,12 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
 #else
     prof_profile_t* profile = prof_get_profile(data);
 #endif
-
-
+
+    VALUE thread = Qnil;
     VALUE thread_id = Qnil;
     thread_data_t* thread_data = NULL;
     prof_frame_t *frame = NULL;
+    double measurement;
 
 #ifdef RUBY_VM
     if (event != RUBY_EVENT_C_CALL && event != RUBY_EVENT_C_RETURN) {
@@ -221,34 +253,15 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
 #endif
 
     /* Get current measurement */
-
+    measurement = profile->measurer->measure();
 
     if (trace_file != NULL)
     {
-
-
-
-
-
-        const char* method_name = rb_id2name(mid);
-        const char* source_file = rb_sourcefile();
-        unsigned int source_line = rb_sourceline();
-
-        const char* event_name = get_event_name(event);
-
-        if (klass != 0)
-            klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);
-
-        class_name = rb_class2name(klass);
-
-        if (last_thread_id != thread_id) {
-            fprintf(trace_file, "\n");
-        }
-
-        fprintf(trace_file, "%2u:%2ums %-8s %s:%2d %s#%s\n",
-                (unsigned int) thread_id, (unsigned int) profile->measurement, event_name, source_file, source_line, class_name, method_name);
-        /* fflush(trace_file); */
-        last_thread_id = thread_id;
+#ifdef RUBY_VM
+        prof_trace(profile, event, mid, klass, measurement);
+#else
+        prof_trace(profile, event, node, mid, klass, measurement);
+#endif
     }
 
     /* Special case - skip any methods from the mProf
@@ -271,6 +284,8 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
     else
         thread_data = profile->last_thread_data;
 
+    /* Get the current frame for the current thread. */
+    frame = stack_peek(thread_data->stack);
 
     switch (event) {
     case RUBY_EVENT_LINE:
@@ -279,9 +294,6 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
        a new method is called, we know what line number it was
        called from. */
 
-        /* Get the current frame for the current thread. */
-        frame = stack_peek(thread_data->stack);
-
         if (frame)
         {
             frame->line = rb_sourceline();
@@ -298,20 +310,10 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
         prof_call_info_t *call_info = NULL;
         prof_method_t *method = NULL;
 
-        /* Get the current frame for the current thread. */
-        frame = stack_peek(thread_data->stack);
-
-        /* Is this an include for a module?  If so get the actual
-           module class since we want to combine all profiling
-           results for that module. */
-
-        if (klass != 0)
-            klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);
-
 #ifdef RUBY_VM
-        method = get_method(event, klass, mid, thread_data
+        method = get_method(event, klass, mid, thread_data);
 #else
-        method = get_method(event, node, klass, mid, thread_data
+        method = get_method(event, node, klass, mid, thread_data);
 #endif
 
         if (!frame)
@@ -325,6 +327,8 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
 
         if (!call_info)
         {
+            /* This call info does not yet exist.  So create it, then add
+               it to previous callinfo's children and to the current method .*/
            call_info = prof_call_info_create(method, frame->call_info);
            call_info_table_insert(frame->call_info->call_infos, method->key, call_info);
            prof_add_call_info(method->call_infos, call_info);
@@ -334,23 +338,20 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
         /* Push a new frame onto the stack for a new c-call or ruby call (into a method) */
         frame = stack_push(thread_data->stack);
         frame->call_info = call_info;
-
-        frame->
-        frame->child_time = 0;
+        frame->call_info->depth = frame->depth;
+        frame->start_time = measurement;
         frame->line = rb_sourceline();
         break;
     }
     case RUBY_EVENT_RETURN:
     case RUBY_EVENT_C_RETURN:
     {
-
+        pop_frame(profile, thread_data);
         break;
     }
     }
 }
 
-
-/* =========== Profiling ================= */
 void
 prof_install_hook(VALUE self)
 {
@@ -365,7 +366,7 @@ prof_install_hook(VALUE self)
                           RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN
                           | RUBY_EVENT_LINE);
 
-
+    pCurrentProfile = prof_get_profile(self);
 #endif
 
 #if defined(TOGGLE_GC_STATS)
@@ -381,65 +382,53 @@ prof_remove_hook()
 #endif
 
 #ifndef RUBY_VM
-
+    pCurrentProfile = NULL;
 #endif
 
     /* Now unregister from event */
     rb_remove_event_hook(prof_event_hook);
 }
 
-
 static int
-
+collect_threads(st_data_t key, st_data_t value, st_data_t result)
 {
-
-
-
-    prof_method_t *method = (prof_method_t *) value;
-    rb_ary_push(methods, prof_method_wrap(method));
-
-    /* Wrap call info objects */
-    prof_call_infos_wrap(method->call_infos);
-
+    thread_data_t* thread_data = (thread_data_t*) value;
+    VALUE threads_array = (VALUE) result;
+    rb_ary_push(threads_array, prof_thread_wrap(thread_data));
     return ST_CONTINUE;
 }
 
+/* ======== Profile Class ====== */
 static int
-
+mark_threads(st_data_t key, st_data_t value, st_data_t result)
 {
-
-
-       However, in thread_data is the real thread id stored
-       as an int. */
-    thread_data_t* thread_data = (thread_data_t*) value;
-    VALUE threads_hash = (VALUE) result;
-
-    VALUE methods = rb_ary_new();
-
-    /* Now collect an array of all the called methods */
-    st_table* method_table = thread_data->method_table;
-    st_foreach(method_table, collect_methods, methods);
-
-    /* Store the results in the threads hash keyed on the thread id. */
-    rb_hash_aset(threads_hash, thread_data->thread_id, methods);
-
+    thread_data_t *thread = (thread_data_t *) value;
+    prof_thread_mark(thread);
     return ST_CONTINUE;
 }
 
-/* ======== Profile Class ====== */
 static void
 prof_mark(prof_profile_t *profile)
 {
-
-    rb_gc_mark(threads);
+    st_foreach(profile->threads_tbl, mark_threads, 0);
 }
 
+/* Freeing the profile creates a cascade of freeing.
+   It fress the thread table, which frees its methods,
+   which frees its call infos. */
 static void
 prof_free(prof_profile_t *profile)
 {
-
+    profile->last_thread_data = NULL;
+
+    threads_table_free(profile->threads_tbl);
+    profile->threads_tbl = NULL;
+
     st_free_table(profile->exclude_threads_tbl);
-
+    profile->exclude_threads_tbl = NULL;
+
+    xfree(profile->measurer);
+    profile->measurer = NULL;
 
     xfree(profile);
 }
@@ -450,7 +439,8 @@ prof_allocate(VALUE klass)
     VALUE result;
     prof_profile_t* profile;
     result = Data_Make_Struct(klass, prof_profile_t, prof_mark, prof_free, profile);
-    profile->
+    profile->threads_tbl = threads_table_create();
+    profile->exclude_threads_tbl = threads_table_create();
     profile->running = Qfalse;
     return result;
 }
@@ -469,9 +459,9 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
 {
     prof_profile_t* profile = prof_get_profile(self);
     VALUE mode;
-
+    prof_measure_mode_t measurer;
     VALUE exclude_threads;
-
+    int i;
 
     switch (rb_scan_args(argc, argv, "02", &mode, &exclude_threads))
     {
@@ -479,32 +469,30 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
     {
         measurer = MEASURE_WALL_TIME;
         exclude_threads = rb_ary_new();
-
+        break;
     }
     case 1:
     {
        measurer = (prof_measure_mode_t)NUM2INT(mode);
         exclude_threads = rb_ary_new();
-
+        break;
     }
     case 2:
     {
-
+        Check_Type(exclude_threads, T_ARRAY);
         measurer = (prof_measure_mode_t)NUM2INT(mode);
-
+        break;
     }
     }
 
     profile->measurer = prof_get_measurer(measurer);
-    profile->threads = rb_hash_new();
-
 
-
-
-
-
-
-
+    for (i = 0; i < RARRAY_LEN(exclude_threads); i++)
+    {
+        VALUE thread = rb_ary_entry(exclude_threads, i);
+        VALUE thread_id = rb_obj_id(thread);
+        st_insert(profile->exclude_threads_tbl, thread_id, Qtrue);
+    }
 
     return self;
 }
@@ -527,7 +515,7 @@ prof_running(VALUE self)
 static VALUE
 prof_start(VALUE self)
 {
-
+    char* trace_file_name;
     prof_profile_t* profile = prof_get_profile(self);
 
     if (profile->running == Qtrue)
@@ -537,17 +525,23 @@ prof_start(VALUE self)
 
     profile->running = Qtrue;
     profile->last_thread_data = NULL;
-
+
 
     /* open trace file if environment wants it */
     trace_file_name = getenv("RUBY_PROF_TRACE");
-    if (trace_file_name != NULL)
-
+    if (trace_file_name != NULL)
+    {
+        if (strcmp(trace_file_name, "stdout") == 0)
+        {
            trace_file = stdout;
-    }
+        }
+        else if (strcmp(trace_file_name, "stderr") == 0)
+        {
            trace_file = stderr;
-    }
-
+        }
+        else
+        {
+            trace_file = fopen(trace_file_name, "w");
        }
    }
 
@@ -609,23 +603,27 @@ prof_stop(VALUE self)
 {
     prof_profile_t* profile = prof_get_profile(self);
 
-    /* get 'now' before prof emove hook because it calls GC.disable_stats
-      which makes the call within prof_pop_threads of now return 0, which is wrong
-    */
-    profile->measurement = profile->measurer->measure();
     if (profile->running == Qfalse)
     {
         rb_raise(rb_eRuntimeError, "RubyProf.start was not yet called");
     }
 
+    prof_remove_hook();
+
     /* close trace file if open */
-    if (trace_file != NULL)
-
+    if (trace_file != NULL)
+    {
+        if (trace_file !=stderr && trace_file != stdout)
+        {
+#ifdef _MSC_VER
+            _fcloseall();
+#else
            fclose(trace_file);
+#endif
+        }
        trace_file = NULL;
    }
 
-    prof_remove_hook();
     prof_pop_threads(profile);
 
     /* Unset the last_thread_data (very important!)
@@ -633,13 +631,8 @@ prof_stop(VALUE self)
     profile->running = Qfalse;
     profile->last_thread_data = NULL;
 
-    /*
-
-    threads_table_free(profile->threads_tbl);
-    profile->threads_tbl = NULL;
-
-    /* compute minimality of call_infos */
-    rb_funcall(self, rb_intern("compute_minimality") , 0);
+    /* Post process result */
+    rb_funcall(self, rb_intern("post_process") , 0);
 
     return self;
 }
@@ -665,18 +658,17 @@ prof_profile(int argc, VALUE *argv, VALUE klass)
 }
 
 /* call-seq:
-   threads ->
+   threads -> Array of RubyProf::Thread
 
-   Returns
-   the
-   information for each method called during the threads execution.
-   That hash table is keyed on method name and contains
-   RubyProf::MethodInfo objects. */
+   Returns an array of RubyProf::Thread instances that were executed
+   while the the program was being run. */
 static VALUE
 prof_threads(VALUE self)
 {
+    VALUE result = rb_ary_new();
     prof_profile_t* profile = prof_get_profile(self);
-
+    st_foreach(profile->threads_tbl, collect_threads, result);
+    return result;
 }
 
 void Init_ruby_prof()
@@ -687,8 +679,9 @@ void Init_ruby_prof()
     rp_init_measure();
     rp_init_method_info();
     rp_init_call_info();
+    rp_init_thread();
 
-
+    cProfile = rb_define_class_under(mProf, "Profile", rb_cObject);
     rb_define_singleton_method(cProfile, "profile", prof_profile, -1);
     rb_define_alloc_func (cProfile, prof_allocate);
     rb_define_method(cProfile, "initialize", prof_initialize, -1);