ruby-prof 0.18.0-x64-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGES +500 -0
- data/LICENSE +25 -0
- data/README.rdoc +487 -0
- data/Rakefile +113 -0
- data/bin/ruby-prof +345 -0
- data/bin/ruby-prof-check-trace +45 -0
- data/examples/flat.txt +50 -0
- data/examples/graph.dot +84 -0
- data/examples/graph.html +823 -0
- data/examples/graph.txt +139 -0
- data/examples/multi.flat.txt +23 -0
- data/examples/multi.graph.html +760 -0
- data/examples/multi.grind.dat +114 -0
- data/examples/multi.stack.html +547 -0
- data/examples/stack.html +547 -0
- data/ext/ruby_prof/extconf.rb +68 -0
- data/ext/ruby_prof/rp_call_info.c +425 -0
- data/ext/ruby_prof/rp_call_info.h +53 -0
- data/ext/ruby_prof/rp_measure.c +40 -0
- data/ext/ruby_prof/rp_measure.h +45 -0
- data/ext/ruby_prof/rp_measure_allocations.c +76 -0
- data/ext/ruby_prof/rp_measure_cpu_time.c +136 -0
- data/ext/ruby_prof/rp_measure_gc_runs.c +73 -0
- data/ext/ruby_prof/rp_measure_gc_time.c +60 -0
- data/ext/ruby_prof/rp_measure_memory.c +77 -0
- data/ext/ruby_prof/rp_measure_process_time.c +71 -0
- data/ext/ruby_prof/rp_measure_wall_time.c +45 -0
- data/ext/ruby_prof/rp_method.c +630 -0
- data/ext/ruby_prof/rp_method.h +75 -0
- data/ext/ruby_prof/rp_stack.c +173 -0
- data/ext/ruby_prof/rp_stack.h +63 -0
- data/ext/ruby_prof/rp_thread.c +277 -0
- data/ext/ruby_prof/rp_thread.h +27 -0
- data/ext/ruby_prof/ruby_prof.c +794 -0
- data/ext/ruby_prof/ruby_prof.h +60 -0
- data/ext/ruby_prof/vc/ruby_prof.sln +31 -0
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +141 -0
- data/lib/2.6.3/ruby_prof.so +0 -0
- data/lib/ruby-prof.rb +68 -0
- data/lib/ruby-prof/aggregate_call_info.rb +76 -0
- data/lib/ruby-prof/assets/call_stack_printer.css.html +117 -0
- data/lib/ruby-prof/assets/call_stack_printer.js.html +385 -0
- data/lib/ruby-prof/assets/call_stack_printer.png +0 -0
- data/lib/ruby-prof/call_info.rb +115 -0
- data/lib/ruby-prof/call_info_visitor.rb +40 -0
- data/lib/ruby-prof/compatibility.rb +179 -0
- data/lib/ruby-prof/method_info.rb +121 -0
- data/lib/ruby-prof/printers/abstract_printer.rb +104 -0
- data/lib/ruby-prof/printers/call_info_printer.rb +41 -0
- data/lib/ruby-prof/printers/call_stack_printer.rb +265 -0
- data/lib/ruby-prof/printers/call_tree_printer.rb +143 -0
- data/lib/ruby-prof/printers/dot_printer.rb +132 -0
- data/lib/ruby-prof/printers/flat_printer.rb +70 -0
- data/lib/ruby-prof/printers/flat_printer_with_line_numbers.rb +83 -0
- data/lib/ruby-prof/printers/graph_html_printer.rb +249 -0
- data/lib/ruby-prof/printers/graph_printer.rb +116 -0
- data/lib/ruby-prof/printers/multi_printer.rb +84 -0
- data/lib/ruby-prof/profile.rb +26 -0
- data/lib/ruby-prof/profile/exclude_common_methods.rb +207 -0
- data/lib/ruby-prof/profile/legacy_method_elimination.rb +50 -0
- data/lib/ruby-prof/rack.rb +174 -0
- data/lib/ruby-prof/task.rb +147 -0
- data/lib/ruby-prof/thread.rb +35 -0
- data/lib/ruby-prof/version.rb +3 -0
- data/lib/unprof.rb +10 -0
- data/ruby-prof.gemspec +58 -0
- data/test/abstract_printer_test.rb +53 -0
- data/test/aggregate_test.rb +136 -0
- data/test/basic_test.rb +128 -0
- data/test/block_test.rb +74 -0
- data/test/call_info_test.rb +78 -0
- data/test/call_info_visitor_test.rb +31 -0
- data/test/duplicate_names_test.rb +32 -0
- data/test/dynamic_method_test.rb +55 -0
- data/test/enumerable_test.rb +21 -0
- data/test/exceptions_test.rb +24 -0
- data/test/exclude_methods_test.rb +146 -0
- data/test/exclude_threads_test.rb +53 -0
- data/test/fiber_test.rb +79 -0
- data/test/issue137_test.rb +63 -0
- data/test/line_number_test.rb +80 -0
- data/test/measure_allocations_test.rb +26 -0
- data/test/measure_cpu_time_test.rb +212 -0
- data/test/measure_gc_runs_test.rb +32 -0
- data/test/measure_gc_time_test.rb +36 -0
- data/test/measure_memory_test.rb +33 -0
- data/test/measure_process_time_test.rb +61 -0
- data/test/measure_wall_time_test.rb +255 -0
- data/test/method_elimination_test.rb +84 -0
- data/test/module_test.rb +45 -0
- data/test/multi_printer_test.rb +104 -0
- data/test/no_method_class_test.rb +15 -0
- data/test/pause_resume_test.rb +166 -0
- data/test/prime.rb +54 -0
- data/test/printers_test.rb +275 -0
- data/test/printing_recursive_graph_test.rb +127 -0
- data/test/rack_test.rb +157 -0
- data/test/recursive_test.rb +215 -0
- data/test/singleton_test.rb +38 -0
- data/test/stack_printer_test.rb +77 -0
- data/test/stack_test.rb +138 -0
- data/test/start_stop_test.rb +112 -0
- data/test/test_helper.rb +267 -0
- data/test/thread_test.rb +187 -0
- data/test/unique_call_path_test.rb +202 -0
- data/test/yarv_test.rb +55 -0
- metadata +199 -0
data/ext/ruby_prof/rp_thread.h
@@ -0,0 +1,27 @@
+/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
+   Please see the LICENSE file for copyright and distribution information */
+
+#ifndef __RP_THREAD__
+#define __RP_THREAD__
+
+/* Profiling information for a thread. */
+typedef struct
+{
+    VALUE object;              /* Cache to wrapped object */
+    VALUE methods;             /* Array of RubyProf::MethodInfo */
+    VALUE thread_id;           /* Thread id */
+    VALUE fiber_id;            /* Fiber id */
+    st_table* method_table;    /* Methods called in the thread */
+    prof_stack_t* stack;       /* Stack of frames */
+} thread_data_t;
+
+void rp_init_thread();
+st_table * threads_table_create();
+thread_data_t* switch_thread(void* prof, VALUE thread_id, VALUE fiber_id);
+void threads_table_free(st_table *table);
+VALUE prof_thread_wrap(thread_data_t *thread);
+void prof_thread_mark(thread_data_t *thread);
+int pause_thread(st_data_t key, st_data_t value, st_data_t data);
+int unpause_thread(st_data_t key, st_data_t value, st_data_t data);
+
+#endif //__RP_THREAD__
data/ext/ruby_prof/ruby_prof.c
@@ -0,0 +1,794 @@
+/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
+   Please see the LICENSE file for copyright and distribution information */
+
+/* ruby-prof tracks the time spent executing every method in ruby programming.
+   The main players are:
+
+     profile_t         - This represents 1 profile.
+     thread_data_t     - Stores data about a single thread.
+     prof_stack_t      - The method call stack in a particular thread
+     prof_method_t     - Profiling information about each method
+     prof_call_info_t  - Keeps track a method's callers and callees.
+
+  The final result is an instance of a profile object which has a hash table of
+  thread_data_t, keyed on the thread id. Each thread in turn has a hash table
+  of prof_method_t, keyed on the method id. A hash table is used for quick
+  look up when doing a profile. However, it is exposed to Ruby as an array.
+
+  Each prof_method_t has two hash tables, parent and children, of prof_call_info_t.
+  These objects keep track of a method's callers (who called the method) and its
+  callees (who the method called). These are keyed the method id, but once again,
+  are exposed to Ruby as arrays. Each prof_call_into_t maintains a pointer to the
+  caller or callee method, thereby making it easy to navigate through the call
+  hierarchy in ruby - which is very helpful for creating call graphs.
+*/
+
+#include "ruby_prof.h"
+#include <assert.h>
+
+VALUE mProf;
+VALUE cProfile;
+VALUE cExcludeCommonMethods;
+
+static prof_profile_t*
+prof_get_profile(VALUE self)
+{
+    /* Can't use Data_Get_Struct because that triggers the event hook
+       ending up in endless recursion. */
+    return (prof_profile_t*)RDATA(self)->data;
+}
+
+/* support tracing ruby events from ruby-prof. useful for getting at
+   what actually happens inside the ruby interpreter (and ruby-prof).
+   set environment variable RUBY_PROF_TRACE to filename you want to
+   find the trace in.
+ */
+static FILE* trace_file = NULL;
+
+/* Copied from thread.c (1.9.3) */
+static const char *
+get_event_name(rb_event_flag_t event)
+{
+    switch (event) {
+    case RUBY_EVENT_LINE:
+        return "line";
+    case RUBY_EVENT_CLASS:
+        return "class";
+    case RUBY_EVENT_END:
+        return "end";
+    case RUBY_EVENT_CALL:
+        return "call";
+    case RUBY_EVENT_RETURN:
+        return "return";
+    case RUBY_EVENT_C_CALL:
+        return "c-call";
+    case RUBY_EVENT_C_RETURN:
+        return "c-return";
+    case RUBY_EVENT_RAISE:
+        return "raise";
+    default:
+        return "unknown";
+    }
+}
+
+static int
+excludes_method(prof_method_key_t *key, prof_profile_t *profile)
+{
+    return (profile->exclude_methods_tbl &&
+            method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
+}
+
+static void
+prof_exclude_common_methods(VALUE profile)
+{
+    rb_funcall(cExcludeCommonMethods, rb_intern("apply!"), 1, profile);
+}
+
+static prof_method_t*
+create_method(rb_event_flag_t event, VALUE klass, ID mid, const char* source_file, int line)
+{
+    /* Line numbers are not accurate for c method calls */
+    if (event == RUBY_EVENT_C_CALL)
+    {
+        line = 0;
+        source_file = NULL;
+    }
+
+    return prof_method_create(klass, mid, source_file, line);
+}
+
+static prof_method_t*
+get_method(rb_event_flag_t event, VALUE klass, ID mid, thread_data_t *thread_data, prof_profile_t *profile)
+{
+    prof_method_key_t key;
+    prof_method_t *method = NULL;
+
+    /* Probe the local table. */
+    method_key(&key, klass, mid);
+    method = method_table_lookup(thread_data->method_table, &key);
+
+    if (!method)
+    {
+        /* Didn't find it; are we excluding it specifically? */
+        if (excludes_method(&key, profile)) {
+            /* We found a exclusion sentinel so propagate it into the thread's local hash table. */
+            /* TODO(nelgau): Is there a way to avoid this allocation completely so that all these
+               tables share the same exclusion method struct? The first attempt failed due to my
+               ignorance of the whims of the GC. */
+            method = prof_method_create_excluded(klass, mid);
+        } else {
+            /* This method has no entry for this thread/fiber and isn't specifically excluded. */
+            const char* source_file = rb_sourcefile();
+            int line = rb_sourceline();
+            method = create_method(event, klass, mid, source_file, line);
+        }
+
+        /* Insert the newly created method, or the exlcusion sentinel. */
+        method_table_insert(thread_data->method_table, method->key, method);
+    }
+
+    return method;
+}
+
+static int
+pop_frames(st_data_t key, st_data_t value, st_data_t data)
+{
+    thread_data_t* thread_data = (thread_data_t *) value;
+    prof_profile_t* profile = (prof_profile_t*) data;
+    VALUE thread_id = thread_data->thread_id;
+    VALUE fiber_id = thread_data->fiber_id;
+    double measurement = profile->measurer->measure();
+
+    if (!profile->last_thread_data
+        || (!profile->merge_fibers && profile->last_thread_data->fiber_id != fiber_id)
+        || profile->last_thread_data->thread_id != thread_id
+        )
+        thread_data = switch_thread(profile, thread_id, fiber_id);
+    else
+        thread_data = profile->last_thread_data;
+
+    while (prof_stack_pop(thread_data->stack, measurement));
+
+    return ST_CONTINUE;
+}
+
+static void
+prof_pop_threads(prof_profile_t* profile)
+{
+    st_foreach(profile->threads_tbl, pop_frames, (st_data_t) profile);
+}
+
+/* =========== Profiling ================= */
+static void
+prof_trace(prof_profile_t* profile, rb_event_flag_t event, ID mid, VALUE klass, double measurement)
+{
+    static VALUE last_fiber_id = Qnil;
+
+    VALUE thread = rb_thread_current();
+    VALUE thread_id = rb_obj_id(thread);
+    VALUE fiber = rb_fiber_current();
+    VALUE fiber_id = rb_obj_id(fiber);
+    const char* class_name = NULL;
+    const char* method_name = rb_id2name(mid);
+    const char* source_file = rb_sourcefile();
+    unsigned int source_line = rb_sourceline();
+
+    const char* event_name = get_event_name(event);
+
+    if (klass != 0)
+        klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);
+
+    class_name = rb_class2name(klass);
+
+    if (last_fiber_id != fiber_id)
+    {
+        fprintf(trace_file, "\n");
+    }
+
+    fprintf(trace_file, "%2lu:%2lu:%2ums %-8s %s:%2d %s#%s\n",
+            (unsigned long) thread_id, (unsigned long) fiber_id, (unsigned int) measurement*1000,
+            event_name, source_file, source_line, class_name, method_name);
+    fflush(trace_file);
+    last_fiber_id = fiber_id;
+}
+
+static void
+prof_event_hook(rb_event_flag_t event, VALUE data, VALUE self, ID mid, VALUE klass)
+{
+    prof_profile_t* profile = prof_get_profile(data);
+    VALUE thread = Qnil;
+    VALUE thread_id = Qnil;
+    VALUE fiber = Qnil;
+    VALUE fiber_id = Qnil;
+    thread_data_t* thread_data = NULL;
+    prof_frame_t *frame = NULL;
+    double measurement;
+    unsigned LONG_LONG thread_value;
+
+    /* if we don't have a valid method id, try to retrieve one */
+    if (mid == 0) {
+        rb_frame_method_id_and_class(&mid, &klass);
+    }
+
+    /* Get current measurement */
+    measurement = profile->measurer->measure();
+
+    /* Special case - skip any methods from the mProf
+       module or cProfile class since they clutter
+       the results but aren't important to them results. */
+    if (self == mProf || klass == cProfile)
+        return;
+
+    if (trace_file != NULL)
+    {
+        prof_trace(profile, event, mid, klass, measurement);
+    }
+
+    /* Get the current thread and fiber information. */
+    thread = rb_thread_current();
+    thread_id = rb_obj_id(thread);
+    fiber = rb_fiber_current();
+    fiber_id = rb_obj_id(fiber);
+
+    thread_value = NUM2ULL(thread_id);
+
+    /* Don't measure anything if the include_threads option has been specified
+       and the current thread is not in the list */
+    if (profile->include_threads_tbl && !st_lookup(profile->include_threads_tbl, thread_value, 0))
+    {
+        return;
+    }
+
+    /* Don't measure anything if the current thread is in the excluded thread table */
+    if (profile->exclude_threads_tbl && st_lookup(profile->exclude_threads_tbl, thread_value, 0))
+    {
+        return;
+    }
+
+    /* We need to switch the profiling context if we either had none before,
+       we don't merge fibers and the fiber ids differ, or the thread ids differ.
+    */
+    if (!profile->last_thread_data
+        || (!profile->merge_fibers && !rb_equal(profile->last_thread_data->fiber_id, fiber_id))
+        || !rb_equal(profile->last_thread_data->thread_id, thread_id)
+        )
+        thread_data = switch_thread(profile, thread_id, fiber_id);
+    else
+        thread_data = profile->last_thread_data;
+
+    /* Get the current frame for the current thread. */
+    frame = prof_stack_peek(thread_data->stack);
+
+    switch (event) {
+    case RUBY_EVENT_LINE:
+    {
+        /* Keep track of the current line number in this method. When
+           a new method is called, we know what line number it was
+           called from. */
+        if (frame)
+        {
+            if (prof_frame_is_real(frame)) {
+                frame->line = rb_sourceline();
+            }
+            break;
+        }
+
+        /* If we get here there was no frame, which means this is
+           the first method seen for this thread, so fall through
+           to below to create it. */
+    }
+    case RUBY_EVENT_CALL:
+    case RUBY_EVENT_C_CALL:
+    {
+        prof_frame_t *next_frame;
+        prof_call_info_t *call_info;
+        prof_method_t *method;
+
+        method = get_method(event, klass, mid, thread_data, profile);
+
+        if (method->excluded) {
+            prof_stack_pass(thread_data->stack);
+            break;
+        }
+
+        if (!frame)
+        {
+            call_info = prof_call_info_create(method, NULL);
+            prof_add_call_info(method->call_infos, call_info);
+        }
+        else
+        {
+            call_info = call_info_table_lookup(frame->call_info->call_infos, method->key);
+
+            if (!call_info)
+            {
+                /* This call info does not yet exist. So create it, then add
+                   it to previous callinfo's children and to the current method .*/
+                call_info = prof_call_info_create(method, frame->call_info);
+                call_info_table_insert(frame->call_info->call_infos, method->key, call_info);
+                prof_add_call_info(method->call_infos, call_info);
+            }
+        }
+
+        /* Push a new frame onto the stack for a new c-call or ruby call (into a method) */
+        next_frame = prof_stack_push(thread_data->stack, call_info, measurement, RTEST(profile->paused));
+        next_frame->line = rb_sourceline();
+        break;
+    }
+    case RUBY_EVENT_RETURN:
+    case RUBY_EVENT_C_RETURN:
+    {
+        prof_stack_pop(thread_data->stack, measurement);
+        break;
+    }
+    }
+}
+
+void
+prof_install_hook(VALUE self)
+{
+    rb_add_event_hook(prof_event_hook,
+                      RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
+                      RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN |
+                      RUBY_EVENT_LINE, self);
+}
+
+#ifdef HAVE_RB_REMOVE_EVENT_HOOK_WITH_DATA
+extern int
+rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data);
+#endif
+
+void
+prof_remove_hook(VALUE self)
+{
+#ifdef HAVE_RB_REMOVE_EVENT_HOOK_WITH_DATA
+    rb_remove_event_hook_with_data(prof_event_hook, self);
+#else
+    rb_remove_event_hook(prof_event_hook);
+#endif
+}
+
+static int
+collect_threads(st_data_t key, st_data_t value, st_data_t result)
+{
+    thread_data_t* thread_data = (thread_data_t*) value;
+    VALUE threads_array = (VALUE) result;
+    rb_ary_push(threads_array, prof_thread_wrap(thread_data));
+    return ST_CONTINUE;
+}
+
+/* ======== Profile Class ====== */
+static int
+mark_threads(st_data_t key, st_data_t value, st_data_t result)
+{
+    thread_data_t *thread = (thread_data_t *) value;
+    prof_thread_mark(thread);
+    return ST_CONTINUE;
+}
+
+static int
+mark_methods(st_data_t key, st_data_t value, st_data_t result)
+{
+    prof_method_t *method = (prof_method_t *) value;
+    prof_method_mark(method);
+    return ST_CONTINUE;
+}
+
+static void
+prof_mark(prof_profile_t *profile)
+{
+    st_foreach(profile->threads_tbl, mark_threads, 0);
+    st_foreach(profile->exclude_methods_tbl, mark_methods, 0);
+}
+
+/* Freeing the profile creates a cascade of freeing.
+   It fress the thread table, which frees its methods,
+   which frees its call infos. */
+static void
+prof_free(prof_profile_t *profile)
+{
+    profile->last_thread_data = NULL;
+
+    threads_table_free(profile->threads_tbl);
+    profile->threads_tbl = NULL;
+
+    if (profile->exclude_threads_tbl)
+    {
+        st_free_table(profile->exclude_threads_tbl);
+        profile->exclude_threads_tbl = NULL;
+    }
+
+    if (profile->include_threads_tbl)
+    {
+        st_free_table(profile->include_threads_tbl);
+        profile->include_threads_tbl = NULL;
+    }
+
+    /* This table owns the excluded sentinels for now. */
+    method_table_free(profile->exclude_methods_tbl);
+    profile->exclude_methods_tbl = NULL;
+
+    xfree(profile->measurer);
+    profile->measurer = NULL;
+
+    xfree(profile);
+}
+
+static VALUE
+prof_allocate(VALUE klass)
+{
+    VALUE result;
+    prof_profile_t* profile;
+    result = Data_Make_Struct(klass, prof_profile_t, prof_mark, prof_free, profile);
+    profile->threads_tbl = threads_table_create();
+    profile->exclude_threads_tbl = NULL;
+    profile->include_threads_tbl = NULL;
+    profile->running = Qfalse;
+    profile->merge_fibers = 0;
+    profile->allow_exceptions = 0;
+    profile->exclude_methods_tbl = method_table_create();
+    profile->running = Qfalse;
+    return result;
+}
+
+/* call-seq:
+   new()
+   new(options)
+
+   Returns a new profiler. Possible options for the options hash are:
+
+   measure_mode::     Measure mode. Specifies the profile measure mode.
+                      If not specified, defaults to RubyProf::WALL_TIME.
+   exclude_threads::  Threads to exclude from the profiling results.
+   include_threads::  Focus profiling on only the given threads. This will ignore
+                      all other threads.
+   merge_fibers::     Whether to merge all fibers under a given thread. This should be
+                      used when profiling for a callgrind printer.
+   allow_exceptions:: Whether to raise exceptions encountered during profiling,
+                      or to suppress all exceptions during profiling
+*/
+static VALUE
+prof_initialize(int argc, VALUE *argv, VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    VALUE mode_or_options;
+    VALUE mode = Qnil;
+    VALUE exclude_threads = Qnil;
+    VALUE include_threads = Qnil;
+    VALUE merge_fibers = Qnil;
+    VALUE exclude_common = Qnil;
+    VALUE allow_exceptions = Qnil;
+    int i;
+
+    switch (rb_scan_args(argc, argv, "02", &mode_or_options, &exclude_threads))
+    {
+    case 0:
+        break;
+    case 1:
+        if (FIXNUM_P(mode_or_options))
+        {
+            mode = mode_or_options;
+        }
+        else
+        {
+            Check_Type(mode_or_options, T_HASH);
+            mode = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("measure_mode")));
+            merge_fibers = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("merge_fibers")));
+            allow_exceptions = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("allow_exceptions")));
+            exclude_common = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_common")));
+            exclude_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_threads")));
+            include_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("include_threads")));
+        }
+        break;
+    case 2:
+        Check_Type(exclude_threads, T_ARRAY);
+        break;
+    }
+
+    if (mode == Qnil)
+    {
+        mode = INT2NUM(MEASURE_WALL_TIME);
+    }
+    else
+    {
+        Check_Type(mode, T_FIXNUM);
+    }
+    profile->measurer = prof_get_measurer(NUM2INT(mode));
+    profile->merge_fibers = merge_fibers != Qnil && merge_fibers != Qfalse;
+    profile->allow_exceptions = allow_exceptions != Qnil && allow_exceptions != Qfalse;
+
+    if (exclude_threads != Qnil)
+    {
+        Check_Type(exclude_threads, T_ARRAY);
+        assert(profile->exclude_threads_tbl == NULL);
+        profile->exclude_threads_tbl = threads_table_create();
+        for (i = 0; i < RARRAY_LEN(exclude_threads); i++)
+        {
+            VALUE thread = rb_ary_entry(exclude_threads, i);
+            VALUE thread_id = rb_obj_id(thread);
+            unsigned LONG_LONG thread_value = NUM2ULL(thread_id);
+            st_insert(profile->exclude_threads_tbl, thread_value, Qtrue);
+        }
+    }
+
+    if (include_threads != Qnil)
+    {
+        Check_Type(include_threads, T_ARRAY);
+        assert(profile->include_threads_tbl == NULL);
+        profile->include_threads_tbl = threads_table_create();
+        for (i = 0; i < RARRAY_LEN(include_threads); i++)
+        {
+            VALUE thread = rb_ary_entry(include_threads, i);
+            VALUE thread_id = rb_obj_id(thread);
+            unsigned LONG_LONG thread_value = NUM2ULL(thread_id);
+            st_insert(profile->include_threads_tbl, thread_value, Qtrue);
+        }
+    }
+
+    if (RTEST(exclude_common)) {
+        prof_exclude_common_methods(self);
+    }
+
+    return self;
+}
+
+/* call-seq:
+   paused? -> boolean
+
+   Returns whether a profile is currently paused.*/
+static VALUE
+prof_paused(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    return profile->paused;
+}
+
+/* call-seq:
+   running? -> boolean
+
+   Returns whether a profile is currently running.*/
+static VALUE
+prof_running(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    return profile->running;
+}
+
+/* call-seq:
+   start -> self
+
+   Starts recording profile data.*/
+static VALUE
+prof_start(VALUE self)
+{
+    char* trace_file_name;
+
+    prof_profile_t* profile = prof_get_profile(self);
+
+    if (profile->running == Qtrue)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
+    }
+
+    profile->running = Qtrue;
+    profile->paused = Qfalse;
+    profile->last_thread_data = NULL;
+
+
+    /* open trace file if environment wants it */
+    trace_file_name = getenv("RUBY_PROF_TRACE");
+    if (trace_file_name != NULL)
+    {
+        if (strcmp(trace_file_name, "stdout") == 0)
+        {
+            trace_file = stdout;
+        }
+        else if (strcmp(trace_file_name, "stderr") == 0)
+        {
+            trace_file = stderr;
+        }
+        else
+        {
+            trace_file = fopen(trace_file_name, "w");
+        }
+    }
+
+    prof_install_hook(self);
+    return self;
+}
+
+/* call-seq:
+   pause -> self
+
+   Pauses collecting profile data. */
+static VALUE
+prof_pause(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    if (profile->running == Qfalse)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf is not running.");
+    }
+
+    if (profile->paused == Qfalse)
+    {
+        profile->paused = Qtrue;
+        profile->measurement_at_pause_resume = profile->measurer->measure();
+        st_foreach(profile->threads_tbl, pause_thread, (st_data_t) profile);
+    }
+
+    return self;
+}
+
+/* call-seq:
+   resume -> self
+   resume(&block) -> self
+
+   Resumes recording profile data.*/
+static VALUE
+prof_resume(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    if (profile->running == Qfalse)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf is not running.");
+    }
+
+    if (profile->paused == Qtrue)
+    {
+        profile->paused = Qfalse;
+        profile->measurement_at_pause_resume = profile->measurer->measure();
+        st_foreach(profile->threads_tbl, unpause_thread, (st_data_t) profile);
+    }
+
+    return rb_block_given_p() ? rb_ensure(rb_yield, self, prof_pause, self) : self;
+}
+
+/* call-seq:
+   stop -> self
+
+   Stops collecting profile data.*/
+static VALUE
+prof_stop(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+
+    if (profile->running == Qfalse)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf.start was not yet called");
+    }
+
+    prof_remove_hook(self);
+
+    /* close trace file if open */
+    if (trace_file != NULL)
+    {
+        if (trace_file != stderr && trace_file != stdout)
+        {
+#ifdef _MSC_VER
+            _fcloseall();
+#else
+            fclose(trace_file);
+#endif
+        }
+        trace_file = NULL;
+    }
+
+    prof_pop_threads(profile);
+
+    /* Unset the last_thread_data (very important!)
+       and the threads table */
+    profile->running = profile->paused = Qfalse;
+    profile->last_thread_data = NULL;
+
+    return self;
+}
+
+/* call-seq:
+   threads -> Array of RubyProf::Thread
+
+   Returns an array of RubyProf::Thread instances that were executed
+   while the the program was being run. */
+static VALUE
+prof_threads(VALUE self)
+{
+    VALUE result = rb_ary_new();
+    prof_profile_t* profile = prof_get_profile(self);
+    st_foreach(profile->threads_tbl, collect_threads, result);
+    return result;
+}
+
+/* call-seq:
+   profile {block} -> RubyProf::Result
+
+   Profiles the specified block and returns a RubyProf::Result object. */
+static VALUE
+prof_profile_object(VALUE self)
+{
+    int result;
+    prof_profile_t* profile = prof_get_profile(self);
+
+    if (!rb_block_given_p())
+    {
+        rb_raise(rb_eArgError, "A block must be provided to the profile method.");
+    }
+
+    prof_start(self);
+    rb_protect(rb_yield, self, &result);
+    self = prof_stop(self);
+
+    if (profile->allow_exceptions && result != 0)
+    {
+        rb_jump_tag(result);
+    }
+
+    return self;
+
+}
+
+/* call-seq:
+   profile(&block) -> self
+   profile(options, &block) -> self
+
+   Profiles the specified block and returns a RubyProf::Profile
+   object. Arguments are passed to Profile initialize method.
+*/
+static VALUE
+prof_profile_class(int argc, VALUE *argv, VALUE klass)
+{
+    return prof_profile_object(rb_class_new_instance(argc, argv, cProfile));
+}
+
+static VALUE
+prof_exclude_method(VALUE self, VALUE klass, VALUE sym)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    ID mid = SYM2ID(sym);
+
+    prof_method_key_t key;
+    prof_method_t *method;
+
+    if (profile->running == Qtrue)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
+    }
+
+    method_key(&key, klass, mid);
+    method = method_table_lookup(profile->exclude_methods_tbl, &key);
+
+    if (!method) {
+        method = prof_method_create_excluded(klass, mid);
+        method_table_insert(profile->exclude_methods_tbl, method->key, method);
+    }
+
+    return self;
+}
+
+void Init_ruby_prof()
+{
+    mProf = rb_define_module("RubyProf");
+
+    rp_init_measure();
+    rp_init_method_info();
+    rp_init_call_info();
+    rp_init_thread();
+
+    cProfile = rb_define_class_under(mProf, "Profile", rb_cObject);
+    rb_define_alloc_func (cProfile, prof_allocate);
+
+    rb_define_method(cProfile, "initialize", prof_initialize, -1);
+    rb_define_method(cProfile, "start", prof_start, 0);
+    rb_define_method(cProfile, "stop", prof_stop, 0);
+    rb_define_method(cProfile, "resume", prof_resume, 0);
+    rb_define_method(cProfile, "pause", prof_pause, 0);
+    rb_define_method(cProfile, "running?", prof_running, 0);
+    rb_define_method(cProfile, "paused?", prof_paused, 0);
+    rb_define_method(cProfile, "threads", prof_threads, 0);
+
+    rb_define_singleton_method(cProfile, "profile", prof_profile_class, -1);
+    rb_define_method(cProfile, "profile", prof_profile_object, 0);
+
+    rb_define_method(cProfile, "exclude_method!", prof_exclude_method, 2);
+
+    cExcludeCommonMethods = rb_define_class_under(cProfile, "ExcludeCommonMethods", rb_cObject);
+}
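For orientation, the Ruby-level surface registered above in Init_ruby_prof and documented in the call-seq comments (Profile.new options, profile/start/stop/pause/resume, running?/paused?, threads, exclude_method!, and the RUBY_PROF_TRACE trace hook handled in prof_start) can be exercised with a short script. The sketch below is illustrative only and is not part of the packaged files; it assumes nothing beyond the methods and options shown in this diff, with RubyProf::WALL_TIME being the documented default measure mode.

# Minimal usage sketch (assumed, for illustration only).
# Optional: setting RUBY_PROF_TRACE=stdout (or a file name) before start
# makes prof_start dump raw interpreter events, per the trace_file comments.
require "ruby-prof"

profile = RubyProf::Profile.new(
  measure_mode:     RubyProf::WALL_TIME,   # default when not given
  exclude_common:   true,                  # runs ExcludeCommonMethods.apply!
  allow_exceptions: false                  # suppress exceptions raised in the block
)

profile.exclude_method!(Kernel, :sleep)    # never record this method

result = profile.profile do                # start, yield under rb_protect, stop
  1_000.times { "ruby-prof".reverse }
end

puts "running? #{result.running?}"         # false once prof_stop has run
puts "profiled #{result.threads.size} thread(s)"

Formatting the collected data is handled by the printer classes listed under data/lib/ruby-prof/printers/, which are outside the scope of these hunks.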