ruby-prof 0.18.0-x64-mingw32 → 1.1.0-x64-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGES +32 -0
- data/LICENSE +2 -2
- data/README.rdoc +1 -483
- data/Rakefile +3 -6
- data/bin/ruby-prof +65 -30
- data/ext/ruby_prof/extconf.rb +6 -38
- data/ext/ruby_prof/rp_allocation.c +279 -0
- data/ext/ruby_prof/rp_allocation.h +31 -0
- data/ext/ruby_prof/rp_call_info.c +129 -283
- data/ext/ruby_prof/rp_call_info.h +16 -34
- data/ext/ruby_prof/rp_measure_allocations.c +25 -49
- data/ext/ruby_prof/rp_measure_memory.c +21 -56
- data/ext/ruby_prof/rp_measure_process_time.c +35 -39
- data/ext/ruby_prof/rp_measure_wall_time.c +36 -19
- data/ext/ruby_prof/rp_measurement.c +230 -0
- data/ext/ruby_prof/rp_measurement.h +50 -0
- data/ext/ruby_prof/rp_method.c +389 -389
- data/ext/ruby_prof/rp_method.h +34 -39
- data/ext/ruby_prof/rp_profile.c +895 -0
- data/ext/ruby_prof/rp_profile.h +37 -0
- data/ext/ruby_prof/rp_stack.c +103 -80
- data/ext/ruby_prof/rp_stack.h +5 -12
- data/ext/ruby_prof/rp_thread.c +143 -83
- data/ext/ruby_prof/rp_thread.h +15 -6
- data/ext/ruby_prof/ruby_prof.c +11 -757
- data/ext/ruby_prof/ruby_prof.h +4 -47
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +10 -8
- data/lib/{2.6.3 → 2.6.5}/ruby_prof.so +0 -0
- data/lib/ruby-prof.rb +2 -18
- data/lib/ruby-prof/assets/call_stack_printer.html.erb +713 -0
- data/lib/ruby-prof/assets/call_stack_printer.png +0 -0
- data/lib/ruby-prof/assets/graph_printer.html.erb +356 -0
- data/lib/ruby-prof/call_info.rb +35 -93
- data/lib/ruby-prof/call_info_visitor.rb +19 -21
- data/lib/ruby-prof/compatibility.rb +37 -107
- data/lib/ruby-prof/exclude_common_methods.rb +198 -0
- data/lib/ruby-prof/measurement.rb +14 -0
- data/lib/ruby-prof/method_info.rb +52 -83
- data/lib/ruby-prof/printers/abstract_printer.rb +73 -50
- data/lib/ruby-prof/printers/call_info_printer.rb +13 -3
- data/lib/ruby-prof/printers/call_stack_printer.rb +62 -145
- data/lib/ruby-prof/printers/call_tree_printer.rb +20 -12
- data/lib/ruby-prof/printers/dot_printer.rb +5 -5
- data/lib/ruby-prof/printers/flat_printer.rb +6 -24
- data/lib/ruby-prof/printers/graph_html_printer.rb +6 -192
- data/lib/ruby-prof/printers/graph_printer.rb +13 -15
- data/lib/ruby-prof/printers/multi_printer.rb +66 -23
- data/lib/ruby-prof/profile.rb +10 -3
- data/lib/ruby-prof/rack.rb +0 -3
- data/lib/ruby-prof/thread.rb +12 -12
- data/lib/ruby-prof/version.rb +1 -1
- data/ruby-prof.gemspec +2 -2
- data/test/abstract_printer_test.rb +0 -27
- data/test/alias_test.rb +129 -0
- data/test/basic_test.rb +41 -40
- data/test/call_info_visitor_test.rb +3 -3
- data/test/dynamic_method_test.rb +0 -2
- data/test/fiber_test.rb +11 -17
- data/test/gc_test.rb +96 -0
- data/test/line_number_test.rb +120 -39
- data/test/marshal_test.rb +119 -0
- data/test/measure_allocations.rb +30 -0
- data/test/measure_allocations_test.rb +371 -12
- data/test/measure_allocations_trace_test.rb +385 -0
- data/test/measure_memory_trace_test.rb +756 -0
- data/test/measure_process_time_test.rb +821 -33
- data/test/measure_times.rb +54 -0
- data/test/measure_wall_time_test.rb +349 -145
- data/test/multi_printer_test.rb +1 -34
- data/test/parser_timings.rb +24 -0
- data/test/pause_resume_test.rb +5 -5
- data/test/prime.rb +2 -0
- data/test/printer_call_stack_test.rb +28 -0
- data/test/printer_call_tree_test.rb +31 -0
- data/test/printer_flat_test.rb +68 -0
- data/test/printer_graph_html_test.rb +60 -0
- data/test/printer_graph_test.rb +41 -0
- data/test/printers_test.rb +32 -166
- data/test/printing_recursive_graph_test.rb +26 -72
- data/test/recursive_test.rb +72 -77
- data/test/stack_printer_test.rb +2 -15
- data/test/start_stop_test.rb +22 -25
- data/test/test_helper.rb +5 -248
- data/test/thread_test.rb +11 -54
- data/test/unique_call_path_test.rb +16 -28
- data/test/yarv_test.rb +1 -0
- metadata +28 -36
- data/examples/flat.txt +0 -50
- data/examples/graph.dot +0 -84
- data/examples/graph.html +0 -823
- data/examples/graph.txt +0 -139
- data/examples/multi.flat.txt +0 -23
- data/examples/multi.graph.html +0 -760
- data/examples/multi.grind.dat +0 -114
- data/examples/multi.stack.html +0 -547
- data/examples/stack.html +0 -547
- data/ext/ruby_prof/rp_measure.c +0 -40
- data/ext/ruby_prof/rp_measure.h +0 -45
- data/ext/ruby_prof/rp_measure_cpu_time.c +0 -136
- data/ext/ruby_prof/rp_measure_gc_runs.c +0 -73
- data/ext/ruby_prof/rp_measure_gc_time.c +0 -60
- data/lib/ruby-prof/aggregate_call_info.rb +0 -76
- data/lib/ruby-prof/assets/call_stack_printer.css.html +0 -117
- data/lib/ruby-prof/assets/call_stack_printer.js.html +0 -385
- data/lib/ruby-prof/printers/flat_printer_with_line_numbers.rb +0 -83
- data/lib/ruby-prof/profile/exclude_common_methods.rb +0 -207
- data/lib/ruby-prof/profile/legacy_method_elimination.rb +0 -50
- data/test/aggregate_test.rb +0 -136
- data/test/block_test.rb +0 -74
- data/test/call_info_test.rb +0 -78
- data/test/issue137_test.rb +0 -63
- data/test/measure_cpu_time_test.rb +0 -212
- data/test/measure_gc_runs_test.rb +0 -32
- data/test/measure_gc_time_test.rb +0 -36
- data/test/measure_memory_test.rb +0 -33
- data/test/method_elimination_test.rb +0 -84
- data/test/module_test.rb +0 -45
- data/test/stack_test.rb +0 -138
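Taken together, the list above is a rewrite of the C extension around the new rp_profile.c / rp_measurement.c / rp_allocation.c split. For orientation, here is a minimal sketch of driving the 1.1.0 API from Ruby; it is illustrative only, the option names are taken from the options parsed by prof_initialize in rp_profile.c below, and RubyProf::FlatPrinter with its long-standing print(io) interface is assumed from the printers listed above.

  require 'ruby-prof'

  # Option keys below are the ones read out of the options hash by prof_initialize.
  profile = RubyProf::Profile.new(measure_mode: RubyProf::WALL_TIME,
                                  track_allocations: false,
                                  allow_exceptions: false)

  result = profile.profile do            # block form; Profile#start / Profile#stop remain available
    1_000.times { "ruby-prof".upcase }
  end

  RubyProf::FlatPrinter.new(result).print(STDOUT)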
data/ext/ruby_prof/rp_method.h
CHANGED
@@ -4,23 +4,18 @@
 #ifndef __RP_METHOD_INFO__
 #define __RP_METHOD_INFO__

-#include
+#include "ruby_prof.h"
+#include "rp_measurement.h"

-extern VALUE
-
-/* A key used to identify each method */
-typedef struct
-{
-    VALUE klass;                    /* The method's class. */
-    ID mid;                         /* The method id. */
-    st_index_t key;                 /* Cache calculated key */
-} prof_method_key_t;
+extern VALUE cRpMethodInfo;

 /* Source relation bit offsets. */
 enum {
-    kModuleIncludee =
-
-
+    kModuleIncludee = 0x1,          /* Included in module */
+    kClassSingleton = 0x2,          /* Singleton of a class */
+    kModuleSingleton = 0x4,         /* Singleton of a module */
+    kObjectSingleton = 0x8,         /* Singleton of an object */
+    kOtherSingleton = 0x10          /* Singleton of unkown object */
 };

 /* Forward declaration, see rp_call_info.h */
@@ -30,46 +25,46 @@ struct prof_call_infos_t;
 /* Excluded methods have no call_infos, source_klass, or source_file. */
 typedef struct
 {
-    /*
-
-    prof_method_key_t *key;         /* Table key */
+    st_data_t key;                  /* Table key */

-    struct prof_call_infos_t *call_infos;    /* Call infos */
     int visits;                     /* Current visits on the stack */
+    bool excluded;                  /* Exclude from profile? */

-
-
+    st_table* parent_call_infos;    /* Call infos that call this method */
+    st_table* child_call_infos;     /* Call infos that this method calls */
+    st_table* allocations_table;    /* Tracks object allocations */

-    /*
+    unsigned int klass_flags;       /* Information about the type of class */
+    VALUE klass;                    /* Resolved klass */
+    VALUE klass_name;               /* Resolved klass name for this method */
+    VALUE method_name;              /* Resolved method name for this method */

     VALUE object;                   /* Cached ruby object */
-    VALUE source_klass;             /* Source class */
-    const char *source_file;        /* Source file */
-    int line;                       /* Line number */

-
-
+    bool root;                      /* Is this a root method */
+    bool recursive;
+    VALUE source_file;              /* Source file */
+    int source_line;                /* Line number */
+
+    prof_measurement_t *measurement;
 } prof_method_t;

 void rp_init_method_info(void);

-
+st_data_t method_key(VALUE klass, VALUE msym);

-st_table *
-prof_method_t
-
+st_table *method_table_create(void);
+prof_method_t* prof_method_create_excluded(VALUE klass, VALUE msym);
+prof_method_t *method_table_lookup(st_table *table, st_data_t key);
+size_t method_table_insert(st_table *table, st_data_t key, prof_method_t *val);
 void method_table_free(st_table *table);
-
-prof_method_t*
-prof_method_t* prof_method_create_excluded(VALUE klass, ID mid);
+prof_method_t *prof_method_create(VALUE klass, VALUE msym, VALUE source_file, int source_line);
+prof_method_t *prof_method_get(VALUE self);

 VALUE prof_method_wrap(prof_method_t *result);
-void prof_method_mark(
-
-/* Setup infrastructure to use method keys as hash comparisons */
-int method_table_cmp(prof_method_key_t *key1, prof_method_key_t *key2);
-st_index_t method_table_hash(prof_method_key_t *key);
+void prof_method_mark(void *data);

-
+VALUE resolve_klass(VALUE klass, unsigned int *klass_flags);
+VALUE resolve_klass_name(VALUE klass, unsigned int* klass_flags);

-#endif
+#endif //__RP_METHOD_INFO__
data/ext/ruby_prof/rp_profile.c ADDED
@@ -0,0 +1,895 @@
+/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
+   Please see the LICENSE file for copyright and distribution information */
+
+/* Document-class: RubyProf::Profile
+
+The Profile class represents a single profiling run and provides the main API for using ruby-prof.
+After creating a Profile instance, start profiling code by calling the Profile#start method. To finish profiling,
+call Profile#stop. Once profiling is completed, the Profile instance contains the results.
+
+  profile = RubyProf::Profile.new
+  profile.start
+  ...
+  result = profile.stop
+
+Alternatively, you can use the block syntax:
+
+  profile = RubyProf::Profile.profile do
+    ...
+  end
+*/
+
+#include <assert.h>
+
+#include "rp_allocation.h"
+#include "rp_call_info.h"
+#include "rp_profile.h"
+#include "rp_method.h"
+
+VALUE cProfile;
+
+/* support tracing ruby events from ruby-prof. useful for getting at
+   what actually happens inside the ruby interpreter (and ruby-prof).
+   set environment variable RUBY_PROF_TRACE to filename you want to
+   find the trace in.
+ */
+FILE* trace_file = NULL;
+
+static int
+excludes_method(st_data_t key, prof_profile_t* profile)
+{
+    return (profile->exclude_methods_tbl &&
+            method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
+}
+
+static prof_method_t*
+create_method(prof_profile_t* profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
+{
+    prof_method_t* result = NULL;
+
+    if (excludes_method(key, profile))
+    {
+        /* We found a exclusion sentinel so propagate it into the thread's local hash table. */
+        /* TODO(nelgau): Is there a way to avoid this allocation completely so that all these
+           tables share the same exclusion method struct? The first attempt failed due to my
+           ignorance of the whims of the GC. */
+        result = prof_method_create_excluded(klass, msym);
+    }
+    else
+    {
+        result = prof_method_create(klass, msym, source_file, source_line);
+    }
+
+    /* Insert the newly created method, or the exlcusion sentinel. */
+    method_table_insert(profile->last_thread_data->method_table, result->key, result);
+
+    return result;
+}
+
+static const char *
+get_event_name(rb_event_flag_t event)
+{
+    switch (event) {
+    case RUBY_EVENT_LINE:
+        return "line";
+    case RUBY_EVENT_CLASS:
+        return "class";
+    case RUBY_EVENT_END:
+        return "end";
+    case RUBY_EVENT_CALL:
+        return "call";
+    case RUBY_EVENT_RETURN:
+        return "return";
+    case RUBY_EVENT_B_CALL:
+        return "b-call";
+    case RUBY_EVENT_B_RETURN:
+        return "b-return";
+    case RUBY_EVENT_C_CALL:
+        return "c-call";
+    case RUBY_EVENT_C_RETURN:
+        return "c-return";
+    case RUBY_EVENT_THREAD_BEGIN:
+        return "thread-begin";
+    case RUBY_EVENT_THREAD_END:
+        return "thread-end";
+    case RUBY_EVENT_FIBER_SWITCH:
+        return "fiber-switch";
+    case RUBY_EVENT_RAISE:
+        return "raise";
+    default:
+        return "unknown";
+    }
+}
+
+VALUE get_fiber(prof_profile_t* profile)
+{
+    if (profile->merge_fibers)
+        return rb_thread_current();
+    else
+        return rb_fiber_current();
+}
+/* =========== Profiling ================= */
+thread_data_t* check_fiber(prof_profile_t *profile, double measurement)
+{
+    thread_data_t* result = NULL;
+
+    /* Get the current thread and fiber information. */
+    VALUE fiber = get_fiber(profile);
+
+    /* We need to switch the profiling context if we either had none before,
+       we don't merge fibers and the fiber ids differ, or the thread ids differ. */
+    if (profile->last_thread_data->fiber != fiber)
+    {
+        result = threads_table_lookup(profile, fiber);
+        if (!result)
+        {
+            result = threads_table_insert(profile, fiber);
+        }
+        switch_thread(profile, result, measurement);
+    }
+    else
+    {
+        result = profile->last_thread_data;
+    }
+    return result;
+}
+
+static void
+prof_trace(prof_profile_t* profile, rb_trace_arg_t *trace_arg, double measurement)
+{
+    static VALUE last_fiber = Qnil;
+    VALUE fiber = get_fiber(profile);
+
+    rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
+    const char* event_name = get_event_name(event);
+
+    VALUE source_file = rb_tracearg_path(trace_arg);
+    int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
+
+#ifdef HAVE_RB_TRACEARG_CALLEE_ID
+    VALUE msym = rb_tracearg_callee_id(trace_arg);
+#else
+    VALUE msym = rb_tracearg_method_id(trace_arg);
+#endif
+
+    unsigned int klass_flags;
+    VALUE klass = rb_tracearg_defined_class(trace_arg);
+    VALUE resolved_klass = resolve_klass(klass, &klass_flags);
+    const char* class_name = "";
+
+    if (resolved_klass != Qnil)
+        class_name = rb_class2name(resolved_klass);
+
+    if (last_fiber != fiber)
+    {
+        fprintf(trace_file, "\n");
+    }
+
+    const char* method_name_char = (msym != Qnil ? rb_id2name(SYM2ID(msym)) : "");
+    const char* source_file_char = (source_file != Qnil ? StringValuePtr(source_file) : "");
+
+    fprintf(trace_file, "%2lu:%2f %-8s %s#%s %s:%2d\n",
+            FIX2ULONG(fiber), (double) measurement,
+            event_name, class_name, method_name_char, source_file_char, source_line);
+    fflush(trace_file);
+    last_fiber = fiber;
+}
+
+static void
+prof_event_hook(VALUE trace_point, void* data)
+{
+    prof_profile_t* profile = (prof_profile_t*)data;
+    thread_data_t* thread_data = NULL;
+    prof_frame_t *frame = NULL;
+    rb_trace_arg_t* trace_arg = rb_tracearg_from_tracepoint(trace_point);
+    double measurement = prof_measure(profile->measurer, trace_arg);
+    rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
+    VALUE self = rb_tracearg_self(trace_arg);
+
+    if (trace_file != NULL)
+    {
+        prof_trace(profile, trace_arg, measurement);
+    }
+
+    /* Special case - skip any methods from the mProf
+       module since they clutter the results but aren't important to them results. */
+    if (self == mProf)
+        return;
+
+    thread_data = check_fiber(profile, measurement);
+
+    if (!thread_data->trace)
+        return;
+
+    /* Get the current frame for the current thread. */
+    frame = thread_data->stack->ptr;
+
+    switch (event)
+    {
+        case RUBY_EVENT_LINE:
+        {
+            /* Keep track of the current line number in this method. When
+               a new method is called, we know what line number it was
+               called from. */
+            if (frame->call_info)
+            {
+                if (prof_frame_is_real(frame))
+                {
+                    frame->source_file = rb_tracearg_path(trace_arg);
+                    frame->source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
+                }
+                break;
+            }
+
+            /* If we get here there was no frame, which means this is
+               the first method seen for this thread, so fall through
+               to below to create it. */
+        }
+        case RUBY_EVENT_CALL:
+        case RUBY_EVENT_C_CALL:
+        {
+            prof_frame_t* next_frame;
+            prof_call_info_t* call_info;
+            prof_method_t* method;
+
+            /* Get current measurement */
+            measurement = prof_measure(profile->measurer, trace_arg);
+
+            VALUE klass = rb_tracearg_defined_class(trace_arg);
+
+            /* Special case - skip any methods from the mProf
+               module or cProfile class since they clutter
+               the results but aren't important to them results. */
+            if (klass == cProfile)
+                return;
+
+#ifdef HAVE_RB_TRACEARG_CALLEE_ID
+            VALUE msym = rb_tracearg_callee_id(trace_arg);
+#else
+            VALUE msym = rb_tracearg_method_id(trace_arg);
+#endif
+
+            st_data_t key = method_key(klass, msym);
+
+            method = method_table_lookup(thread_data->method_table, key);
+
+            if (!method)
+            {
+                VALUE source_file = (event != RUBY_EVENT_C_CALL ? rb_tracearg_path(trace_arg) : Qnil);
+                int source_line = (event != RUBY_EVENT_C_CALL ? FIX2INT(rb_tracearg_lineno(trace_arg)) : 0);
+                method = create_method(profile, key, klass, msym, source_file, source_line);
+            }
+
+            if (method->excluded)
+            {
+                prof_stack_pass(thread_data->stack);
+                break;
+            }
+
+            if (!frame->call_info)
+            {
+                method->root = true;
+                call_info = prof_call_info_create(method, NULL, method->source_file, method->source_line);
+                st_insert(method->parent_call_infos, (st_data_t)&key, (st_data_t)call_info);
+            }
+            else
+            {
+                call_info = call_info_table_lookup(method->parent_call_infos, frame->call_info->method->key);
+
+                if (!call_info)
+                {
+                    /* This call info does not yet exist. So create it, then add
+                       it to previous callinfo's children and to the current method .*/
+                    call_info = prof_call_info_create(method, frame->call_info->method, frame->source_file, frame->source_line);
+                    call_info_table_insert(method->parent_call_infos, frame->call_info->method->key, call_info);
+                    call_info_table_insert(frame->call_info->method->child_call_infos, method->key, call_info);
+                }
+            }
+
+            /* Push a new frame onto the stack for a new c-call or ruby call (into a method) */
+            next_frame = prof_stack_push(thread_data->stack, call_info, measurement, RTEST(profile->paused));
+            next_frame->source_file = method->source_file;
+            next_frame->source_line = method->source_line;
+            break;
+        }
+        case RUBY_EVENT_RETURN:
+        case RUBY_EVENT_C_RETURN:
+        {
+            /* Get current measurement */
+            prof_stack_pop(thread_data->stack, measurement);
+            break;
+        }
+        case RUBY_INTERNAL_EVENT_NEWOBJ:
+        {
+            /* We want to assign the allocations lexically, not the execution context (otherwise all allocations will
+               show up under Class#new */
+            int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
+            VALUE source_file = rb_tracearg_path(trace_arg);
+
+            prof_method_t* method = prof_find_method(thread_data->stack, source_file, source_line);
+            if (method)
+                prof_allocate_increment(method, trace_arg);
+
+            break;
+        }
+    }
+}
+
+void
+prof_install_hook(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+
+    VALUE event_tracepoint = rb_tracepoint_new(Qnil,
+        RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
+        RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN |
+        RUBY_EVENT_LINE,
+        prof_event_hook, profile);
+    rb_ary_push(profile->tracepoints, event_tracepoint);
+
+    if (profile->measurer->track_allocations)
+    {
+        VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, profile);
+        rb_ary_push(profile->tracepoints, allocation_tracepoint);
+    }
+
+    for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
+    {
+        rb_tracepoint_enable(rb_ary_entry(profile->tracepoints, i));
+    }
+}
+
+void
+prof_remove_hook(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+
+    for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
+    {
+        rb_tracepoint_disable(rb_ary_entry(profile->tracepoints, i));
+    }
+    rb_ary_clear(profile->tracepoints);
+}
+
+prof_profile_t*
+prof_get_profile(VALUE self)
+{
+    /* Can't use Data_Get_Struct because that triggers the event hook
+       ending up in endless recursion. */
+    return DATA_PTR(self);
+}
+
+static int
+collect_threads(st_data_t key, st_data_t value, st_data_t result)
+{
+    thread_data_t* thread_data = (thread_data_t*) value;
+    if (thread_data->trace)
+    {
+        VALUE threads_array = (VALUE)result;
+        rb_ary_push(threads_array, prof_thread_wrap(thread_data));
+    }
+    return ST_CONTINUE;
+}
+
+/* ======== Profile Class ====== */
+static int
+mark_threads(st_data_t key, st_data_t value, st_data_t result)
+{
+    thread_data_t *thread = (thread_data_t *) value;
+    prof_thread_mark(thread);
+    return ST_CONTINUE;
+}
+
+static int
+mark_methods(st_data_t key, st_data_t value, st_data_t result)
+{
+    prof_method_t *method = (prof_method_t *) value;
+    prof_method_mark(method);
+    return ST_CONTINUE;
+}
+
+static void
+prof_mark(prof_profile_t *profile)
+{
+    rb_gc_mark(profile->tracepoints);
+    st_foreach(profile->threads_tbl, mark_threads, 0);
+    st_foreach(profile->exclude_methods_tbl, mark_methods, 0);
+}
+
+/* Freeing the profile creates a cascade of freeing.
+   It fress the thread table, which frees its methods,
+   which frees its call infos. */
+static void
+prof_free(prof_profile_t *profile)
+{
+    profile->last_thread_data = NULL;
+
+    threads_table_free(profile->threads_tbl);
+    profile->threads_tbl = NULL;
+
+    if (profile->exclude_threads_tbl)
+    {
+        st_free_table(profile->exclude_threads_tbl);
+        profile->exclude_threads_tbl = NULL;
+    }
+
+    if (profile->include_threads_tbl)
+    {
+        st_free_table(profile->include_threads_tbl);
+        profile->include_threads_tbl = NULL;
+    }
+
+    /* This table owns the excluded sentinels for now. */
+    method_table_free(profile->exclude_methods_tbl);
+    profile->exclude_methods_tbl = NULL;
+
+    xfree(profile->measurer);
+    profile->measurer = NULL;
+
+    xfree(profile);
+}
+
+static VALUE
+prof_allocate(VALUE klass)
+{
+    VALUE result;
+    prof_profile_t* profile;
+    result = Data_Make_Struct(klass, prof_profile_t, prof_mark, prof_free, profile);
+    profile->threads_tbl = threads_table_create();
+    profile->exclude_threads_tbl = NULL;
+    profile->include_threads_tbl = NULL;
+    profile->running = Qfalse;
+    profile->allow_exceptions = false;
+    profile->merge_fibers = false;
+    profile->exclude_methods_tbl = method_table_create();
+    profile->running = Qfalse;
+    profile->tracepoints = rb_ary_new();
+    return result;
+}
+
+static void
+prof_exclude_common_methods(VALUE profile)
+{
+    rb_funcall(profile, rb_intern("exclude_common_methods!"), 0);
+}
+
+static int
+pop_frames(VALUE key, st_data_t value, st_data_t data)
+{
+    thread_data_t* thread_data = (thread_data_t*)value;
+    prof_profile_t* profile = (prof_profile_t*)data;
+    double measurement = prof_measure(profile->measurer, NULL);
+
+    if (profile->last_thread_data->fiber != thread_data->fiber)
+        switch_thread(profile, thread_data, measurement);
+
+    while (prof_stack_pop(thread_data->stack, measurement));
+
+    return ST_CONTINUE;
+}
+
+static void
+prof_stop_threads(prof_profile_t* profile)
+{
+    st_foreach(profile->threads_tbl, pop_frames, (st_data_t)profile);
+}
+
+/* call-seq:
+   new()
+   new(options)
+
+   Returns a new profiler. Possible options for the options hash are:
+
+   measure_mode::     Measure mode. Specifies the profile measure mode.
+                      If not specified, defaults to RubyProf::WALL_TIME.
+   exclude_threads::  Threads to exclude from the profiling results.
+   include_threads::  Focus profiling on only the given threads. This will ignore
+                      all other threads.
+   allow_exceptions:: Whether to raise exceptions encountered during profiling,
+                      or to suppress all exceptions during profiling
+   merge_fibers::     Whether profiling data for a given thread's fibers should all be
+                      subsumed under a single entry. Basically only useful to produce
+                      callgrind profiles.
+*/
+static VALUE
+prof_initialize(int argc, VALUE *argv, VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    VALUE mode_or_options;
+    VALUE mode = Qnil;
+    VALUE exclude_threads = Qnil;
+    VALUE include_threads = Qnil;
+    VALUE exclude_common = Qnil;
+    VALUE allow_exceptions = Qfalse;
+    VALUE merge_fibers = Qfalse;
+    VALUE track_allocations = Qfalse;
+
+    int i;
+
+    switch (rb_scan_args(argc, argv, "02", &mode_or_options, &exclude_threads))
+    {
+    case 0:
+        break;
+    case 1:
+        if (FIXNUM_P(mode_or_options))
+        {
+            mode = mode_or_options;
+        }
+        else
+        {
+            Check_Type(mode_or_options, T_HASH);
+            mode = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("measure_mode")));
+            track_allocations = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("track_allocations")));
+            allow_exceptions = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("allow_exceptions")));
+            merge_fibers = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("merge_fibers")));
+            exclude_common = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_common")));
+            exclude_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_threads")));
+            include_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("include_threads")));
+        }
+        break;
+    case 2:
+        Check_Type(exclude_threads, T_ARRAY);
+        break;
+    }
+
+    if (mode == Qnil)
+    {
+        mode = INT2NUM(MEASURE_WALL_TIME);
+    }
+    else
+    {
+        Check_Type(mode, T_FIXNUM);
+    }
+    profile->measurer = prof_get_measurer(NUM2INT(mode), track_allocations == Qtrue);
+    profile->allow_exceptions = (allow_exceptions == Qtrue);
+    profile->merge_fibers = (merge_fibers == Qtrue);
+
+    if (exclude_threads != Qnil)
+    {
+        Check_Type(exclude_threads, T_ARRAY);
+        assert(profile->exclude_threads_tbl == NULL);
+        profile->exclude_threads_tbl = threads_table_create();
+        for (i = 0; i < RARRAY_LEN(exclude_threads); i++)
+        {
+            VALUE thread = rb_ary_entry(exclude_threads, i);
+            st_insert(profile->exclude_threads_tbl, thread, Qtrue);
+        }
+    }
+
+    if (include_threads != Qnil)
+    {
+        Check_Type(include_threads, T_ARRAY);
+        assert(profile->include_threads_tbl == NULL);
+        profile->include_threads_tbl = threads_table_create();
+        for (i = 0; i < RARRAY_LEN(include_threads); i++)
+        {
+            VALUE thread = rb_ary_entry(include_threads, i);
+            st_insert(profile->include_threads_tbl, thread, Qtrue);
+        }
+    }
+
+    if (RTEST(exclude_common))
+    {
+        prof_exclude_common_methods(self);
+    }
+
+    return self;
+}
+
+/* call-seq:
+   paused? -> boolean
+
+   Returns whether a profile is currently paused.*/
+static VALUE
+prof_paused(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    return profile->paused;
+}
+
+/* call-seq:
+   running? -> boolean
+
+   Returns whether a profile is currently running.*/
+static VALUE
+prof_running(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    return profile->running;
+}
+
+/* call-seq:
+   mode -> measure_mode
+
+   Returns the measure mode used in this profile.*/
+static VALUE
+prof_profile_measure_mode(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    return INT2NUM(profile->measurer->mode);
+}
+
+/* call-seq:
+   track_allocations -> boolean
+
+   Returns if object allocations were tracked in this profile.*/
+static VALUE
+prof_profile_track_allocations(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    return profile->measurer->track_allocations ? Qtrue : Qfalse;
+}
+
+/* call-seq:
+   start -> self
+
+   Starts recording profile data.*/
+static VALUE
+prof_start(VALUE self)
+{
+    char* trace_file_name;
+
+    prof_profile_t* profile = prof_get_profile(self);
+
+    if (profile->running == Qtrue)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
+    }
+
+    profile->running = Qtrue;
+    profile->paused = Qfalse;
+    profile->last_thread_data = threads_table_insert(profile, get_fiber(profile));
+
+    /* open trace file if environment wants it */
+    trace_file_name = getenv("RUBY_PROF_TRACE");
+    if (trace_file_name != NULL)
+    {
+        if (strcmp(trace_file_name, "stdout") == 0)
+        {
+            trace_file = stdout;
+        }
+        else if (strcmp(trace_file_name, "stderr") == 0)
+        {
+            trace_file = stderr;
+        }
+        else
+        {
+            trace_file = fopen(trace_file_name, "w");
+        }
+    }
+
+    prof_install_hook(self);
+    return self;
+}
+
+/* call-seq:
+   pause -> self
+
+   Pauses collecting profile data. */
+static VALUE
+prof_pause(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    if (profile->running == Qfalse)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf is not running.");
+    }
+
+    if (profile->paused == Qfalse)
+    {
+        profile->paused = Qtrue;
+        profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
+        st_foreach(profile->threads_tbl, pause_thread, (st_data_t) profile);
+    }
+
+    return self;
+}
+
+/* call-seq:
+   resume -> self
+   resume(&block) -> self
+
+   Resumes recording profile data.*/
+static VALUE
+prof_resume(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    if (profile->running == Qfalse)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf is not running.");
+    }
+
+    if (profile->paused == Qtrue)
+    {
+        profile->paused = Qfalse;
+        profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
+        st_foreach(profile->threads_tbl, unpause_thread, (st_data_t) profile);
+    }
+
+    return rb_block_given_p() ? rb_ensure(rb_yield, self, prof_pause, self) : self;
+}
+
+/* call-seq:
+   stop -> self
+
+   Stops collecting profile data.*/
+static VALUE
+prof_stop(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+
+    if (profile->running == Qfalse)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf.start was not yet called");
+    }
+
+    prof_remove_hook(self);
+
+    /* close trace file if open */
+    if (trace_file != NULL)
+    {
+        if (trace_file !=stderr && trace_file != stdout)
+        {
+#ifdef _MSC_VER
+            _fcloseall();
+#else
+            fclose(trace_file);
+#endif
+        }
+        trace_file = NULL;
+    }
+
+    prof_stop_threads(profile);
+
+    /* Unset the last_thread_data (very important!)
+       and the threads table */
+    profile->running = profile->paused = Qfalse;
+    profile->last_thread_data = NULL;
+
+    return self;
+}
+
+/* call-seq:
+   threads -> Array of RubyProf::Thread
+
+   Returns an array of RubyProf::Thread instances that were profiled. */
+static VALUE
+prof_threads(VALUE self)
+{
+    VALUE result = rb_ary_new();
+    prof_profile_t* profile = prof_get_profile(self);
+    st_foreach(profile->threads_tbl, collect_threads, result);
+    return result;
+}
+
+/* Document-method: RubyProf::Profile#Profile
+   call-seq:
+   profile(&block) -> self
+
+   Profiles the specified block.
+
+     profile = RubyProf::Profile.new
+     profile.profile do
+       ..
+     end
+*/
+static VALUE
+prof_profile_object(VALUE self)
+{
+    int result;
+    prof_profile_t* profile = prof_get_profile(self);
+
+    if (!rb_block_given_p())
+    {
+        rb_raise(rb_eArgError, "A block must be provided to the profile method.");
+    }
+
+    prof_start(self);
+    rb_protect(rb_yield, self, &result);
+    self = prof_stop(self);
+
+    if (profile->allow_exceptions && result != 0)
+    {
+        rb_jump_tag(result);
+    }
+
+    return self;
+
+}
+
+/* Document-method: RubyProf::Profile::Profile
+   call-seq:
+   profile(&block) -> RubyProf::Profile
+   profile(options, &block) -> RubyProf::Profile
+
+   Profiles the specified block and returns a RubyProf::Profile
+   object. Arguments are passed to Profile initialize method.
+
+     profile = RubyProf::Profile.profile do
+       ..
+     end
+*/
+static VALUE
+prof_profile_class(int argc, VALUE *argv, VALUE klass)
+{
+    return prof_profile_object(rb_class_new_instance(argc, argv, cProfile));
+}
+
+/* call-seq:
+   exclude_method!(module, method_name) -> self
+
+   Excludes the method from profiling results.
+*/
+static VALUE
+prof_exclude_method(VALUE self, VALUE klass, VALUE msym)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+
+    st_data_t key = method_key(klass, msym);
+    prof_method_t *method;
+
+    if (profile->running == Qtrue)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
+    }
+
+    method = method_table_lookup(profile->exclude_methods_tbl, key);
+
+    if (!method)
+    {
+        method = prof_method_create_excluded(klass, msym);
+        method_table_insert(profile->exclude_methods_tbl, method->key, method);
+    }
+
+    return self;
+}
+
+/* :nodoc: */
+VALUE prof_profile_dump(VALUE self)
+{
+    VALUE result = rb_hash_new();
+    rb_hash_aset(result, ID2SYM(rb_intern("threads")), prof_threads(self));
+    return result;
+}
+
+/* :nodoc: */
+VALUE prof_profile_load(VALUE self, VALUE data)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+
+    VALUE threads = rb_hash_aref(data, ID2SYM(rb_intern("threads")));
+    for (int i = 0; i < rb_array_len(threads); i++)
+    {
+        VALUE thread = rb_ary_entry(threads, i);
+        thread_data_t* thread_data = DATA_PTR(thread);
+        st_insert(profile->threads_tbl, (st_data_t)thread_data->fiber_id, (st_data_t)thread_data);
+    }
+
+    return data;
+}
+
+void rp_init_profile(void)
+{
+    mProf = rb_define_module("RubyProf");
+
+    cProfile = rb_define_class_under(mProf, "Profile", rb_cObject);
+    rb_define_alloc_func(cProfile, prof_allocate);
+
+    rb_define_singleton_method(cProfile, "profile", prof_profile_class, -1);
+    rb_define_method(cProfile, "initialize", prof_initialize, -1);
+    rb_define_method(cProfile, "start", prof_start, 0);
+    rb_define_method(cProfile, "stop", prof_stop, 0);
+    rb_define_method(cProfile, "resume", prof_resume, 0);
+    rb_define_method(cProfile, "pause", prof_pause, 0);
+    rb_define_method(cProfile, "running?", prof_running, 0);
+    rb_define_method(cProfile, "paused?", prof_paused, 0);
+    rb_define_method(cProfile, "threads", prof_threads, 0);
+    rb_define_method(cProfile, "exclude_method!", prof_exclude_method, 2);
+    rb_define_method(cProfile, "profile", prof_profile_object, 0);
+
+    rb_define_method(cProfile, "measure_mode", prof_profile_measure_mode, 0);
+    rb_define_method(cProfile, "track_allocations?", prof_profile_track_allocations, 0);
+
+    rb_define_method(cProfile, "_dump_data", prof_profile_dump, 0);
+    rb_define_method(cProfile, "_load_data", prof_profile_load, 1);
+}
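The pause/resume, exclude_method!, and _dump_data/_load_data entry points registered in rp_init_profile above are easiest to see from the Ruby side. A hedged sketch follows; the workload and variable names are illustrative, and the Marshal round-trip is the feature exercised by test/marshal_test.rb in the file list.

  require 'ruby-prof'

  profile = RubyProf::Profile.new
  profile.exclude_method!(Integer, :times)   # must be set up before start; raises once running

  profile.start
  profile.pause                              # setup below is not measured
  iterations = 1_000
  profile.resume
  iterations.times { "ruby-prof".reverse }
  profile.stop

  # _dump_data/_load_data give Marshal support for saving a finished profile.
  reloaded = Marshal.load(Marshal.dump(profile))
  puts reloaded.threads.length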