ruby-prof 0.11.0.rc3-x86-mingw32 → 0.11.2-x86-mingw32
- data/CHANGES +28 -0
- data/Rakefile +1 -0
- data/bin/ruby-prof +57 -0
- data/ext/ruby_prof/rp_measure_allocations.c +9 -30
- data/ext/ruby_prof/rp_measure_gc_runs.c +11 -33
- data/ext/ruby_prof/rp_measure_gc_time.c +7 -19
- data/ext/ruby_prof/rp_measure_memory.c +20 -27
- data/ext/ruby_prof/rp_stack.c +17 -1
- data/ext/ruby_prof/rp_stack.h +8 -0
- data/ext/ruby_prof/ruby_prof.c +79 -16
- data/ext/ruby_prof/ruby_prof.h +2 -0
- data/ext/ruby_prof/vc/ruby_prof_18.vcxproj +3 -0
- data/ext/ruby_prof/version.h +2 -2
- data/lib/1.8/ruby_prof.so +0 -0
- data/lib/1.9/ruby_prof.so +0 -0
- data/lib/ruby-prof/compatibility.rb +43 -23
- data/lib/ruby-prof/printers/flat_printer.rb +1 -1
- data/lib/ruby-prof/printers/graph_html_printer.rb +1 -1
- data/lib/ruby-prof/printers/graph_printer.rb +1 -1
- data/ruby-prof.gemspec +1 -2
- data/test/basic_test.rb +55 -1
- data/test/line_number_test.rb +4 -6
- data/test/measure_cpu_time_test.rb +0 -3
- data/test/multi_printer_test.rb +0 -1
- data/test/pause_resume_test.rb +61 -0
- data/test/pause_test.rb +57 -0
- data/test/prime.rb +1 -1
- data/test/prime_test.rb +1 -1
- data/test/recursive_test.rb +9 -11
- data/test/test_suite.rb +2 -2
- data/test/thread_test.rb +5 -5
- data/test/unique_call_path_test.rb +0 -1
- metadata +8 -7
- data/test/summarize_test.rb +0 -48
data/CHANGES
CHANGED
@@ -1,3 +1,31 @@
+0.11.2 (2012-05-06)
+======================
+* Fix compile issue with BOOL. Should be _Bool for C99.
+
+
+0.11.1 (2012-05-06)
+======================
+* Added option --exclude-common-callbacks, plus exclude #map and #inject in common cycles (Vasily Fedoseyev)
+* Add option --exclude-common-cycles to exclude common iterators (Vasily Fedoseyev)
+* Allow method elimination from command line via '-x' and '-X' keys (Vasily Fedoseyev)
+
+
+0.11.0 (2012-05-05)
+======================
+* Fix pause/resume so it actually works and add tests (David Barri)
+* Resume now returns the result of the block when given (David Barri)
+* Make recursive method explanation more clear (Charlie Savage)
+* Fix ruby warnings (Charlie Savage)
+* Toggle GC.enable_stats when profiling for memory to get the data (Vasily Fedoseyev)
+* Fix patched ruby support and remove some warnings (Vasily Fedoseyev)
+* Fix tests on 1.8.7 (rogerdpack)
+
+
+0.11.0.rc3 (2012-03-26)
+======================
+* Include missing files in gemspec (Charlie Savage).
+
+
 0.11.0.rc2 (2012-03-25)
 ======================
 * Lots of improvements to Rack handler - this can be used to profile requests
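The 0.11.0 pause/resume entries are easiest to see in code. A minimal usage sketch, assuming the standard RubyProf module-level API (the profiled work here is a stand-in):

    require 'ruby-prof'

    RubyProf.start
    100.times { "a" * 100 }          # measured
    RubyProf.pause
    sleep 1                          # not measured: accumulated as dead time
    value = RubyProf.resume { 42 }   # block runs measured, profiler re-pauses afterwards,
                                     # and the block's result (42) is returned
    result = RubyProf.stop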
data/Rakefile
CHANGED
data/bin/ruby-prof
CHANGED
@@ -172,6 +172,59 @@ opts = OptionParser.new do |opts|
     options.exec ||= []
     options.exec << code
   end
+
+  opts.on('-x regexp', '--exclude regexp', 'exclude methods by regexp (see method elimination)') do |meth|
+    options.eliminate_methods ||= []
+    options.eliminate_methods << Regexp.new(meth)
+  end
+
+  opts.on('-X file', '--exclude-file file', 'exclude methods by regexp listed in file (see method elimination)') do |file|
+    options.eliminate_methods_files ||= []
+    options.eliminate_methods_files << file
+  end
+
+  opts.on('--exclude-common-cycles', 'make common iterators like Integer#times appear inlined') do |meth|
+    options.eliminate_methods ||= []
+    options.eliminate_methods += %w{
+      Integer#times
+      Integer#upto
+      Integer#downto
+      Enumerator#each
+      Enumerator#each_with_index
+      Enumerator#each_with_object
+
+      Array#each
+      Array#each_index
+      Array#reverse_each
+      Array#map
+
+      Hash#each
+      Hash#each_pair
+      Hash#each_key
+      Hash#each_value
+
+      Range#each
+      Enumerable#each_cons
+      Enumerable#each_entry
+      Enumerable#each_slice
+      Enumerable#each_with_index
+      Enumerable#each_with_object
+      Enumerable#reverse_each
+      Enumerable#inject
+      Enumerable#collect
+      Enumerable#reduce
+    }
+    # TODO: maybe the whole Enumerable module should be excluded via 'Enumerable#.*'; we need feedback on use cases.
+  end
+
+  opts.on('--exclude-common-callbacks', 'make common callback invocations like Method#call appear inlined so you can see call origins in graph') do |meth|
+    options.eliminate_methods ||= []
+    options.eliminate_methods += %w{
+      Method#call
+      Proc#call
+      ActiveSupport::Callbacks::ClassMethods#__run_callback
+    }
+  end
 end
 
 begin
@@ -201,6 +254,10 @@ at_exit {
   # Stop profiling
   result = RubyProf.stop
 
+  # Eliminate unwanted methods from call graph
+  result.eliminate_methods! options.eliminate_methods if options.eliminate_methods
+  options.eliminate_methods_files.each{|f| result.eliminate_methods!(f)} if options.eliminate_methods_files
+
   # Create a printer
   printer = options.printer.new(result)
   printer_options = {:min_percent => options.min_percent, :sort_method => options.sort_method}
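The new CLI flags feed the result object's eliminate_methods! call shown in the at_exit handler above. A hedged sketch of the equivalent in-process usage (the patterns and profiled block are illustrative; eliminate_methods! accepting an array of regexps follows from how the handler passes options.eliminate_methods):

    require 'ruby-prof'

    result = RubyProf.profile do
      (1..1000).inject(0) { |sum, i| sum + i }
    end

    # What -x 'Integer#times' and --exclude-common-cycles do under the hood:
    result.eliminate_methods!([/Integer#times/, /Enumerable#inject/])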
data/ext/ruby_prof/rp_measure_allocations.c
CHANGED
@@ -8,50 +8,29 @@
 static VALUE cMeasureAllocations;
 
 #if defined(HAVE_RB_OS_ALLOCATED_OBJECTS)
-
+unsigned LONG_LONG rb_os_allocated_objects();
+#endif
+
+#if defined(HAVE_RB_GC_MALLOC_ALLOCATIONS)
+unsigned LONG_LONG rb_gc_malloc_allocations();
+#endif
 
 static double
 measure_allocations()
 {
+#if defined(HAVE_RB_OS_ALLOCATED_OBJECTS)
+#define MEASURE_ALLOCATIONS_ENABLED Qtrue
     return rb_os_allocated_objects();
-}
-
-/* Document-method: prof_measure_allocations
-   call-seq:
-   measure_allocations -> int
-
-   Returns the total number of object allocations since Ruby started.*/
-static VALUE
-prof_measure_allocations(VALUE self)
-{
-#if defined(HAVE_LONG_LONG)
-    return ULL2NUM(rb_os_allocated_objects());
-#else
-    return ULONG2NUM(rb_os_allocated_objects());
-#endif
-}
 
 #elif defined(HAVE_RB_GC_MALLOC_ALLOCATIONS)
-
 #define MEASURE_ALLOCATIONS_ENABLED Qtrue
-
-static double
-measure_allocations()
-{
     return rb_gc_malloc_allocations();
-}
 
 #else
-
 #define MEASURE_ALLOCATIONS_ENABLED Qfalse
-
-static double
-measure_allocations()
-{
     return 0;
-}
-
 #endif
+}
 
 
 prof_measurer_t* prof_measurer_allocations()
data/ext/ruby_prof/rp_measure_gc_runs.c
CHANGED
@@ -8,53 +8,31 @@
 static VALUE cMeasureGcRuns;
 
 #if defined(HAVE_RB_GC_COLLECTIONS)
+VALUE rb_gc_collections(void);
+#endif
+
+#if defined(HAVE_RB_GC_HEAP_INFO)
+VALUE rb_gc_heap_info(void);
+#endif
 
-#define MEASURE_GC_RUNS_ENABLED Qtrue
 
 static double
 measure_gc_runs()
 {
-
-
-
-/* call-seq:
-   gc_runs -> Integer
-
-   Returns the total number of garbage collections.*/
-static VALUE
-prof_measure_gc_runs(VALUE self)
-{
-    return rb_gc_collections();
-}
+#if defined(HAVE_RB_GC_COLLECTIONS)
+#define MEASURE_GC_RUNS_ENABLED Qtrue
+    return NUM2INT(rb_gc_collections());
 
 #elif defined(HAVE_RB_GC_HEAP_INFO)
-
 #define MEASURE_GC_RUNS_ENABLED Qtrue
-
-static double
-measure_gc_runs()
-{
     VALUE h = rb_gc_heap_info();
     return NUM2UINT(rb_hash_aref(h, rb_str_new2("num_gc_passes")));
-}
-
-static VALUE
-prof_measure_gc_runs(VALUE self)
-{
-    VALUE h = rb_gc_heap_info();
-    return rb_hash_aref(h, rb_str_new2("num_gc_passes"));
-}
-
-#else
 
+#else
 #define MEASURE_GC_RUNS_ENABLED Qfalse
-
-static double
-measure_gc_runs()
-{
     return 0;
-}
 #endif
+}
 
 prof_measurer_t* prof_measurer_gc_runs()
 {
data/ext/ruby_prof/rp_measure_gc_time.c
CHANGED
@@ -8,40 +8,27 @@
 static VALUE cMeasureGcTimes;
 
 #if defined(HAVE_RB_GC_TIME)
+VALUE rb_gc_time();
+#endif
 
-#define MEASURE_GC_TIME_ENABLED Qtrue
 
 static double
 measure_gc_time()
 {
-
+#if defined(HAVE_RB_GC_TIME)
+#define MEASURE_GC_TIME_ENABLED Qtrue
+    const int conversion = 1000000;
 #if HAVE_LONG_LONG
     return NUM2LL(rb_gc_time() / conversion);
 #else
     return NUM2LONG(rb_gc_time() / conversion);
 #endif
-}
-
-/* call-seq:
-   gc_time -> Integer
-
-   Returns the time spent doing garbage collections in microseconds.*/
-static VALUE
-prof_measure_gc_time(VALUE self)
-{
-    return rb_gc_time();
-}
 
 #else
-
 #define MEASURE_GC_TIME_ENABLED Qfalse
-
-static double
-measure_gc_time()
-{
     return 0;
-}
 #endif
+}
 
 prof_measurer_t* prof_measurer_gc_time()
 {
@@ -63,6 +50,7 @@ prof_measure_gc_time(VALUE self)
     return ULONG2NUM(measure_gc_time());
 #endif
 }
+
 void rp_init_measure_gc_time()
 {
     rb_define_const(mProf, "GC_TIME", INT2NUM(MEASURE_GC_TIME));
data/ext/ruby_prof/rp_measure_memory.c
CHANGED
@@ -7,52 +7,45 @@
 
 static VALUE cMeasureMemory;
 
+
 #if defined(HAVE_RB_GC_ALLOCATED_SIZE)
-
-#define MEASURE_MEMORY_ENABLED Qtrue
+VALUE rb_gc_allocated_size();
+#endif
+
+#if defined(HAVE_RB_GC_MALLOC_ALLOCATED_SIZE)
+size_t rb_gc_malloc_allocated_size();
+#endif
+
+#if defined(HAVE_RB_HEAP_TOTAL_MEM)
+//FIXME: did not find the patch to check prototype, assuming it to return size_t
+size_t rb_heap_total_mem();
+#endif
 
 static double
 measure_memory()
 {
+#if defined(HAVE_RB_GC_ALLOCATED_SIZE)
+#define TOGGLE_GC_STATS 1
+#define MEASURE_MEMORY_ENABLED Qtrue
 #if defined(HAVE_LONG_LONG)
-    return NUM2LL(rb_gc_allocated_size() / 1024);
+    return NUM2LL(rb_gc_allocated_size()) / 1024.0;
 #else
-    return NUM2ULONG(rb_gc_allocated_size() / 1024);
+    return NUM2ULONG(rb_gc_allocated_size()) / 1024.0;
 #endif
-}
 
 #elif defined(HAVE_RB_GC_MALLOC_ALLOCATED_SIZE)
-
 #define MEASURE_MEMORY_ENABLED Qtrue
-
-static double
-measure_memory()
-{
-    return rb_gc_malloc_allocated_size() / 1024;
-}
-
+    return rb_gc_malloc_allocated_size() / 1024.0;
 
 #elif defined(HAVE_RB_HEAP_TOTAL_MEM)
-
 #define MEASURE_MEMORY_ENABLED Qtrue
-
-static double
-measure_memory()
-{
-    return rb_heap_total_mem() / 1024;
-}
+    return rb_heap_total_mem() / 1024.0;
 
 #else
-
 #define MEASURE_MEMORY_ENABLED Qfalse
-
-static double
-measure_memory()
-{
     return 0;
-}
-
 #endif
+}
 
 prof_measurer_t* prof_measurer_memory()
 {
data/ext/ruby_prof/rp_stack.c
CHANGED
@@ -5,6 +5,22 @@
 
 #define INITIAL_STACK_SIZE 8
 
+void
+frame_pause(prof_frame_t *frame, double current_measurement)
+{
+    if (frame && frame_is_unpaused(frame))
+        frame->pause_time = current_measurement;
+}
+
+void
+frame_unpause(prof_frame_t *frame, double current_measurement)
+{
+    if (frame && frame_is_paused(frame)) {
+        frame->dead_time += (current_measurement - frame->pause_time);
+        frame->pause_time = -1;
+    }
+}
+
 
 /* Creates a stack of prof_frame_t to keep track
    of timings for active methods. */
@@ -48,7 +64,7 @@ stack_push(prof_stack_t *stack)
     result->child_time = 0;
     result->switch_time = 0;
     result->wait_time = 0;
-    result->depth = (stack->ptr - stack->start);
+    result->depth = (int)(stack->ptr - stack->start); // shortening of 64 bit into 32
 
     // Increment the stack ptr for next time
     stack->ptr++;
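The frame_pause/frame_unpause pair above is a small state machine: pause_time < 0 means the frame is running, otherwise it records when the pause began, and dead_time accumulates everything between pause and unpause so pop_frame can subtract it. A throwaway Ruby model of the same arithmetic (illustration only, not part of the gem):

    Frame = Struct.new(:start_time, :pause_time, :dead_time) do
      def pause(now)
        self.pause_time = now if pause_time < 0   # no-op if already paused
      end

      def unpause(now)
        return if pause_time < 0                  # no-op if not paused
        self.dead_time += now - pause_time
        self.pause_time = -1
      end

      def total_time(now)
        now - start_time - dead_time              # mirrors pop_frame's calculation
      end
    end

    f = Frame.new(0.0, -1, 0.0)
    f.pause(2.0)
    f.unpause(5.0)
    f.total_time(10.0)  # => 7.0 (the 3s pause window is ignored)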
data/ext/ruby_prof/rp_stack.h
CHANGED
@@ -22,10 +22,18 @@ typedef struct
     double switch_time; /* Time at switch to different thread */
     double wait_time;
     double child_time;
+    double pause_time; // Time pause() was initiated
+    double dead_time; // Time to ignore (i.e. total amount of time between pause/resume blocks)
     int depth;
     unsigned int line;
 } prof_frame_t;
 
+#define frame_is_paused(f) (f->pause_time >= 0)
+#define frame_is_unpaused(f) (f->pause_time < 0)
+void frame_pause(prof_frame_t*, double current_measurement);
+void frame_unpause(prof_frame_t*, double current_measurement);
+
+
 /* Current stack of active methods.*/
 typedef struct
 {
data/ext/ruby_prof/ruby_prof.c
CHANGED
@@ -133,6 +133,11 @@ pop_frame(prof_profile_t* profile, thread_data_t *thread_data)
     double measurement = profile->measurer->measure();
     double total_time;
     double self_time;
+#ifdef _MSC_VER
+    BOOL frame_paused;
+#else
+    _Bool frame_paused;
+#endif
 
     frame = stack_pop(thread_data->stack); // only time it's called
 
@@ -140,11 +145,13 @@ pop_frame(prof_profile_t* profile, thread_data_t *thread_data)
        a method that exits. And it can happen if an exception is raised
        in code that is being profiled and the stack unwinds (RubyProf is
        not notified of that by the ruby runtime. */
-    if (frame == NULL)
+    if (frame == NULL)
         return NULL;
 
     /* Calculate the total time this method took */
-    total_time = measurement - frame->start_time;
+    frame_paused = frame_is_paused(frame);
+    frame_unpause(frame, measurement);
+    total_time = measurement - frame->start_time - frame->dead_time;
     self_time = total_time - frame->child_time - frame->wait_time;
 
     /* Update information about the current method */
@@ -158,6 +165,12 @@ pop_frame(prof_profile_t* profile, thread_data_t *thread_data)
     if (parent_frame)
     {
         parent_frame->child_time += total_time;
+        parent_frame->dead_time += frame->dead_time;
+
+        // Repause parent if currently paused
+        if (frame_paused)
+            frame_pause(parent_frame, measurement);
+
         call_info->line = parent_frame->line;
     }
 
@@ -333,6 +346,11 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
         call_info_table_insert(frame->call_info->call_infos, method->key, call_info);
         prof_add_call_info(method->call_infos, call_info);
     }
+
+    // Unpause the parent frame. If currently paused then:
+    // 1) The child frame will begin paused.
+    // 2) The parent will inherit the child's dead time.
+    frame_unpause(frame, measurement);
 }
 
 /* Push a new frame onto the stack for a new c-call or ruby call (into a method) */
@@ -340,6 +358,8 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
     frame->call_info = call_info;
     frame->call_info->depth = frame->depth;
     frame->start_time = measurement;
+    frame->pause_time = profile->paused == Qtrue ? measurement : -1;
+    frame->dead_time = 0;
     frame->line = rb_sourceline();
     break;
 }
@@ -459,7 +479,7 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
 {
     prof_profile_t* profile = prof_get_profile(self);
     VALUE mode;
-    prof_measure_mode_t measurer;
+    prof_measure_mode_t measurer = MEASURE_WALL_TIME;
     VALUE exclude_threads;
     int i;
 
@@ -497,6 +517,39 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
     return self;
 }
 
+static int pause_thread(st_data_t key, st_data_t value, st_data_t data)
+{
+    thread_data_t* thread_data = (thread_data_t *) value;
+    prof_profile_t* profile = (prof_profile_t*) data;
+
+    prof_frame_t* frame = stack_peek(thread_data->stack);
+    frame_pause(frame, profile->measurement_at_pause_resume);
+
+    return ST_CONTINUE;
+}
+
+static int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
+{
+    thread_data_t* thread_data = (thread_data_t *) value;
+    prof_profile_t* profile = (prof_profile_t*) data;
+
+    prof_frame_t* frame = stack_peek(thread_data->stack);
+    frame_unpause(frame, profile->measurement_at_pause_resume);
+
+    return ST_CONTINUE;
+}
+
+/* call-seq:
+   paused? -> boolean
+
+   Returns whether a profile is currently paused.*/
+static VALUE
+prof_paused(VALUE self)
+{
+    prof_profile_t* profile = prof_get_profile(self);
+    return profile->paused;
+}
+
 /* call-seq:
    running? -> boolean
 
@@ -516,6 +569,7 @@ static VALUE
 prof_start(VALUE self)
 {
     char* trace_file_name;
+
     prof_profile_t* profile = prof_get_profile(self);
 
     if (profile->running == Qtrue)
@@ -523,7 +577,15 @@ prof_start(VALUE self)
         rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
     }
 
+#ifndef RUBY_VM
+    if (pCurrentProfile != NULL)
+    {
+        rb_raise(rb_eRuntimeError, "Only one profile can run at a time on Ruby 1.8.*");
+    }
+#endif
+
     profile->running = Qtrue;
+    profile->paused = Qfalse;
     profile->last_thread_data = NULL;
 
 
@@ -562,9 +624,13 @@ prof_pause(VALUE self)
         rb_raise(rb_eRuntimeError, "RubyProf is not running.");
     }
 
-    profile->running = Qfalse;
+    if (profile->paused == Qfalse)
+    {
+        profile->paused = Qtrue;
+        profile->measurement_at_pause_resume = profile->measurer->measure();
+        st_foreach(profile->threads_tbl, pause_thread, (st_data_t) profile);
+    }
 
-    prof_remove_hook();
     return self;
 }
 
@@ -578,20 +644,17 @@ prof_resume(VALUE self)
     prof_profile_t* profile = prof_get_profile(self);
     if (profile->running == Qfalse)
     {
-        prof_start(self);
-    }
-    else
-    {
-        profile->running = Qtrue;
-        prof_install_hook(self);
+        rb_raise(rb_eRuntimeError, "RubyProf is not running.");
     }
 
-    if (rb_block_given_p())
+    if (profile->paused == Qtrue)
     {
-        rb_ensure(rb_yield, self, prof_pause, self);
+        profile->paused = Qfalse;
+        profile->measurement_at_pause_resume = profile->measurer->measure();
+        st_foreach(profile->threads_tbl, unpause_thread, (st_data_t) profile);
     }
 
-    return self;
+    return rb_block_given_p() ? rb_ensure(rb_yield, self, prof_pause, self) : self;
 }
 
 /* call-seq:
@@ -628,7 +691,7 @@ prof_stop(VALUE self)
 
     /* Unset the last_thread_data (very important!)
        and the threads table */
-    profile->running = Qfalse;
+    profile->running = profile->paused = Qfalse;
     profile->last_thread_data = NULL;
 
     /* Post process result */
@@ -686,10 +749,10 @@ void Init_ruby_prof()
     rb_define_alloc_func (cProfile, prof_allocate);
     rb_define_method(cProfile, "initialize", prof_initialize, -1);
     rb_define_method(cProfile, "start", prof_start, 0);
-    rb_define_method(cProfile, "start", prof_start, 0);
     rb_define_method(cProfile, "stop", prof_stop, 0);
     rb_define_method(cProfile, "resume", prof_resume, 0);
     rb_define_method(cProfile, "pause", prof_pause, 0);
     rb_define_method(cProfile, "running?", prof_running, 0);
+    rb_define_method(cProfile, "paused?", prof_paused, 0);
     rb_define_method(cProfile, "threads", prof_threads, 0);
 }