ruby-prof 0.6.0-x86-mswin32-60 → 0.7.0-x86-mswin32-60
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGES +54 -1
- data/README +134 -7
- data/Rakefile +40 -58
- data/bin/ruby-prof +21 -6
- data/ext/extconf.rb +13 -0
- data/ext/measure_allocations.h +16 -1
- data/ext/measure_cpu_time.h +15 -3
- data/ext/measure_gc_runs.h +76 -0
- data/ext/measure_gc_time.h +57 -0
- data/ext/measure_memory.h +61 -2
- data/ext/measure_process_time.h +13 -2
- data/ext/measure_wall_time.h +12 -1
- data/ext/mingw/Rakefile +23 -0
- data/ext/mingw/build.rake +38 -0
- data/ext/mingw/ruby_prof.so +0 -0
- data/ext/ruby_prof.c +685 -633
- data/ext/ruby_prof.h +188 -0
- data/ext/vc/ruby_prof.sln +20 -0
- data/ext/vc/ruby_prof.vcproj +241 -0
- data/ext/version.h +4 -0
- data/lib/ruby-prof.rb +4 -0
- data/lib/ruby-prof/call_info.rb +47 -0
- data/lib/ruby-prof/call_tree_printer.rb +9 -1
- data/lib/ruby-prof/graph_html_printer.rb +6 -5
- data/lib/ruby-prof/graph_printer.rb +3 -2
- data/lib/ruby-prof/method_info.rb +85 -0
- data/lib/ruby-prof/task.rb +1 -2
- data/lib/ruby-prof/test.rb +148 -0
- data/rails/environment/profile.rb +24 -0
- data/rails/example/example_test.rb +9 -0
- data/rails/profile_test_helper.rb +21 -0
- data/test/basic_test.rb +173 -80
- data/test/duplicate_names_test.rb +2 -3
- data/test/exceptions_test.rb +15 -0
- data/test/exclude_threads_test.rb +54 -0
- data/test/line_number_test.rb +18 -14
- data/test/measurement_test.rb +121 -0
- data/test/module_test.rb +5 -8
- data/test/no_method_class_test.rb +4 -5
- data/test/prime.rb +3 -5
- data/test/prime_test.rb +1 -12
- data/test/printers_test.rb +10 -13
- data/test/profile_unit_test.rb +10 -12
- data/test/recursive_test.rb +202 -92
- data/test/singleton_test.rb +1 -2
- data/test/stack_test.rb +138 -0
- data/test/start_stop_test.rb +95 -0
- data/test/test_suite.rb +7 -3
- data/test/thread_test.rb +111 -87
- data/test/unique_call_path_test.rb +206 -0
- metadata +42 -44
- data/ext/extconf.rb.rej +0 -13
- data/lib/ruby-prof/call_tree_printer.rb.rej +0 -27
- data/lib/ruby-prof/profile_test_case.rb +0 -80
- data/lib/ruby_prof.so +0 -0
- data/rails_plugin/ruby-prof/init.rb +0 -8
- data/rails_plugin/ruby-prof/lib/profiling.rb +0 -57
- data/test/measure_mode_test.rb +0 -79
- data/test/prime1.rb +0 -17
- data/test/prime2.rb +0 -26
- data/test/prime3.rb +0 -17
- data/test/start_test.rb +0 -24
- data/test/test_helper.rb +0 -55
- data/test/timing_test.rb +0 -133
data/ext/extconf.rb
CHANGED
@@ -16,6 +16,19 @@ else
 end

 have_header("sys/times.h")
+
+# Stefan Kaes / Alexander Dymo GC patch
 have_func("rb_os_allocated_objects")
 have_func("rb_gc_allocated_size")
+have_func("rb_gc_collections")
+have_func("rb_gc_time")
+
+# Lloyd Hilaiel's heap info patch
+have_func("rb_heap_total_mem")
+have_func("rb_gc_heap_info")
+
+# Ruby 1.9 unexposed methods
+have_func("rb_gc_malloc_allocations")
+have_func("rb_gc_malloc_allocated_size")
+
 create_makefile("ruby_prof")
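The extconf.rb hunk above is what gates the new measurement modes: each optional mode is compiled in only when the corresponding patched C function is present (the Stefan Kaes / Alexander Dymo GC patch, Lloyd Hilaiel's heap-info patch, or the Ruby 1.9 internals). A minimal, hypothetical sketch of checking which modes a given build ended up with, assuming the gem exposes one RubyProf constant per compiled MEASURE_* define (ALLOCATIONS, MEMORY, GC_RUNS, GC_TIME):

require 'ruby-prof'

# Each constant below is assumed to exist only when extconf.rb found the
# matching patch function at build time, so its presence (or absence)
# tells you what this particular ruby-prof build can measure.
%w(ALLOCATIONS MEMORY GC_RUNS GC_TIME).each do |mode|
  puts format('%-12s %s', mode, RubyProf.const_defined?(mode) ? 'available' : 'not compiled in')
end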
data/ext/measure_allocations.h
CHANGED
@@ -1,5 +1,5 @@
 /* :nodoc:
-* Copyright (C)
+* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
 * Charlie Savage <cfis@savagexi.com>
 * All rights reserved.
 *
@@ -24,6 +24,7 @@
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE. */

+#include <ruby.h>

 #if defined(HAVE_RB_OS_ALLOCATED_OBJECTS)
 #define MEASURE_ALLOCATIONS 3
@@ -40,4 +41,18 @@ convert_allocations(prof_measure_t c)
     return c;
 }

+/* Document-method: prof_measure_allocations
+   call-seq:
+     measure_allocations -> int
+
+   Returns the total number of object allocations since Ruby started.*/
+static VALUE
+prof_measure_allocations(VALUE self)
+{
+#if defined(HAVE_LONG_LONG)
+    return ULL2NUM(rb_os_allocated_objects());
+#else
+    return ULONG2NUM(rb_os_allocated_objects());
+#endif
+}
 #endif
data/ext/measure_cpu_time.h
CHANGED
@@ -1,5 +1,5 @@
 /* :nodoc:
-* Copyright (C)
+* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
 * Charlie Savage <cfis@savagexi.com>
 * All rights reserved.
 *
@@ -24,6 +24,7 @@
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE. */

+#include <ruby.h>

 #if defined(_WIN32) || (defined(__GNUC__) && (defined(__i386__) || defined(__powerpc__) || defined(__ppc__)))
 #define MEASURE_CPU_TIME 2
@@ -110,6 +111,17 @@ convert_cpu_time(prof_measure_t c)
     return (double) c / cpu_frequency;
 }

+/* Document-method: prof_measure_cpu_time
+   call-seq:
+     measure_cpu_time -> float
+
+   Returns the cpu time.*/
+static VALUE
+prof_measure_cpu_time(VALUE self)
+{
+    return rb_float_new(convert_cpu_time(measure_cpu_time()));
+}
+
 /* Document-method: prof_get_cpu_frequency
    call-seq:
      cpu_frequency -> int
@@ -119,7 +131,7 @@ RubyProf::measure_mode is set to CPU_TIME. */
 static VALUE
 prof_get_cpu_frequency(VALUE self)
 {
-    return
+    return ULL2NUM(cpu_frequency);
 }

 /* Document-method: prof_set_cpu_frequency
@@ -131,7 +143,7 @@ RubyProf::measure_mode is set to CPU_TIME. */
 static VALUE
 prof_set_cpu_frequency(VALUE self, VALUE val)
 {
-    cpu_frequency =
+    cpu_frequency = NUM2LL(val);
     return val;
 }

data/ext/measure_gc_runs.h
ADDED
@@ -0,0 +1,76 @@
+/* :nodoc:
+ * Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
+ *    Charlie Savage <cfis@savagexi.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+#if defined(HAVE_RB_GC_COLLECTIONS)
+#define MEASURE_GC_RUNS 5
+
+static prof_measure_t
+measure_gc_runs()
+{
+    return NUM2INT(rb_gc_collections());
+}
+
+static double
+convert_gc_runs(prof_measure_t c)
+{
+    return c;
+}
+
+/* Document-method: prof_measure_gc_runs
+   call-seq:
+     gc_runs -> Integer
+
+   Returns the total number of garbage collections.*/
+static VALUE
+prof_measure_gc_runs(VALUE self)
+{
+    return rb_gc_collections();
+}
+
+#elif defined(HAVE_RB_GC_HEAP_INFO)
+#define MEASURE_GC_RUNS 5
+
+static prof_measure_t
+measure_gc_runs()
+{
+    VALUE h = rb_gc_heap_info();
+    return NUM2UINT(rb_hash_aref(h, rb_str_new2("num_gc_passes")));
+}
+
+static double
+convert_gc_runs(prof_measure_t c)
+{
+    return c;
+}
+
+static VALUE
+prof_measure_gc_runs(VALUE self)
+{
+    VALUE h = rb_gc_heap_info();
+    return rb_hash_aref(h, rb_str_new2("num_gc_passes"));
+}
+
+#endif
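measure_gc_runs.h adds a GC_RUNS measurement mode: it reports the number of garbage collections, taken from rb_gc_collections() when the GC patch is present, or from the num_gc_passes entry of rb_gc_heap_info() as a fallback. A hedged usage sketch, assuming the mode is exposed to Ruby as RubyProf::GC_RUNS when MEASURE_GC_RUNS was compiled in:

require 'ruby-prof'

if defined?(RubyProf::GC_RUNS)
  RubyProf.measure_mode = RubyProf::GC_RUNS
  # Profile a block that allocates heavily; the per-method figures are
  # GC runs rather than time.
  result = RubyProf.profile { 100_000.times { Object.new } }
  RubyProf::FlatPrinter.new(result).print(STDOUT)
end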
data/ext/measure_gc_time.h
ADDED
@@ -0,0 +1,57 @@
+/* :nodoc:
+ * Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
+ *    Charlie Savage <cfis@savagexi.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+#if defined(HAVE_RB_GC_TIME)
+#define MEASURE_GC_TIME 6
+
+static prof_measure_t
+measure_gc_time()
+{
+#if HAVE_LONG_LONG
+    return NUM2LL(rb_gc_time());
+#else
+    return NUM2LONG(rb_gc_time());
+#endif
+}
+
+static double
+convert_gc_time(prof_measure_t c)
+{
+    return (double) c / 1000000;
+}
+
+/* Document-method: prof_measure_gc_time
+   call-seq:
+     gc_time -> Integer
+
+   Returns the time spent doing garbage collections in microseconds.*/
+static VALUE
+prof_measure_gc_time(VALUE self)
+{
+    return rb_gc_time();
+}
+
+#endif
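measure_gc_time.h is the companion mode for interpreters that expose rb_gc_time(): the raw counter is microseconds spent in the garbage collector, and convert_gc_time() divides by 1,000,000, so converted figures are seconds. A small hedged sketch, assuming the constant RubyProf::GC_TIME is defined when the function was found at build time:

if defined?(RubyProf::GC_TIME)
  RubyProf.measure_mode = RubyProf::GC_TIME
  result = RubyProf.profile { 10.times { GC.start } }
  # Printed totals are rb_gc_time() microseconds converted to seconds.
  RubyProf::FlatPrinter.new(result).print(STDOUT)
end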
data/ext/measure_memory.h
CHANGED
@@ -1,5 +1,6 @@
 /* :nodoc:
 * Copyright (C) 2008 Alexander Dymo <adymo@pluron.com>
+*
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -26,17 +27,75 @@

 #if defined(HAVE_RB_GC_ALLOCATED_SIZE)
 #define MEASURE_MEMORY 4
+#define TOGGLE_GC_STATS 1

 static prof_measure_t
 measure_memory()
 {
-
+#if defined(HAVE_LONG_LONG)
+    return NUM2LL(rb_gc_allocated_size());
+#else
+    return NUM2ULONG(rb_gc_allocated_size());
+#endif
 }

 static double
 convert_memory(prof_measure_t c)
 {
-    return
+    return (double) c / 1024;
+}
+
+/* Document-method: prof_measure_memory
+   call-seq:
+     measure_memory -> int
+
+   Returns total allocated memory in bytes.*/
+static VALUE
+prof_measure_memory(VALUE self)
+{
+    return rb_gc_allocated_size();
+}
+
+#elif defined(HAVE_RB_GC_MALLOC_ALLOCATED_SIZE)
+#define MEASURE_MEMORY 4
+
+static prof_measure_t
+measure_memory()
+{
+    return rb_gc_malloc_allocated_size();
+}
+
+static double
+convert_memory(prof_measure_t c)
+{
+    return (double) c / 1024;
+}
+
+static VALUE
+prof_measure_memory(VALUE self)
+{
+    return UINT2NUM(rb_gc_malloc_allocated_size());
+}
+
+#elif defined(HAVE_RB_HEAP_TOTAL_MEM)
+#define MEASURE_MEMORY 4
+
+static prof_measure_t
+measure_memory()
+{
+    return rb_heap_total_mem();
+}
+
+static double
+convert_memory(prof_measure_t c)
+{
+    return (double) c / 1024;
+}
+
+static VALUE
+prof_measure_memory(VALUE self)
+{
+    return ULONG2NUM(rb_heap_total_mem());
 }

 #endif
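The memory mode now has three alternative sources, tried in order: rb_gc_allocated_size(), rb_gc_malloc_allocated_size(), and rb_heap_total_mem(); in every case convert_memory() divides the raw counter by 1024, so the profiler's converted numbers are kilobytes. A hedged usage sketch, assuming RubyProf::MEMORY is defined whenever one of the three hooks was found:

if defined?(RubyProf::MEMORY)
  RubyProf.measure_mode = RubyProf::MEMORY
  result = RubyProf.profile { Array.new(10_000) { 'x' * 100 } }
  # Figures here are the raw allocation counter / 1024 (KB).
  RubyProf::GraphPrinter.new(result).print(STDOUT)
end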
data/ext/measure_process_time.h
CHANGED
@@ -1,5 +1,5 @@
-/*
-* Copyright (C)
+/*
+* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
 * Charlie Savage <cfis@savagexi.com>
 * All rights reserved.
 *
@@ -39,3 +39,14 @@ convert_process_time(prof_measure_t c)
 {
     return (double) c / CLOCKS_PER_SEC;
 }
+
+/* Document-method: measure_process_time
+   call-seq:
+     measure_process_time -> float
+
+   Returns the process time.*/
+static VALUE
+prof_measure_process_time(VALUE self)
+{
+    return rb_float_new(convert_process_time(measure_process_time()));
+}
data/ext/measure_wall_time.h
CHANGED
@@ -1,5 +1,5 @@
 /* :nodoc:
-* Copyright (C)
+* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
 * Charlie Savage <cfis@savagexi.com>
 * All rights reserved.
 *
@@ -40,3 +40,14 @@ convert_wall_time(prof_measure_t c)
 {
     return (double) c / 1000000;
 }
+
+/* Document-method: prof_measure_wall_time
+   call-seq:
+     measure_wall_time -> float
+
+   Returns the wall time.*/
+static VALUE
+prof_measure_wall_time(VALUE self)
+{
+    return rb_float_new(convert_wall_time(measure_wall_time()));
+}
data/ext/mingw/Rakefile
ADDED
@@ -0,0 +1,23 @@
+# We can't use Ruby's standard build procedures
+# on Windows because the Ruby executable is
+# built with VC++ while here we want to build
+# with MingW. So just roll our own...
+
+require 'fileutils'
+require 'rbconfig'
+
+EXTENSION_NAME = "ruby_prof.#{Config::CONFIG["DLEXT"]}"
+
+# This is called when the Windows GEM is installed!
+task :install do
+  # Gems will pass these two environment variables:
+  # RUBYARCHDIR=#{dest_path}
+  # RUBYLIBDIR=#{dest_path}
+
+  dest_path = ENV['RUBYLIBDIR']
+
+  # Copy the extension
+  cp(EXTENSION_NAME, dest_path)
+end
+
+task :default => :install
data/ext/mingw/build.rake
ADDED
@@ -0,0 +1,38 @@
+# We can't use Ruby's standard build procedures
+# on Windows because the Ruby executable is
+# built with VC++ while here we want to build
+# with MingW. So just roll our own...
+
+require 'rake/clean'
+require 'rbconfig'
+
+RUBY_INCLUDE_DIR = Config::CONFIG["archdir"]
+RUBY_BIN_DIR = Config::CONFIG["bindir"]
+RUBY_LIB_DIR = Config::CONFIG["libdir"]
+RUBY_SHARED_LIB = Config::CONFIG["LIBRUBY"]
+RUBY_SHARED_DLL = RUBY_SHARED_LIB.gsub(/lib$/, 'dll')
+
+EXTENSION_NAME = "ruby_prof.#{Config::CONFIG["DLEXT"]}"
+
+CLEAN.include('*.o')
+CLOBBER.include(EXTENSION_NAME)
+
+task :default => "ruby_prof"
+
+SRC = FileList['../*.c']
+OBJ = SRC.collect do |file_name|
+  File.basename(file_name).ext('o')
+end
+
+SRC.each do |srcfile|
+  objfile = File.basename(srcfile).ext('o')
+  file objfile => srcfile do
+    command = "gcc -c -fPIC -O2 -Wall -o #{objfile} -I/usr/local/include #{srcfile} -I#{RUBY_INCLUDE_DIR}"
+    sh "sh -c '#{command}'"
+  end
+end
+
+file "ruby_prof" => OBJ do
+  command = "gcc -shared -o #{EXTENSION_NAME} -L/usr/local/lib #{OBJ} #{RUBY_BIN_DIR}/#{RUBY_SHARED_DLL}"
+  sh "sh -c '#{command}'"
+end
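The two MinGW files replace the usual mkmf-driven build on Windows: build.rake compiles every ../*.c with gcc and links ruby_prof.so against the Ruby DLL, while the Rakefile's :install task simply copies the prebuilt extension to RUBYLIBDIR when the gem is installed. A hypothetical way to drive that build from another Rake task (the path assumes the gem's unpacked source layout):

# Hypothetical helper task; 'ext/mingw' is assumed to be where these files
# live in a checkout of the gem source.
task :build_mingw do
  Dir.chdir('ext/mingw') { sh 'rake --rakefile build.rake' }
end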
data/ext/mingw/ruby_prof.so
Binary file (contents not shown)
data/ext/ruby_prof.c
CHANGED
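The rewrite of ruby_prof.c below replaces the old per-method parents/children hashes with an explicit call-info tree (prof_call_info_t now carries its parent, its own children table, and a line number), and the Ruby-visible accessors MethodInfo#call_infos, CallInfo#parent and CallInfo#children are added further down in this diff. A hedged sketch of walking that tree from Ruby, assuming Result#threads still returns a hash of thread id to MethodInfo array as collect_threads() builds it:

require 'ruby-prof'

result = RubyProf.profile { [1, 2, 3].map { |i| i.to_s * 10 } }

result.threads.each do |thread_id, methods|
  methods.each do |method|
    method.call_infos.each do |ci|
      caller_name = ci.parent ? ci.parent.target.full_name : '(toplevel)'
      puts "#{method.full_name} called #{ci.called} time(s) from #{caller_name}, line #{ci.line}"
    end
  end
end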
@@ -1,5 +1,5 @@
 /*
-* Copyright (C)
+* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
 * Charlie Savage <cfis@savagexi.com>
 * All rights reserved.
 *
@@ -47,129 +47,10 @@
 hierarchy in ruby - which is very helpful for creating call graphs.
 */

-
-#include <stdio.h>
-
-#include <ruby.h>
-#ifndef RUBY_VM
-#include <node.h>
-#include <st.h>
-typedef rb_event_t rb_event_flag_t;
-#define rb_sourcefile() (node ? node->nd_file : 0)
-#define rb_sourceline() (node ? nd_line(node) : 0)
-#endif
-
-
-/* ================ Constants =================*/
-#define INITIAL_STACK_SIZE 8
-#define PROF_VERSION "0.6.0"
-
-
-/* ================ Measurement =================*/
-#ifdef HAVE_LONG_LONG
-typedef LONG_LONG prof_measure_t;
-#else
-typedef unsigned long prof_measure_t;
-#endif
-
-#include "measure_process_time.h"
-#include "measure_wall_time.h"
-#include "measure_cpu_time.h"
-#include "measure_allocations.h"
-#include "measure_memory.h"
-
-static prof_measure_t (*get_measurement)() = measure_process_time;
-static double (*convert_measurement)(prof_measure_t) = convert_process_time;
-
-/* ================ DataTypes =================*/
-static VALUE mProf;
-static VALUE cResult;
-static VALUE cMethodInfo;
-static VALUE cCallInfo;
-
-/* Profiling information for each method. */
-typedef struct prof_method_t {
-    st_data_t key;              /* Cache hash value for speed reasons. */
-    VALUE name;                 /* Name of the method. */
-    VALUE klass;                /* The method's class. */
-    ID mid;                     /* The method id. */
-    int depth;                  /* The recursive depth this method was called at.*/
-    int called;                 /* Number of times called */
-    const char* source_file;    /* The method's source file */
-    int line;                   /* The method's line number. */
-    prof_measure_t total_time;  /* Total time spent in this method and children. */
-    prof_measure_t self_time;   /* Total time spent in this method. */
-    prof_measure_t wait_time;   /* Total time this method spent waiting for other threads. */
-    st_table *parents;          /* The method's callers (prof_call_info_t). */
-    st_table *children;         /* The method's callees (prof_call_info_t). */
-    int active_frame;           /* # of active frames for this method. Used to detect
-                                   recursion. Stashed here to avoid extra lookups in
-                                   the hook method - so a bit hackey. */
-    struct prof_method_t *base; /* For recursion - this is the parent method */
-} prof_method_t;
-
-
-/* Callers and callee information for a method. */
-typedef struct {
-    prof_method_t *target;
-    int called;
-    prof_measure_t total_time;
-    prof_measure_t self_time;
-    prof_measure_t wait_time;
-    int line;
-} prof_call_info_t;
-
-
-/* Temporary object that maintains profiling information
-   for active methods - there is one per method.*/
-typedef struct {
-    /* Caching prof_method_t values significantly
-       increases performance. */
-    prof_method_t *method;
-    prof_measure_t start_time;
-    prof_measure_t wait_time;
-    prof_measure_t child_time;
-    unsigned int line;
-} prof_frame_t;
-
-/* Current stack of active methods.*/
-typedef struct {
-    prof_frame_t *start;
-    prof_frame_t *end;
-    prof_frame_t *ptr;
-} prof_stack_t;
-
-/* Profiling information for a thread. */
-typedef struct {
-    unsigned long thread_id;     /* Thread id */
-    st_table* method_info_table; /* All called methods */
-    prof_stack_t* stack;         /* Active methods */
-    prof_measure_t last_switch;  /* Point of last context switch */
-} thread_data_t;
-
-typedef struct {
-    VALUE threads;
-} prof_result_t;
-
-
-/* ================ Variables =================*/
-static int measure_mode;
-static st_table *threads_tbl = NULL;
-/* TODO - If Ruby become multi-threaded this has to turn into
-   a separate stack since this isn't thread safe! */
-static thread_data_t* last_thread_data = NULL;
+#include "ruby_prof.h"


 /* ================ Helper Functions =================*/
-/* Helper method to get the id of a Ruby thread. */
-static inline long
-get_thread_id(VALUE thread)
-{
-    //return NUM2ULONG(rb_obj_id(thread));
-    // From line 1997 in gc.c
-    return (long)thread;
-}
-
 static VALUE
 figure_singleton_name(VALUE klass)
 {
@@ -285,21 +166,7 @@ full_name(VALUE klass, ID mid, int depth)
     return result;
 }

-
-static inline st_data_t
-method_key(VALUE klass, ID mid, int depth)
-{
-    /* No idea if this is a unique key or not. Would be
-       best to use the method name, but we can't, since
-       that calls internal ruby functions which would
-       cause the hook method to recursively call itself.
-       And that is too much of a bother to deal with.
-       Plus of course, this is faster. */
-    return (klass * 100) + (mid * 10) + depth;
-}
-
 /* ================ Stack Handling =================*/
-
 /* Creates a stack of prof_frame_t to keep track
    of timings for active methods. */
 static prof_stack_t *
@@ -319,7 +186,7 @@ stack_free(prof_stack_t *stack)
     xfree(stack);
 }

-static
+static prof_frame_t *
 stack_push(prof_stack_t *stack)
 {
     /* Is there space on the stack? If not, double
@@ -335,7 +202,7 @@ stack_push(prof_stack_t *stack)
     return stack->ptr++;
 }

-static
+static prof_frame_t *
 stack_pop(prof_stack_t *stack)
 {
     if (stack->ptr == stack->start)
@@ -344,7 +211,7 @@ stack_pop(prof_stack_t *stack)
     return --stack->ptr;
 }

-static
+static prof_frame_t *
 stack_peek(prof_stack_t *stack)
 {
     if (stack->ptr == stack->start)
@@ -353,71 +220,54 @@ stack_peek(prof_stack_t *stack)
|
|
353
220
|
return stack->ptr - 1;
|
354
221
|
}
|
355
222
|
|
356
|
-
|
357
|
-
|
223
|
+
/* ================ Method Key =================*/
|
224
|
+
static int
|
225
|
+
method_table_cmp(prof_method_key_t *key1, prof_method_key_t *key2)
|
358
226
|
{
|
359
|
-
return
|
227
|
+
return (key1->klass != key2->klass) ||
|
228
|
+
(key1->mid != key2->mid) ||
|
229
|
+
(key1->depth != key2->depth);
|
360
230
|
}
|
361
231
|
|
362
|
-
|
363
|
-
|
364
|
-
/* --- Keeps track of the methods the current method calls */
|
365
|
-
static st_table *
|
366
|
-
method_info_table_create()
|
232
|
+
static int
|
233
|
+
method_table_hash(prof_method_key_t *key)
|
367
234
|
{
|
368
|
-
|
369
|
-
}
|
370
|
-
|
371
|
-
static inline size_t
|
372
|
-
method_info_table_insert(st_table *table, st_data_t key, prof_method_t *val)
|
373
|
-
{
|
374
|
-
return st_insert(table, key, (st_data_t) val);
|
375
|
-
}
|
376
|
-
|
377
|
-
static inline prof_method_t *
|
378
|
-
method_info_table_lookup(st_table *table, st_data_t key)
|
379
|
-
{
|
380
|
-
st_data_t val;
|
381
|
-
if (st_lookup(table, key, &val))
|
382
|
-
{
|
383
|
-
return (prof_method_t *) val;
|
384
|
-
}
|
385
|
-
else
|
386
|
-
{
|
387
|
-
return NULL;
|
388
|
-
}
|
235
|
+
return key->key;
|
389
236
|
}
|
390
237
|
|
238
|
+
static struct st_hash_type type_method_hash = {
|
239
|
+
method_table_cmp,
|
240
|
+
method_table_hash
|
241
|
+
};
|
391
242
|
|
392
243
|
static void
|
393
|
-
|
244
|
+
method_key(prof_method_key_t* key, VALUE klass, ID mid, int depth)
|
394
245
|
{
|
395
|
-
|
396
|
-
|
397
|
-
|
246
|
+
key->klass = klass;
|
247
|
+
key->mid = mid;
|
248
|
+
key->depth = depth;
|
249
|
+
key->key = (klass << 4) + (mid << 2) + depth;
|
398
250
|
}
|
399
251
|
|
400
252
|
|
401
|
-
/* ================ Call Info
|
402
|
-
|
403
|
-
/* ---- Hash, keyed on class/method_id, that holds call_info objects ---- */
|
253
|
+
/* ================ Call Info =================*/
|
404
254
|
static st_table *
|
405
|
-
|
255
|
+
call_info_table_create()
|
406
256
|
{
|
407
|
-
|
257
|
+
return st_init_table(&type_method_hash);
|
408
258
|
}
|
409
259
|
|
410
|
-
static
|
411
|
-
|
260
|
+
static size_t
|
261
|
+
call_info_table_insert(st_table *table, const prof_method_key_t *key, prof_call_info_t *val)
|
412
262
|
{
|
413
|
-
|
263
|
+
return st_insert(table, (st_data_t) key, (st_data_t) val);
|
414
264
|
}
|
415
265
|
|
416
|
-
static
|
417
|
-
|
266
|
+
static prof_call_info_t *
|
267
|
+
call_info_table_lookup(st_table *table, const prof_method_key_t *key)
|
418
268
|
{
|
419
269
|
st_data_t val;
|
420
|
-
if (st_lookup(table, key, &val))
|
270
|
+
if (st_lookup(table, (st_data_t) key, &val))
|
421
271
|
{
|
422
272
|
return (prof_call_info_t *) val;
|
423
273
|
}
|
@@ -428,7 +278,7 @@ caller_table_lookup(st_table *table, st_data_t key)
|
|
428
278
|
}
|
429
279
|
|
430
280
|
static void
|
431
|
-
|
281
|
+
call_info_table_free(st_table *table)
|
432
282
|
{
|
433
283
|
st_free_table(table);
|
434
284
|
}
|
@@ -440,43 +290,51 @@ they took to execute. */
|
|
440
290
|
|
441
291
|
/* :nodoc: */
|
442
292
|
static prof_call_info_t *
|
443
|
-
|
293
|
+
prof_call_info_create(prof_method_t* method, prof_call_info_t* parent)
|
444
294
|
{
|
445
|
-
prof_call_info_t *result;
|
446
|
-
|
447
|
-
result = ALLOC(prof_call_info_t);
|
295
|
+
prof_call_info_t *result = ALLOC(prof_call_info_t);
|
296
|
+
result->object = Qnil;
|
448
297
|
result->target = method;
|
298
|
+
result->parent = parent;
|
299
|
+
result->call_infos = call_info_table_create();
|
300
|
+
result->children = Qnil;
|
301
|
+
|
449
302
|
result->called = 0;
|
450
303
|
result->total_time = 0;
|
451
304
|
result->self_time = 0;
|
452
305
|
result->wait_time = 0;
|
306
|
+
result->line = 0;
|
453
307
|
return result;
|
454
308
|
}
|
455
309
|
|
456
310
|
static void
|
457
|
-
|
311
|
+
prof_call_info_mark(prof_call_info_t *call_info)
|
458
312
|
{
|
459
|
-
|
313
|
+
rb_gc_mark(prof_method_wrap(call_info->target));
|
314
|
+
rb_gc_mark(call_info->children);
|
315
|
+
if (call_info->parent)
|
316
|
+
rb_gc_mark(prof_call_info_wrap(call_info->parent));
|
460
317
|
}
|
461
318
|
|
462
|
-
static
|
463
|
-
|
319
|
+
static void
|
320
|
+
prof_call_info_free(prof_call_info_t *call_info)
|
464
321
|
{
|
465
|
-
|
466
|
-
|
467
|
-
return ST_CONTINUE;
|
322
|
+
call_info_table_free(call_info->call_infos);
|
323
|
+
xfree(call_info);
|
468
324
|
}
|
469
325
|
|
470
326
|
static VALUE
|
471
|
-
|
327
|
+
prof_call_info_wrap(prof_call_info_t *call_info)
|
472
328
|
{
|
473
|
-
|
474
|
-
|
475
|
-
|
329
|
+
if (call_info->object == Qnil)
|
330
|
+
{
|
331
|
+
call_info->object = Data_Wrap_Struct(cCallInfo, prof_call_info_mark, prof_call_info_free, call_info);
|
332
|
+
}
|
333
|
+
return call_info->object;
|
476
334
|
}
|
477
335
|
|
478
336
|
static prof_call_info_t *
|
479
|
-
|
337
|
+
prof_get_call_info_result(VALUE obj)
|
480
338
|
{
|
481
339
|
if (BUILTIN_TYPE(obj) != T_DATA)
|
482
340
|
{
|
@@ -492,14 +350,14 @@ get_call_info_result(VALUE obj)
|
|
492
350
|
|
493
351
|
Returns the target method. */
|
494
352
|
static VALUE
|
495
|
-
|
353
|
+
prof_call_info_target(VALUE self)
|
496
354
|
{
|
497
355
|
/* Target is a pointer to a method_info - so we have to be careful
|
498
356
|
about the GC. We will wrap the method_info but provide no
|
499
357
|
free method so the underlying object is not freed twice! */
|
500
358
|
|
501
|
-
prof_call_info_t *result =
|
502
|
-
return
|
359
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
360
|
+
return prof_method_wrap(result->target);
|
503
361
|
}
|
504
362
|
|
505
363
|
/* call-seq:
|
@@ -507,10 +365,9 @@ call_info_target(VALUE self)
|
|
507
365
|
|
508
366
|
Returns the total amount of time this method was called. */
|
509
367
|
static VALUE
|
510
|
-
|
368
|
+
prof_call_info_called(VALUE self)
|
511
369
|
{
|
512
|
-
prof_call_info_t *result =
|
513
|
-
|
370
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
514
371
|
return INT2NUM(result->called);
|
515
372
|
}
|
516
373
|
|
@@ -519,9 +376,10 @@ call_info_called(VALUE self)
|
|
519
376
|
|
520
377
|
returns the line number of the method */
|
521
378
|
static VALUE
|
522
|
-
|
379
|
+
prof_call_info_line(VALUE self)
|
523
380
|
{
|
524
|
-
|
381
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
382
|
+
return rb_int_new(result->line);
|
525
383
|
}
|
526
384
|
|
527
385
|
/* call-seq:
|
@@ -529,10 +387,9 @@ call_info_line(VALUE self)
|
|
529
387
|
|
530
388
|
Returns the total amount of time spent in this method and its children. */
|
531
389
|
static VALUE
|
532
|
-
|
390
|
+
prof_call_info_total_time(VALUE self)
|
533
391
|
{
|
534
|
-
prof_call_info_t *result =
|
535
|
-
|
392
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
536
393
|
return rb_float_new(convert_measurement(result->total_time));
|
537
394
|
}
|
538
395
|
|
@@ -541,9 +398,9 @@ call_info_total_time(VALUE self)
|
|
541
398
|
|
542
399
|
Returns the total amount of time spent in this method. */
|
543
400
|
static VALUE
|
544
|
-
|
401
|
+
prof_call_info_self_time(VALUE self)
|
545
402
|
{
|
546
|
-
prof_call_info_t *result =
|
403
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
547
404
|
|
548
405
|
return rb_float_new(convert_measurement(result->self_time));
|
549
406
|
}
|
@@ -553,138 +410,177 @@ call_info_self_time(VALUE self)
|
|
553
410
|
|
554
411
|
Returns the total amount of time this method waited for other threads. */
|
555
412
|
static VALUE
|
556
|
-
|
413
|
+
prof_call_info_wait_time(VALUE self)
|
557
414
|
{
|
558
|
-
prof_call_info_t *result =
|
415
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
559
416
|
|
560
417
|
return rb_float_new(convert_measurement(result->wait_time));
|
561
418
|
}
|
562
419
|
|
563
420
|
/* call-seq:
|
564
|
-
|
421
|
+
parent -> call_info
|
565
422
|
|
566
|
-
Returns the
|
423
|
+
Returns the call_infos parent call_info object (the method that called this method).*/
|
567
424
|
static VALUE
|
568
|
-
|
425
|
+
prof_call_info_parent(VALUE self)
|
569
426
|
{
|
570
|
-
prof_call_info_t *result =
|
571
|
-
|
572
|
-
|
427
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
428
|
+
if (result->parent)
|
429
|
+
return prof_call_info_wrap(result->parent);
|
430
|
+
else
|
431
|
+
return Qnil;
|
573
432
|
}
|
574
433
|
|
434
|
+
static int
|
435
|
+
prof_call_info_collect_children(st_data_t key, st_data_t value, st_data_t result)
|
436
|
+
{
|
437
|
+
prof_call_info_t *call_info = (prof_call_info_t *) value;
|
438
|
+
VALUE arr = (VALUE) result;
|
439
|
+
rb_ary_push(arr, prof_call_info_wrap(call_info));
|
440
|
+
return ST_CONTINUE;
|
441
|
+
}
|
575
442
|
|
576
|
-
/*
|
577
|
-
|
578
|
-
One instance of the RubyProf::MethodInfo class is created per method
|
579
|
-
called per thread. Thus, if a method is called in two different
|
580
|
-
thread then there will be two RubyProf::MethodInfo objects
|
581
|
-
created. RubyProf::MethodInfo objects can be accessed via
|
582
|
-
the RubyProf::Result object.
|
583
|
-
*/
|
443
|
+
/* call-seq:
|
444
|
+
children -> hash
|
584
445
|
|
585
|
-
|
586
|
-
|
587
|
-
|
588
|
-
|
446
|
+
Returns an array of call info objects of methods that this method
|
447
|
+
called (ie, children).*/
|
448
|
+
static VALUE
|
449
|
+
prof_call_info_children(VALUE self)
|
589
450
|
{
|
590
|
-
|
591
|
-
|
592
|
-
|
593
|
-
|
594
|
-
|
595
|
-
|
451
|
+
prof_call_info_t *call_info = prof_get_call_info_result(self);
|
452
|
+
if (call_info->children == Qnil)
|
453
|
+
{
|
454
|
+
call_info->children = rb_ary_new();
|
455
|
+
st_foreach(call_info->call_infos, prof_call_info_collect_children, call_info->children);
|
456
|
+
}
|
457
|
+
return call_info->children;
|
458
|
+
}
|
596
459
|
|
597
|
-
|
598
|
-
|
599
|
-
|
600
|
-
|
601
|
-
|
602
|
-
|
603
|
-
|
604
|
-
|
605
|
-
|
606
|
-
|
607
|
-
result->line = line;
|
608
|
-
return result;
|
460
|
+
/* ================ Call Infos =================*/
|
461
|
+
static prof_call_infos_t*
|
462
|
+
prof_call_infos_create()
|
463
|
+
{
|
464
|
+
prof_call_infos_t *result = ALLOC(prof_call_infos_t);
|
465
|
+
result->start = ALLOC_N(prof_call_info_t*, INITIAL_CALL_INFOS_SIZE);
|
466
|
+
result->end = result->start + INITIAL_CALL_INFOS_SIZE;
|
467
|
+
result->ptr = result->start;
|
468
|
+
result->object = Qnil;
|
469
|
+
return result;
|
609
470
|
}
|
610
471
|
|
611
472
|
static void
|
612
|
-
|
473
|
+
prof_call_infos_free(prof_call_infos_t *call_infos)
|
613
474
|
{
|
614
|
-
|
475
|
+
xfree(call_infos->start);
|
476
|
+
xfree(call_infos);
|
615
477
|
}
|
616
478
|
|
617
479
|
static void
|
618
|
-
|
480
|
+
prof_add_call_info(prof_call_infos_t *call_infos, prof_call_info_t *call_info)
|
619
481
|
{
|
620
|
-
|
621
|
-
|
622
|
-
|
623
|
-
|
624
|
-
|
625
|
-
|
626
|
-
|
482
|
+
if (call_infos->ptr == call_infos->end)
|
483
|
+
{
|
484
|
+
size_t len = call_infos->ptr - call_infos->start;
|
485
|
+
size_t new_capacity = (call_infos->end - call_infos->start) * 2;
|
486
|
+
REALLOC_N(call_infos->start, prof_call_info_t*, new_capacity);
|
487
|
+
call_infos->ptr = call_infos->start + len;
|
488
|
+
call_infos->end = call_infos->start + new_capacity;
|
489
|
+
}
|
490
|
+
*call_infos->ptr = call_info;
|
491
|
+
call_infos->ptr++;
|
627
492
|
}
|
628
493
|
|
629
494
|
static VALUE
|
630
|
-
|
495
|
+
prof_call_infos_wrap(prof_call_infos_t *call_infos)
|
631
496
|
{
|
632
|
-
|
497
|
+
if (call_infos->object == Qnil)
|
498
|
+
{
|
499
|
+
prof_call_info_t **i;
|
500
|
+
call_infos->object = rb_ary_new();
|
501
|
+
for(i=call_infos->start; i<call_infos->ptr; i++)
|
502
|
+
{
|
503
|
+
VALUE call_info = prof_call_info_wrap(*i);
|
504
|
+
rb_ary_push(call_infos->object, call_info);
|
505
|
+
}
|
506
|
+
}
|
507
|
+
return call_infos->object;
|
633
508
|
}
|
634
509
|
|
635
|
-
static prof_method_t *
|
636
|
-
get_prof_method(VALUE obj)
|
637
|
-
{
|
638
|
-
return (prof_method_t *) DATA_PTR(obj);
|
639
|
-
}
|
640
510
|
|
641
|
-
/*
|
642
|
-
|
511
|
+
/* ================ Method Info =================*/
|
512
|
+
/* Document-class: RubyProf::MethodInfo
|
513
|
+
The RubyProf::MethodInfo class stores profiling data for a method.
|
514
|
+
One instance of the RubyProf::MethodInfo class is created per method
|
515
|
+
called per thread. Thus, if a method is called in two different
|
516
|
+
thread then there will be two RubyProf::MethodInfo objects
|
517
|
+
created. RubyProf::MethodInfo objects can be accessed via
|
518
|
+
the RubyProf::Result object.
|
519
|
+
*/
|
643
520
|
|
644
|
-
|
645
|
-
|
646
|
-
prof_method_called(VALUE self)
|
521
|
+
static prof_method_t*
|
522
|
+
prof_method_create(prof_method_key_t *key, const char* source_file, int line)
|
647
523
|
{
|
648
|
-
prof_method_t *result =
|
524
|
+
prof_method_t *result = ALLOC(prof_method_t);
|
525
|
+
result->object = Qnil;
|
526
|
+
result->key = ALLOC(prof_method_key_t);
|
527
|
+
method_key(result->key, key->klass, key->mid, key->depth);
|
649
528
|
|
650
|
-
|
651
|
-
}
|
529
|
+
result->call_infos = prof_call_infos_create();
|
652
530
|
|
531
|
+
result->active = 0;
|
653
532
|
|
654
|
-
|
655
|
-
|
533
|
+
if (source_file != NULL)
|
534
|
+
{
|
535
|
+
int len = strlen(source_file) + 1;
|
536
|
+
char *buffer = ALLOC_N(char, len);
|
656
537
|
|
657
|
-
|
658
|
-
|
659
|
-
|
660
|
-
|
661
|
-
|
538
|
+
MEMCPY(buffer, source_file, char, len);
|
539
|
+
result->source_file = buffer;
|
540
|
+
}
|
541
|
+
else
|
542
|
+
{
|
543
|
+
result->source_file = source_file;
|
544
|
+
}
|
545
|
+
result->line = line;
|
662
546
|
|
663
|
-
return
|
547
|
+
return result;
|
664
548
|
}
|
665
549
|
|
666
|
-
|
667
|
-
|
550
|
+
static void
|
551
|
+
prof_method_mark(prof_method_t *method)
|
552
|
+
{
|
553
|
+
rb_gc_mark(method->call_infos->object);
|
554
|
+
rb_gc_mark(method->key->klass);
|
555
|
+
}
|
668
556
|
|
669
|
-
|
670
|
-
|
671
|
-
prof_method_self_time(VALUE self)
|
557
|
+
static void
|
558
|
+
prof_method_free(prof_method_t *method)
|
672
559
|
{
|
673
|
-
|
560
|
+
if (method->source_file)
|
561
|
+
{
|
562
|
+
xfree((char*)method->source_file);
|
563
|
+
}
|
674
564
|
|
675
|
-
|
565
|
+
prof_call_infos_free(method->call_infos);
|
566
|
+
xfree(method->key);
|
567
|
+
xfree(method);
|
676
568
|
}
|
677
569
|
|
678
|
-
/* call-seq:
|
679
|
-
wait_time -> float
|
680
|
-
|
681
|
-
Returns the total amount of time this method waited for other threads. */
|
682
570
|
static VALUE
|
683
|
-
|
571
|
+
prof_method_wrap(prof_method_t *result)
|
684
572
|
{
|
685
|
-
|
573
|
+
if (result->object == Qnil)
|
574
|
+
{
|
575
|
+
result->object = Data_Wrap_Struct(cMethodInfo, prof_method_mark, prof_method_free, result);
|
576
|
+
}
|
577
|
+
return result->object;
|
578
|
+
}
|
686
579
|
|
687
|
-
|
580
|
+
static prof_method_t *
|
581
|
+
get_prof_method(VALUE obj)
|
582
|
+
{
|
583
|
+
return (prof_method_t *) DATA_PTR(obj);
|
688
584
|
}
|
689
585
|
|
690
586
|
/* call-seq:
|
@@ -697,18 +593,6 @@ prof_method_line(VALUE self)
|
|
697
593
|
return rb_int_new(get_prof_method(self)->line);
|
698
594
|
}
|
699
595
|
|
700
|
-
/* call-seq:
|
701
|
-
children_time -> float
|
702
|
-
|
703
|
-
Returns the total amount of time spent in this method's children. */
|
704
|
-
static VALUE
|
705
|
-
prof_method_children_time(VALUE self)
|
706
|
-
{
|
707
|
-
prof_method_t *result = get_prof_method(self);
|
708
|
-
prof_measure_t children_time = result->total_time - result->self_time - result->wait_time;
|
709
|
-
return rb_float_new(convert_measurement(children_time));
|
710
|
-
}
|
711
|
-
|
712
596
|
/* call-seq:
|
713
597
|
source_file => string
|
714
598
|
|
@@ -736,8 +620,7 @@ static VALUE
|
|
736
620
|
prof_method_klass(VALUE self)
|
737
621
|
{
|
738
622
|
prof_method_t *result = get_prof_method(self);
|
739
|
-
|
740
|
-
return result->klass;
|
623
|
+
return result->key->klass;
|
741
624
|
}
|
742
625
|
|
743
626
|
/* call-seq:
|
@@ -748,8 +631,7 @@ static VALUE
|
|
748
631
|
prof_method_id(VALUE self)
|
749
632
|
{
|
750
633
|
prof_method_t *result = get_prof_method(self);
|
751
|
-
|
752
|
-
return ID2SYM(result->mid);
|
634
|
+
return ID2SYM(result->key->mid);
|
753
635
|
}
|
754
636
|
|
755
637
|
/* call-seq:
|
@@ -762,7 +644,7 @@ static VALUE
|
|
762
644
|
prof_klass_name(VALUE self)
|
763
645
|
{
|
764
646
|
prof_method_t *method = get_prof_method(self);
|
765
|
-
return klass_name(method->klass);
|
647
|
+
return klass_name(method->key->klass);
|
766
648
|
}
|
767
649
|
|
768
650
|
/* call-seq:
|
@@ -772,10 +654,10 @@ Returns the name of this method in the format Object#method. Singletons
|
|
772
654
|
methods will be returned in the format <Object::Object>#method.*/
|
773
655
|
|
774
656
|
static VALUE
|
775
|
-
prof_method_name(VALUE self)
|
657
|
+
prof_method_name(VALUE self, int depth)
|
776
658
|
{
|
777
659
|
prof_method_t *method = get_prof_method(self);
|
778
|
-
return method_name(method->mid,
|
660
|
+
return method_name(method->key->mid, depth);
|
779
661
|
}
|
780
662
|
|
781
663
|
/* call-seq:
|
@@ -787,105 +669,70 @@ static VALUE
|
|
787
669
|
prof_full_name(VALUE self)
|
788
670
|
{
|
789
671
|
prof_method_t *method = get_prof_method(self);
|
790
|
-
return full_name(method->klass, method->mid, method->depth);
|
672
|
+
return full_name(method->key->klass, method->key->mid, method->key->depth);
|
791
673
|
}
|
792
674
|
|
793
675
|
/* call-seq:
|
794
|
-
|
676
|
+
call_infos -> Array of call_info
|
795
677
|
|
796
|
-
|
797
|
-
|
678
|
+
Returns an array of call info objects that contain profiling information
|
679
|
+
about the current method.*/
|
798
680
|
static VALUE
|
799
|
-
|
681
|
+
prof_method_call_infos(VALUE self)
|
800
682
|
{
|
801
683
|
prof_method_t *method = get_prof_method(self);
|
802
|
-
|
803
|
-
if (method == method->base)
|
804
|
-
return self;
|
805
|
-
else
|
806
|
-
/* Target is a pointer to a method_info - so we have to be careful
|
807
|
-
about the GC. We will wrap the method_info but provide no
|
808
|
-
free method so the underlying object is not freed twice! */
|
809
|
-
return Data_Wrap_Struct(cMethodInfo, NULL, NULL, method->base);
|
684
|
+
return prof_call_infos_wrap(method->call_infos);
|
810
685
|
}
|
811
686
|
|
812
687
|
static int
|
813
|
-
|
688
|
+
collect_methods(st_data_t key, st_data_t value, st_data_t result)
|
814
689
|
{
|
815
|
-
/*
|
816
|
-
|
817
|
-
|
818
|
-
|
819
|
-
|
820
|
-
|
821
|
-
|
690
|
+
/* Called for each method stored in a thread's method table.
|
691
|
+
We want to store the method info information into an array.*/
|
692
|
+
VALUE methods = (VALUE) result;
|
693
|
+
prof_method_t *method = (prof_method_t *) value;
|
694
|
+
rb_ary_push(methods, prof_method_wrap(method));
|
695
|
+
|
696
|
+
/* Wrap call info objects */
|
697
|
+
prof_call_infos_wrap(method->call_infos);
|
698
|
+
|
822
699
|
return ST_CONTINUE;
|
823
700
|
}
|
824
701
|
|
825
|
-
/*
|
826
|
-
|
827
|
-
|
828
|
-
Returns an array of call info objects of methods that this method
|
829
|
-
was called by (ie, parents).*/
|
830
|
-
static VALUE
|
831
|
-
prof_method_parents(VALUE self)
|
702
|
+
/* ================ Method Table =================*/
|
703
|
+
static st_table *
|
704
|
+
method_table_create()
|
832
705
|
{
|
833
|
-
|
834
|
-
method's callers (the methods this method called). */
|
835
|
-
|
836
|
-
VALUE children = rb_ary_new();
|
837
|
-
prof_method_t *result = get_prof_method(self);
|
838
|
-
st_foreach(result->parents, prof_method_collect_call_infos, children);
|
839
|
-
return children;
|
706
|
+
return st_init_table(&type_method_hash);
|
840
707
|
}
|
841
708
|
|
842
|
-
|
843
|
-
|
844
|
-
children -> hash
|
845
|
-
|
846
|
-
Returns an array of call info objects of methods that this method
|
847
|
-
called (ie, children).*/
|
848
|
-
static VALUE
|
849
|
-
prof_method_children(VALUE self)
|
709
|
+
static size_t
|
710
|
+
method_table_insert(st_table *table, const prof_method_key_t *key, prof_method_t *val)
|
850
711
|
{
|
851
|
-
|
852
|
-
method's callees (the methods this method called). */
|
853
|
-
|
854
|
-
VALUE children = rb_ary_new();
|
855
|
-
prof_method_t *result = get_prof_method(self);
|
856
|
-
st_foreach(result->children, prof_method_collect_call_infos, children);
|
857
|
-
return children;
|
712
|
+
return st_insert(table, (st_data_t) key, (st_data_t) val);
|
858
713
|
}
|
859
714
|
|
860
|
-
|
861
|
-
|
862
|
-
|
863
|
-
|
864
|
-
|
865
|
-
|
866
|
-
|
867
|
-
|
868
|
-
|
869
|
-
|
870
|
-
return
|
871
|
-
|
872
|
-
return INT2FIX(1);
|
873
|
-
else if (y->called == 0)
|
874
|
-
return INT2FIX(-1);
|
875
|
-
else
|
876
|
-
return rb_dbl_cmp(x->total_time, y->total_time);
|
715
|
+
static prof_method_t *
|
716
|
+
method_table_lookup(st_table *table, const prof_method_key_t* key)
|
717
|
+
{
|
718
|
+
st_data_t val;
|
719
|
+
if (st_lookup(table, (st_data_t)key, &val))
|
720
|
+
{
|
721
|
+
return (prof_method_t *) val;
|
722
|
+
}
|
723
|
+
else
|
724
|
+
{
|
725
|
+
return NULL;
|
726
|
+
}
|
877
727
|
}
|
878
728
|
|
879
|
-
static int
|
880
|
-
collect_methods(st_data_t key, st_data_t value, st_data_t result)
|
881
|
-
{
|
882
|
-
/* Called for each method stored in a thread's method table.
|
883
|
-
We want to store the method info information into an array.*/
|
884
|
-
VALUE methods = (VALUE) result;
|
885
|
-
prof_method_t *method = (prof_method_t *) value;
|
886
|
-
rb_ary_push(methods, prof_method_new(method));
|
887
729
|
|
888
|
-
|
730
|
+
static void
|
731
|
+
method_table_free(st_table *table)
|
732
|
+
{
|
733
|
+
/* Don't free the contents since they are wrapped by
|
734
|
+
Ruby objects! */
|
735
|
+
st_free_table(table);
|
889
736
|
}
|
890
737
|
|
891
738
|
|
@@ -897,20 +744,19 @@ thread_data_create()
|
|
897
744
|
{
|
898
745
|
thread_data_t* result = ALLOC(thread_data_t);
|
899
746
|
result->stack = stack_create();
|
900
|
-
result->
|
901
|
-
result->last_switch =
|
747
|
+
result->method_table = method_table_create();
|
748
|
+
result->last_switch = get_measurement();
|
902
749
|
return result;
|
903
750
|
}
|
904
751
|
|
905
752
|
static void
|
906
753
|
thread_data_free(thread_data_t* thread_data)
|
907
754
|
{
|
755
|
+
method_table_free(thread_data->method_table);
|
908
756
|
stack_free(thread_data->stack);
|
909
|
-
method_info_table_free(thread_data->method_info_table);
|
910
757
|
xfree(thread_data);
|
911
758
|
}
|
912
759
|
|
913
|
-
|
914
760
|
/* ---- Hash, keyed on thread, that stores thread's stack
|
915
761
|
and methods---- */
|
916
762
|
|
@@ -920,15 +766,15 @@ threads_table_create()
|
|
920
766
|
return st_init_numtable();
|
921
767
|
}
|
922
768
|
|
923
|
-
static
|
769
|
+
static size_t
|
924
770
|
threads_table_insert(st_table *table, VALUE thread, thread_data_t *thread_data)
|
925
771
|
{
|
926
772
|
/* Its too slow to key on the real thread id so just typecast thread instead. */
|
927
|
-
return st_insert(table, (st_data_t
|
773
|
+
return st_insert(table, (st_data_t) thread, (st_data_t) thread_data);
|
928
774
|
}
|
929
775
|
|
930
|
-
static
|
931
|
-
threads_table_lookup(st_table *table,
|
776
|
+
static thread_data_t *
|
777
|
+
threads_table_lookup(st_table *table, VALUE thread_id)
|
932
778
|
{
|
933
779
|
thread_data_t* result;
|
934
780
|
st_data_t val;
|
@@ -974,14 +820,15 @@ collect_threads(st_data_t key, st_data_t value, st_data_t result)
|
|
974
820
|
as an int. */
|
975
821
|
thread_data_t* thread_data = (thread_data_t*) value;
|
976
822
|
VALUE threads_hash = (VALUE) result;
|
977
|
-
|
823
|
+
|
978
824
|
VALUE methods = rb_ary_new();
|
979
|
-
|
825
|
+
|
980
826
|
/* Now collect an array of all the called methods */
|
981
|
-
|
982
|
-
|
827
|
+
st_table* method_table = thread_data->method_table;
|
828
|
+
st_foreach(method_table, collect_methods, methods);
|
829
|
+
|
983
830
|
/* Store the results in the threads hash keyed on the thread id. */
|
984
|
-
rb_hash_aset(threads_hash,
|
831
|
+
rb_hash_aset(threads_hash, thread_data->thread_id, methods);
|
985
832
|
|
986
833
|
return ST_CONTINUE;
|
987
834
|
}
|
@@ -989,6 +836,7 @@ collect_threads(st_data_t key, st_data_t value, st_data_t result)
|
|
989
836
|
|
990
837
|
/* ================ Profiling =================*/
|
991
838
|
/* Copied from eval.c */
|
839
|
+
#ifdef DEBUG
|
992
840
|
static char *
|
993
841
|
get_event_name(rb_event_flag_t event)
|
994
842
|
{
|
@@ -1011,70 +859,143 @@ get_event_name(rb_event_flag_t event)
|
|
1011
859
|
return "raise";
|
1012
860
|
default:
|
1013
861
|
return "unknown";
|
862
|
+
}
|
863
|
+
}
|
864
|
+
#endif
|
865
|
+
|
866
|
+
static prof_method_t*
|
867
|
+
get_method(rb_event_flag_t event, NODE *node, VALUE klass, ID mid, int depth, st_table* method_table)
|
868
|
+
{
|
869
|
+
prof_method_key_t key;
|
870
|
+
prof_method_t *method = NULL;
|
871
|
+
|
872
|
+
method_key(&key, klass, mid, depth);
|
873
|
+
method = method_table_lookup(method_table, &key);
|
874
|
+
|
875
|
+
if (!method)
|
876
|
+
{
|
877
|
+
const char* source_file = rb_sourcefile();
|
878
|
+
int line = rb_sourceline();
|
879
|
+
|
880
|
+
/* Line numbers are not accurate for c method calls */
|
881
|
+
if (event == RUBY_EVENT_C_CALL)
|
882
|
+
{
|
883
|
+
line = 0;
|
884
|
+
source_file = NULL;
|
885
|
+
}
|
886
|
+
|
887
|
+
method = prof_method_create(&key, source_file, line);
|
888
|
+
method_table_insert(method_table, method->key, method);
|
1014
889
|
}
|
890
|
+
return method;
|
1015
891
|
}
|
1016
892
|
|
1017
893
|
static void
|
1018
|
-
update_result(
|
1019
|
-
|
1020
|
-
prof_frame_t
|
1021
|
-
{
|
1022
|
-
|
1023
|
-
|
1024
|
-
prof_call_info_t *
|
1025
|
-
prof_call_info_t *child_call_info = NULL;
|
894
|
+
update_result(prof_measure_t total_time,
|
895
|
+
prof_frame_t *parent_frame,
|
896
|
+
prof_frame_t *frame)
|
897
|
+
{
|
898
|
+
prof_measure_t self_time = total_time - frame->child_time - frame->wait_time;
|
899
|
+
|
900
|
+
prof_call_info_t *call_info = frame->call_info;
|
1026
901
|
|
1027
|
-
|
1028
|
-
|
902
|
+
/* Update information about the current method */
|
903
|
+
call_info->called++;
|
904
|
+
call_info->total_time += total_time;
|
905
|
+
call_info->self_time += self_time;
|
906
|
+
call_info->wait_time += frame->wait_time;
|
1029
907
|
|
1030
|
-
/*
|
1031
|
-
|
1032
|
-
|
1033
|
-
|
1034
|
-
child->wait_time += wait_time;
|
908
|
+
/* Note where the current method was called from */
|
909
|
+
if (parent_frame)
|
910
|
+
call_info->line = parent_frame->line;
|
911
|
+
}
|
1035
912
|
|
1036
|
-
|
913
|
+
static thread_data_t *
|
914
|
+
switch_thread(VALUE thread_id, prof_measure_t now)
|
915
|
+
{
|
916
|
+
prof_frame_t *frame = NULL;
|
917
|
+
prof_measure_t wait_time = 0;
|
1037
918
|
|
1038
|
-
|
1039
|
-
|
1040
|
-
child_call_info = caller_table_lookup(parent->children, child->key);
|
1041
|
-
if (child_call_info == NULL)
|
1042
|
-
{
|
1043
|
-
child_call_info = call_info_create(child);
|
1044
|
-
caller_table_insert(parent->children, child->key, child_call_info);
|
1045
|
-
}
|
919
|
+
/* Get new thread information. */
|
920
|
+
thread_data_t *thread_data = threads_table_lookup(threads_tbl, thread_id);
|
1046
921
|
|
1047
|
-
|
1048
|
-
|
1049
|
-
|
1050
|
-
|
1051
|
-
|
1052
|
-
|
1053
|
-
|
1054
|
-
|
1055
|
-
|
1056
|
-
|
1057
|
-
|
1058
|
-
|
1059
|
-
|
1060
|
-
|
1061
|
-
|
1062
|
-
|
1063
|
-
|
1064
|
-
|
1065
|
-
|
922
|
+
/* How long has this thread been waiting? */
|
923
|
+
wait_time = now - thread_data->last_switch;
|
924
|
+
thread_data->last_switch = 0;
|
925
|
+
|
926
|
+
/* Get the frame at the top of the stack. This may represent
|
927
|
+
the current method (EVENT_LINE, EVENT_RETURN) or the
|
928
|
+
previous method (EVENT_CALL).*/
|
929
|
+
frame = stack_peek(thread_data->stack);
|
930
|
+
|
931
|
+
if (frame)
|
932
|
+
frame->wait_time += wait_time;
|
933
|
+
|
934
|
+
/* Save on the last thread the time of the context switch
|
935
|
+
and reset this thread's last context switch to 0.*/
|
936
|
+
if (last_thread_data)
|
937
|
+
last_thread_data->last_switch = now;
|
938
|
+
|
939
|
+
last_thread_data = thread_data;
|
940
|
+
return thread_data;
|
941
|
+
}
|
942
|
+
|
943
|
+
static prof_frame_t*
|
944
|
+
pop_frame(thread_data_t *thread_data, prof_measure_t now)
|
945
|
+
{
|
946
|
+
prof_frame_t *frame = NULL;
|
947
|
+
prof_frame_t* parent_frame = NULL;
|
948
|
+
prof_measure_t total_time;
|
949
|
+
|
950
|
+
frame = stack_pop(thread_data->stack);
|
1066
951
|
|
952
|
+
/* Frame can be null. This can happen if RubProf.start is called from
|
953
|
+
a method that exits. And it can happen if an exception is raised
|
954
|
+
in code that is being profiled and the stack unwinds (RubProf is
|
955
|
+
not notified of that by the ruby runtime. */
|
956
|
+
if (frame == NULL) return NULL;
|
957
|
+
|
958
|
+
/* Calculate the total time this method took */
|
959
|
+
total_time = now - frame->start_time;
|
960
|
+
|
961
|
+
/* Now deactivate the method */
|
962
|
+
frame->call_info->target->active = 0;
|
963
|
+
|
964
|
+
parent_frame = stack_peek(thread_data->stack);
|
965
|
+
if (parent_frame)
|
966
|
+
{
|
967
|
+
parent_frame->child_time += total_time;
|
968
|
+
}
|
1067
969
|
|
1068
|
-
|
1069
|
-
|
1070
|
-
|
1071
|
-
|
1072
|
-
|
1073
|
-
|
970
|
+
update_result(total_time, parent_frame, frame);
|
971
|
+
return frame;
|
972
|
+
}
|
973
|
+
|
974
|
+
static int
|
975
|
+
pop_frames(st_data_t key, st_data_t value, st_data_t now_arg)
|
976
|
+
{
|
977
|
+
VALUE thread_id = (VALUE)key;
|
978
|
+
thread_data_t* thread_data = (thread_data_t *) value;
|
979
|
+
prof_measure_t now = *(prof_measure_t *) now_arg;
|
980
|
+
|
981
|
+
if (!last_thread_data || last_thread_data->thread_id != thread_id)
|
982
|
+
thread_data = switch_thread(thread_id, now);
|
983
|
+
else
|
984
|
+
thread_data = last_thread_data;
|
985
|
+
|
986
|
+
while (pop_frame(thread_data, now))
|
1074
987
|
{
|
1075
|
-
parent->total_time += total_time;
|
1076
|
-
parent->wait_time += wait_time;
|
1077
988
|
}
|
989
|
+
|
990
|
+
return ST_CONTINUE;
|
991
|
+
}
|
992
|
+
|
993
|
+
static void
|
994
|
+
prof_pop_threads()
|
995
|
+
{
|
996
|
+
/* Get current measurement*/
|
997
|
+
prof_measure_t now = get_measurement();
|
998
|
+
st_foreach(threads_tbl, pop_frames, (st_data_t) &now);
|
1078
999
|
}
|
1079
1000
|
|
1080
1001
|
|
@@ -1087,44 +1008,51 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
|
|
1087
1008
|
#endif
|
1088
1009
|
{
|
1089
1010
|
|
1090
|
-
VALUE thread;
|
1011
|
+
VALUE thread = Qnil;
|
1012
|
+
VALUE thread_id = Qnil;
|
1091
1013
|
prof_measure_t now = 0;
|
1092
1014
|
thread_data_t* thread_data = NULL;
|
1093
|
-
long thread_id = 0;
|
1094
1015
|
prof_frame_t *frame = NULL;
|
1016
|
+
|
1017
|
+
|
1095
1018
|
#ifdef RUBY_VM
|
1096
1019
|
|
1097
|
-
if (event != RUBY_EVENT_C_CALL &&
|
1098
|
-
|
1099
|
-
VALUE thread = rb_thread_current();
|
1100
|
-
rb_frame_method_id_and_class(&mid, &klass);
|
1020
|
+
if (event != RUBY_EVENT_C_CALL && event != RUBY_EVENT_C_RETURN) {
|
1021
|
+
rb_frame_method_id_and_class(&mid, &klass);
|
1101
1022
|
}
|
1102
1023
|
#endif
|
1024
|
+
|
1025
|
+
#ifdef DEBUG
|
1103
1026
|
/* This code is here for debug purposes - uncomment it out
|
1104
1027
|
when debugging to see a print out of exactly what the
|
1105
|
-
profiler is tracing.
|
1028
|
+
profiler is tracing. */
|
1106
1029
|
{
|
1107
|
-
|
1108
|
-
static
|
1030
|
+
char* key = 0;
|
1031
|
+
static VALUE last_thread_id = Qnil;
|
1109
1032
|
|
1110
1033
|
VALUE thread = rb_thread_current();
|
1111
|
-
|
1112
|
-
char* class_name =
|
1034
|
+
VALUE thread_id = rb_obj_id(thread);
|
1035
|
+
char* class_name = NULL;
|
1113
1036
|
char* method_name = rb_id2name(mid);
|
1114
|
-
char* source_file =
|
1115
|
-
unsigned int source_line =
|
1037
|
+
char* source_file = rb_sourcefile();
|
1038
|
+
unsigned int source_line = rb_sourceline();
|
1039
|
+
|
1116
1040
|
char* event_name = get_event_name(event);
|
1117
|
-
|
1118
|
-
if (last_thread_id != thread_id)
|
1119
|
-
printf("\n");
|
1120
|
-
|
1041
|
+
|
1121
1042
|
if (klass != 0)
|
1122
1043
|
klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);
|
1123
|
-
|
1124
|
-
|
1125
|
-
|
1044
|
+
|
1045
|
+
class_name = rb_class2name(klass);
|
1046
|
+
|
1047
|
+
if (last_thread_id != thread_id)
|
1048
|
+
printf("\n");
|
1049
|
+
|
1050
|
+
printf("%2u: %-8s :%2d %s#%s\n",
|
1051
|
+
thread_id, event_name, source_line, class_name, method_name);
|
1052
|
+
fflush(stdout);
|
1126
1053
|
last_thread_id = thread_id;
|
1127
|
-
}
|
1054
|
+
}
|
1055
|
+
#endif
|
1128
1056
|
|
1129
1057
|
/* Special case - skip any methods from the mProf
|
1130
1058
|
module, such as Prof.stop, since they clutter
|
@@ -1136,41 +1064,23 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla

     /* Get the current thread information. */
     thread = rb_thread_current();
-    thread_id =
+    thread_id = rb_obj_id(thread);
+
+    if (exclude_threads_tbl &&
+        st_lookup(exclude_threads_tbl, (st_data_t) thread_id, 0))
+    {
+        return;
+    }

     /* Was there a context switch? */
     if (!last_thread_data || last_thread_data->thread_id != thread_id)
-
-        prof_measure_t wait_time = 0;
-
-        /* Get new thread information. */
-        thread_data = threads_table_lookup(threads_tbl, thread_id);
-
-        /* How long has this thread been waiting? */
-        wait_time = now - thread_data->last_switch;
-        thread_data->last_switch = 0;
-
-        /* Get the frame at the top of the stack.  This may represent
-           the current method (EVENT_LINE, EVENT_RETURN) or the
-           previous method (EVENT_CALL).*/
-        frame = stack_peek(thread_data->stack);
-
-        if (frame)
-            frame->wait_time += wait_time;
-
-        /* Save on the last thread the time of the context switch
-           and reset this thread's last context switch to 0.*/
-        if (last_thread_data)
-            last_thread_data->last_switch = now;
-
-        last_thread_data = thread_data;
-    }
+        thread_data = switch_thread(thread_id, now);
     else
-    {
         thread_data = last_thread_data;
-        frame = stack_peek(thread_data->stack);
-    }

+    /* Get the current frame for the current thread. */
+    frame = stack_peek(thread_data->stack);
+
     switch (event) {
     case RUBY_EVENT_LINE:
     {
@@ -1179,14 +1089,10 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
        called from. */
     if (frame)
     {
-
-        frame->line = rb_sourceline();
-#else
-        if (node)
-            frame->line = nd_line(node);
-#endif
+        frame->line = rb_sourceline();
        break;
     }
+
     /* If we get here there was no frame, which means this is
        the first method seen for this thread, so fall through
        to below to create it. */
@@ -1194,8 +1100,7 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
     case RUBY_EVENT_CALL:
     case RUBY_EVENT_C_CALL:
     {
-
-        st_data_t key = 0;
+        prof_call_info_t *call_info = NULL;
         prof_method_t *method = NULL;

         /* Is this an include for a module?  If so get the actual
@@ -1205,56 +1110,41 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
         if (klass != 0)
             klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);

-
-
-
-
-        if (
+        /* Assume this is the first time we have called this method. */
+        method = get_method(event, node, klass, mid, 0, thread_data->method_table);
+
+        /* Check for a recursive call */
+        if (method->active)
         {
-
-
-
-            /* Line numbers are not accurate for c method calls */
-            if (event == RUBY_EVENT_C_CALL)
-            {
-                line = 0;
-                source_file = NULL;
-            }
-
-            method = prof_method_create(key, klass, mid, depth, source_file, line);
-            method_info_table_insert(thread_data->method_info_table, key, method);
+            /* Yes, this method is already active */
+            method = get_method(event, node, klass, mid, method->key->depth + 1, thread_data->method_table);
         }
-
-        depth = method->active_frame;
-        method->active_frame++;
-
-        if (depth > 0)
+        else
         {
-
-
-
-
-
+            /* No, so make it active */
+            method->active = 1;
+        }
+
+        if (!frame)
+        {
+            call_info = prof_call_info_create(method, NULL);
+            prof_add_call_info(method->call_infos, call_info);
+        }
+        else
+        {
+            call_info = call_info_table_lookup(frame->call_info->call_infos, method->key);
+
+            if (!call_info)
             {
-
-
-
-                /* Line numbers are not accurate for c method calls */
-                if (event == RUBY_EVENT_C_CALL)
-                {
-                    line = 0;
-                    source_file = NULL;
-                }
-
-                method = prof_method_create(key, klass, mid, depth, source_file, line);
-                method->base = base_method;
-                method_info_table_insert(thread_data->method_info_table, key, method);
+                call_info = prof_call_info_create(method, frame->call_info);
+                call_info_table_insert(frame->call_info->call_infos, method->key, call_info);
+                prof_add_call_info(method->call_infos, call_info);
             }
         }

         /* Push a new frame onto the stack */
         frame = stack_push(thread_data->stack);
-        frame->
+        frame->call_info = call_info;
         frame->start_time = now;
         frame->wait_time = 0;
         frame->child_time = 0;
@@ -1265,29 +1155,7 @@ prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE kla
     case RUBY_EVENT_RETURN:
     case RUBY_EVENT_C_RETURN:
     {
-
-
-        prof_measure_t total_time;
-
-        frame = stack_pop(thread_data->stack);
-        caller_frame = stack_peek(thread_data->stack);
-
-        /* Frame can be null. This can happen if RubProf.start is called from
-           a method that exits. And it can happen if an exception is raised
-           in code that is being profiled and the stack unwinds (RubProf is
-           not notified of that by the ruby runtime. */
-        if (frame == NULL) return;
-
-        total_time = now - frame->start_time;
-
-        if (caller_frame)
-        {
-            caller_frame->child_time += total_time;
-        }
-
-        frame->method->base->active_frame--;
-
-        update_result(thread_data, total_time, caller_frame, frame);
+        pop_frame(thread_data, now);
         break;
     }
 }
@@ -1374,7 +1242,9 @@ prof_result_threads(VALUE self)
    *RubyProf::WALL_TIME - Measure wall time using gettimeofday on Linx and GetLocalTime on Windows
    *RubyProf::CPU_TIME - Measure time using the CPU clock counter. This mode is only supported on Pentium or PowerPC platforms.
    *RubyProf::ALLOCATIONS - Measure object allocations. This requires a patched Ruby interpreter.
-   *RubyProf::MEMORY - Measure memory size. This requires a patched Ruby interpreter
+   *RubyProf::MEMORY - Measure memory size. This requires a patched Ruby interpreter.
+   *RubyProf::GC_RUNS - Measure number of garbage collections. This requires a patched Ruby interpreter.
+   *RubyProf::GC_TIME - Measure time spent doing garbage collection. This requires a patched Ruby interpreter.*/
 static VALUE
 prof_get_measure_mode(VALUE self)
 {
@@ -1390,7 +1260,9 @@ prof_get_measure_mode(VALUE self)
    *RubyProf::WALL_TIME - Measure wall time using gettimeofday on Linx and GetLocalTime on Windows
    *RubyProf::CPU_TIME - Measure time using the CPU clock counter. This mode is only supported on Pentium or PowerPC platforms.
    *RubyProf::ALLOCATIONS - Measure object allocations. This requires a patched Ruby interpreter.
-   *RubyProf::MEMORY - Measure memory size. This requires a patched Ruby interpreter
+   *RubyProf::MEMORY - Measure memory size. This requires a patched Ruby interpreter.
+   *RubyProf::GC_RUNS - Measure number of garbage collections. This requires a patched Ruby interpreter.
+   *RubyProf::GC_TIME - Measure time spent doing garbage collection. This requires a patched Ruby interpreter.*/
 static VALUE
 prof_set_measure_mode(VALUE self, VALUE val)
 {
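The two measurement modes documented in this hunk can be selected from Ruby before profiling. A minimal sketch (not part of the package diff; the GC modes are only non-nil when the interpreter carries the GC patches probed for in ext/extconf.rb, so the example falls back to wall time):

    require 'ruby-prof'

    # Prefer GC time when the patched-interpreter mode is available.
    RubyProf.measure_mode = RubyProf::GC_TIME || RubyProf::WALL_TIME

    result = RubyProf.profile do
      10_000.times { Object.new }   # allocate so the GC has something to do
    end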
@@ -1415,7 +1287,7 @@ prof_set_measure_mode(VALUE self, VALUE val)
 #if defined(MEASURE_CPU_TIME)
     case MEASURE_CPU_TIME:
         if (cpu_frequency == 0)
-            cpu_frequency =
+            cpu_frequency = get_cpu_frequency();
         get_measurement = measure_cpu_time;
         convert_measurement = convert_cpu_time;
         break;
@@ -1434,9 +1306,23 @@ prof_set_measure_mode(VALUE self, VALUE val)
         convert_measurement = convert_memory;
         break;
 #endif
-
+
+#if defined(MEASURE_GC_RUNS)
+    case MEASURE_GC_RUNS:
+        get_measurement = measure_gc_runs;
+        convert_measurement = convert_gc_runs;
+        break;
+#endif
+
+#if defined(MEASURE_GC_TIME)
+    case MEASURE_GC_TIME:
+        get_measurement = measure_gc_time;
+        convert_measurement = convert_gc_time;
+        break;
+#endif
+
     default:
-        rb_raise(rb_eArgError, "invalid mode: %
+        rb_raise(rb_eArgError, "invalid mode: %ld", mode);
         break;
     }

@@ -1444,7 +1330,75 @@ prof_set_measure_mode(VALUE self, VALUE val)
     return val;
 }

+/* call-seq:
+   exclude_threads= -> void
+
+   Specifies what threads ruby-prof should exclude from profiling */
+static VALUE
+prof_set_exclude_threads(VALUE self, VALUE threads)
+{
+    int i;
+
+    if (threads_tbl != NULL)
+    {
+        rb_raise(rb_eRuntimeError, "can't set exclude_threads while profiling");
+    }
+
+    /* Stay simple, first free the old hash table */
+    if (exclude_threads_tbl)
+    {
+        st_free_table(exclude_threads_tbl);
+        exclude_threads_tbl = NULL;
+    }
+
+    /* Now create a new one if the user passed in any threads */
+    if (threads != Qnil)
+    {
+        Check_Type(threads, T_ARRAY);
+        exclude_threads_tbl = st_init_numtable();
+
+        for (i=0; i < RARRAY_LEN(threads); ++i)
+        {
+            VALUE thread = rb_ary_entry(threads, i);
+            st_insert(exclude_threads_tbl, (st_data_t) rb_obj_id(thread), 0);
+        }
+    }
+    return threads;
+}
+
+
 /* ========= Profiling ============= */
+void
+prof_install_hook()
+{
+#ifdef RUBY_VM
+    rb_add_event_hook(prof_event_hook,
+          RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
+          RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN
+          | RUBY_EVENT_LINE, Qnil);
+#else
+    rb_add_event_hook(prof_event_hook,
+          RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
+          RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN
+          | RUBY_EVENT_LINE);
+#endif
+
+#if defined(TOGGLE_GC_STATS)
+    rb_gc_enable_stats();
+#endif
+}
+
+void
+prof_remove_hook()
+{
+#if defined(TOGGLE_GC_STATS)
+    rb_gc_disable_stats();
+#endif
+
+    /* Now unregister from event */
+    rb_remove_event_hook(prof_event_hook);
+}
+


 /* call-seq:
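The exclude_threads= writer added here takes an array of Thread objects and must be set while the profiler is stopped (it raises otherwise). An illustrative Ruby sketch, where do_work stands in for whatever code is being measured:

    require 'ruby-prof'

    monitor = Thread.new { sleep }        # background thread we want to ignore
    RubyProf.exclude_threads = [monitor]  # only allowed before RubyProf.start

    RubyProf.start
    do_work                               # hypothetical workload
    result = RubyProf.stop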
@@ -1461,7 +1415,7 @@ prof_running(VALUE self)
 }

 /* call-seq:
-   start ->
+   start -> RubyProf

    Starts recording profile data.*/
 static VALUE
@@ -1475,26 +1429,50 @@ prof_start(VALUE self)
     /* Setup globals */
     last_thread_data = NULL;
     threads_tbl = threads_table_create();
-
-#ifdef RUBY_VM
-    rb_add_event_hook(prof_event_hook,
-          RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
-          RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN
-          | RUBY_EVENT_LINE, Qnil);
-#else
-    rb_add_event_hook(prof_event_hook,
-          RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
-          RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN
-          | RUBY_EVENT_LINE);
-#endif

-
-
-
+    prof_install_hook();
+    return self;
+}
+
+/* call-seq:
+   pause -> RubyProf
+
+   Pauses collecting profile data. */
+static VALUE
+prof_pause(VALUE self)
+{
+    if (threads_tbl == NULL)
+    {
+        rb_raise(rb_eRuntimeError, "RubyProf is not running.");
+    }

-
+    prof_remove_hook();
+    return self;
 }

+/* call-seq:
+   resume {block} -> RubyProf
+
+   Resumes recording profile data.*/
+static VALUE
+prof_resume(VALUE self)
+{
+    if (threads_tbl == NULL)
+    {
+        prof_start(self);
+    }
+    else
+    {
+        prof_install_hook();
+    }
+
+    if (rb_block_given_p())
+    {
+        rb_ensure(rb_yield, self, prof_pause, self);
+    }
+
+    return self;
+}

 /* call-seq:
    stop -> RubyProf::Result
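The pause and resume entry points added in this hunk let profiling be suspended without discarding data already collected. A sketch of both calling styles, assuming the 0.7.0 behaviour shown above (resume with a block pauses again when the block returns, via rb_ensure); the *_phase methods are placeholders:

    require 'ruby-prof'

    RubyProf.start
    setup_phase          # recorded
    RubyProf.pause
    boring_phase         # not recorded
    RubyProf.resume
    measured_phase       # recorded again
    result = RubyProf.stop

    # Block form: pauses again automatically when the block exits.
    RubyProf.resume { measured_phase }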
@@ -1503,19 +1481,11 @@ prof_start(VALUE self)
 static VALUE
 prof_stop(VALUE self)
 {
-#if defined(MEASURE_MEMORY)
-    rb_gc_disable_stats();
-#endif
-
     VALUE result = Qnil;
+
+    prof_remove_hook();

-
-    {
-        rb_raise(rb_eRuntimeError, "RubyProf is not running.");
-    }
-
-    /* Now unregister from event */
-    rb_remove_event_hook(prof_event_hook);
+    prof_pop_threads();

     /* Create the result */
     result = prof_result_new();
@@ -1529,7 +1499,6 @@ prof_stop(VALUE self)
     return result;
 }

-
 /* call-seq:
    profile {block} -> RubyProf::Result

@@ -1537,16 +1506,83 @@ Profiles the specified block and returns a RubyProf::Result object. */
 static VALUE
 prof_profile(VALUE self)
 {
+    int result;
+
     if (!rb_block_given_p())
     {
         rb_raise(rb_eArgError, "A block must be provided to the profile method.");
     }

     prof_start(self);
-    rb_yield
+    rb_protect(rb_yield, self, &result);
     return prof_stop(self);
 }

+/* Get arround annoying limitations in RDOC */
+
+/* Document-method: measure_process_time
+   call-seq:
+     measure_process_time -> float
+
+   Returns the process time.*/
+
+/* Document-method: measure_wall_time
+   call-seq:
+     measure_wall_time -> float
+
+   Returns the wall time.*/
+
+/* Document-method: measure_cpu_time
+   call-seq:
+     measure_cpu_time -> float
+
+   Returns the cpu time.*/
+
+/* Document-method: get_cpu_frequency
+   call-seq:
+     cpu_frequency -> int
+
+   Returns the cpu's frequency. This value is needed when
+   RubyProf::measure_mode is set to CPU_TIME. */
+
+/* Document-method: cpu_frequency
+   call-seq:
+     cpu_frequency -> int
+
+   Returns the cpu's frequency. This value is needed when
+   RubyProf::measure_mode is set to CPU_TIME. */
+
+/* Document-method: cpu_frequency=
+   call-seq:
+     cpu_frequency = frequency
+
+   Sets the cpu's frequency. This value is needed when
+   RubyProf::measure_mode is set to CPU_TIME. */
+
+/* Document-method: measure_allocations
+   call-seq:
+     measure_allocations -> int
+
+   Returns the total number of object allocations since Ruby started.*/
+
+/* Document-method: measure_memory
+   call-seq:
+     measure_memory -> int
+
+   Returns total allocated memory in bytes.*/
+
+/* Document-method: measure_gc_runs
+   call-seq:
+     gc_runs -> Integer
+
+   Returns the total number of garbage collections.*/
+
+/* Document-method: measure_gc_time
+   call-seq:
+     gc_time -> Integer
+
+   Returns the time spent doing garbage collections in microseconds.*/
+

 #if defined(_WIN32)
 __declspec(dllexport)
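RubyProf.profile now runs the block under rb_protect, so an exception inside the block no longer bypasses prof_stop. A short usage sketch combining the block API with the measure_* helpers documented above; the printer call uses GraphPrinter from lib/ruby-prof/graph_printer.rb and its exact options are an assumption here:

    require 'ruby-prof'

    result = RubyProf.profile do
      1_000.times { [1, 2, 3].map { |n| n * n } }
    end

    # measure_gc_runs is only defined when GC_RUNS is supported.
    puts "GC runs so far: #{RubyProf.measure_gc_runs}" if RubyProf::GC_RUNS

    RubyProf::GraphPrinter.new(result).print(STDOUT)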
@@ -1556,23 +1592,29 @@ void
 Init_ruby_prof()
 {
     mProf = rb_define_module("RubyProf");
-    rb_define_const(mProf, "VERSION", rb_str_new2(
+    rb_define_const(mProf, "VERSION", rb_str_new2(RUBY_PROF_VERSION));
     rb_define_module_function(mProf, "start", prof_start, 0);
     rb_define_module_function(mProf, "stop", prof_stop, 0);
+    rb_define_module_function(mProf, "resume", prof_resume, 0);
+    rb_define_module_function(mProf, "pause", prof_pause, 0);
     rb_define_module_function(mProf, "running?", prof_running, 0);
     rb_define_module_function(mProf, "profile", prof_profile, 0);

+    rb_define_singleton_method(mProf, "exclude_threads=", prof_set_exclude_threads, 1);
     rb_define_singleton_method(mProf, "measure_mode", prof_get_measure_mode, 0);
     rb_define_singleton_method(mProf, "measure_mode=", prof_set_measure_mode, 1);

     rb_define_const(mProf, "CLOCKS_PER_SEC", INT2NUM(CLOCKS_PER_SEC));
     rb_define_const(mProf, "PROCESS_TIME", INT2NUM(MEASURE_PROCESS_TIME));
+    rb_define_singleton_method(mProf, "measure_process_time", prof_measure_process_time, 0); /* in measure_process_time.h */
     rb_define_const(mProf, "WALL_TIME", INT2NUM(MEASURE_WALL_TIME));
+    rb_define_singleton_method(mProf, "measure_wall_time", prof_measure_wall_time, 0); /* in measure_wall_time.h */

 #ifndef MEASURE_CPU_TIME
     rb_define_const(mProf, "CPU_TIME", Qnil);
 #else
     rb_define_const(mProf, "CPU_TIME", INT2NUM(MEASURE_CPU_TIME));
+    rb_define_singleton_method(mProf, "measure_cpu_time", prof_measure_cpu_time, 0); /* in measure_cpu_time.h */
     rb_define_singleton_method(mProf, "cpu_frequency", prof_get_cpu_frequency, 0); /* in measure_cpu_time.h */
     rb_define_singleton_method(mProf, "cpu_frequency=", prof_set_cpu_frequency, 1); /* in measure_cpu_time.h */
 #endif
@@ -1581,20 +1623,36 @@ Init_ruby_prof()
     rb_define_const(mProf, "ALLOCATIONS", Qnil);
 #else
     rb_define_const(mProf, "ALLOCATIONS", INT2NUM(MEASURE_ALLOCATIONS));
+    rb_define_singleton_method(mProf, "measure_allocations", prof_measure_allocations, 0); /* in measure_allocations.h */
 #endif

 #ifndef MEASURE_MEMORY
     rb_define_const(mProf, "MEMORY", Qnil);
 #else
     rb_define_const(mProf, "MEMORY", INT2NUM(MEASURE_MEMORY));
+    rb_define_singleton_method(mProf, "measure_memory", prof_measure_memory, 0); /* in measure_memory.h */
 #endif
-
+
+#ifndef MEASURE_GC_RUNS
+    rb_define_const(mProf, "GC_RUNS", Qnil);
+#else
+    rb_define_const(mProf, "GC_RUNS", INT2NUM(MEASURE_GC_RUNS));
+    rb_define_singleton_method(mProf, "measure_gc_runs", prof_measure_gc_runs, 0); /* in measure_gc_runs.h */
+#endif
+
+#ifndef MEASURE_GC_TIME
+    rb_define_const(mProf, "GC_TIME", Qnil);
+#else
+    rb_define_const(mProf, "GC_TIME", INT2NUM(MEASURE_GC_TIME));
+    rb_define_singleton_method(mProf, "measure_gc_time", prof_measure_gc_time, 0); /* in measure_gc_time.h */
+#endif
+
     cResult = rb_define_class_under(mProf, "Result", rb_cObject);
     rb_undef_method(CLASS_OF(cMethodInfo), "new");
     rb_define_method(cResult, "threads", prof_result_threads, 0);

+    /* MethodInfo */
     cMethodInfo = rb_define_class_under(mProf, "MethodInfo", rb_cObject);
-    rb_include_module(cMethodInfo, rb_mComparable);
     rb_undef_method(CLASS_OF(cMethodInfo), "new");

     rb_define_method(cMethodInfo, "klass", prof_method_klass, 0);
@@ -1602,27 +1660,21 @@ Init_ruby_prof()
     rb_define_method(cMethodInfo, "method_name", prof_method_name, 0);
     rb_define_method(cMethodInfo, "full_name", prof_full_name, 0);
     rb_define_method(cMethodInfo, "method_id", prof_method_id, 0);
-    rb_define_method(cMethodInfo, "base", prof_method_base, 0);

-    rb_define_method(cMethodInfo, "parents", prof_method_parents, 0);
-    rb_define_method(cMethodInfo, "children", prof_method_children, 0);
-    rb_define_method(cMethodInfo, "<=>", prof_method_cmp, 1);
     rb_define_method(cMethodInfo, "source_file", prof_method_source_file,0);
     rb_define_method(cMethodInfo, "line", prof_method_line, 0);
-    rb_define_method(cMethodInfo, "called", prof_method_called, 0);
-    rb_define_method(cMethodInfo, "total_time", prof_method_total_time, 0);
-    rb_define_method(cMethodInfo, "self_time", prof_method_self_time, 0);
-    rb_define_method(cMethodInfo, "wait_time", prof_method_wait_time, 0);
-    rb_define_method(cMethodInfo, "children_time", prof_method_children_time, 0);

+    rb_define_method(cMethodInfo, "call_infos", prof_method_call_infos, 0);
+
+    /* CallInfo */
     cCallInfo = rb_define_class_under(mProf, "CallInfo", rb_cObject);
     rb_undef_method(CLASS_OF(cCallInfo), "new");
-    rb_define_method(cCallInfo, "
-    rb_define_method(cCallInfo, "
-    rb_define_method(cCallInfo, "
-    rb_define_method(cCallInfo, "
-    rb_define_method(cCallInfo, "
-    rb_define_method(cCallInfo, "
-    rb_define_method(cCallInfo, "
+    rb_define_method(cCallInfo, "parent", prof_call_info_parent, 0);
+    rb_define_method(cCallInfo, "children", prof_call_info_children, 0);
+    rb_define_method(cCallInfo, "target", prof_call_info_target, 0);
+    rb_define_method(cCallInfo, "called", prof_call_info_called, 0);
+    rb_define_method(cCallInfo, "total_time", prof_call_info_total_time, 0);
+    rb_define_method(cCallInfo, "self_time", prof_call_info_self_time, 0);
+    rb_define_method(cCallInfo, "wait_time", prof_call_info_wait_time, 0);
+    rb_define_method(cCallInfo, "line", prof_call_info_line, 0);
 }
-
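With this hunk the per-call timing accessors move from MethodInfo to the new CallInfo class, and MethodInfo gains call_infos (the Ruby-side aggregation lives in lib/ruby-prof/method_info.rb). A sketch of walking the resulting object graph; method and accessor names follow the registrations above, and it assumes, as the bundled printers do, that Result#threads maps thread ids to collections of MethodInfo:

    require 'ruby-prof'

    result = RubyProf.profile { 100.times { 'x' * 1_000 } }

    result.threads.each do |thread_id, methods|
      methods.each do |method|
        method.call_infos.each do |ci|
          # ci.parent is nil for root calls; ci.target returns the MethodInfo.
          puts "#{method.full_name}: called=#{ci.called} total=#{ci.total_time}"
        end
      end
    end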