rdp-ruby-prof 0.7.4
Sign up to get free protection for your applications and to get access to all the features.
- data/CHANGES +202 -0
- data/LICENSE +23 -0
- data/README +445 -0
- data/Rakefile +123 -0
- data/bin/ruby-prof +207 -0
- data/examples/flat.txt +55 -0
- data/examples/graph.html +823 -0
- data/examples/graph.txt +170 -0
- data/ext/#ruby_prof.c# +1679 -0
- data/ext/Makefile +180 -0
- data/ext/extconf.rb +40 -0
- data/ext/measure_allocations.h +58 -0
- data/ext/measure_cpu_time.h +152 -0
- data/ext/measure_gc_runs.h +76 -0
- data/ext/measure_gc_time.h +57 -0
- data/ext/measure_memory.h +101 -0
- data/ext/measure_process_time.h +52 -0
- data/ext/measure_wall_time.h +53 -0
- data/ext/mingw/Rakefile +23 -0
- data/ext/mingw/build.rake +38 -0
- data/ext/ruby_prof.c +1707 -0
- data/ext/ruby_prof.e +19984 -0
- data/ext/ruby_prof.h +188 -0
- data/ext/vc/ruby_prof.sln +20 -0
- data/ext/vc/ruby_prof.vcproj +241 -0
- data/ext/version.h +4 -0
- data/lib/ruby-prof.rb +48 -0
- data/lib/ruby-prof/abstract_printer.rb +41 -0
- data/lib/ruby-prof/aggregate_call_info.rb +62 -0
- data/lib/ruby-prof/call_info.rb +47 -0
- data/lib/ruby-prof/call_tree_printer.rb +84 -0
- data/lib/ruby-prof/flat_printer.rb +79 -0
- data/lib/ruby-prof/graph_html_printer.rb +256 -0
- data/lib/ruby-prof/graph_html_printer.rb.orig +256 -0
- data/lib/ruby-prof/graph_html_printer.rb.rej +34 -0
- data/lib/ruby-prof/graph_printer.rb +164 -0
- data/lib/ruby-prof/graph_printer.rb.orig +164 -0
- data/lib/ruby-prof/method_info.rb +111 -0
- data/lib/ruby-prof/task.rb +146 -0
- data/lib/ruby-prof/test.rb +148 -0
- data/lib/unprof.rb +8 -0
- data/rails/environment/profile.rb +24 -0
- data/rails/example/example_test.rb +9 -0
- data/rails/profile_test_helper.rb +21 -0
- data/test/aggregate_test.rb +121 -0
- data/test/basic_test.rb +283 -0
- data/test/duplicate_names_test.rb +32 -0
- data/test/exceptions_test.rb +15 -0
- data/test/exclude_threads_test.rb +54 -0
- data/test/line_number_test.rb +73 -0
- data/test/measurement_test.rb +121 -0
- data/test/module_test.rb +54 -0
- data/test/no_method_class_test.rb +13 -0
- data/test/prime.rb +58 -0
- data/test/prime_test.rb +13 -0
- data/test/printers_test.rb +71 -0
- data/test/recursive_test.rb +254 -0
- data/test/singleton_test.rb +37 -0
- data/test/stack_test.rb +138 -0
- data/test/start_stop_test.rb +95 -0
- data/test/test_suite.rb +23 -0
- data/test/thread_test.rb +159 -0
- data/test/unique_call_path_test.rb +206 -0
- metadata +124 -0
@@ -0,0 +1,57 @@
|
|
1
|
+
/* :nodoc:
|
2
|
+
* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
|
3
|
+
* Charlie Savage <cfis@savagexi.com>
|
4
|
+
* All rights reserved.
|
5
|
+
*
|
6
|
+
* Redistribution and use in source and binary forms, with or without
|
7
|
+
* modification, are permitted provided that the following conditions
|
8
|
+
* are met:
|
9
|
+
* 1. Redistributions of source code must retain the above copyright
|
10
|
+
* notice, this list of conditions and the following disclaimer.
|
11
|
+
* 2. Redistributions in binary form must reproduce the above copyright
|
12
|
+
* notice, this list of conditions and the following disclaimer in the
|
13
|
+
* documentation and/or other materials provided with the distribution.
|
14
|
+
*
|
15
|
+
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
16
|
+
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
19
|
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
20
|
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
21
|
+
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
22
|
+
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
23
|
+
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
24
|
+
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
25
|
+
* SUCH DAMAGE. */
|
26
|
+
|
27
|
+
#if defined(HAVE_RB_GC_TIME)
#define MEASURE_GC_TIME 6

/* Cumulative garbage-collection time, in microseconds, as reported by
   the patched interpreter's rb_gc_time().  The HAVE_LONG_LONG check now
   uses defined(...) for consistency with the other measurement headers
   (measure_memory.h uses #if defined(HAVE_LONG_LONG)). */
static prof_measure_t
measure_gc_time()
{
#if defined(HAVE_LONG_LONG)
    return NUM2LL(rb_gc_time());
#else
    return NUM2LONG(rb_gc_time());
#endif
}

/* Converts a raw microsecond count to seconds. */
static double
convert_gc_time(prof_measure_t c)
{
    return (double) c / 1000000;
}

/* Document-method: prof_measure_gc_time
   call-seq:
     gc_time -> Integer

   Returns the time spent doing garbage collections in microseconds.*/
static VALUE
prof_measure_gc_time(VALUE self)
{
    return rb_gc_time();
}

#endif
|
@@ -0,0 +1,101 @@
|
|
1
|
+
/* :nodoc:
|
2
|
+
* Copyright (C) 2008 Alexander Dymo <adymo@pluron.com>
|
3
|
+
*
|
4
|
+
* All rights reserved.
|
5
|
+
*
|
6
|
+
* Redistribution and use in source and binary forms, with or without
|
7
|
+
* modification, are permitted provided that the following conditions
|
8
|
+
* are met:
|
9
|
+
* 1. Redistributions of source code must retain the above copyright
|
10
|
+
* notice, this list of conditions and the following disclaimer.
|
11
|
+
* 2. Redistributions in binary form must reproduce the above copyright
|
12
|
+
* notice, this list of conditions and the following disclaimer in the
|
13
|
+
* documentation and/or other materials provided with the distribution.
|
14
|
+
*
|
15
|
+
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
16
|
+
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
19
|
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
20
|
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
21
|
+
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
22
|
+
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
23
|
+
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
24
|
+
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
25
|
+
* SUCH DAMAGE. */
|
26
|
+
|
27
|
+
|
28
|
+
#if defined(HAVE_RB_GC_ALLOCATED_SIZE)
#define MEASURE_MEMORY 4
#define TOGGLE_GC_STATS 1

/* Total allocated memory as reported by the patched interpreter's
   rb_gc_allocated_size(). */
static prof_measure_t
measure_memory()
{
#if defined(HAVE_LONG_LONG)
    return NUM2LL(rb_gc_allocated_size());
#else
    return NUM2ULONG(rb_gc_allocated_size());
#endif
}

/* Converts a raw byte count to kilobytes. */
static double
convert_memory(prof_measure_t c)
{
    return (double) c / 1024;
}

/* Document-method: prof_measure_memory
   call-seq:
     measure_memory -> int

   Returns total allocated memory in bytes.*/
static VALUE
prof_measure_memory(VALUE self)
{
    return rb_gc_allocated_size();
}

#elif defined(HAVE_RB_GC_MALLOC_ALLOCATED_SIZE)
#define MEASURE_MEMORY 4

/* Variant for interpreters exposing rb_gc_malloc_allocated_size(). */
static prof_measure_t
measure_memory()
{
    return rb_gc_malloc_allocated_size();
}

/* Converts a raw byte count to kilobytes. */
static double
convert_memory(prof_measure_t c)
{
    return (double) c / 1024;
}

static VALUE
prof_measure_memory(VALUE self)
{
    return UINT2NUM(rb_gc_malloc_allocated_size());
}

#elif defined(HAVE_RB_HEAP_TOTAL_MEM)
#define MEASURE_MEMORY 4

/* Variant for interpreters exposing rb_heap_total_mem(). */
static prof_measure_t
measure_memory()
{
    return rb_heap_total_mem();
}

/* Converts a raw byte count to kilobytes. */
static double
convert_memory(prof_measure_t c)
{
    return (double) c / 1024;
}

static VALUE
prof_measure_memory(VALUE self)
{
    return ULONG2NUM(rb_heap_total_mem());
}

#endif
|
@@ -0,0 +1,52 @@
|
|
1
|
+
/*
|
2
|
+
* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
|
3
|
+
* Charlie Savage <cfis@savagexi.com>
|
4
|
+
* All rights reserved.
|
5
|
+
*
|
6
|
+
* Redistribution and use in source and binary forms, with or without
|
7
|
+
* modification, are permitted provided that the following conditions
|
8
|
+
* are met:
|
9
|
+
* 1. Redistributions of source code must retain the above copyright
|
10
|
+
* notice, this list of conditions and the following disclaimer.
|
11
|
+
* 2. Redistributions in binary form must reproduce the above copyright
|
12
|
+
* notice, this list of conditions and the following disclaimer in the
|
13
|
+
* documentation and/or other materials provided with the distribution.
|
14
|
+
*
|
15
|
+
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
16
|
+
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
19
|
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
20
|
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
21
|
+
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
22
|
+
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
23
|
+
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
24
|
+
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
25
|
+
* SUCH DAMAGE. */
|
26
|
+
|
27
|
+
#include <time.h>
|
28
|
+
|
29
|
+
#define MEASURE_PROCESS_TIME 0
|
30
|
+
|
31
|
+
static prof_measure_t
|
32
|
+
measure_process_time()
|
33
|
+
{
|
34
|
+
return clock();
|
35
|
+
}
|
36
|
+
|
37
|
+
static double
|
38
|
+
convert_process_time(prof_measure_t c)
|
39
|
+
{
|
40
|
+
return (double) c / CLOCKS_PER_SEC;
|
41
|
+
}
|
42
|
+
|
43
|
+
/* Document-method: measure_process_time
|
44
|
+
call-seq:
|
45
|
+
measure_process_time -> float
|
46
|
+
|
47
|
+
Returns the process time.*/
|
48
|
+
static VALUE
|
49
|
+
prof_measure_process_time(VALUE self)
|
50
|
+
{
|
51
|
+
return rb_float_new(convert_process_time(measure_process_time()));
|
52
|
+
}
|
@@ -0,0 +1,53 @@
|
|
1
|
+
/* :nodoc:
|
2
|
+
* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
|
3
|
+
* Charlie Savage <cfis@savagexi.com>
|
4
|
+
* All rights reserved.
|
5
|
+
*
|
6
|
+
* Redistribution and use in source and binary forms, with or without
|
7
|
+
* modification, are permitted provided that the following conditions
|
8
|
+
* are met:
|
9
|
+
* 1. Redistributions of source code must retain the above copyright
|
10
|
+
* notice, this list of conditions and the following disclaimer.
|
11
|
+
* 2. Redistributions in binary form must reproduce the above copyright
|
12
|
+
* notice, this list of conditions and the following disclaimer in the
|
13
|
+
* documentation and/or other materials provided with the distribution.
|
14
|
+
*
|
15
|
+
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
16
|
+
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
19
|
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
20
|
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
21
|
+
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
22
|
+
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
23
|
+
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
24
|
+
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
25
|
+
* SUCH DAMAGE. */
|
26
|
+
|
27
|
+
|
28
|
+
#define MEASURE_WALL_TIME 1
|
29
|
+
|
30
|
+
static prof_measure_t
|
31
|
+
measure_wall_time()
|
32
|
+
{
|
33
|
+
struct timeval tv;
|
34
|
+
gettimeofday(&tv, NULL);
|
35
|
+
return tv.tv_sec * 1000000 + tv.tv_usec;
|
36
|
+
}
|
37
|
+
|
38
|
+
static double
|
39
|
+
convert_wall_time(prof_measure_t c)
|
40
|
+
{
|
41
|
+
return (double) c / 1000000;
|
42
|
+
}
|
43
|
+
|
44
|
+
/* Document-method: prof_measure_wall_time
|
45
|
+
call-seq:
|
46
|
+
measure_wall_time -> float
|
47
|
+
|
48
|
+
Returns the wall time.*/
|
49
|
+
static VALUE
|
50
|
+
prof_measure_wall_time(VALUE self)
|
51
|
+
{
|
52
|
+
return rb_float_new(convert_wall_time(measure_wall_time()));
|
53
|
+
}
|
data/ext/mingw/Rakefile
ADDED
@@ -0,0 +1,23 @@
|
|
1
|
+
# We can't use Ruby's standard build procedures
# on Windows because the Ruby executable is
# built with VC++ while here we want to build
# with MingW. So just roll our own...

require 'fileutils'
require 'rbconfig'

# Platform-specific filename of the compiled extension (e.g. ruby_prof.so).
EXTENSION_NAME = "ruby_prof.#{Config::CONFIG["DLEXT"]}"

# Invoked when the Windows gem is installed.  RubyGems passes the
# destination directories via two environment variables:
#   RUBYARCHDIR=#{dest_path}
#   RUBYLIBDIR=#{dest_path}
task :install do
  target_dir = ENV['RUBYLIBDIR']

  # Copy the prebuilt extension into the gem's lib directory.
  cp(EXTENSION_NAME, target_dir)
end

task :default => :install
|
@@ -0,0 +1,38 @@
|
|
1
|
+
# We can't use Ruby's standard build procedures
# on Windows because the Ruby executable is
# built with VC++ while here we want to build
# with MingW. So just roll our own...

require 'rake/clean'
require 'rbconfig'

RUBY_INCLUDE_DIR = Config::CONFIG["archdir"]
RUBY_BIN_DIR     = Config::CONFIG["bindir"]
RUBY_LIB_DIR     = Config::CONFIG["libdir"]
RUBY_SHARED_LIB  = Config::CONFIG["LIBRUBY"]
RUBY_SHARED_DLL  = RUBY_SHARED_LIB.gsub(/lib$/, 'dll')

# Platform-specific filename of the compiled extension.
EXTENSION_NAME = "ruby_prof.#{Config::CONFIG["DLEXT"]}"

CLEAN.include('*.o')
CLOBBER.include(EXTENSION_NAME)

task :default => "ruby_prof"

# One object file per C source in the parent directory.
SRC = FileList['../*.c']
OBJ = SRC.collect { |file_name| File.basename(file_name).ext('o') }

# Compile rule: each object depends on its source.
SRC.each do |srcfile|
  objfile = File.basename(srcfile).ext('o')
  file objfile => srcfile do
    command = "gcc -c -fPIC -O2 -Wall -o #{objfile} -I/usr/local/include #{srcfile} -I#{RUBY_INCLUDE_DIR}"
    sh "sh -c '#{command}'"
  end
end

# Link step: combine all objects against the Ruby DLL.
file "ruby_prof" => OBJ do
  command = "gcc -shared -o #{EXTENSION_NAME} -L/usr/local/lib #{OBJ} #{RUBY_BIN_DIR}/#{RUBY_SHARED_DLL}"
  sh "sh -c '#{command}'"
end
|
data/ext/ruby_prof.c
ADDED
@@ -0,0 +1,1707 @@
|
|
1
|
+
/*
|
2
|
+
* Copyright (C) 2008 Shugo Maeda <shugo@ruby-lang.org>
|
3
|
+
* Charlie Savage <cfis@savagexi.com>
|
4
|
+
* All rights reserved.
|
5
|
+
*
|
6
|
+
* Redistribution and use in source and binary forms, with or without
|
7
|
+
* modification, are permitted provided that the following conditions
|
8
|
+
* are met:
|
9
|
+
* 1. Redistributions of source code must retain the above copyright
|
10
|
+
* notice, this list of conditions and the following disclaimer.
|
11
|
+
* 2. Redistributions in binary form must reproduce the above copyright
|
12
|
+
* notice, this list of conditions and the following disclaimer in the
|
13
|
+
* documentation and/or other materials provided with the distribution.
|
14
|
+
*
|
15
|
+
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
16
|
+
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
19
|
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
20
|
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
21
|
+
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
22
|
+
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
23
|
+
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
24
|
+
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
25
|
+
* SUCH DAMAGE.
|
26
|
+
*/
|
27
|
+
|
28
|
+
/* ruby-prof tracks the time spent executing every method in ruby programming.
|
29
|
+
The main players are:
|
30
|
+
|
31
|
+
prof_result_t - Its one field, values, contains the overall results
|
32
|
+
thread_data_t - Stores data about a single thread.
|
33
|
+
prof_stack_t - The method call stack in a particular thread
|
34
|
+
prof_method_t - Profiling information for each method
|
35
|
+
prof_call_info_t - Keeps track a method's callers and callees.
|
36
|
+
|
37
|
+
The final result is a hash table of thread_data_t, keyed on the thread
|
38
|
+
id. Each thread has an hash a table of prof_method_t, keyed on the
|
39
|
+
method id. A hash table is used for quick look up when doing a profile.
|
40
|
+
However, it is exposed to Ruby as an array.
|
41
|
+
|
42
|
+
Each prof_method_t has two hash tables, parent and children, of prof_call_info_t.
|
43
|
+
These objects keep track of a method's callers (who called the method) and its
|
44
|
+
callees (who the method called). These are keyed the method id, but once again,
|
45
|
+
are exposed to Ruby as arrays. Each prof_call_info_t maintains a pointer to the
|
46
|
+
caller or callee method, thereby making it easy to navigate through the call
|
47
|
+
hierarchy in ruby - which is very helpful for creating call graphs.
|
48
|
+
*/
|
49
|
+
|
50
|
+
#include "ruby_prof.h"
|
51
|
+
|
52
|
+
|
53
|
+
/* ================ Helper Functions =================*/
|
54
|
+
static VALUE
|
55
|
+
figure_singleton_name(VALUE klass)
|
56
|
+
{
|
57
|
+
VALUE result = Qnil;
|
58
|
+
|
59
|
+
/* We have come across a singleton object. First
|
60
|
+
figure out what it is attached to.*/
|
61
|
+
VALUE attached = rb_iv_get(klass, "__attached__");
|
62
|
+
|
63
|
+
/* Is this a singleton class acting as a metaclass? */
|
64
|
+
if (BUILTIN_TYPE(attached) == T_CLASS)
|
65
|
+
{
|
66
|
+
result = rb_str_new2("<Class::");
|
67
|
+
rb_str_append(result, rb_inspect(attached));
|
68
|
+
rb_str_cat2(result, ">");
|
69
|
+
}
|
70
|
+
|
71
|
+
/* Is this for singleton methods on a module? */
|
72
|
+
else if (BUILTIN_TYPE(attached) == T_MODULE)
|
73
|
+
{
|
74
|
+
result = rb_str_new2("<Module::");
|
75
|
+
rb_str_append(result, rb_inspect(attached));
|
76
|
+
rb_str_cat2(result, ">");
|
77
|
+
}
|
78
|
+
|
79
|
+
/* Is this for singleton methods on an object? */
|
80
|
+
else if (BUILTIN_TYPE(attached) == T_OBJECT)
|
81
|
+
{
|
82
|
+
/* Make sure to get the super class so that we don't
|
83
|
+
mistakenly grab a T_ICLASS which would lead to
|
84
|
+
unknown method errors. */
|
85
|
+
#ifdef RCLASS_SUPER
|
86
|
+
VALUE super = rb_class_real(RCLASS_SUPER(klass));
|
87
|
+
#else
|
88
|
+
VALUE super = rb_class_real(RCLASS(klass)->super);
|
89
|
+
#endif
|
90
|
+
result = rb_str_new2("<Object::");
|
91
|
+
rb_str_append(result, rb_inspect(super));
|
92
|
+
rb_str_cat2(result, ">");
|
93
|
+
}
|
94
|
+
|
95
|
+
/* Ok, this could be other things like an array made put onto
|
96
|
+
a singleton object (yeah, it happens, see the singleton
|
97
|
+
objects test case). */
|
98
|
+
else
|
99
|
+
{
|
100
|
+
result = rb_inspect(klass);
|
101
|
+
}
|
102
|
+
|
103
|
+
return result;
|
104
|
+
}
|
105
|
+
|
106
|
+
static VALUE
|
107
|
+
klass_name(VALUE klass)
|
108
|
+
{
|
109
|
+
VALUE result = Qnil;
|
110
|
+
|
111
|
+
if (klass == 0 || klass == Qnil)
|
112
|
+
{
|
113
|
+
result = rb_str_new2("Global");
|
114
|
+
}
|
115
|
+
else if (BUILTIN_TYPE(klass) == T_MODULE)
|
116
|
+
{
|
117
|
+
result = rb_inspect(klass);
|
118
|
+
}
|
119
|
+
else if (BUILTIN_TYPE(klass) == T_CLASS && FL_TEST(klass, FL_SINGLETON))
|
120
|
+
{
|
121
|
+
result = figure_singleton_name(klass);
|
122
|
+
}
|
123
|
+
else if (BUILTIN_TYPE(klass) == T_CLASS)
|
124
|
+
{
|
125
|
+
result = rb_inspect(klass);
|
126
|
+
}
|
127
|
+
else
|
128
|
+
{
|
129
|
+
/* Should never happen. */
|
130
|
+
result = rb_str_new2("Unknown");
|
131
|
+
}
|
132
|
+
|
133
|
+
return result;
|
134
|
+
}
|
135
|
+
|
136
|
+
static VALUE
|
137
|
+
method_name(ID mid, int depth)
|
138
|
+
{
|
139
|
+
VALUE result;
|
140
|
+
|
141
|
+
if (mid == ID_ALLOCATOR)
|
142
|
+
result = rb_str_new2("allocate");
|
143
|
+
else if (mid == 0)
|
144
|
+
result = rb_str_new2("[No method]");
|
145
|
+
else
|
146
|
+
result = rb_String(ID2SYM(mid));
|
147
|
+
|
148
|
+
if (depth > 0)
|
149
|
+
{
|
150
|
+
char buffer[65];
|
151
|
+
sprintf(buffer, "%i", depth);
|
152
|
+
rb_str_cat2(result, "-");
|
153
|
+
rb_str_cat2(result, buffer);
|
154
|
+
}
|
155
|
+
|
156
|
+
return result;
|
157
|
+
}
|
158
|
+
|
159
|
+
static VALUE
|
160
|
+
full_name(VALUE klass, ID mid, int depth)
|
161
|
+
{
|
162
|
+
VALUE result = klass_name(klass);
|
163
|
+
rb_str_cat2(result, "#");
|
164
|
+
rb_str_append(result, method_name(mid, depth));
|
165
|
+
|
166
|
+
return result;
|
167
|
+
}
|
168
|
+
|
169
|
+
/* ================ Stack Handling =================*/
|
170
|
+
/* Creates a stack of prof_frame_t to keep track
|
171
|
+
of timings for active methods. */
|
172
|
+
static prof_stack_t *
|
173
|
+
stack_create()
|
174
|
+
{
|
175
|
+
prof_stack_t *stack = ALLOC(prof_stack_t);
|
176
|
+
stack->start = ALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
|
177
|
+
stack->ptr = stack->start;
|
178
|
+
stack->end = stack->start + INITIAL_STACK_SIZE;
|
179
|
+
return stack;
|
180
|
+
}
|
181
|
+
|
182
|
+
static void
|
183
|
+
stack_free(prof_stack_t *stack)
|
184
|
+
{
|
185
|
+
xfree(stack->start);
|
186
|
+
xfree(stack);
|
187
|
+
}
|
188
|
+
|
189
|
+
static prof_frame_t *
|
190
|
+
stack_push(prof_stack_t *stack)
|
191
|
+
{
|
192
|
+
/* Is there space on the stack? If not, double
|
193
|
+
its size. */
|
194
|
+
if (stack->ptr == stack->end)
|
195
|
+
{
|
196
|
+
size_t len = stack->ptr - stack->start;
|
197
|
+
size_t new_capacity = (stack->end - stack->start) * 2;
|
198
|
+
REALLOC_N(stack->start, prof_frame_t, new_capacity);
|
199
|
+
stack->ptr = stack->start + len;
|
200
|
+
stack->end = stack->start + new_capacity;
|
201
|
+
}
|
202
|
+
return stack->ptr++;
|
203
|
+
}
|
204
|
+
|
205
|
+
static prof_frame_t *
|
206
|
+
stack_pop(prof_stack_t *stack)
|
207
|
+
{
|
208
|
+
if (stack->ptr == stack->start)
|
209
|
+
return NULL;
|
210
|
+
else
|
211
|
+
return --stack->ptr;
|
212
|
+
}
|
213
|
+
|
214
|
+
static prof_frame_t *
|
215
|
+
stack_peek(prof_stack_t *stack)
|
216
|
+
{
|
217
|
+
if (stack->ptr == stack->start)
|
218
|
+
return NULL;
|
219
|
+
else
|
220
|
+
return stack->ptr - 1;
|
221
|
+
}
|
222
|
+
|
223
|
+
/* ================ Method Key =================*/
|
224
|
+
static int
|
225
|
+
method_table_cmp(prof_method_key_t *key1, prof_method_key_t *key2)
|
226
|
+
{
|
227
|
+
return (key1->klass != key2->klass) ||
|
228
|
+
(key1->mid != key2->mid) ||
|
229
|
+
(key1->depth != key2->depth);
|
230
|
+
}
|
231
|
+
|
232
|
+
static int
|
233
|
+
method_table_hash(prof_method_key_t *key)
|
234
|
+
{
|
235
|
+
return key->key;
|
236
|
+
}
|
237
|
+
|
238
|
+
static struct st_hash_type type_method_hash = {
|
239
|
+
method_table_cmp,
|
240
|
+
method_table_hash
|
241
|
+
};
|
242
|
+
|
243
|
+
static void
|
244
|
+
method_key(prof_method_key_t* key, VALUE klass, ID mid, int depth)
|
245
|
+
{
|
246
|
+
key->klass = klass;
|
247
|
+
key->mid = mid;
|
248
|
+
key->depth = depth;
|
249
|
+
key->key = (klass << 4) + (mid << 2) + depth;
|
250
|
+
}
|
251
|
+
|
252
|
+
|
253
|
+
/* ================ Call Info =================*/
|
254
|
+
static st_table *
|
255
|
+
call_info_table_create()
|
256
|
+
{
|
257
|
+
return st_init_table(&type_method_hash);
|
258
|
+
}
|
259
|
+
|
260
|
+
static size_t
|
261
|
+
call_info_table_insert(st_table *table, const prof_method_key_t *key, prof_call_info_t *val)
|
262
|
+
{
|
263
|
+
return st_insert(table, (st_data_t) key, (st_data_t) val);
|
264
|
+
}
|
265
|
+
|
266
|
+
static prof_call_info_t *
|
267
|
+
call_info_table_lookup(st_table *table, const prof_method_key_t *key)
|
268
|
+
{
|
269
|
+
st_data_t val;
|
270
|
+
if (st_lookup(table, (st_data_t) key, &val))
|
271
|
+
{
|
272
|
+
return (prof_call_info_t *) val;
|
273
|
+
}
|
274
|
+
else
|
275
|
+
{
|
276
|
+
return NULL;
|
277
|
+
}
|
278
|
+
}
|
279
|
+
|
280
|
+
static void
|
281
|
+
call_info_table_free(st_table *table)
|
282
|
+
{
|
283
|
+
st_free_table(table);
|
284
|
+
}
|
285
|
+
|
286
|
+
/* Document-class: RubyProf::CallInfo
|
287
|
+
RubyProf::CallInfo is a helper class used by RubyProf::MethodInfo
|
288
|
+
to keep track of which child methods were called and how long
|
289
|
+
they took to execute. */
|
290
|
+
|
291
|
+
/* :nodoc: */
|
292
|
+
static prof_call_info_t *
|
293
|
+
prof_call_info_create(prof_method_t* method, prof_call_info_t* parent)
|
294
|
+
{
|
295
|
+
prof_call_info_t *result = ALLOC(prof_call_info_t);
|
296
|
+
result->object = Qnil;
|
297
|
+
result->target = method;
|
298
|
+
result->parent = parent;
|
299
|
+
result->call_infos = call_info_table_create();
|
300
|
+
result->children = Qnil;
|
301
|
+
|
302
|
+
result->called = 0;
|
303
|
+
result->total_time = 0;
|
304
|
+
result->self_time = 0;
|
305
|
+
result->wait_time = 0;
|
306
|
+
result->line = 0;
|
307
|
+
return result;
|
308
|
+
}
|
309
|
+
|
310
|
+
static void prof_method_mark(prof_method_t *method);
|
311
|
+
|
312
|
+
static void
|
313
|
+
prof_call_info_mark(prof_call_info_t *call_info)
|
314
|
+
{
|
315
|
+
{
|
316
|
+
VALUE target = call_info->target->object;
|
317
|
+
if (NIL_P(target))
|
318
|
+
prof_method_mark(call_info->target);
|
319
|
+
else
|
320
|
+
rb_gc_mark(target);
|
321
|
+
}
|
322
|
+
rb_gc_mark(call_info->children);
|
323
|
+
if (call_info->parent) {
|
324
|
+
VALUE parent = call_info->parent->object;
|
325
|
+
if (NIL_P(parent)) {
|
326
|
+
prof_call_info_mark(call_info->parent);
|
327
|
+
}
|
328
|
+
else {
|
329
|
+
rb_gc_mark(parent);
|
330
|
+
}
|
331
|
+
}
|
332
|
+
}
|
333
|
+
|
334
|
+
static void
|
335
|
+
prof_call_info_free(prof_call_info_t *call_info)
|
336
|
+
{
|
337
|
+
call_info_table_free(call_info->call_infos);
|
338
|
+
xfree(call_info);
|
339
|
+
}
|
340
|
+
|
341
|
+
static VALUE
|
342
|
+
prof_call_info_wrap(prof_call_info_t *call_info)
|
343
|
+
{
|
344
|
+
if (call_info->object == Qnil)
|
345
|
+
{
|
346
|
+
call_info->object = Data_Wrap_Struct(cCallInfo, prof_call_info_mark, prof_call_info_free, call_info);
|
347
|
+
}
|
348
|
+
return call_info->object;
|
349
|
+
}
|
350
|
+
|
351
|
+
static prof_call_info_t *
|
352
|
+
prof_get_call_info_result(VALUE obj)
|
353
|
+
{
|
354
|
+
if (BUILTIN_TYPE(obj) != T_DATA)
|
355
|
+
{
|
356
|
+
/* Should never happen */
|
357
|
+
rb_raise(rb_eTypeError, "Not a call info object");
|
358
|
+
}
|
359
|
+
return (prof_call_info_t *) DATA_PTR(obj);
|
360
|
+
}
|
361
|
+
|
362
|
+
|
363
|
+
/* call-seq:
|
364
|
+
called -> MethodInfo
|
365
|
+
|
366
|
+
Returns the target method. */
|
367
|
+
static VALUE
|
368
|
+
prof_call_info_target(VALUE self)
|
369
|
+
{
|
370
|
+
/* Target is a pointer to a method_info - so we have to be careful
|
371
|
+
about the GC. We will wrap the method_info but provide no
|
372
|
+
free method so the underlying object is not freed twice! */
|
373
|
+
|
374
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
375
|
+
return prof_method_wrap(result->target);
|
376
|
+
}
|
377
|
+
|
378
|
+
/* call-seq:
|
379
|
+
called -> int
|
380
|
+
|
381
|
+
Returns the total amount of time this method was called. */
|
382
|
+
static VALUE
|
383
|
+
prof_call_info_called(VALUE self)
|
384
|
+
{
|
385
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
386
|
+
return INT2NUM(result->called);
|
387
|
+
}
|
388
|
+
|
389
|
+
/* call-seq:
|
390
|
+
line_no -> int
|
391
|
+
|
392
|
+
returns the line number of the method */
|
393
|
+
static VALUE
|
394
|
+
prof_call_info_line(VALUE self)
|
395
|
+
{
|
396
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
397
|
+
return rb_int_new(result->line);
|
398
|
+
}
|
399
|
+
|
400
|
+
/* call-seq:
|
401
|
+
total_time -> float
|
402
|
+
|
403
|
+
Returns the total amount of time spent in this method and its children. */
|
404
|
+
static VALUE
|
405
|
+
prof_call_info_total_time(VALUE self)
|
406
|
+
{
|
407
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
408
|
+
return rb_float_new(convert_measurement(result->total_time));
|
409
|
+
}
|
410
|
+
|
411
|
+
/* call-seq:
|
412
|
+
self_time -> float
|
413
|
+
|
414
|
+
Returns the total amount of time spent in this method. */
|
415
|
+
static VALUE
|
416
|
+
prof_call_info_self_time(VALUE self)
|
417
|
+
{
|
418
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
419
|
+
|
420
|
+
return rb_float_new(convert_measurement(result->self_time));
|
421
|
+
}
|
422
|
+
|
423
|
+
/* call-seq:
|
424
|
+
wait_time -> float
|
425
|
+
|
426
|
+
Returns the total amount of time this method waited for other threads. */
|
427
|
+
static VALUE
|
428
|
+
prof_call_info_wait_time(VALUE self)
|
429
|
+
{
|
430
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
431
|
+
|
432
|
+
return rb_float_new(convert_measurement(result->wait_time));
|
433
|
+
}
|
434
|
+
|
435
|
+
/* call-seq:
|
436
|
+
parent -> call_info
|
437
|
+
|
438
|
+
Returns the call_infos parent call_info object (the method that called this method).*/
|
439
|
+
static VALUE
|
440
|
+
prof_call_info_parent(VALUE self)
|
441
|
+
{
|
442
|
+
prof_call_info_t *result = prof_get_call_info_result(self);
|
443
|
+
if (result->parent)
|
444
|
+
return prof_call_info_wrap(result->parent);
|
445
|
+
else
|
446
|
+
return Qnil;
|
447
|
+
}
|
448
|
+
|
449
|
+
static int
|
450
|
+
prof_call_info_collect_children(st_data_t key, st_data_t value, st_data_t result)
|
451
|
+
{
|
452
|
+
prof_call_info_t *call_info = (prof_call_info_t *) value;
|
453
|
+
VALUE arr = (VALUE) result;
|
454
|
+
rb_ary_push(arr, prof_call_info_wrap(call_info));
|
455
|
+
return ST_CONTINUE;
|
456
|
+
}
|
457
|
+
|
458
|
+
/* call-seq:
|
459
|
+
children -> hash
|
460
|
+
|
461
|
+
Returns an array of call info objects of methods that this method
|
462
|
+
called (ie, children).*/
|
463
|
+
static VALUE
|
464
|
+
prof_call_info_children(VALUE self)
|
465
|
+
{
|
466
|
+
prof_call_info_t *call_info = prof_get_call_info_result(self);
|
467
|
+
if (call_info->children == Qnil)
|
468
|
+
{
|
469
|
+
call_info->children = rb_ary_new();
|
470
|
+
st_foreach(call_info->call_infos, prof_call_info_collect_children, call_info->children);
|
471
|
+
}
|
472
|
+
return call_info->children;
|
473
|
+
}
|
474
|
+
|
475
|
+
/* ================ Call Infos =================*/
|
476
|
+
static prof_call_infos_t*
|
477
|
+
prof_call_infos_create()
|
478
|
+
{
|
479
|
+
prof_call_infos_t *result = ALLOC(prof_call_infos_t);
|
480
|
+
result->start = ALLOC_N(prof_call_info_t*, INITIAL_CALL_INFOS_SIZE);
|
481
|
+
result->end = result->start + INITIAL_CALL_INFOS_SIZE;
|
482
|
+
result->ptr = result->start;
|
483
|
+
result->object = Qnil;
|
484
|
+
return result;
|
485
|
+
}
|
486
|
+
|
487
|
+
static void
|
488
|
+
prof_call_infos_free(prof_call_infos_t *call_infos)
|
489
|
+
{
|
490
|
+
xfree(call_infos->start);
|
491
|
+
xfree(call_infos);
|
492
|
+
}
|
493
|
+
|
494
|
+
static void
|
495
|
+
prof_add_call_info(prof_call_infos_t *call_infos, prof_call_info_t *call_info)
|
496
|
+
{
|
497
|
+
if (call_infos->ptr == call_infos->end)
|
498
|
+
{
|
499
|
+
size_t len = call_infos->ptr - call_infos->start;
|
500
|
+
size_t new_capacity = (call_infos->end - call_infos->start) * 2;
|
501
|
+
REALLOC_N(call_infos->start, prof_call_info_t*, new_capacity);
|
502
|
+
call_infos->ptr = call_infos->start + len;
|
503
|
+
call_infos->end = call_infos->start + new_capacity;
|
504
|
+
}
|
505
|
+
*call_infos->ptr = call_info;
|
506
|
+
call_infos->ptr++;
|
507
|
+
}
|
508
|
+
|
509
|
+
static VALUE
|
510
|
+
prof_call_infos_wrap(prof_call_infos_t *call_infos)
|
511
|
+
{
|
512
|
+
if (call_infos->object == Qnil)
|
513
|
+
{
|
514
|
+
prof_call_info_t **i;
|
515
|
+
call_infos->object = rb_ary_new();
|
516
|
+
for(i=call_infos->start; i<call_infos->ptr; i++)
|
517
|
+
{
|
518
|
+
VALUE call_info = prof_call_info_wrap(*i);
|
519
|
+
rb_ary_push(call_infos->object, call_info);
|
520
|
+
}
|
521
|
+
}
|
522
|
+
return call_infos->object;
|
523
|
+
}
|
524
|
+
|
525
|
+
|
526
|
+
/* ================ Method Info =================*/
|
527
|
+
/* Document-class: RubyProf::MethodInfo
|
528
|
+
The RubyProf::MethodInfo class stores profiling data for a method.
|
529
|
+
One instance of the RubyProf::MethodInfo class is created per method
|
530
|
+
called per thread. Thus, if a method is called in two different
|
531
|
+
thread then there will be two RubyProf::MethodInfo objects
|
532
|
+
created. RubyProf::MethodInfo objects can be accessed via
|
533
|
+
the RubyProf::Result object.
|
534
|
+
*/
|
535
|
+
|
536
|
+
static prof_method_t*
|
537
|
+
prof_method_create(prof_method_key_t *key, const char* source_file, int line)
|
538
|
+
{
|
539
|
+
prof_method_t *result = ALLOC(prof_method_t);
|
540
|
+
result->object = Qnil;
|
541
|
+
result->key = ALLOC(prof_method_key_t);
|
542
|
+
method_key(result->key, key->klass, key->mid, key->depth);
|
543
|
+
|
544
|
+
result->call_infos = prof_call_infos_create();
|
545
|
+
|
546
|
+
result->active = 0;
|
547
|
+
|
548
|
+
if (source_file != NULL)
|
549
|
+
{
|
550
|
+
int len = strlen(source_file) + 1;
|
551
|
+
char *buffer = ALLOC_N(char, len);
|
552
|
+
|
553
|
+
MEMCPY(buffer, source_file, char, len);
|
554
|
+
result->source_file = buffer;
|
555
|
+
}
|
556
|
+
else
|
557
|
+
{
|
558
|
+
result->source_file = source_file;
|
559
|
+
}
|
560
|
+
result->line = line;
|
561
|
+
|
562
|
+
return result;
|
563
|
+
}
|
564
|
+
|
565
|
+
/* GC mark function for MethodInfo wrappers: keeps the cached call_infos
   Ruby array and the owning class object alive. */
static void
prof_method_mark(prof_method_t *method)
{
  rb_gc_mark(method->call_infos->object);
  rb_gc_mark(method->key->klass);
}
|
571
|
+
|
572
|
+
static void
|
573
|
+
prof_method_free(prof_method_t *method)
|
574
|
+
{
|
575
|
+
if (method->source_file)
|
576
|
+
{
|
577
|
+
xfree((char*)method->source_file);
|
578
|
+
}
|
579
|
+
|
580
|
+
prof_call_infos_free(method->call_infos);
|
581
|
+
xfree(method->key);
|
582
|
+
xfree(method);
|
583
|
+
}
|
584
|
+
|
585
|
+
static VALUE
|
586
|
+
prof_method_wrap(prof_method_t *result)
|
587
|
+
{
|
588
|
+
if (result->object == Qnil)
|
589
|
+
{
|
590
|
+
result->object = Data_Wrap_Struct(cMethodInfo, prof_method_mark, prof_method_free, result);
|
591
|
+
}
|
592
|
+
return result->object;
|
593
|
+
}
|
594
|
+
|
595
|
+
/* Extracts the prof_method_t wrapped by a RubyProf::MethodInfo object.
   NOTE(review): unlike get_prof_result, no type check is performed here;
   callers must pass a genuine MethodInfo instance. */
static prof_method_t *
get_prof_method(VALUE obj)
{
    return (prof_method_t *) DATA_PTR(obj);
}
|
600
|
+
|
601
|
+
/* call-seq:
|
602
|
+
line_no -> int
|
603
|
+
|
604
|
+
returns the line number of the method */
|
605
|
+
static VALUE
|
606
|
+
prof_method_line(VALUE self)
|
607
|
+
{
|
608
|
+
return rb_int_new(get_prof_method(self)->line);
|
609
|
+
}
|
610
|
+
|
611
|
+
/* call-seq:
|
612
|
+
source_file => string
|
613
|
+
|
614
|
+
return the source file of the method
|
615
|
+
*/
|
616
|
+
static VALUE prof_method_source_file(VALUE self)
|
617
|
+
{
|
618
|
+
const char* sf = get_prof_method(self)->source_file;
|
619
|
+
if(!sf)
|
620
|
+
{
|
621
|
+
return rb_str_new2("ruby_runtime");
|
622
|
+
}
|
623
|
+
else
|
624
|
+
{
|
625
|
+
return rb_str_new2(sf);
|
626
|
+
}
|
627
|
+
}
|
628
|
+
|
629
|
+
|
630
|
+
/* call-seq:
|
631
|
+
method_class -> klass
|
632
|
+
|
633
|
+
Returns the Ruby klass that owns this method. */
|
634
|
+
static VALUE
|
635
|
+
prof_method_klass(VALUE self)
|
636
|
+
{
|
637
|
+
prof_method_t *result = get_prof_method(self);
|
638
|
+
return result->key->klass;
|
639
|
+
}
|
640
|
+
|
641
|
+
/* call-seq:
|
642
|
+
method_id -> ID
|
643
|
+
|
644
|
+
Returns the id of this method. */
|
645
|
+
static VALUE
|
646
|
+
prof_method_id(VALUE self)
|
647
|
+
{
|
648
|
+
prof_method_t *result = get_prof_method(self);
|
649
|
+
return ID2SYM(result->key->mid);
|
650
|
+
}
|
651
|
+
|
652
|
+
/* call-seq:
|
653
|
+
klass_name -> string
|
654
|
+
|
655
|
+
Returns the name of this method's class. Singleton classes
|
656
|
+
will have the form <Object::Object>. */
|
657
|
+
|
658
|
+
static VALUE
|
659
|
+
prof_klass_name(VALUE self)
|
660
|
+
{
|
661
|
+
prof_method_t *method = get_prof_method(self);
|
662
|
+
return klass_name(method->key->klass);
|
663
|
+
}
|
664
|
+
|
665
|
+
/* call-seq:
|
666
|
+
method_name -> string
|
667
|
+
|
668
|
+
Returns the name of this method in the format Object#method. Singletons
|
669
|
+
methods will be returned in the format <Object::Object>#method.*/
|
670
|
+
|
671
|
+
static VALUE
|
672
|
+
prof_method_name(VALUE self, int depth)
|
673
|
+
{
|
674
|
+
prof_method_t *method = get_prof_method(self);
|
675
|
+
return method_name(method->key->mid, depth);
|
676
|
+
}
|
677
|
+
|
678
|
+
/* call-seq:
|
679
|
+
full_name -> string
|
680
|
+
|
681
|
+
Returns the full name of this method in the format Object#method.*/
|
682
|
+
|
683
|
+
static VALUE
|
684
|
+
prof_full_name(VALUE self)
|
685
|
+
{
|
686
|
+
prof_method_t *method = get_prof_method(self);
|
687
|
+
return full_name(method->key->klass, method->key->mid, method->key->depth);
|
688
|
+
}
|
689
|
+
|
690
|
+
/* call-seq:
|
691
|
+
call_infos -> Array of call_info
|
692
|
+
|
693
|
+
Returns an array of call info objects that contain profiling information
|
694
|
+
about the current method.*/
|
695
|
+
static VALUE
|
696
|
+
prof_method_call_infos(VALUE self)
|
697
|
+
{
|
698
|
+
prof_method_t *method = get_prof_method(self);
|
699
|
+
return prof_call_infos_wrap(method->call_infos);
|
700
|
+
}
|
701
|
+
|
702
|
+
static int
|
703
|
+
collect_methods(st_data_t key, st_data_t value, st_data_t result)
|
704
|
+
{
|
705
|
+
/* Called for each method stored in a thread's method table.
|
706
|
+
We want to store the method info information into an array.*/
|
707
|
+
VALUE methods = (VALUE) result;
|
708
|
+
prof_method_t *method = (prof_method_t *) value;
|
709
|
+
rb_ary_push(methods, prof_method_wrap(method));
|
710
|
+
|
711
|
+
/* Wrap call info objects */
|
712
|
+
prof_call_infos_wrap(method->call_infos);
|
713
|
+
|
714
|
+
return ST_CONTINUE;
|
715
|
+
}
|
716
|
+
|
717
|
+
/* ================ Method Table =================*/
|
718
|
+
/* Creates a per-thread method table hashed with the custom
   prof_method_key_t hash/compare functions. */
static st_table *
method_table_create()
{
  return st_init_table(&type_method_hash);
}
|
723
|
+
|
724
|
+
/* Inserts a method keyed on its prof_method_key_t.  The table stores the
   raw pointers; ownership of key and val stays with the caller. */
static size_t
method_table_insert(st_table *table, const prof_method_key_t *key, prof_method_t *val)
{
  return st_insert(table, (st_data_t) key, (st_data_t) val);
}
|
729
|
+
|
730
|
+
static prof_method_t *
|
731
|
+
method_table_lookup(st_table *table, const prof_method_key_t* key)
|
732
|
+
{
|
733
|
+
st_data_t val;
|
734
|
+
if (st_lookup(table, (st_data_t)key, &val))
|
735
|
+
{
|
736
|
+
return (prof_method_t *) val;
|
737
|
+
}
|
738
|
+
else
|
739
|
+
{
|
740
|
+
return NULL;
|
741
|
+
}
|
742
|
+
}
|
743
|
+
|
744
|
+
|
745
|
+
/* Frees only the table itself. */
static void
method_table_free(st_table *table)
{
  /* Don't free the contents since they are wrapped by
     Ruby objects! */
  st_free_table(table);
}
|
752
|
+
|
753
|
+
|
754
|
+
/* ================ Thread Handling =================*/
|
755
|
+
|
756
|
+
/* ---- Keeps track of thread's stack and methods ---- */
|
757
|
+
static thread_data_t*
|
758
|
+
thread_data_create()
|
759
|
+
{
|
760
|
+
thread_data_t* result = ALLOC(thread_data_t);
|
761
|
+
result->stack = stack_create();
|
762
|
+
result->method_table = method_table_create();
|
763
|
+
result->last_switch = get_measurement();
|
764
|
+
return result;
|
765
|
+
}
|
766
|
+
|
767
|
+
/* Releases a thread's method table, frame stack, and the struct itself. */
static void
thread_data_free(thread_data_t* thread_data)
{
  method_table_free(thread_data->method_table);
  stack_free(thread_data->stack);
  xfree(thread_data);
}
|
774
|
+
|
775
|
+
/* ---- Hash, keyed on thread, that stores thread's stack
|
776
|
+
and methods---- */
|
777
|
+
|
778
|
+
/* Creates the table mapping thread VALUEs to thread_data_t. */
static st_table *
threads_table_create()
{
  return st_init_numtable();
}
|
783
|
+
|
784
|
+
/* Registers a thread's data, keyed on the thread VALUE itself. */
static size_t
threads_table_insert(st_table *table, VALUE thread, thread_data_t *thread_data)
{
  /* Its too slow to key on the real thread id so just typecast thread instead. */
  return st_insert(table, (st_data_t) thread, (st_data_t) thread_data);
}
|
790
|
+
|
791
|
+
static thread_data_t *
|
792
|
+
threads_table_lookup(st_table *table, VALUE thread_id)
|
793
|
+
{
|
794
|
+
thread_data_t* result;
|
795
|
+
st_data_t val;
|
796
|
+
|
797
|
+
/* Its too slow to key on the real thread id so just typecast thread instead. */
|
798
|
+
if (st_lookup(table, (st_data_t) thread_id, &val))
|
799
|
+
{
|
800
|
+
result = (thread_data_t *) val;
|
801
|
+
}
|
802
|
+
else
|
803
|
+
{
|
804
|
+
result = thread_data_create();
|
805
|
+
result->thread_id = thread_id;
|
806
|
+
|
807
|
+
/* Insert the table */
|
808
|
+
threads_table_insert(threads_tbl, thread_id, result);
|
809
|
+
}
|
810
|
+
return result;
|
811
|
+
}
|
812
|
+
|
813
|
+
/* st_foreach callback: frees one thread's data during table teardown. */
static int
free_thread_data(st_data_t key, st_data_t value, st_data_t dummy)
{
  thread_data_free((thread_data_t*)value);
  return ST_CONTINUE;
}
|
819
|
+
|
820
|
+
|
821
|
+
/* Frees every thread_data_t entry, then the table itself. */
static void
threads_table_free(st_table *table)
{
  st_foreach(table, free_thread_data, 0);
  st_free_table(table);
}
|
827
|
+
|
828
|
+
|
829
|
+
/* st_foreach callback: for one profiled thread, builds the array of
   MethodInfo objects and stores it in the results hash keyed on the
   thread's id. */
static int
collect_threads(st_data_t key, st_data_t value, st_data_t result)
{
  /* Although threads are keyed on an id, that is actually a
     pointer to the VALUE object of the thread.  So its bogus.
     However, in thread_data is the real thread id stored
     as an int. */
  thread_data_t* thread_data = (thread_data_t*) value;
  VALUE threads_hash = (VALUE) result;

  VALUE methods = rb_ary_new();

  /* Now collect an array of all the called methods */
  st_table* method_table = thread_data->method_table;
  st_foreach(method_table, collect_methods, methods);

  /* Store the results in the threads hash keyed on the thread id. */
  rb_hash_aset(threads_hash, thread_data->thread_id, methods);

  return ST_CONTINUE;
}
|
850
|
+
|
851
|
+
|
852
|
+
/* ================ Profiling =================*/
|
853
|
+
/* Copied from eval.c */
#ifdef DEBUG
/* Maps a tracing event flag to a printable name; compiled only for the
   DEBUG trace output in prof_event_hook. */
static char *
get_event_name(rb_event_flag_t event)
{
  switch (event) {
    case RUBY_EVENT_LINE:
  return "line";
    case RUBY_EVENT_CLASS:
  return "class";
    case RUBY_EVENT_END:
  return "end";
    case RUBY_EVENT_CALL:
  return "call";
    case RUBY_EVENT_RETURN:
  return "return";
    case RUBY_EVENT_C_CALL:
  return "c-call";
    case RUBY_EVENT_C_RETURN:
  return "c-return";
    case RUBY_EVENT_RAISE:
  return "raise";
    default:
  return "unknown";
  }
}
#endif
|
880
|
+
|
881
|
+
|
882
|
+
// these differ 1.9/1.8
|
883
|
+
|
884
|
+
/* Looks up — or lazily creates and registers — the prof_method_t for
   klass#mid at the given recursion depth in this thread's method table.
   The signature differs between Ruby 1.9 (RUBY_VM) and 1.8 (NODE*). */
static prof_method_t*
#ifdef RUBY_VM
 get_method(rb_event_flag_t event, VALUE klass, ID mid, int depth, st_table* method_table)
# else
 get_method(rb_event_flag_t event, NODE *node, VALUE klass, ID mid, int depth, st_table* method_table)
#endif
{
    prof_method_key_t key;
    prof_method_t *method = NULL;

    method_key(&key, klass, mid, depth);
    method = method_table_lookup(method_table, &key);

    if (!method)
    {
      const char* source_file = rb_sourcefile();
      int line = rb_sourceline();

      /* Line numbers are not accurate for c method calls */
      if (event == RUBY_EVENT_C_CALL)
      {
        line = 0;
        source_file = NULL;
      }

      method = prof_method_create(&key, source_file, line);
      method_table_insert(method_table, method->key, method);
    }
    return method;
}
|
914
|
+
|
915
|
+
static void
|
916
|
+
update_result(prof_measure_t total_time,
|
917
|
+
prof_frame_t *parent_frame,
|
918
|
+
prof_frame_t *frame)
|
919
|
+
{
|
920
|
+
prof_measure_t self_time = total_time - frame->child_time - frame->wait_time;
|
921
|
+
|
922
|
+
prof_call_info_t *call_info = frame->call_info;
|
923
|
+
|
924
|
+
/* Update information about the current method */
|
925
|
+
call_info->called++;
|
926
|
+
call_info->total_time += total_time;
|
927
|
+
call_info->self_time += self_time;
|
928
|
+
call_info->wait_time += frame->wait_time;
|
929
|
+
|
930
|
+
/* Note where the current method was called from */
|
931
|
+
if (parent_frame)
|
932
|
+
call_info->line = parent_frame->line;
|
933
|
+
}
|
934
|
+
|
935
|
+
/* Accounts for a context switch: charges the incoming thread's wait time
   to its top frame, timestamps the outgoing thread, and makes the incoming
   thread the current one (last_thread_data). */
static thread_data_t *
switch_thread(VALUE thread_id, prof_measure_t now)
{
    prof_frame_t *frame = NULL;
    prof_measure_t wait_time = 0;

    /* Get new thread information. */
    thread_data_t *thread_data = threads_table_lookup(threads_tbl, thread_id);

    /* How long has this thread been waiting? */
    wait_time = now - thread_data->last_switch;
    thread_data->last_switch = 0;

    /* Get the frame at the top of the stack.  This may represent
       the current method (EVENT_LINE, EVENT_RETURN)  or the
       previous method (EVENT_CALL).*/
    frame = stack_peek(thread_data->stack);

    if (frame)
      frame->wait_time += wait_time;

    /* Save on the last thread the time of the context switch
       and reset this thread's last context switch to 0.*/
    if (last_thread_data)
      last_thread_data->last_switch = now;

    last_thread_data = thread_data;
    return thread_data;
}
|
964
|
+
|
965
|
+
/* Pops the top frame from a thread's stack, charging its elapsed time to
   its call_info and its total time to the parent frame's child time.
   Returns the popped frame, or NULL if the stack was already empty. */
static prof_frame_t*
pop_frame(thread_data_t *thread_data, prof_measure_t now)
{
    prof_frame_t *frame = NULL;
    prof_frame_t* parent_frame = NULL;
    prof_measure_t total_time;

    frame = stack_pop(thread_data->stack);

    /* Frame can be null.  This can happen if RubProf.start is called from
       a method that exits.  And it can happen if an exception is raised
       in code that is being profiled and the stack unwinds (RubProf is
       not notified of that by the ruby runtime. */
    if (frame == NULL) return NULL;

    /* Calculate the total time this method took */
    total_time = now - frame->start_time;

    /* Now deactivate the method */
    frame->call_info->target->active = 0;

    parent_frame = stack_peek(thread_data->stack);
    if (parent_frame)
    {
      parent_frame->child_time += total_time;
    }

    update_result(total_time, parent_frame, frame);
    return frame;
}
|
995
|
+
|
996
|
+
/* st_foreach callback used when profiling stops: unwinds every remaining
   frame on a thread's stack so all timings are finalized. */
static int
pop_frames(st_data_t key, st_data_t value, st_data_t now_arg)
{
    VALUE thread_id = (VALUE)key;
    thread_data_t* thread_data = (thread_data_t *) value;
    prof_measure_t now = *(prof_measure_t *) now_arg;

    /* Account for a context switch if this is not the current thread. */
    if (!last_thread_data || last_thread_data->thread_id != thread_id)
      thread_data = switch_thread(thread_id, now);
    else
      thread_data = last_thread_data;

    /* Pop until the stack is empty. */
    while (pop_frame(thread_data, now))
    {
    }

    return ST_CONTINUE;
}
|
1014
|
+
|
1015
|
+
static void
|
1016
|
+
prof_pop_threads()
|
1017
|
+
{
|
1018
|
+
/* Get current measurement*/
|
1019
|
+
prof_measure_t now = get_measurement();
|
1020
|
+
st_foreach(threads_tbl, pop_frames, (st_data_t) &now);
|
1021
|
+
}
|
1022
|
+
|
1023
|
+
|
1024
|
+
/* The interpreter event hook: the heart of the profiler.  Invoked for
   every line/call/return event; maintains the per-thread frame stack.
   Signature differs between Ruby 1.9 (RUBY_VM) and 1.8 (NODE*). */
#ifdef RUBY_VM
static void
prof_event_hook(rb_event_flag_t event, VALUE data, VALUE self, ID mid, VALUE klass)
#else
static void
prof_event_hook(rb_event_flag_t event, NODE *node, VALUE self, ID mid, VALUE klass)
#endif
{

    VALUE thread = Qnil;
    VALUE thread_id = Qnil;
    prof_measure_t now = 0;
    thread_data_t* thread_data = NULL;
    prof_frame_t *frame = NULL;


    #ifdef RUBY_VM
    /* On 1.9 the hook arguments don't carry mid/klass for Ruby-level
       events; fetch them from the current frame instead. */
    if (event != RUBY_EVENT_C_CALL && event != RUBY_EVENT_C_RETURN) {
      rb_frame_method_id_and_class(&mid, &klass);
    }
    #endif

    #ifdef DEBUG
    /*  This code is here for debug purposes - uncomment it out
        when debugging to see a print out of exactly what the
        profiler is tracing. */
    {
        char* key = 0;
        static VALUE last_thread_id = Qnil;

        VALUE thread = rb_thread_current();
        VALUE thread_id = rb_obj_id(thread);
        char* class_name = NULL;
        char* method_name = rb_id2name(mid);
        char* source_file = rb_sourcefile();
        unsigned int source_line = rb_sourceline();

        char* event_name = get_event_name(event);

        if (klass != 0)
          klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);

        class_name = rb_class2name(klass);

        if (last_thread_id != thread_id)
          printf("\n");

        printf("%2u: %-8s :%2d %s#%s\n",
               thread_id, event_name, source_line, class_name, method_name);
        fflush(stdout);
        last_thread_id = thread_id;
    }
    #endif

    /* Special case - skip any methods from the mProf
       module, such as Prof.stop, since they clutter
       the results but aren't important to them results. */
    if (self == mProf) return;

    /* Get current measurement*/
    now = get_measurement();

    /* Get the current thread information. */
    thread = rb_thread_current();
    thread_id = rb_obj_id(thread);

    /* Skip threads the user explicitly excluded from profiling. */
    if (exclude_threads_tbl &&
        st_lookup(exclude_threads_tbl, (st_data_t) thread_id, 0))
    {
      return;
    }

    /* Was there a context switch? */
    if (!last_thread_data || last_thread_data->thread_id != thread_id)
      thread_data = switch_thread(thread_id, now);
    else
      thread_data = last_thread_data;

    /* Get the current frame for the current thread. */
    frame = stack_peek(thread_data->stack);

    switch (event) {
    case RUBY_EVENT_LINE:
    {
      /* Keep track of the current line number in this method.  When
         a new method is called, we know what line number it was
         called from. */
      if (frame)
      {
        frame->line = rb_sourceline();
        break;
      }

      /* If we get here there was no frame, which means this is
         the first method seen for this thread, so fall through
         to below to create it. */
    }
    /* fallthrough */
    case RUBY_EVENT_CALL:
    case RUBY_EVENT_C_CALL:
    {
      prof_call_info_t *call_info = NULL;
      prof_method_t *method = NULL;

      /* Is this an include for a module?  If so get the actual
         module class since we want to combine all profiling
         results for that module. */

      if (klass != 0)
        klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);

      /* Assume this is the first time we have called this method. */
      #ifdef RUBY_VM
      method = get_method(event, klass, mid, 0, thread_data->method_table);
      #else
      method = get_method(event, node, klass, mid, 0, thread_data->method_table);
      #endif

      /* Check for a recursive call: walk to the first inactive depth
         for this method so recursion levels are tracked separately. */
      while (method->active)
      {
        /* Yes, this method is already active */
        #ifdef RUBY_VM
        method = get_method(event, klass, mid, method->key->depth + 1, thread_data->method_table);
        #else
        method = get_method(event, node, klass, mid, method->key->depth + 1, thread_data->method_table);
        #endif
      }
      method->active = 1;


      if (!frame)
      {
        /* Root call for this thread: no parent call_info. */
        call_info = prof_call_info_create(method, NULL);
        prof_add_call_info(method->call_infos, call_info);
      }
      else
      {
        call_info = call_info_table_lookup(frame->call_info->call_infos, method->key);

        if (!call_info)
        {
          call_info = prof_call_info_create(method, frame->call_info);
          call_info_table_insert(frame->call_info->call_infos, method->key, call_info);
          prof_add_call_info(method->call_infos, call_info);
        }
      }

      /* Push a new frame onto the stack */
      frame = stack_push(thread_data->stack);
      frame->call_info = call_info;
      frame->start_time = now;
      frame->wait_time = 0;
      frame->child_time = 0;
      frame->line = rb_sourceline();

      break;
    }
    case RUBY_EVENT_RETURN:
    case RUBY_EVENT_C_RETURN:
    {
      pop_frame(thread_data, now);
      break;
    }
    }
}
|
1190
|
+
|
1191
|
+
|
1192
|
+
/* ======== ProfResult ============== */
|
1193
|
+
|
1194
|
+
/* Document-class: RubyProf::Result
|
1195
|
+
The RubyProf::Result class is used to store the results of a
|
1196
|
+
profiling run. And instace of the class is returned from
|
1197
|
+
the methods RubyProf#stop and RubyProf#profile.
|
1198
|
+
|
1199
|
+
RubyProf::Result has one field, called threads, which is a hash
|
1200
|
+
table keyed on thread ID. For each thread id, the hash table
|
1201
|
+
stores another hash table that contains profiling information
|
1202
|
+
for each method called during the threads execution. That
|
1203
|
+
hash table is keyed on method name and contains
|
1204
|
+
RubyProf::MethodInfo objects. */
|
1205
|
+
|
1206
|
+
|
1207
|
+
static void
|
1208
|
+
prof_result_mark(prof_result_t *prof_result)
|
1209
|
+
{
|
1210
|
+
VALUE threads = prof_result->threads;
|
1211
|
+
rb_gc_mark(threads);
|
1212
|
+
}
|
1213
|
+
|
1214
|
+
/* GC free function for Result: drops the hash reference and frees the
   struct (the hash itself is reclaimed by the GC). */
static void
prof_result_free(prof_result_t *prof_result)
{
  prof_result->threads = Qnil;
  xfree(prof_result);
}
|
1220
|
+
|
1221
|
+
/* Builds a RubyProf::Result from the global threads table, materializing
   all per-thread method data into a plain Ruby hash. */
static VALUE
prof_result_new()
{
    prof_result_t *prof_result = ALLOC(prof_result_t);

    /* Wrap threads in Ruby regular Ruby hash table. */
    prof_result->threads = rb_hash_new();
    st_foreach(threads_tbl, collect_threads, prof_result->threads);

    return Data_Wrap_Struct(cResult, prof_result_mark, prof_result_free, prof_result);
}
|
1232
|
+
|
1233
|
+
|
1234
|
+
/* Extracts the prof_result_t from a RubyProf::Result object, verifying
   the object really is one by checking its free function. */
static prof_result_t *
get_prof_result(VALUE obj)
{
    if (BUILTIN_TYPE(obj) != T_DATA ||
        RDATA(obj)->dfree != (RUBY_DATA_FUNC) prof_result_free)
    {
        /* Should never happen */
        rb_raise(rb_eTypeError, "wrong result object");
    }
    return (prof_result_t *) DATA_PTR(obj);
}
|
1245
|
+
|
1246
|
+
/* call-seq:
|
1247
|
+
threads -> Hash
|
1248
|
+
|
1249
|
+
Returns a hash table keyed on thread ID. For each thread id,
|
1250
|
+
the hash table stores another hash table that contains profiling
|
1251
|
+
information for each method called during the threads execution.
|
1252
|
+
That hash table is keyed on method name and contains
|
1253
|
+
RubyProf::MethodInfo objects. */
|
1254
|
+
static VALUE
|
1255
|
+
prof_result_threads(VALUE self)
|
1256
|
+
{
|
1257
|
+
prof_result_t *prof_result = get_prof_result(self);
|
1258
|
+
return prof_result->threads;
|
1259
|
+
}
|
1260
|
+
|
1261
|
+
|
1262
|
+
|
1263
|
+
/* call-seq:
|
1264
|
+
measure_mode -> measure_mode
|
1265
|
+
|
1266
|
+
Returns what ruby-prof is measuring. Valid values include:
|
1267
|
+
|
1268
|
+
*RubyProf::PROCESS_TIME - Measure process time. This is default. It is implemented using the clock functions in the C Runtime library.
|
1269
|
+
*RubyProf::WALL_TIME - Measure wall time using gettimeofday on Linx and GetLocalTime on Windows
|
1270
|
+
*RubyProf::CPU_TIME - Measure time using the CPU clock counter. This mode is only supported on Pentium or PowerPC platforms.
|
1271
|
+
*RubyProf::ALLOCATIONS - Measure object allocations. This requires a patched Ruby interpreter.
|
1272
|
+
*RubyProf::MEMORY - Measure memory size. This requires a patched Ruby interpreter.
|
1273
|
+
*RubyProf::GC_RUNS - Measure number of garbage collections. This requires a patched Ruby interpreter.
|
1274
|
+
*RubyProf::GC_TIME - Measure time spent doing garbage collection. This requires a patched Ruby interpreter.*/
|
1275
|
+
/* Returns the current global measure mode as a Fixnum. */
static VALUE
prof_get_measure_mode(VALUE self)
{
  return INT2NUM(measure_mode);
}
|
1280
|
+
|
1281
|
+
/* call-seq:
|
1282
|
+
measure_mode=value -> void
|
1283
|
+
|
1284
|
+
Specifies what ruby-prof should measure. Valid values include:
|
1285
|
+
|
1286
|
+
*RubyProf::PROCESS_TIME - Measure process time. This is default. It is implemented using the clock functions in the C Runtime library.
|
1287
|
+
*RubyProf::WALL_TIME - Measure wall time using gettimeofday on Linx and GetLocalTime on Windows
|
1288
|
+
*RubyProf::CPU_TIME - Measure time using the CPU clock counter. This mode is only supported on Pentium or PowerPC platforms.
|
1289
|
+
*RubyProf::ALLOCATIONS - Measure object allocations. This requires a patched Ruby interpreter.
|
1290
|
+
*RubyProf::MEMORY - Measure memory size. This requires a patched Ruby interpreter.
|
1291
|
+
*RubyProf::GC_RUNS - Measure number of garbage collections. This requires a patched Ruby interpreter.
|
1292
|
+
*RubyProf::GC_TIME - Measure time spent doing garbage collection. This requires a patched Ruby interpreter.*/
|
1293
|
+
/* Sets the global measure mode, installing the matching measurement and
   conversion function pointers.  Raises if a profile is running or the
   mode is unknown/unsupported on this build. */
static VALUE
prof_set_measure_mode(VALUE self, VALUE val)
{
    long mode = NUM2LONG(val);

    /* Switching mid-run would mix incompatible units. */
    if (threads_tbl)
    {
      rb_raise(rb_eRuntimeError, "can't set measure_mode while profiling");
    }

    switch (mode) {
      case MEASURE_PROCESS_TIME:
        get_measurement = measure_process_time;
        convert_measurement = convert_process_time;
        break;

      case MEASURE_WALL_TIME:
        get_measurement = measure_wall_time;
        convert_measurement = convert_wall_time;
        break;

      #if defined(MEASURE_CPU_TIME)
      case MEASURE_CPU_TIME:
        /* Lazily probe the CPU frequency on first use. */
        if (cpu_frequency == 0)
            cpu_frequency = get_cpu_frequency();
        get_measurement = measure_cpu_time;
        convert_measurement = convert_cpu_time;
        break;
      #endif

      #if defined(MEASURE_ALLOCATIONS)
      case MEASURE_ALLOCATIONS:
        get_measurement = measure_allocations;
        convert_measurement = convert_allocations;
        break;
      #endif

      #if defined(MEASURE_MEMORY)
      case MEASURE_MEMORY:
        get_measurement = measure_memory;
        convert_measurement = convert_memory;
        break;
      #endif

      #if defined(MEASURE_GC_RUNS)
      case MEASURE_GC_RUNS:
        get_measurement = measure_gc_runs;
        convert_measurement = convert_gc_runs;
        break;
      #endif

      #if defined(MEASURE_GC_TIME)
      case MEASURE_GC_TIME:
        get_measurement = measure_gc_time;
        convert_measurement = convert_gc_time;
        break;
      #endif

      default:
        rb_raise(rb_eArgError, "invalid mode: %ld", mode);
        break;
    }

    measure_mode = mode;
    return val;
}
|
1359
|
+
|
1360
|
+
/* call-seq:
|
1361
|
+
exclude_threads= -> void
|
1362
|
+
|
1363
|
+
Specifies what threads ruby-prof should exclude from profiling */
|
1364
|
+
/* Replaces the global set of threads excluded from profiling.  Passing
   nil clears the set.  Raises if a profile is running. */
static VALUE
prof_set_exclude_threads(VALUE self, VALUE threads)
{
    int i;

    if (threads_tbl != NULL)
    {
      rb_raise(rb_eRuntimeError, "can't set exclude_threads while profiling");
    }

    /* Stay simple, first free the old hash table */
    if (exclude_threads_tbl)
    {
      st_free_table(exclude_threads_tbl);
      exclude_threads_tbl = NULL;
    }

    /* Now create a new one if the user passed in any threads */
    if (threads != Qnil)
    {
      Check_Type(threads, T_ARRAY);
      exclude_threads_tbl = st_init_numtable();

      /* Keyed on each thread's object id (same key prof_event_hook uses). */
      for (i=0; i < RARRAY_LEN(threads); ++i)
      {
        VALUE thread = rb_ary_entry(threads, i);
        st_insert(exclude_threads_tbl, (st_data_t) rb_obj_id(thread), 0);
      }
    }
    return threads;
}
|
1395
|
+
|
1396
|
+
|
1397
|
+
/* ========= Profiling ============= */
|
1398
|
+
/* Registers prof_event_hook for call/return/line events; the hook API
   signature differs between Ruby 1.9 (RUBY_VM) and 1.8. */
void
prof_install_hook()
{
#ifdef RUBY_VM
    rb_add_event_hook(prof_event_hook,
          RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
          RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN
          | RUBY_EVENT_LINE, Qnil);
#else
    rb_add_event_hook(prof_event_hook,
          RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
          RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN
          | RUBY_EVENT_LINE);
#endif

#if defined(TOGGLE_GC_STATS)
    rb_gc_enable_stats();
#endif
}
|
1417
|
+
|
1418
|
+
/* Unregisters the event hook (and GC stats if enabled at build time). */
void
prof_remove_hook()
{
#if defined(TOGGLE_GC_STATS)
    rb_gc_disable_stats();
#endif

    /* Now unregister from event   */
    rb_remove_event_hook(prof_event_hook);
}
|
1428
|
+
|
1429
|
+
|
1430
|
+
|
1431
|
+
/* call-seq:
|
1432
|
+
running? -> boolean
|
1433
|
+
|
1434
|
+
Returns whether a profile is currently running.*/
|
1435
|
+
static VALUE
|
1436
|
+
prof_running(VALUE self)
|
1437
|
+
{
|
1438
|
+
if (threads_tbl != NULL)
|
1439
|
+
return Qtrue;
|
1440
|
+
else
|
1441
|
+
return Qfalse;
|
1442
|
+
}
|
1443
|
+
|
1444
|
+
/* call-seq:
|
1445
|
+
start -> RubyProf
|
1446
|
+
|
1447
|
+
Starts recording profile data.*/
|
1448
|
+
static VALUE
|
1449
|
+
prof_start(VALUE self)
|
1450
|
+
{
|
1451
|
+
if (threads_tbl != NULL)
|
1452
|
+
{
|
1453
|
+
rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
|
1454
|
+
}
|
1455
|
+
|
1456
|
+
/* Setup globals */
|
1457
|
+
last_thread_data = NULL;
|
1458
|
+
threads_tbl = threads_table_create();
|
1459
|
+
|
1460
|
+
prof_install_hook();
|
1461
|
+
return self;
|
1462
|
+
}
|
1463
|
+
|
1464
|
+
/* call-seq:
|
1465
|
+
pause -> RubyProf
|
1466
|
+
|
1467
|
+
Pauses collecting profile data. */
|
1468
|
+
static VALUE
|
1469
|
+
prof_pause(VALUE self)
|
1470
|
+
{
|
1471
|
+
if (threads_tbl == NULL)
|
1472
|
+
{
|
1473
|
+
rb_raise(rb_eRuntimeError, "RubyProf is not running.");
|
1474
|
+
}
|
1475
|
+
|
1476
|
+
prof_remove_hook();
|
1477
|
+
return self;
|
1478
|
+
}
|
1479
|
+
|
1480
|
+
/* call-seq:
|
1481
|
+
resume {block} -> RubyProf
|
1482
|
+
|
1483
|
+
Resumes recording profile data.*/
|
1484
|
+
static VALUE
|
1485
|
+
prof_resume(VALUE self)
|
1486
|
+
{
|
1487
|
+
if (threads_tbl == NULL)
|
1488
|
+
{
|
1489
|
+
prof_start(self);
|
1490
|
+
}
|
1491
|
+
else
|
1492
|
+
{
|
1493
|
+
prof_install_hook();
|
1494
|
+
}
|
1495
|
+
|
1496
|
+
if (rb_block_given_p())
|
1497
|
+
{
|
1498
|
+
rb_ensure(rb_yield, self, prof_pause, self);
|
1499
|
+
}
|
1500
|
+
|
1501
|
+
return self;
|
1502
|
+
}
|
1503
|
+
|
1504
|
+
/* call-seq:
|
1505
|
+
stop -> RubyProf::Result
|
1506
|
+
|
1507
|
+
Stops collecting profile data and returns a RubyProf::Result object. */
|
1508
|
+
static VALUE
|
1509
|
+
prof_stop(VALUE self)
|
1510
|
+
{
|
1511
|
+
VALUE result = Qnil;
|
1512
|
+
|
1513
|
+
prof_remove_hook();
|
1514
|
+
|
1515
|
+
prof_pop_threads();
|
1516
|
+
|
1517
|
+
/* Create the result */
|
1518
|
+
result = prof_result_new();
|
1519
|
+
|
1520
|
+
/* Unset the last_thread_data (very important!)
|
1521
|
+
and the threads table */
|
1522
|
+
last_thread_data = NULL;
|
1523
|
+
threads_table_free(threads_tbl);
|
1524
|
+
threads_tbl = NULL;
|
1525
|
+
|
1526
|
+
return result;
|
1527
|
+
}
|
1528
|
+
|
1529
|
+
/* call-seq:
|
1530
|
+
profile {block} -> RubyProf::Result
|
1531
|
+
|
1532
|
+
Profiles the specified block and returns a RubyProf::Result object. */
|
1533
|
+
static VALUE
|
1534
|
+
prof_profile(VALUE self)
|
1535
|
+
{
|
1536
|
+
int result;
|
1537
|
+
|
1538
|
+
if (!rb_block_given_p())
|
1539
|
+
{
|
1540
|
+
rb_raise(rb_eArgError, "A block must be provided to the profile method.");
|
1541
|
+
}
|
1542
|
+
|
1543
|
+
prof_start(self);
|
1544
|
+
rb_protect(rb_yield, self, &result);
|
1545
|
+
return prof_stop(self);
|
1546
|
+
}
|
1547
|
+
|
1548
|
+
/* Get around annoying limitations in RDOC */
|
1549
|
+
|
1550
|
+
/* Document-method: measure_process_time
|
1551
|
+
call-seq:
|
1552
|
+
measure_process_time -> float
|
1553
|
+
|
1554
|
+
Returns the process time.*/
|
1555
|
+
|
1556
|
+
/* Document-method: measure_wall_time
|
1557
|
+
call-seq:
|
1558
|
+
measure_wall_time -> float
|
1559
|
+
|
1560
|
+
Returns the wall time.*/
|
1561
|
+
|
1562
|
+
/* Document-method: measure_cpu_time
|
1563
|
+
call-seq:
|
1564
|
+
measure_cpu_time -> float
|
1565
|
+
|
1566
|
+
Returns the cpu time.*/
|
1567
|
+
|
1568
|
+
/* Document-method: get_cpu_frequency
|
1569
|
+
call-seq:
|
1570
|
+
cpu_frequency -> int
|
1571
|
+
|
1572
|
+
Returns the cpu's frequency. This value is needed when
|
1573
|
+
RubyProf::measure_mode is set to CPU_TIME. */
|
1574
|
+
|
1575
|
+
/* Document-method: cpu_frequency
|
1576
|
+
call-seq:
|
1577
|
+
cpu_frequency -> int
|
1578
|
+
|
1579
|
+
Returns the cpu's frequency. This value is needed when
|
1580
|
+
RubyProf::measure_mode is set to CPU_TIME. */
|
1581
|
+
|
1582
|
+
/* Document-method: cpu_frequency=
|
1583
|
+
call-seq:
|
1584
|
+
cpu_frequency = frequency
|
1585
|
+
|
1586
|
+
Sets the cpu's frequency. This value is needed when
|
1587
|
+
RubyProf::measure_mode is set to CPU_TIME. */
|
1588
|
+
|
1589
|
+
/* Document-method: measure_allocations
|
1590
|
+
call-seq:
|
1591
|
+
measure_allocations -> int
|
1592
|
+
|
1593
|
+
Returns the total number of object allocations since Ruby started.*/
|
1594
|
+
|
1595
|
+
/* Document-method: measure_memory
|
1596
|
+
call-seq:
|
1597
|
+
measure_memory -> int
|
1598
|
+
|
1599
|
+
Returns total allocated memory in bytes.*/
|
1600
|
+
|
1601
|
+
/* Document-method: measure_gc_runs
|
1602
|
+
call-seq:
|
1603
|
+
gc_runs -> Integer
|
1604
|
+
|
1605
|
+
Returns the total number of garbage collections.*/
|
1606
|
+
|
1607
|
+
/* Document-method: measure_gc_time
|
1608
|
+
call-seq:
|
1609
|
+
gc_time -> Integer
|
1610
|
+
|
1611
|
+
Returns the time spent doing garbage collections in microseconds.*/
|
1612
|
+
|
1613
|
+
|
1614
|
+
#if defined(_WIN32)
|
1615
|
+
__declspec(dllexport)
|
1616
|
+
#endif
|
1617
|
+
void
|
1618
|
+
|
1619
|
+
Init_ruby_prof()
|
1620
|
+
{
|
1621
|
+
mProf = rb_define_module("RubyProf");
|
1622
|
+
rb_define_const(mProf, "VERSION", rb_str_new2(RUBY_PROF_VERSION));
|
1623
|
+
rb_define_module_function(mProf, "start", prof_start, 0);
|
1624
|
+
rb_define_module_function(mProf, "stop", prof_stop, 0);
|
1625
|
+
rb_define_module_function(mProf, "resume", prof_resume, 0);
|
1626
|
+
rb_define_module_function(mProf, "pause", prof_pause, 0);
|
1627
|
+
rb_define_module_function(mProf, "running?", prof_running, 0);
|
1628
|
+
rb_define_module_function(mProf, "profile", prof_profile, 0);
|
1629
|
+
|
1630
|
+
rb_define_singleton_method(mProf, "exclude_threads=", prof_set_exclude_threads, 1);
|
1631
|
+
rb_define_singleton_method(mProf, "measure_mode", prof_get_measure_mode, 0);
|
1632
|
+
rb_define_singleton_method(mProf, "measure_mode=", prof_set_measure_mode, 1);
|
1633
|
+
|
1634
|
+
rb_define_const(mProf, "CLOCKS_PER_SEC", INT2NUM(CLOCKS_PER_SEC));
|
1635
|
+
rb_define_const(mProf, "PROCESS_TIME", INT2NUM(MEASURE_PROCESS_TIME));
|
1636
|
+
rb_define_singleton_method(mProf, "measure_process_time", prof_measure_process_time, 0); /* in measure_process_time.h */
|
1637
|
+
rb_define_const(mProf, "WALL_TIME", INT2NUM(MEASURE_WALL_TIME));
|
1638
|
+
rb_define_singleton_method(mProf, "measure_wall_time", prof_measure_wall_time, 0); /* in measure_wall_time.h */
|
1639
|
+
|
1640
|
+
#ifndef MEASURE_CPU_TIME
|
1641
|
+
rb_define_const(mProf, "CPU_TIME", Qnil);
|
1642
|
+
#else
|
1643
|
+
rb_define_const(mProf, "CPU_TIME", INT2NUM(MEASURE_CPU_TIME));
|
1644
|
+
rb_define_singleton_method(mProf, "measure_cpu_time", prof_measure_cpu_time, 0); /* in measure_cpu_time.h */
|
1645
|
+
rb_define_singleton_method(mProf, "cpu_frequency", prof_get_cpu_frequency, 0); /* in measure_cpu_time.h */
|
1646
|
+
rb_define_singleton_method(mProf, "cpu_frequency=", prof_set_cpu_frequency, 1); /* in measure_cpu_time.h */
|
1647
|
+
#endif
|
1648
|
+
|
1649
|
+
#ifndef MEASURE_ALLOCATIONS
|
1650
|
+
rb_define_const(mProf, "ALLOCATIONS", Qnil);
|
1651
|
+
#else
|
1652
|
+
rb_define_const(mProf, "ALLOCATIONS", INT2NUM(MEASURE_ALLOCATIONS));
|
1653
|
+
rb_define_singleton_method(mProf, "measure_allocations", prof_measure_allocations, 0); /* in measure_allocations.h */
|
1654
|
+
#endif
|
1655
|
+
|
1656
|
+
#ifndef MEASURE_MEMORY
|
1657
|
+
rb_define_const(mProf, "MEMORY", Qnil);
|
1658
|
+
#else
|
1659
|
+
rb_define_const(mProf, "MEMORY", INT2NUM(MEASURE_MEMORY));
|
1660
|
+
rb_define_singleton_method(mProf, "measure_memory", prof_measure_memory, 0); /* in measure_memory.h */
|
1661
|
+
#endif
|
1662
|
+
|
1663
|
+
#ifndef MEASURE_GC_RUNS
|
1664
|
+
rb_define_const(mProf, "GC_RUNS", Qnil);
|
1665
|
+
#else
|
1666
|
+
rb_define_const(mProf, "GC_RUNS", INT2NUM(MEASURE_GC_RUNS));
|
1667
|
+
rb_define_singleton_method(mProf, "measure_gc_runs", prof_measure_gc_runs, 0); /* in measure_gc_runs.h */
|
1668
|
+
#endif
|
1669
|
+
|
1670
|
+
#ifndef MEASURE_GC_TIME
|
1671
|
+
rb_define_const(mProf, "GC_TIME", Qnil);
|
1672
|
+
#else
|
1673
|
+
rb_define_const(mProf, "GC_TIME", INT2NUM(MEASURE_GC_TIME));
|
1674
|
+
rb_define_singleton_method(mProf, "measure_gc_time", prof_measure_gc_time, 0); /* in measure_gc_time.h */
|
1675
|
+
#endif
|
1676
|
+
|
1677
|
+
cResult = rb_define_class_under(mProf, "Result", rb_cObject);
|
1678
|
+
rb_undef_method(CLASS_OF(cMethodInfo), "new");
|
1679
|
+
rb_define_method(cResult, "threads", prof_result_threads, 0);
|
1680
|
+
|
1681
|
+
/* MethodInfo */
|
1682
|
+
cMethodInfo = rb_define_class_under(mProf, "MethodInfo", rb_cObject);
|
1683
|
+
rb_undef_method(CLASS_OF(cMethodInfo), "new");
|
1684
|
+
|
1685
|
+
rb_define_method(cMethodInfo, "klass", prof_method_klass, 0);
|
1686
|
+
rb_define_method(cMethodInfo, "klass_name", prof_klass_name, 0);
|
1687
|
+
rb_define_method(cMethodInfo, "method_name", prof_method_name, 0);
|
1688
|
+
rb_define_method(cMethodInfo, "full_name", prof_full_name, 0);
|
1689
|
+
rb_define_method(cMethodInfo, "method_id", prof_method_id, 0);
|
1690
|
+
|
1691
|
+
rb_define_method(cMethodInfo, "source_file", prof_method_source_file,0);
|
1692
|
+
rb_define_method(cMethodInfo, "line", prof_method_line, 0);
|
1693
|
+
|
1694
|
+
rb_define_method(cMethodInfo, "call_infos", prof_method_call_infos, 0);
|
1695
|
+
|
1696
|
+
/* CallInfo */
|
1697
|
+
cCallInfo = rb_define_class_under(mProf, "CallInfo", rb_cObject);
|
1698
|
+
rb_undef_method(CLASS_OF(cCallInfo), "new");
|
1699
|
+
rb_define_method(cCallInfo, "parent", prof_call_info_parent, 0);
|
1700
|
+
rb_define_method(cCallInfo, "children", prof_call_info_children, 0);
|
1701
|
+
rb_define_method(cCallInfo, "target", prof_call_info_target, 0);
|
1702
|
+
rb_define_method(cCallInfo, "called", prof_call_info_called, 0);
|
1703
|
+
rb_define_method(cCallInfo, "total_time", prof_call_info_total_time, 0);
|
1704
|
+
rb_define_method(cCallInfo, "self_time", prof_call_info_self_time, 0);
|
1705
|
+
rb_define_method(cCallInfo, "wait_time", prof_call_info_wait_time, 0);
|
1706
|
+
rb_define_method(cCallInfo, "line", prof_call_info_line, 0);
|
1707
|
+
}
|