ruby-prof 0.4.1-mswin32 → 0.5.0-mswin32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGES +30 -0
- data/README +65 -25
- data/Rakefile +33 -32
- data/bin/ruby-prof +100 -83
- data/examples/graph.html +65 -69
- data/ext/measure_allocations.h +43 -0
- data/ext/measure_cpu_time.h +138 -0
- data/ext/measure_process_time.h +41 -0
- data/ext/measure_wall_time.h +42 -0
- data/ext/ruby_prof.c +737 -653
- data/lib/ruby-prof.rb +41 -38
- data/lib/ruby-prof/abstract_printer.rb +42 -0
- data/lib/ruby-prof/call_tree_printer.rb +69 -0
- data/lib/ruby-prof/flat_printer.rb +78 -75
- data/lib/ruby-prof/graph_html_printer.rb +241 -228
- data/lib/ruby-prof/graph_printer.rb +160 -141
- data/lib/ruby-prof/profile_test_case.rb +80 -0
- data/lib/ruby-prof/rails_plugin/ruby-prof/init.rb +6 -0
- data/lib/ruby-prof/rails_plugin/ruby-prof/lib/profiling.rb +52 -0
- data/lib/ruby-prof/task.rb +147 -0
- data/lib/ruby_prof.so +0 -0
- data/test/basic_test.rb +65 -35
- data/test/duplicate_names_test.rb +20 -24
- data/test/gc.log +5 -0
- data/test/measure_mode_test.rb +79 -0
- data/test/module_test.rb +31 -18
- data/test/no_method_class_test.rb +14 -0
- data/test/prime1.rb +17 -0
- data/test/prime2.rb +26 -0
- data/test/prime3.rb +17 -0
- data/test/prime_test.rb +10 -10
- data/test/printers_test.rb +14 -12
- data/test/profile_unit_test.rb +24 -0
- data/test/recursive_test.rb +105 -17
- data/test/singleton_test.rb +38 -0
- data/test/start_test.rb +24 -0
- data/test/test_helper.rb +33 -29
- data/test/test_suite.rb +10 -2
- data/test/thread_test.rb +123 -17
- data/test/timing_test.rb +70 -29
- metadata +28 -30
- data/doc/created.rid +0 -1
- data/doc/files/LICENSE.html +0 -0
- data/doc/files/README.html +0 -376
- data/doc/files/bin/ruby-prof.html +0 -143
- data/doc/files/examples/flat_txt.html +0 -179
- data/doc/files/examples/graph_html.html +0 -948
- data/doc/files/examples/graph_txt.html +0 -297
- data/doc/files/ext/ruby_prof_c.html +0 -101
- data/doc/files/lib/ruby-prof/flat_printer_rb.html +0 -101
- data/doc/files/lib/ruby-prof/graph_html_printer_rb.html +0 -108
- data/doc/files/lib/ruby-prof/graph_printer_rb.html +0 -101
- data/doc/files/lib/ruby-prof/profiletask_rb.html +0 -109
- data/doc/files/lib/ruby-prof_rb.html +0 -111
- data/doc/files/lib/unprof_rb.html +0 -108
- data/doc/rdoc-style.css +0 -208
- data/lib/ruby-prof/profiletask.rb +0 -150
- data/test/clock_mode_test.rb +0 -73
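Judging from the files listed above (the new measure_*.h headers, the removed clock_mode_test.rb, and the RubyProf::measure_mode doc comments in the diff below), 0.5.0 replaces 0.4.1's clock_mode with a pluggable measure_mode plus several new printers. A minimal usage sketch, assuming the Ruby-level constant and printer names implied by those comments (RubyProf.measure_mode=, RubyProf::WALL_TIME, RubyProf::FlatPrinter) rather than a confirmed API; check the shipped README for the exact interface:

```ruby
require 'ruby-prof'

# Assumed constant names, mirroring MEASURE_PROCESS_TIME / WALL_TIME / CPU_TIME /
# ALLOCATIONS in the new measure_*.h headers; process time is the C-level default.
RubyProf.measure_mode = RubyProf::WALL_TIME

result = RubyProf.profile do
  1_000.times { "ruby-prof 0.5.0".reverse }
end

# FlatPrinter ships in data/lib/ruby-prof/flat_printer.rb; GraphHtmlPrinter and the
# new CallTreePrinter listed above are alternatives for the same result object.
printer = RubyProf::FlatPrinter.new(result)
printer.print(STDOUT, 0)
```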
data/ext/measure_cpu_time.h
ADDED
@@ -0,0 +1,138 @@
+/* :nodoc:
+ * Copyright (C) 2007  Shugo Maeda <shugo@ruby-lang.org>
+ *                     Charlie Savage <cfis@savagexi.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+
+#if defined(_WIN32) || (defined(__GNUC__) && (defined(__i386__) || defined(__powerpc__) || defined(__ppc__)))
+#define MEASURE_CPU_TIME 2
+
+static unsigned long long cpu_frequency;
+
+#if defined(__GNUC__)
+
+static prof_measure_t
+measure_cpu_time()
+{
+#if defined(__i386__)
+    unsigned long long x;
+    __asm__ __volatile__ ("rdtsc" : "=A" (x));
+    return x;
+#elif defined(__powerpc__) || defined(__ppc__)
+    unsigned long long x, y;
+
+    __asm__ __volatile__ ("\n\
+1: mftbu   %1\n\
+   mftb    %L0\n\
+   mftbu   %0\n\
+   cmpw    %0,%1\n\
+   bne-    1b"
+   : "=r" (x), "=r" (y));
+    return x;
+#endif
+}
+
+#elif defined(_WIN32)
+
+static prof_measure_t
+measure_cpu_time()
+{
+    prof_measure_t cycles = 0;
+
+    __asm
+    {
+        rdtsc
+        mov DWORD PTR cycles, eax
+        mov DWORD PTR [cycles + 4], edx
+    }
+    return cycles;
+}
+
+#endif
+
+
+/* The _WIN32 check is needed for msys (and maybe cygwin?) */
+#if defined(__GNUC__) && !defined(_WIN32)
+
+unsigned long long get_cpu_frequency()
+{
+    unsigned long long x, y;
+
+    struct timespec ts;
+    ts.tv_sec = 0;
+    ts.tv_nsec = 500000000;
+    x = measure_cpu_time();
+    nanosleep(&ts, NULL);
+    y = measure_cpu_time();
+    return (y - x) * 2;
+}
+
+#elif defined(_WIN32)
+
+unsigned long long get_cpu_frequency()
+{
+    unsigned long long x, y;
+    unsigned long long frequency;
+    x = measure_cpu_time();
+
+    /* Use the windows sleep function, not Ruby's */
+    Sleep(500);
+    y = measure_cpu_time();
+    frequency = 2*(y-x);
+    return frequency;
+}
+#endif
+
+static double
+convert_cpu_time(prof_measure_t c)
+{
+    return (double) c / cpu_frequency;
+}
+
+/* Document-method: prof_get_cpu_frequency
+   call-seq:
+     cpu_frequency -> int
+
+   Returns the cpu's frequency.  This value is needed when
+   RubyProf::measure_mode is set to CPU_TIME. */
+static VALUE
+prof_get_cpu_frequency(VALUE self)
+{
+    return LONG2NUM(cpu_frequency);
+}
+
+/* Document-method: prof_set_cpu_frequency
+   call-seq:
+     cpu_frequency=value -> void
+
+   Sets the cpu's frequency.  This value is needed when
+   RubyProf::measure_mode is set to CPU_TIME. */
+static VALUE
+prof_set_cpu_frequency(VALUE self, VALUE val)
+{
+    cpu_frequency = NUM2LONG(val);
+    return val;
+}
+
+#endif
data/ext/measure_process_time.h
ADDED
@@ -0,0 +1,41 @@
+/* :nodoc:
+ * Copyright (C) 2007  Shugo Maeda <shugo@ruby-lang.org>
+ *                     Charlie Savage <cfis@savagexi.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+#include <time.h>
+
+#define MEASURE_PROCESS_TIME 0
+
+static prof_measure_t
+measure_process_time()
+{
+    return clock();
+}
+
+static double
+convert_process_time(prof_measure_t c)
+{
+    return (double) c / CLOCKS_PER_SEC;
+}
data/ext/measure_wall_time.h
ADDED
@@ -0,0 +1,42 @@
+/* :nodoc:
+ * Copyright (C) 2007  Shugo Maeda <shugo@ruby-lang.org>
+ *                     Charlie Savage <cfis@savagexi.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+
+#define MEASURE_WALL_TIME 1
+
+static prof_measure_t
+measure_wall_time()
+{
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    return tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+static double
+convert_wall_time(prof_measure_t c)
+{
+    return (double) c / 1000000;
+}
data/ext/ruby_prof.c
CHANGED
@@ -1,6 +1,6 @@
 /*
- *
- *
+ * Copyright (C) 2007  Shugo Maeda <shugo@ruby-lang.org>
+ *                     Charlie Savage <cfis@savagexi.com>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,240 +25,141 @@
  * SUCH DAMAGE.
  */
 
+/* ruby-prof tracks the time spent executing every method in ruby programming.
+   The main players are:
+
+     prof_result_t     - Its one field, values, contains the overall results
+     thread_data_t     - Stores data about a single thread.
+     prof_stack_t      - The method call stack in a particular thread
+     prof_method_t     - Profiling information for each method
+     prof_call_info_t  - Keeps track a method's callers and callees.
+
+  The final resulut is a hash table of thread_data_t, keyed on the thread
+  id.  Each thread has an hash a table of prof_method_t, keyed on the
+  method id.  A hash table is used for quick look up when doing a profile.
+  However, it is exposed to Ruby as an array.
+
+  Each prof_method_t has two hash tables, parent and children, of prof_call_info_t.
+  These objects keep track of a method's callers (who called the method) and its
+  callees (who the method called).  These are keyed the method id, but once again,
+  are exposed to Ruby as arrays.  Each prof_call_into_t maintains a pointer to the
+  caller or callee method, thereby making it easy to navigate through the call
+  hierarchy in ruby - which is very helpful for creating call graphs.
+*/
+
+
 #include <stdio.h>
-#include <time.h>
-#ifdef HAVE_SYS_TIMES_H
-#include <sys/times.h>
-#endif
 
 #include <ruby.h>
 #include <node.h>
 #include <st.h>
 
-#define PROF_VERSION "0.4.1"
 
+/* ================  Constants  =================*/
+#define INITIAL_STACK_SIZE 8
+#define PROF_VERSION "0.5.0"
+
+
+/* ================  Measurement  =================*/
+#ifdef HAVE_LONG_LONG
+typedef LONG_LONG prof_measure_t;
+#else
+typedef unsigned long prof_measure_t;
+#endif
+
+#include "measure_process_time.h"
+#include "measure_wall_time.h"
+#include "measure_cpu_time.h"
+#include "measure_allocations.h"
+
+static prof_measure_t (*get_measurement)() = measure_process_time;
+static double (*convert_measurement)(prof_measure_t) = convert_process_time;
+
+/* ================  DataTypes  =================*/
 static VALUE mProf;
 static VALUE cResult;
 static VALUE cMethodInfo;
 static VALUE cCallInfo;
 
-
-typedef
-
-
-
+/* Profiling information for each method. */
+typedef struct prof_method_t {
+    st_data_t key;                 /* Cache hash value for speed reasons. */
+    VALUE name;                    /* Name of the method. */
+    VALUE klass;                   /* The method's class. */
+    ID mid;                        /* The method id. */
+    int depth;                     /* The recursive depth this method was called at.*/
+    int called;                    /* Number of times called */
+    const char* source_file;       /* The method's source file */
+    int line;                      /* The method's line number. */
+    prof_measure_t total_time;     /* Total time spent in this method and children. */
+    prof_measure_t self_time;      /* Total time spent in this method. */
+    prof_measure_t wait_time;      /* Total time this method spent waiting for other threads. */
+    st_table *parents;             /* The method's callers (prof_call_info_t). */
+    st_table *children;            /* The method's callees (prof_call_info_t). */
+    int active_frame;              /* # of active frames for this method.  Used to detect
+                                      recursion.  Stashed here to avoid extra lookups in
+                                      the hook method - so a bit hackey. */
+    struct prof_method_t *base;    /* For recursion - this is the parent method */
+} prof_method_t;
+
 
+/* Callers and callee information for a method. */
 typedef struct {
-
-    ID mid;
+    prof_method_t *target;
     int called;
-
-
+    prof_measure_t total_time;
+    prof_measure_t self_time;
+    prof_measure_t wait_time;
+    int line;
 } prof_call_info_t;
 
-typedef struct {
-    /* Cache hash value for speed reasons. */
-    st_data_t key;
-    VALUE klass;
-    ID mid;
-    int thread_id;
-    int called;
-    prof_clock_t self_time;
-    prof_clock_t total_time;
-    st_table *parents;
-    st_table *children;
-    /* Hack - piggyback a field to keep track of the
-       of times the method appears in the current
-       stack.  Used to detect recursive cycles.  This
-       works because there is an instance of this struct
-       per method per thread.  Could have a separate
-       hash table...would be cleaner but adds a bit of
-       code and 1 extra lookup per event.*/
-    int stack_count;
-} prof_method_t;
 
+/* Temporary object that maintains profiling information
+   for active methods - there is one per method.*/
 typedef struct {
-    /*
-
-    prof_method_t *
-
-
-
-
+    /* Caching prof_method_t values significantly
+       increases performance. */
+    prof_method_t *method;
+    prof_measure_t start_time;
+    prof_measure_t wait_time;
+    prof_measure_t child_time;
+    unsigned int line;
+} prof_frame_t;
+
+/* Current stack of active methods.*/
 typedef struct {
-
-
-
+    prof_frame_t *start;
+    prof_frame_t *end;
+    prof_frame_t *ptr;
 } prof_stack_t;
 
+/* Profiling information for a thread. */
 typedef struct {
-
-    st_table*
-
+    unsigned long thread_id;        /* Thread id */
+    st_table* method_info_table;    /* All called methods */
+    prof_stack_t* stack;            /* Active methods */
+    prof_measure_t last_switch;     /* Point of last context switch */
 } thread_data_t;
 
 typedef struct {
     VALUE threads;
 } prof_result_t;
 
-static VALUE toplevel_id;
-static st_data_t toplevel_key;
-static int clock_mode;
-static st_table *threads_tbl = NULL;
-static VALUE class_tbl = Qnil;
-
-#define CLOCK_MODE_PROCESS 0
-#define CLOCK_MODE_WALL 1
-#if defined(_WIN32) || (defined(__GNUC__) && (defined(__i386__) || defined(__powerpc__) || defined(__ppc__)))
-#define CLOCK_MODE_CPU 2
-static double cpu_frequency;
-#endif
-
-#define INITIAL_STACK_SIZE 8
-
-static prof_clock_t
-clock_get_clock()
-{
-    return clock();
-}
-
-static double
-clock_clock2sec(prof_clock_t c)
-{
-    return (double) c / CLOCKS_PER_SEC;
-}
-
-static prof_clock_t
-gettimeofday_get_clock()
-{
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-    return tv.tv_sec * 1000000 + tv.tv_usec;
-}
-
-static double
-gettimeofday_clock2sec(prof_clock_t c)
-{
-    return (double) c / 1000000;
-}
-
-#ifdef CLOCK_MODE_CPU
-
-
-#if defined(__GNUC__)
-
-static prof_clock_t
-cpu_get_clock()
-{
-#if defined(__i386__)
-    unsigned long long x;
-    __asm__ __volatile__ ("rdtsc" : "=A" (x));
-    return x;
-#elif defined(__powerpc__) || defined(__ppc__)
-    unsigned long long x, y;
-
-    __asm__ __volatile__ ("\n\
-1: mftbu   %1\n\
-   mftb    %L0\n\
-   mftbu   %0\n\
-   cmpw    %0,%1\n\
-   bne-    1b"
-   : "=r" (x), "=r" (y));
-    return x;
-#endif
-}
-
-#elif defined(_WIN32)
-
-static prof_clock_t
-cpu_get_clock()
-{
-    prof_clock_t cycles = 0;
-
-    __asm
-    {
-        rdtsc
-        mov DWORD PTR cycles, eax
-        mov DWORD PTR [cycles + 4], edx
-    }
-    return cycles;
-}
-
-#endif
-
-
-/* The _WIN32 check is needed for msys (and maybe cygwin?) */
-#if defined(__GNUC__) && !defined(_WIN32)
-
-double get_cpu_frequency()
-{
-    unsigned long long x, y;
-
-    struct timespec ts;
-    ts.tv_sec = 0;
-    ts.tv_nsec = 500000000;
-    x = cpu_get_clock();
-    nanosleep(&ts, NULL);
-    y = cpu_get_clock();
-    return (y - x) * 2;
-}
-
-#elif defined(_WIN32)
-
-double get_cpu_frequency()
-{
-    unsigned long long x, y;
-    double frequency;
-    x = cpu_get_clock();
-
-    /* Use the windows sleep function, not Ruby's */
-    Sleep(500);
-    y = cpu_get_clock();
-    frequency = 2*(y-x);
-    return frequency;
-}
-#endif
 
-
-
-
-
-
-
-/* call-seq:
-   cpu_frequency -> int
-
-   Returns the cpu's frequency.  This value is needed when using the
-   cpu RubyProf::clock_mode. */
-static VALUE
-prof_get_cpu_frequency(VALUE self)
-{
-    return rb_float_new(cpu_frequency);
-}
-
-/* call-seq:
-   cpu_frequency=value -> void
-
-   Sets the cpu's frequency.  This value is needed when using the
-   cpu RubyProf::clock_mode. */
-static VALUE
-prof_set_cpu_freqeuncy(VALUE self, VALUE val)
-{
-    cpu_frequency = NUM2DBL(val);
-    return val;
-}
-
-#endif
-
-static prof_clock_t (*get_clock)() = clock_get_clock;
-static double (*clock2sec)(prof_clock_t) = clock_clock2sec;
+/* ================  Variables  =================*/
+static int measure_mode;
+static st_table *threads_tbl = NULL;
+/* TODO - If Ruby become multi-threaded this has to turn into
+   a separate stack since this isn't thread safe! */
+static thread_data_t* last_thread_data = NULL;
 
 
+/* ================  Helper Functions  =================*/
 /* Helper method to get the id of a Ruby thread. */
-static inline
+static inline long
 get_thread_id(VALUE thread)
 {
-    return
+    return NUM2ULONG(rb_obj_id(thread));
 }
 
 static VALUE
@@ -270,25 +171,24 @@ figure_singleton_name(VALUE klass)
        figure out what it is attached to.*/
     VALUE attached = rb_iv_get(klass, "__attached__");
 
-
     /* Is this a singleton class acting as a metaclass? */
-    if (
+    if (BUILTIN_TYPE(attached) == T_CLASS)
     {
         result = rb_str_new2("<Class::");
         rb_str_append(result, rb_inspect(attached));
-        rb_str_cat2(result, "
+        rb_str_cat2(result, ">");
     }
 
     /* Is this for singleton methods on a module? */
-    else if (
+    else if (BUILTIN_TYPE(attached) == T_MODULE)
     {
         result = rb_str_new2("<Module::");
         rb_str_append(result, rb_inspect(attached));
-        rb_str_cat2(result, "
+        rb_str_cat2(result, ">");
     }
 
-    /* Is
-    else if (
+    /* Is this for singleton methods on an object? */
+    else if (BUILTIN_TYPE(attached) == T_OBJECT)
     {
         /* Make sure to get the super class so that we don't
            mistakenly grab a T_ICLASS which would lead to
@@ -296,79 +196,106 @@ figure_singleton_name(VALUE klass)
|
|
296
196
|
VALUE super = rb_class_real(RCLASS(klass)->super);
|
297
197
|
result = rb_str_new2("<Object::");
|
298
198
|
rb_str_append(result, rb_inspect(super));
|
299
|
-
rb_str_cat2(result, "
|
199
|
+
rb_str_cat2(result, ">");
|
300
200
|
}
|
201
|
+
|
202
|
+
/* Ok, this could be other things like an array made put onto
|
203
|
+
a singleton object (yeah, it happens, see the singleton
|
204
|
+
objects test case). */
|
301
205
|
else
|
302
206
|
{
|
303
|
-
|
304
|
-
result = rb_str_new2("<Unknown:");
|
305
|
-
rb_str_append(result, rb_inspect(klass));
|
306
|
-
rb_str_cat2(result, ">#");
|
307
|
-
rb_raise(rb_eRuntimeError, "Unknown singleton class: %i", result);
|
207
|
+
result = rb_inspect(klass);
|
308
208
|
}
|
309
209
|
|
310
210
|
return result;
|
311
211
|
}
|
312
212
|
|
313
213
|
static VALUE
|
314
|
-
|
214
|
+
klass_name(VALUE klass)
|
315
215
|
{
|
316
|
-
VALUE result;
|
317
|
-
VALUE method_name;
|
318
|
-
|
319
|
-
if (mid == ID_ALLOCATOR)
|
320
|
-
method_name = rb_str_new2("allocate");
|
321
|
-
else
|
322
|
-
method_name = rb_String(ID2SYM(mid));
|
216
|
+
VALUE result = Qnil;
|
323
217
|
|
324
|
-
|
325
|
-
|
326
|
-
result = rb_str_new2("
|
327
|
-
|
218
|
+
if (klass == 0 || klass == Qnil)
|
219
|
+
{
|
220
|
+
result = rb_str_new2("Global");
|
221
|
+
}
|
222
|
+
else if (BUILTIN_TYPE(klass) == T_MODULE)
|
328
223
|
{
|
329
224
|
result = rb_inspect(klass);
|
330
|
-
rb_str_cat2(result, "#");
|
331
225
|
}
|
332
|
-
else if (
|
226
|
+
else if (BUILTIN_TYPE(klass) == T_CLASS && FL_TEST(klass, FL_SINGLETON))
|
333
227
|
{
|
334
228
|
result = figure_singleton_name(klass);
|
335
229
|
}
|
336
|
-
else if (
|
230
|
+
else if (BUILTIN_TYPE(klass) == T_CLASS)
|
337
231
|
{
|
338
232
|
result = rb_inspect(klass);
|
339
|
-
rb_str_cat2(result, "#");
|
340
233
|
}
|
341
234
|
else
|
342
235
|
{
|
343
236
|
/* Should never happen. */
|
344
|
-
result = rb_str_new2("Unknown
|
345
|
-
rb_str_append(result, rb_inspect(klass));
|
346
|
-
rb_str_cat2(result, ">#");
|
347
|
-
rb_raise(rb_eRuntimeError, "Unsupported type in method name: %i\n", result);
|
237
|
+
result = rb_str_new2("Unknown");
|
348
238
|
}
|
349
239
|
|
350
|
-
|
351
|
-
|
240
|
+
return result;
|
241
|
+
}
|
242
|
+
|
243
|
+
static VALUE
|
244
|
+
method_name(ID mid, int depth)
|
245
|
+
{
|
246
|
+
VALUE result;
|
247
|
+
|
248
|
+
if (mid == ID_ALLOCATOR)
|
249
|
+
result = rb_str_new2("allocate");
|
250
|
+
else if (mid == 0)
|
251
|
+
result = rb_str_new2("[No method]");
|
252
|
+
else
|
253
|
+
result = rb_String(ID2SYM(mid));
|
254
|
+
|
255
|
+
if (depth > 0)
|
256
|
+
{
|
257
|
+
char buffer[65];
|
258
|
+
sprintf(buffer, "%i", depth);
|
259
|
+
rb_str_cat2(result, "-");
|
260
|
+
rb_str_cat2(result, buffer);
|
261
|
+
}
|
352
262
|
|
353
263
|
return result;
|
354
264
|
}
|
355
265
|
|
266
|
+
static VALUE
|
267
|
+
full_name(VALUE klass, ID mid, int depth)
|
268
|
+
{
|
269
|
+
VALUE result = klass_name(klass);
|
270
|
+
rb_str_cat2(result, "#");
|
271
|
+
rb_str_append(result, method_name(mid, depth));
|
272
|
+
|
273
|
+
return result;
|
274
|
+
}
|
275
|
+
|
276
|
+
|
356
277
|
static inline st_data_t
|
357
|
-
method_key(VALUE klass, ID mid)
|
278
|
+
method_key(VALUE klass, ID mid, int depth)
|
358
279
|
{
|
359
|
-
|
280
|
+
/* No idea if this is a unique key or not. Would be
|
281
|
+
best to use the method name, but we can't, since
|
282
|
+
that calls internal ruby functions which would
|
283
|
+
cause the hook method to recursively call itself.
|
284
|
+
And that is too much of a bother to deal with.
|
285
|
+
Plus of course, this is faster. */
|
286
|
+
return (klass * 100) + (mid * 10) + depth;
|
360
287
|
}
|
361
288
|
|
289
|
+
/* ================ Stack Handling =================*/
|
362
290
|
|
363
|
-
/*
|
291
|
+
/* Creates a stack of prof_frame_t to keep track
|
292
|
+
of timings for active methods. */
|
364
293
|
static prof_stack_t *
|
365
294
|
stack_create()
|
366
295
|
{
|
367
|
-
prof_stack_t *stack;
|
368
|
-
|
369
|
-
stack =
|
370
|
-
stack->start = stack->ptr =
|
371
|
-
ALLOC_N(prof_data_t, INITIAL_STACK_SIZE);
|
296
|
+
prof_stack_t *stack = ALLOC(prof_stack_t);
|
297
|
+
stack->start = ALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
|
298
|
+
stack->ptr = stack->start;
|
372
299
|
stack->end = stack->start + INITIAL_STACK_SIZE;
|
373
300
|
return stack;
|
374
301
|
}
|
@@ -380,100 +307,116 @@ stack_free(prof_stack_t *stack)
|
|
380
307
|
xfree(stack);
|
381
308
|
}
|
382
309
|
|
383
|
-
static inline
|
310
|
+
static inline prof_frame_t *
|
384
311
|
stack_push(prof_stack_t *stack)
|
385
312
|
{
|
386
|
-
|
387
|
-
|
388
|
-
|
389
|
-
|
390
|
-
|
391
|
-
|
392
|
-
|
393
|
-
|
394
|
-
|
313
|
+
/* Is there space on the stack? If not, double
|
314
|
+
its size. */
|
315
|
+
if (stack->ptr == stack->end)
|
316
|
+
{
|
317
|
+
size_t len = stack->ptr - stack->start;
|
318
|
+
size_t new_capacity = (stack->end - stack->start) * 2;
|
319
|
+
REALLOC_N(stack->start, prof_frame_t, new_capacity);
|
320
|
+
stack->ptr = stack->start + len;
|
321
|
+
stack->end = stack->start + new_capacity;
|
322
|
+
}
|
323
|
+
return stack->ptr++;
|
395
324
|
}
|
396
325
|
|
397
|
-
static inline
|
326
|
+
static inline prof_frame_t *
|
398
327
|
stack_pop(prof_stack_t *stack)
|
399
328
|
{
|
400
329
|
if (stack->ptr == stack->start)
|
401
|
-
|
330
|
+
return NULL;
|
402
331
|
else
|
403
|
-
|
332
|
+
return --stack->ptr;
|
404
333
|
}
|
405
334
|
|
406
|
-
static inline
|
335
|
+
static inline prof_frame_t *
|
407
336
|
stack_peek(prof_stack_t *stack)
|
408
337
|
{
|
409
338
|
if (stack->ptr == stack->start)
|
410
|
-
|
339
|
+
return NULL;
|
411
340
|
else
|
412
|
-
|
341
|
+
return stack->ptr - 1;
|
413
342
|
}
|
414
343
|
|
344
|
+
static inline size_t
|
345
|
+
stack_size(prof_stack_t *stack)
|
346
|
+
{
|
347
|
+
return stack->ptr - stack->start;
|
348
|
+
}
|
415
349
|
|
350
|
+
/* ================ Method Info Handling =================*/
|
416
351
|
|
417
352
|
/* --- Keeps track of the methods the current method calls */
|
418
353
|
static st_table *
|
419
|
-
|
354
|
+
method_info_table_create()
|
420
355
|
{
|
421
356
|
return st_init_numtable();
|
422
357
|
}
|
423
358
|
|
424
|
-
static inline
|
425
|
-
|
359
|
+
static inline size_t
|
360
|
+
method_info_table_insert(st_table *table, st_data_t key, prof_method_t *val)
|
426
361
|
{
|
427
362
|
return st_insert(table, key, (st_data_t) val);
|
428
363
|
}
|
429
364
|
|
430
365
|
static inline prof_method_t *
|
431
|
-
|
366
|
+
method_info_table_lookup(st_table *table, st_data_t key)
|
432
367
|
{
|
433
368
|
st_data_t val;
|
434
|
-
if (st_lookup(table, key, &val))
|
435
|
-
|
369
|
+
if (st_lookup(table, key, &val))
|
370
|
+
{
|
371
|
+
return (prof_method_t *) val;
|
436
372
|
}
|
437
|
-
else
|
438
|
-
|
373
|
+
else
|
374
|
+
{
|
375
|
+
return NULL;
|
439
376
|
}
|
440
377
|
}
|
441
378
|
|
379
|
+
|
442
380
|
static void
|
443
|
-
|
381
|
+
method_info_table_free(st_table *table)
|
444
382
|
{
|
383
|
+
/* Don't free the contents since they are wrapped by
|
384
|
+
Ruby objects! */
|
445
385
|
st_free_table(table);
|
446
386
|
}
|
447
387
|
|
448
388
|
|
449
|
-
/*
|
450
|
-
|
389
|
+
/* ================ Call Info Handling =================*/
|
390
|
+
|
391
|
+
/* ---- Hash, keyed on class/method_id, that holds call_info objects ---- */
|
451
392
|
static st_table *
|
452
|
-
|
393
|
+
caller_table_create()
|
453
394
|
{
|
454
395
|
return st_init_numtable();
|
455
396
|
}
|
456
397
|
|
457
|
-
static inline
|
458
|
-
|
398
|
+
static inline size_t
|
399
|
+
caller_table_insert(st_table *table, st_data_t key, prof_call_info_t *val)
|
459
400
|
{
|
460
401
|
return st_insert(table, key, (st_data_t) val);
|
461
402
|
}
|
462
403
|
|
463
404
|
static inline prof_call_info_t *
|
464
|
-
|
405
|
+
caller_table_lookup(st_table *table, st_data_t key)
|
465
406
|
{
|
466
407
|
st_data_t val;
|
467
|
-
if (st_lookup(table, key, &val))
|
468
|
-
|
408
|
+
if (st_lookup(table, key, &val))
|
409
|
+
{
|
410
|
+
return (prof_call_info_t *) val;
|
469
411
|
}
|
470
|
-
else
|
471
|
-
|
412
|
+
else
|
413
|
+
{
|
414
|
+
return NULL;
|
472
415
|
}
|
473
416
|
}
|
474
417
|
|
475
418
|
static void
|
476
|
-
|
419
|
+
caller_table_free(st_table *table)
|
477
420
|
{
|
478
421
|
st_free_table(table);
|
479
422
|
}
|
@@ -485,16 +428,16 @@ they took to execute. */
 
 /* :nodoc: */
 static prof_call_info_t *
-call_info_create(
+call_info_create(prof_method_t* method)
 {
     prof_call_info_t *result;
 
     result = ALLOC(prof_call_info_t);
-    result->
-    result->mid = mid;
+    result->target = method;
     result->called = 0;
     result->total_time = 0;
     result->self_time = 0;
+    result->wait_time = 0;
     return result;
 }
 
@@ -516,21 +459,37 @@ static VALUE
 call_info_new(prof_call_info_t *result)
 {
     /* We don't want Ruby freeing the underlying C structures, that
-       is when the prof_method_t is freed. */
+       is done when the prof_method_t is freed. */
     return Data_Wrap_Struct(cCallInfo, NULL, NULL, result);
 }
 
 static prof_call_info_t *
 get_call_info_result(VALUE obj)
 {
-    if (
+    if (BUILTIN_TYPE(obj) != T_DATA)
     {
         /* Should never happen */
-
+        rb_raise(rb_eTypeError, "Not a call info object");
     }
     return (prof_call_info_t *) DATA_PTR(obj);
 }
 
+
+/* call-seq:
+   called -> MethodInfo
+
+   Returns the target method. */
+static VALUE
+call_info_target(VALUE self)
+{
+    /* Target is a pointer to a method_info - so we have to be careful
+       about the GC.  We will wrap the method_info but provide no
+       free method so the underlying object is not freed twice! */
+
+    prof_call_info_t *result = get_call_info_result(self);
+    return Data_Wrap_Struct(cMethodInfo, NULL, NULL, result->target);
+}
+
 /* call-seq:
    called -> int
 
@@ -543,6 +502,16 @@ call_info_called(VALUE self)
     return INT2NUM(result->called);
 }
 
+/* call-seq:
+   line_no -> int
+
+   returns the line number of the method */
+static VALUE
+call_info_line(VALUE self)
+{
+    return rb_int_new(get_call_info_result(self)->line);
+}
+
 /* call-seq:
    total_time -> float
 
@@ -552,7 +521,7 @@ call_info_total_time(VALUE self)
 {
     prof_call_info_t *result = get_call_info_result(self);
 
-    return rb_float_new(
+    return rb_float_new(convert_measurement(result->total_time));
 }
 
 /* call-seq:
@@ -564,7 +533,19 @@ call_info_self_time(VALUE self)
 {
     prof_call_info_t *result = get_call_info_result(self);
 
-    return rb_float_new(
+    return rb_float_new(convert_measurement(result->self_time));
+}
+
+/* call-seq:
+   wait_time -> float
+
+   Returns the total amount of time this method waited for other threads. */
+static VALUE
+call_info_wait_time(VALUE self)
+{
+    prof_call_info_t *result = get_call_info_result(self);
+
+    return rb_float_new(convert_measurement(result->wait_time));
 }
 
 /* call-seq:
@@ -575,8 +556,8 @@ static VALUE
 call_info_children_time(VALUE self)
 {
     prof_call_info_t *result = get_call_info_result(self);
-
-    return rb_float_new(
+    prof_measure_t children_time = result->total_time - result->self_time - result->wait_time;
+    return rb_float_new(convert_measurement(children_time));
 }
 
 
@@ -591,24 +572,26 @@ the RubyProf::Result object.
|
|
591
572
|
|
592
573
|
/* :nodoc: */
|
593
574
|
static prof_method_t *
|
594
|
-
prof_method_create(VALUE klass, ID mid,
|
575
|
+
prof_method_create(NODE *node, st_data_t key, VALUE klass, ID mid, int depth)
|
595
576
|
{
|
596
|
-
prof_method_t *result;
|
597
|
-
|
598
|
-
|
599
|
-
|
577
|
+
prof_method_t *result = ALLOC(prof_method_t);
|
578
|
+
|
579
|
+
result->klass = klass;
|
580
|
+
result->mid = mid;
|
581
|
+
result->key = key;
|
582
|
+
result->depth = depth;
|
600
583
|
|
601
|
-
result = ALLOC(prof_method_t);
|
602
|
-
result->key = method_key(klass, mid);
|
603
584
|
result->called = 0;
|
604
585
|
result->total_time = 0;
|
605
586
|
result->self_time = 0;
|
606
|
-
result->
|
607
|
-
result->
|
608
|
-
result->
|
609
|
-
result->
|
610
|
-
result->
|
611
|
-
|
587
|
+
result->wait_time = 0;
|
588
|
+
result->parents = caller_table_create();
|
589
|
+
result->children = caller_table_create();
|
590
|
+
result->active_frame = 0;
|
591
|
+
result->base = result;
|
592
|
+
|
593
|
+
result->source_file = (node ? node->nd_file : 0);
|
594
|
+
result->line = (node ? nd_line(node) : 0);
|
612
595
|
return result;
|
613
596
|
}
|
614
597
|
|
@@ -621,28 +604,24 @@ prof_method_mark(prof_method_t *data)
|
|
621
604
|
static void
|
622
605
|
prof_method_free(prof_method_t *data)
|
623
606
|
{
|
607
|
+
st_foreach(data->parents, free_call_infos, 0);
|
608
|
+
caller_table_free(data->parents);
|
609
|
+
|
624
610
|
st_foreach(data->children, free_call_infos, 0);
|
625
|
-
|
626
|
-
|
611
|
+
caller_table_free(data->children);
|
612
|
+
|
627
613
|
xfree(data);
|
628
614
|
}
|
629
615
|
|
630
616
|
static VALUE
|
631
617
|
prof_method_new(prof_method_t *result)
|
632
618
|
{
|
633
|
-
return Data_Wrap_Struct(cMethodInfo, prof_method_mark, prof_method_free,
|
634
|
-
result);
|
619
|
+
return Data_Wrap_Struct(cMethodInfo, prof_method_mark, prof_method_free, result);
|
635
620
|
}
|
636
621
|
|
637
622
|
static prof_method_t *
|
638
623
|
get_prof_method(VALUE obj)
|
639
624
|
{
|
640
|
-
if (TYPE(obj) != T_DATA ||
|
641
|
-
RDATA(obj)->dfree != (RUBY_DATA_FUNC) prof_method_free)
|
642
|
-
{
|
643
|
-
/* Should never happen */
|
644
|
-
rb_raise(rb_eTypeError, "wrong profile result");
|
645
|
-
}
|
646
625
|
return (prof_method_t *) DATA_PTR(obj);
|
647
626
|
}
|
648
627
|
|
@@ -668,7 +647,7 @@ prof_method_total_time(VALUE self)
 {
     prof_method_t *result = get_prof_method(self);
 
-    return rb_float_new(
+    return rb_float_new(convert_measurement(result->total_time));
 }
 
 /* call-seq:
@@ -680,39 +659,68 @@ prof_method_self_time(VALUE self)
|
|
680
659
|
{
|
681
660
|
prof_method_t *result = get_prof_method(self);
|
682
661
|
|
683
|
-
return rb_float_new(
|
662
|
+
return rb_float_new(convert_measurement(result->self_time));
|
684
663
|
}
|
685
664
|
|
686
665
|
/* call-seq:
|
687
|
-
|
666
|
+
wait_time -> float
|
688
667
|
|
689
|
-
Returns the total amount of time
|
668
|
+
Returns the total amount of time this method waited for other threads. */
|
690
669
|
static VALUE
|
691
|
-
|
670
|
+
prof_method_wait_time(VALUE self)
|
692
671
|
{
|
693
672
|
prof_method_t *result = get_prof_method(self);
|
694
|
-
|
695
|
-
return rb_float_new(
|
673
|
+
|
674
|
+
return rb_float_new(convert_measurement(result->wait_time));
|
696
675
|
}
|
697
676
|
|
698
677
|
/* call-seq:
|
699
|
-
|
678
|
+
line_no -> int
|
700
679
|
|
701
|
-
|
680
|
+
returns the line number of the method */
|
702
681
|
static VALUE
|
703
|
-
|
682
|
+
prof_method_line(VALUE self)
|
683
|
+
{
|
684
|
+
return rb_int_new(get_prof_method(self)->line);
|
685
|
+
}
|
686
|
+
|
687
|
+
/* call-seq:
|
688
|
+
children_time -> float
|
689
|
+
|
690
|
+
Returns the total amount of time spent in this method's children. */
|
691
|
+
static VALUE
|
692
|
+
prof_method_children_time(VALUE self)
|
704
693
|
{
|
705
694
|
prof_method_t *result = get_prof_method(self);
|
695
|
+
prof_measure_t children_time = result->total_time - result->self_time - result->wait_time;
|
696
|
+
return rb_float_new(convert_measurement(children_time));
|
697
|
+
}
|
706
698
|
|
707
|
-
|
699
|
+
/* call-seq:
|
700
|
+
source_file => string
|
701
|
+
|
702
|
+
return the source file of the method
|
703
|
+
*/
|
704
|
+
static VALUE prof_method_source_file(VALUE self)
|
705
|
+
{
|
706
|
+
const char* sf = get_prof_method(self)->source_file;
|
707
|
+
if(!sf)
|
708
|
+
{
|
709
|
+
return Qnil;
|
710
|
+
}
|
711
|
+
else
|
712
|
+
{
|
713
|
+
return rb_str_new2(sf);
|
714
|
+
}
|
708
715
|
}
|
709
716
|
|
717
|
+
|
710
718
|
/* call-seq:
|
711
719
|
method_class -> klass
|
712
720
|
|
713
721
|
Returns the Ruby klass that owns this method. */
|
714
722
|
static VALUE
|
715
|
-
|
723
|
+
prof_method_klass(VALUE self)
|
716
724
|
{
|
717
725
|
prof_method_t *result = get_prof_method(self);
|
718
726
|
|
@@ -731,6 +739,19 @@ prof_method_id(VALUE self)
     return ID2SYM(result->mid);
 }
 
+/* call-seq:
+   klass_name -> string
+
+   Returns the name of this method's class.  Singleton classes
+   will have the form <Object::Object>. */
+
+static VALUE
+prof_klass_name(VALUE self)
+{
+    prof_method_t *method = get_prof_method(self);
+    return klass_name(method->klass);
+}
+
 /* call-seq:
    method_name -> string
 
@@ -741,100 +762,85 @@ static VALUE
|
|
741
762
|
prof_method_name(VALUE self)
|
742
763
|
{
|
743
764
|
prof_method_t *method = get_prof_method(self);
|
744
|
-
return method_name(method->
|
745
|
-
}
|
746
|
-
|
747
|
-
static int
|
748
|
-
prof_method_collect_parents(st_data_t key, st_data_t value, st_data_t parents)
|
749
|
-
{
|
750
|
-
prof_method_t *parent = (prof_method_t *) value;
|
751
|
-
|
752
|
-
rb_ary_push(parents, INT2FIX((int) parent));
|
753
|
-
return ST_CONTINUE;
|
765
|
+
return method_name(method->mid, method->depth);
|
754
766
|
}
|
755
767
|
|
756
|
-
|
757
768
|
/* call-seq:
|
758
|
-
|
769
|
+
full_name -> string
|
770
|
+
|
771
|
+
Returns the full name of this method in the format Object#method.*/
|
759
772
|
|
760
|
-
Returns a hash table that lists all the methods that called this
|
761
|
-
method (ie, parents). The hash table is keyed on method name and contains references
|
762
|
-
to RubyProf::MethodInfo objects.*/
|
763
773
|
static VALUE
|
764
|
-
|
774
|
+
prof_full_name(VALUE self)
|
765
775
|
{
|
766
|
-
|
767
|
-
|
768
|
-
|
769
|
-
int i = 0;
|
770
|
-
|
771
|
-
/* Get the list of parents */
|
772
|
-
prof_method_t *child = get_prof_method(self);
|
773
|
-
st_foreach(child->parents, prof_method_collect_parents, parents);
|
774
|
-
|
775
|
-
/* Iterate over each parent */
|
776
|
-
len = RARRAY(parents)->len;
|
777
|
-
for(i = 0; i<len; i++)
|
778
|
-
{
|
779
|
-
prof_call_info_t *call_info;
|
780
|
-
|
781
|
-
/* First get the parent */
|
782
|
-
VALUE item = rb_ary_entry(parents, i);
|
783
|
-
prof_method_t *parent = (prof_method_t *)(FIX2INT(item));
|
784
|
-
|
785
|
-
/* Now get the call info */
|
786
|
-
call_info = child_table_lookup(parent->children, child->key);
|
787
|
-
|
788
|
-
if (call_info == NULL)
|
789
|
-
{
|
790
|
-
/* Should never happen */
|
791
|
-
rb_raise(rb_eRuntimeError,
|
792
|
-
"Could not find parent call info object for %s",
|
793
|
-
method_name(child->klass, child->mid));
|
794
|
-
}
|
776
|
+
prof_method_t *method = get_prof_method(self);
|
777
|
+
return full_name(method->klass, method->mid, method->depth);
|
778
|
+
}
|
795
779
|
|
796
|
-
|
797
|
-
|
798
|
-
we want to see that printed out for parent records in
|
799
|
-
a call graph. */
|
800
|
-
rb_hash_aset(result, method_name(parent->klass, parent->mid),
|
801
|
-
call_info_new(call_info));
|
802
|
-
}
|
780
|
+
/* call-seq:
|
781
|
+
called -> MethodInfo
|
803
782
|
|
804
|
-
|
783
|
+
For recursively called methods, returns the base method. Otherwise,
|
784
|
+
returns self. */
|
785
|
+
static VALUE
|
786
|
+
prof_method_base(VALUE self)
|
787
|
+
{
|
788
|
+
prof_method_t *method = get_prof_method(self);
|
789
|
+
|
790
|
+
if (method == method->base)
|
791
|
+
return self;
|
792
|
+
else
|
793
|
+
/* Target is a pointer to a method_info - so we have to be careful
|
794
|
+
about the GC. We will wrap the method_info but provide no
|
795
|
+
free method so the underlying object is not freed twice! */
|
796
|
+
return Data_Wrap_Struct(cMethodInfo, NULL, NULL, method->base);
|
805
797
|
}
|
806
798
|
|
807
|
-
|
808
799
|
static int
|
809
|
-
|
800
|
+
prof_method_collect_call_infos(st_data_t key, st_data_t value, st_data_t result)
|
810
801
|
{
|
811
|
-
prof_call_info_t *call_info = (prof_call_info_t *) value;
|
812
|
-
VALUE name = method_name(call_info->klass, call_info->mid);
|
813
|
-
VALUE hash = (VALUE) result;
|
814
|
-
|
815
802
|
/* Create a new Ruby CallInfo object and store it into the hash
|
816
803
|
keyed on the parent's name. We use the parent's name because
|
817
804
|
we want to see that printed out for child records in
|
818
805
|
a call graph. */
|
819
|
-
|
806
|
+
prof_call_info_t *call_info = (prof_call_info_t *) value;
|
807
|
+
VALUE arr = (VALUE) result;
|
808
|
+
rb_ary_push(arr, call_info_new(call_info));
|
820
809
|
return ST_CONTINUE;
|
821
810
|
}
|
822
811
|
|
823
812
|
/* call-seq:
|
824
813
|
children -> hash
|
825
814
|
|
826
|
-
Returns
|
827
|
-
called (ie,
|
828
|
-
|
815
|
+
Returns an array of call info objects of methods that this method
|
816
|
+
was called by (ie, parents).*/
|
817
|
+
static VALUE
|
818
|
+
prof_method_parents(VALUE self)
|
819
|
+
{
|
820
|
+
/* Returns an array of call info objects for this
|
821
|
+
method's callers (the methods this method called). */
|
822
|
+
|
823
|
+
VALUE children = rb_ary_new();
|
824
|
+
prof_method_t *result = get_prof_method(self);
|
825
|
+
st_foreach(result->parents, prof_method_collect_call_infos, children);
|
826
|
+
return children;
|
827
|
+
}
|
828
|
+
|
829
|
+
|
830
|
+
/* call-seq:
|
831
|
+
children -> hash
|
832
|
+
|
833
|
+
Returns an array of call info objects of methods that this method
|
834
|
+
called (ie, children).*/
|
829
835
|
static VALUE
|
830
836
|
prof_method_children(VALUE self)
|
831
837
|
{
|
832
|
-
/* Returns
|
833
|
-
|
838
|
+
/* Returns an array of call info objects for this
|
839
|
+
method's callees (the methods this method called). */
|
834
840
|
|
835
|
-
VALUE children =
|
841
|
+
VALUE children = rb_ary_new();
|
836
842
|
prof_method_t *result = get_prof_method(self);
|
837
|
-
st_foreach(result->children,
|
843
|
+
st_foreach(result->children, prof_method_collect_call_infos, children);
|
838
844
|
return children;
|
839
845
|
}
|
840
846
|
|
@@ -847,56 +853,41 @@ prof_method_cmp(VALUE self, VALUE other)
|
|
847
853
|
prof_method_t *x = get_prof_method(self);
|
848
854
|
prof_method_t *y = get_prof_method(other);
|
849
855
|
|
850
|
-
|
851
|
-
|
852
|
-
|
853
|
-
|
854
|
-
return INT2FIX(-11);
|
856
|
+
if (x->called == 0)
|
857
|
+
return INT2FIX(1);
|
858
|
+
else if (y->called == 0)
|
859
|
+
return INT2FIX(-1);
|
855
860
|
else if (x->total_time < y->total_time)
|
856
|
-
|
861
|
+
return INT2FIX(-1);
|
857
862
|
else if (x->total_time == y->total_time)
|
858
|
-
|
863
|
+
return INT2FIX(0);
|
859
864
|
else
|
860
|
-
|
865
|
+
return INT2FIX(1);
|
861
866
|
}
|
862
867
|
|
863
868
|
static int
|
864
869
|
collect_methods(st_data_t key, st_data_t value, st_data_t result)
|
865
870
|
{
|
871
|
+
/* Called for each method stored in a thread's method table.
|
872
|
+
We want to store the method info information into an array.*/
|
873
|
+
VALUE methods = (VALUE) result;
|
866
874
|
prof_method_t *method = (prof_method_t *) value;
|
867
|
-
|
868
|
-
VALUE base_name = method_name(method->klass, method->mid);
|
869
|
-
VALUE method_key = base_name;
|
870
|
-
|
871
|
-
/* Sanity check. If we have generated the same method name for another prof_method
|
872
|
-
then we will overrite a pre-existing MethodInfo object in the table.
|
873
|
-
That would leave the original one unreferenced, which means it will
|
874
|
-
be garbage collected which leads to segmentation faults. */
|
875
|
-
VALUE existing_value = rb_hash_aref(hash, method_key);
|
876
|
-
|
877
|
-
int i = 1;
|
878
|
-
while(existing_value != Qnil)
|
879
|
-
{
|
880
|
-
method_key = rb_str_dup(base_name);
|
881
|
-
rb_str_cat2(method_key, "_");
|
882
|
-
rb_str_concat(method_key, rb_inspect(INT2NUM(i)));
|
883
|
-
existing_value = rb_hash_aref(hash, method_key);
|
884
|
-
i++;
|
885
|
-
}
|
886
|
-
|
887
|
-
rb_hash_aset(hash, method_key, prof_method_new(method));
|
875
|
+
rb_ary_push(methods, prof_method_new(method));
|
888
876
|
|
889
877
|
return ST_CONTINUE;
|
890
878
|
}
|
891
879
|
|
892
880
|
|
881
|
+
/* ================ Thread Handling =================*/
|
882
|
+
|
893
883
|
/* ---- Keeps track of thread's stack and methods ---- */
|
894
884
|
static thread_data_t*
|
895
885
|
thread_data_create()
|
896
886
|
{
|
897
887
|
thread_data_t* result = ALLOC(thread_data_t);
|
898
888
|
result->stack = stack_create();
|
899
|
-
result->
|
889
|
+
result->method_info_table = method_info_table_create();
|
890
|
+
result->last_switch = 0;
|
900
891
|
return result;
|
901
892
|
}
|
902
893
|
|
@@ -904,7 +895,7 @@ static void
 thread_data_free(thread_data_t* thread_data)
 {
     stack_free(thread_data->stack);
-
+    method_info_table_free(thread_data->method_info_table);
     xfree(thread_data);
 }
 
@@ -918,7 +909,7 @@ threads_table_create()
     return st_init_numtable();
 }
 
-static inline
+static inline size_t
 threads_table_insert(st_table *table, VALUE thread, thread_data_t *thread_data)
 {
     /* Its too slow to key on the real thread id so just typecast thread instead. */
@@ -934,34 +925,21 @@ threads_table_lookup(st_table *table, VALUE thread)
|
|
934
925
|
/* Its too slow to key on the real thread id so just typecast thread instead. */
|
935
926
|
if (st_lookup(table, (st_data_t) thread, &val))
|
936
927
|
{
|
937
|
-
|
928
|
+
result = (thread_data_t *) val;
|
938
929
|
}
|
939
930
|
else
|
940
931
|
{
|
941
|
-
prof_method_t *toplevel;
|
942
932
|
result = thread_data_create();
|
933
|
+
|
943
934
|
/* Store the real thread id here so it can be shown in the results. */
|
944
935
|
result->thread_id = get_thread_id(thread);
|
945
936
|
|
946
|
-
/* Add a toplevel method to the thread */
|
947
|
-
toplevel = prof_method_create(Qnil, toplevel_id, thread);
|
948
|
-
toplevel->called = 1;
|
949
|
-
toplevel->total_time = 0;
|
950
|
-
toplevel->self_time = 0;
|
951
|
-
minfo_table_insert(result->minfo_table, toplevel->key, toplevel);
|
952
|
-
|
953
937
|
/* Insert the table */
|
954
938
|
threads_table_insert(threads_tbl, thread, result);
|
955
|
-
|
939
|
+
}
|
956
940
|
return result;
|
957
941
|
}
|
958
942
|
|
959
|
-
static void
|
960
|
-
threads_table_free(st_table *table)
|
961
|
-
{
|
962
|
-
st_free_table(table);
|
963
|
-
}
|
964
|
-
|
965
943
|
static int
|
966
944
|
free_thread_data(st_data_t key, st_data_t value, st_data_t dummy)
|
967
945
|
{
|
@@ -969,12 +947,15 @@ free_thread_data(st_data_t key, st_data_t value, st_data_t dummy)
     return ST_CONTINUE;
 }
 
+
 static void
-
+threads_table_free(st_table *table)
 {
-    st_foreach(
+    st_foreach(table, free_thread_data, 0);
+    st_free_table(table);
 }
 
+
 static int
 collect_threads(st_data_t key, st_data_t value, st_data_t result)
 {
@@ -984,166 +965,271 @@ collect_threads(st_data_t key, st_data_t value, st_data_t result)
|
|
984
965
|
as an int. */
|
985
966
|
thread_data_t* thread_data = (thread_data_t*) value;
|
986
967
|
VALUE threads_hash = (VALUE) result;
|
987
|
-
|
988
|
-
|
989
|
-
|
968
|
+
|
969
|
+
VALUE methods = rb_ary_new();
|
970
|
+
|
971
|
+
/* Now collect an array of all the called methods */
|
972
|
+
st_foreach(thread_data->method_info_table, collect_methods, methods);
|
973
|
+
|
974
|
+
/* Store the results in the threads hash keyed on the thread id. */
|
975
|
+
rb_hash_aset(threads_hash, ULONG2NUM(thread_data->thread_id), methods);
|
990
976
|
|
991
977
|
return ST_CONTINUE;
|
992
978
|
}
|
993
979
|
|
994
|
-
|
995
|
-
|
996
|
-
|
980
|
+
|
981
|
+
/* ================ Profiling =================*/
|
982
|
+
/* Copied from eval.c */
|
983
|
+
static char *
|
984
|
+
get_event_name(rb_event_t event)
|
997
985
|
{
|
998
|
-
|
999
|
-
|
1000
|
-
|
1001
|
-
|
1002
|
-
|
1003
|
-
|
1004
|
-
|
986
|
+
switch (event) {
|
987
|
+
case RUBY_EVENT_LINE:
|
988
|
+
return "line";
|
989
|
+
case RUBY_EVENT_CLASS:
|
990
|
+
return "class";
|
991
|
+
case RUBY_EVENT_END:
|
992
|
+
return "end";
|
993
|
+
case RUBY_EVENT_CALL:
|
994
|
+
return "call";
|
995
|
+
case RUBY_EVENT_RETURN:
|
996
|
+
return "return";
|
997
|
+
case RUBY_EVENT_C_CALL:
|
998
|
+
return "c-call";
|
999
|
+
case RUBY_EVENT_C_RETURN:
|
1000
|
+
return "c-return";
|
1001
|
+
case RUBY_EVENT_RAISE:
|
1002
|
+
return "raise";
|
1003
|
+
default:
|
1004
|
+
return "unknown";
|
1005
1005
|
}
|
1006
|
+
}
|
1006
1007
|
|
1007
|
-
|
1008
|
-
|
1009
|
-
|
1010
|
-
|
1011
|
-
|
1012
|
-
|
1013
|
-
|
1014
|
-
|
1008
|
+
static void
|
1009
|
+
update_result(thread_data_t* thread_data,
|
1010
|
+
prof_measure_t total_time,
|
1011
|
+
prof_frame_t *parent_frame, prof_frame_t *child_frame)
|
1012
|
+
{
|
1013
|
+
prof_method_t *parent = NULL;
|
1014
|
+
prof_method_t *child = child_frame->method;
|
1015
|
+
prof_call_info_t *parent_call_info = NULL;
|
1016
|
+
prof_call_info_t *child_call_info = NULL;
|
1017
|
+
|
1018
|
+
prof_measure_t wait_time = child_frame->wait_time;
|
1019
|
+
prof_measure_t self_time = total_time - child_frame->child_time - wait_time;
|
1015
1020
|
|
1016
1021
|
/* Update information about the child (ie, the current method) */
|
1017
1022
|
child->called++;
|
1018
1023
|
child->total_time += total_time;
|
1019
1024
|
child->self_time += self_time;
|
1025
|
+
child->wait_time += wait_time;
|
1026
|
+
|
1027
|
+
if (!parent_frame) return;
|
1028
|
+
|
1029
|
+
parent = parent_frame->method;
|
1030
|
+
|
1031
|
+
child_call_info = caller_table_lookup(parent->children, child->key);
|
1032
|
+
if (child_call_info == NULL)
|
1033
|
+
{
|
1034
|
+
child_call_info = call_info_create(child);
|
1035
|
+
caller_table_insert(parent->children, child->key, child_call_info);
|
1036
|
+
}
|
1020
1037
|
|
1021
|
-
|
1022
|
-
|
1023
|
-
|
1038
|
+
child_call_info->called++;
|
1039
|
+
child_call_info->total_time += total_time;
|
1040
|
+
child_call_info->self_time += self_time;
|
1041
|
+
child_call_info->wait_time += wait_time;
|
1042
|
+
child_call_info->line = parent_frame->line;
|
1043
|
+
|
1044
|
+
/* Update child's parent information */
|
1045
|
+
parent_call_info = caller_table_lookup(child->parents, parent->key);
|
1046
|
+
if (parent_call_info == NULL)
|
1047
|
+
{
|
1048
|
+
parent_call_info = call_info_create(parent);
|
1049
|
+
caller_table_insert(child->parents, parent->key, parent_call_info);
|
1050
|
+
}
|
1051
|
+
|
1052
|
+
parent_call_info->called++;
|
1053
|
+
parent_call_info->total_time += total_time;
|
1054
|
+
parent_call_info->self_time += self_time;
|
1055
|
+
parent_call_info->wait_time += wait_time;
|
1056
|
+
parent_call_info->line = (parent_frame ? parent_frame->line : 0);
|
1057
|
+
|
1058
|
+
|
1059
|
+
/* If the caller is the top of the stack, the merge in
|
1060
|
+
all the child results. We have to do this because
|
1061
|
+
the top method is never popped since sooner or later
|
1062
|
+
the user has to call RubyProf::stop.*/
|
1063
|
+
|
1064
|
+
if (stack_size(thread_data->stack) == 1)
|
1065
|
+
{
|
1066
|
+
parent->total_time += total_time;
|
1067
|
+
parent->wait_time += wait_time;
|
1068
|
+
}
|
1024
1069
|
}
|
1025
1070
|
+
  static void
  prof_event_hook(rb_event_t event, NODE *node, VALUE self, ID mid, VALUE klass)
  {
-
+
      VALUE thread;
-
-
+     prof_measure_t now = 0;
+     thread_data_t* thread_data = NULL;
+     prof_frame_t *frame = NULL;
+
+
+     /* {
+         st_data_t key = 0;
+         static unsigned long last_thread_id = 0;
+
+         VALUE thread = rb_thread_current();
+         unsigned long thread_id = get_thread_id(thread);
+         char* class_name = rb_obj_classname(klass);
+         char* method_name = rb_id2name(mid);
+         char* source_file = node ? node->nd_file : 0;
+         unsigned int source_line = node ? nd_line(node) : 0;
+         char* event_name = get_event_name(event);
+
+         if (last_thread_id != thread_id)
+             printf("\n");
+
+         if (klass != 0)
+             klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);
+         key = method_key(klass, mid, 0);
+         printf("%2u: %-8s :%2d %s#%s (%u)\n",
+                thread_id, event_name, source_line, class_name, method_name, key);
+         last_thread_id = thread_id;
+     } */

-     if (profiling) return;
-
      /* Special case - skip any methods from the mProf
         module, such as Prof.stop, since they clutter
-        the results but
+        the results but aren't important to them results. */
      if (self == mProf) return;

-     /*
-
-
-     /*
-        module class since we want to combine all profiling
-        results for that module. */
-     if (BUILTIN_TYPE(klass) == T_ICLASS)
-         klass = RBASIC(klass)->klass;
-
-     /* // Debug Code
-     {
-         VALUE class_name = rb_String(klass);
-         char* c_class_name = StringValuePtr(class_name);
-         char* c_method_name = rb_id2name(mid);
-         VALUE generated_name = method_name(klass, mid);
-         char* c_generated_name = StringValuePtr(generated_name);
-         printf("Event: %2d, Method: %s#%s\n", event, c_class_name, c_method_name);
-     }*/
-
+     /* Get current measurement*/
+     now = get_measurement();
+
+     /* Get the current thread and thread data. */
      thread = rb_thread_current();
      thread_data = threads_table_lookup(threads_tbl, thread);
-
+
+     /* Get the frame at the top of the stack. This may represent
+        the current method (EVENT_LINE, EVENT_RETURN) or the
+        previous method (EVENT_CALL).*/
+     frame = stack_peek(thread_data->stack);
+
+     /* Check for a context switch */
+     if (last_thread_data && last_thread_data != thread_data)
+     {
+         /* Note how long have we been waiting. */
+         prof_measure_t wait_time = now - thread_data->last_switch;
+         if (frame)
+             frame->wait_time += wait_time;
+
+         /* Save on the last thread the time of the context switch
+            and reset this thread's last context switch to 0.*/
+         last_thread_data->last_switch = now;
+         thread_data->last_switch = 0;
+     }
+     last_thread_data = thread_data;
+
      switch (event) {
+     case RUBY_EVENT_LINE:
+     {
+         /* Keep track of the current line number in this method. When
+            a new method is called, we know what line number it was
+            called from. */
+         if (frame)
+         {
+             if (node)
+                 frame->line = nd_line(node);
+             break;
+         }
+         /* If we get here there was no frame, which means this is
+            the first method seen for this thread, so fall through
+            to below to create it. */
+     }
      case RUBY_EVENT_CALL:
      case RUBY_EVENT_C_CALL:
      {
-
-
-
-
-
-
-
+         int depth = 0;
+         st_data_t key = 0;
+         prof_method_t *method = NULL;
+
+         /* Is this an include for a module? If so get the actual
+            module class since we want to combine all profiling
+            results for that module. */
+
+         if (klass != 0)
+             klass = (BUILTIN_TYPE(klass) == T_ICLASS ? RBASIC(klass)->klass : klass);
+
+         key = method_key(klass, mid, 0);
+
+         method = method_info_table_lookup(thread_data->method_info_table, key);
+
+         if (!method)
+         {
+             method = prof_method_create(node, key, klass, mid, depth);
+             method_info_table_insert(thread_data->method_info_table, key, method);
+         }
+
+         depth = method->active_frame;
+         method->active_frame++;
+
+         if (depth > 0)
+         {
+             prof_method_t *base_method = method;
+             key = method_key(klass, mid, depth);
+             method = method_info_table_lookup(thread_data->method_info_table, key);
+
+             if (!method)
+             {
+                 method = prof_method_create(node, key, klass, mid, depth);
+                 method->base = base_method;
+                 method_info_table_insert(thread_data->method_info_table, key, method);
+             }
+         }

-     /*
-
-
-
-
-
-
-         data->start_time = get_clock();
-         data->child_cost = 0;
+         /* Push a new frame onto the stack */
+         frame = stack_push(thread_data->stack);
+         frame->method = method;
+         frame->start_time = now;
+         frame->wait_time = 0;
+         frame->child_time = 0;
+         frame->line = node ? nd_line(node) : 0;

-
+         break;
      }
      case RUBY_EVENT_RETURN:
      case RUBY_EVENT_C_RETURN:
-
-
-         prof_method_t *parent;
-         prof_method_t *child;
-         prof_clock_t now = get_clock();
-         prof_clock_t total_time, self_time;
-
-         /* Pop data for this method off the stack. */
-         data = stack_pop(thread_data->stack);
+     {
+         prof_frame_t* caller_frame = NULL;

-
-         {
-             /* Can happen on exceptions. The stack gets unwound without RubyProf.stop
-                being called. */
-             VALUE name = method_name(klass, mid);
-             VALUE message = rb_str_new2("ruby-prof: An error occured when leaving the method %s.\n");
-             rb_str_cat2(message, " Perhaps an exception occured in the code being profiled?\n" );
-             rb_warn(StringValuePtr(message), StringValuePtr(name));
-
-             return;
-         }
+         prof_measure_t total_time;

-
-
-
+         frame = stack_pop(thread_data->stack);
+         caller_frame = stack_peek(thread_data->stack);
+
+         /* Frame can be null. This can happen if RubProf.start is called from
+            a method that exits. And it can happen if an exception is raised
+            in code that is being profiled and the stack unwinds (RubProf is
+            not notified of that by the ruby runtime. */
+         if (frame == NULL) return;

-
-         caller = stack_peek(thread_data->stack);
+         total_time = now - frame->start_time;

-
+         if (caller_frame)
          {
-
-             parent = minfo_table_lookup(thread_data->minfo_table, toplevel_key);
-         }
-         else
-         {
-             caller->child_cost += total_time;
-             parent = caller->method_info;
+             caller_frame->child_time += total_time;
          }
+
+         frame->method->base->active_frame--;

-
-
-
-         child->stack_count--;
-
-         /* If the stack count is greater than zero, then this
-            method has been called recursively. In that case set the total
-            time to zero because it will be correctly set when we unwind
-            the stack up. If we don't do this, then the total time for the
-            method will be double counted per recursive call. */
-         if (child->stack_count != 0)
-             total_time = 0;
-
-         update_result(parent, child, total_time, self_time);
-         break;
-     }
+         update_result(thread_data, total_time, caller_frame, frame);
+         break;
+     }
      }
-     profiling--;
  }


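The reworked event hook above records, per stack frame, the time spent waiting on other threads (wait_time) and the time spent in callees (child_time), and update_result folds both into the per-method and per-call totals. A minimal sketch of how this surfaces through the gem's Ruby API (the block form of RubyProf.profile and the FlatPrinter ship with this release; the exact print signature is an assumption):

    require 'ruby-prof'

    # Profile a block; the result object aggregates data per thread.
    result = RubyProf.profile do
      1_000.times { "ruby-prof".reverse }
    end

    # Print a flat report to stdout; the wait time recorded by the hook
    # above is reported alongside self and total time.
    RubyProf::FlatPrinter.new(result).print(STDOUT)
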
@@ -1192,11 +1278,11 @@ prof_result_new()
  static prof_result_t *
  get_prof_result(VALUE obj)
  {
-     if (
-
+     if (BUILTIN_TYPE(obj) != T_DATA ||
+         RDATA(obj)->dfree != (RUBY_DATA_FUNC) prof_result_free)
      {
          /* Should never happen */
-
+         rb_raise(rb_eTypeError, "wrong result object");
      }
      return (prof_result_t *) DATA_PTR(obj);
  }
@@ -1217,83 +1303,74 @@ prof_result_threads(VALUE self)
  }


- /* call-seq:
-    thread_id = int
-    toplevel(thread_id) -> RubyProf::MethodInfo
-
-    Returns the RubyProf::MethodInfo object that represents the root
-    calling method for this thread. This method will always
-    be named #toplevel and contains the total amount of time spent
-    executing code in this thread. */
- static VALUE
- prof_result_toplevel(VALUE self, VALUE thread_id)
- {
-     prof_result_t *prof_result = get_prof_result(self);
-     VALUE methods = rb_hash_aref(prof_result->threads, thread_id);
-     VALUE key = method_name(Qnil, toplevel_id);
-     VALUE result = rb_hash_aref(methods, key);
-
-     if (result == Qnil)
-     {
-         /* Should never happen */
-         rb_raise(rb_eRuntimeError, "Could not find toplevel method information");
-     }
-     return result;
- }
-

  /* call-seq:
-
+    measure_mode -> measure_mode
+
+    Returns what ruby-prof is measuring. Valid values include:

-
-    *RubyProf::PROCESS_TIME - Measure process time. This is default. It is implemented using the clock function in the C Runtime library.
+    *RubyProf::PROCESS_TIME - Measure process time. This is default. It is implemented using the clock functions in the C Runtime library.
     *RubyProf::WALL_TIME - Measure wall time using gettimeofday on Linx and GetLocalTime on Windows
-    *RubyProf::CPU_TIME - Measure time using the CPU clock counter. This mode is only supported on Pentium or PowerPC platforms.
+    *RubyProf::CPU_TIME - Measure time using the CPU clock counter. This mode is only supported on Pentium or PowerPC platforms.
+    *RubyProf::ALLOCATIONS - Measure object allocations. This requires a patched Ruby interpreter.*/
  static VALUE
-
+ prof_get_measure_mode(VALUE self)
  {
-     return INT2NUM(
+     return INT2NUM(measure_mode);
  }

  /* call-seq:
-
+    measure_mode=value -> void

-    Specifies
-
+    Specifies what ruby-prof should measure. Valid values include:
+
+    *RubyProf::PROCESS_TIME - Measure process time. This is default. It is implemented using the clock functions in the C Runtime library.
     *RubyProf::WALL_TIME - Measure wall time using gettimeofday on Linx and GetLocalTime on Windows
-    *RubyProf::CPU_TIME - Measure time using the CPU clock counter. This mode is only supported on Pentium or PowerPC platforms.
+    *RubyProf::CPU_TIME - Measure time using the CPU clock counter. This mode is only supported on Pentium or PowerPC platforms.
+    *RubyProf::ALLOCATIONS - Measure object allocations. This requires a patched Ruby interpreter.*/
  static VALUE
-
+ prof_set_measure_mode(VALUE self, VALUE val)
  {
-
+     long mode = NUM2LONG(val);

      if (threads_tbl)
      {
-
+         rb_raise(rb_eRuntimeError, "can't set measure_mode while profiling");
      }

      switch (mode) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+     case MEASURE_PROCESS_TIME:
+         get_measurement = measure_process_time;
+         convert_measurement = convert_process_time;
+         break;
+
+     case MEASURE_WALL_TIME:
+         get_measurement = measure_wall_time;
+         convert_measurement = convert_wall_time;
+         break;
+
+ #if defined(MEASURE_CPU_TIME)
+     case MEASURE_CPU_TIME:
+         if (cpu_frequency == 0)
+             cpu_frequency = measure_cpu_time();
+         get_measurement = measure_cpu_time;
+         convert_measurement = convert_cpu_time;
+         break;
+ #endif
+
+ #if defined(MEASURE_ALLOCATIONS)
+     case MEASURE_ALLOCATIONS:
+         get_measurement = measure_allocations;
+         convert_measurement = convert_allocations;
+         break;
+ #endif
+
+     default:
+         rb_raise(rb_eArgError, "invalid mode: %d", mode);
+         break;
      }
-
+
+     measure_mode = mode;
      return val;
  }

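Because the setter raises once threads_tbl is non-NULL, measure_mode is meant to be chosen before profiling starts. A small usage sketch, using only the constants and singleton methods registered later in this diff (CPU_TIME is available only when the extension is built with MEASURE_CPU_TIME):

    require 'ruby-prof'

    RubyProf.measure_mode = RubyProf::WALL_TIME   # default is RubyProf::PROCESS_TIME
    result = RubyProf.profile do
      sleep(0.25)   # wall time captures the sleep; process time would not
    end
    puts RubyProf.measure_mode == RubyProf::WALL_TIME   # => true
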
@@ -1320,21 +1397,19 @@ prof_running(VALUE self)
  static VALUE
  prof_start(VALUE self)
  {
-     toplevel_id = rb_intern("toplevel");
-     toplevel_key = method_key(Qnil, toplevel_id);
-
      if (threads_tbl != NULL)
      {
          rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
      }

      /* Setup globals */
-
+     last_thread_data = NULL;
      threads_tbl = threads_table_create();

      rb_add_event_hook(prof_event_hook,
-
-
+                       RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
+                       RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN
+                       | RUBY_EVENT_LINE);

      return Qnil;
  }
@@ -1351,7 +1426,7 @@ prof_stop(VALUE self)

      if (threads_tbl == NULL)
      {
-         rb_raise(rb_eRuntimeError, "RubyProf
+         rb_raise(rb_eRuntimeError, "RubyProf is not running.");
      }

      /* Now unregister from event */
@@ -1360,14 +1435,12 @@ prof_stop(VALUE self)
      /* Create the result */
      result = prof_result_new();

-     /*
-
+     /* Unset the last_thread_data (very important!)
+        and the threads table */
+     last_thread_data = NULL;
      threads_table_free(threads_tbl);
      threads_tbl = NULL;

-     /* Free reference to class_tbl */
-     class_tbl = Qnil;
-
      return result;
  }

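For code that does not fit neatly into a block, the start/stop pair shown in these hunks can be used directly; stop returns the same result object that RubyProf.profile yields and raises if profiling was never started. A hedged sketch of that flow:

    require 'ruby-prof'

    RubyProf.start
    # ... code being profiled; RubyProf.running? returns true here ...
    result = RubyProf.stop   # raises "RubyProf is not running." if start was never called
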
@@ -1403,46 +1476,57 @@ Init_ruby_prof()
      rb_define_module_function(mProf, "stop", prof_stop, 0);
      rb_define_module_function(mProf, "running?", prof_running, 0);
      rb_define_module_function(mProf, "profile", prof_profile, 0);
-
-     rb_define_singleton_method(mProf, "
-
-     rb_define_const(mProf, "WALL_TIME", INT2NUM(CLOCK_MODE_WALL));
- #ifdef CLOCK_MODE_CPU
-     rb_define_const(mProf, "CPU_TIME", INT2NUM(CLOCK_MODE_CPU));
-     rb_define_singleton_method(mProf, "cpu_frequency",
-                                prof_get_cpu_frequency, 0);
-     rb_define_singleton_method(mProf, "cpu_frequency=",
-                                prof_set_cpu_freqeuncy, 1);
- #endif
+
+     rb_define_singleton_method(mProf, "measure_mode", prof_get_measure_mode, 0);
+     rb_define_singleton_method(mProf, "measure_mode=", prof_set_measure_mode, 1);

+     rb_define_const(mProf, "PROCESS_TIME", INT2NUM(MEASURE_PROCESS_TIME));
+     rb_define_const(mProf, "WALL_TIME", INT2NUM(MEASURE_WALL_TIME));
+
+ #if defined(MEASURE_CPU_TIME)
+     rb_define_const(mProf, "CPU_TIME", INT2NUM(MEASURE_CPU_TIME));
+     rb_define_singleton_method(mProf, "cpu_frequency", prof_get_cpu_frequency, 0); /* in measure_cpu_time.h */
+     rb_define_singleton_method(mProf, "cpu_frequency=", prof_set_cpu_frequency, 1); /* in measure_cpu_time.h */
+ #endif
+
+ #if defined(MEASURE_ALLOCATIONS)
+     rb_define_const(mProf, "ALLOCATED_OBJECTS", INT2NUM(MEASURE_ALLOCATIONS));
+ #endif
+
      cResult = rb_define_class_under(mProf, "Result", rb_cObject);
      rb_undef_method(CLASS_OF(cMethodInfo), "new");
      rb_define_method(cResult, "threads", prof_result_threads, 0);
-     rb_define_method(cResult, "toplevel", prof_result_toplevel, 1);

      cMethodInfo = rb_define_class_under(mProf, "MethodInfo", rb_cObject);
      rb_include_module(cMethodInfo, rb_mComparable);
      rb_undef_method(CLASS_OF(cMethodInfo), "new");
-
-     rb_define_method(cMethodInfo, "
-     rb_define_method(cMethodInfo, "
-     rb_define_method(cMethodInfo, "
-     rb_define_method(cMethodInfo, "
-     rb_define_method(cMethodInfo, "method_class", prof_method_class, 0);
+
+     rb_define_method(cMethodInfo, "klass", prof_method_klass, 0);
+     rb_define_method(cMethodInfo, "klass_name", prof_klass_name, 0);
+     rb_define_method(cMethodInfo, "method_name", prof_method_name, 0);
+     rb_define_method(cMethodInfo, "full_name", prof_full_name, 0);
      rb_define_method(cMethodInfo, "method_id", prof_method_id, 0);
-     rb_define_method(cMethodInfo, "
+     rb_define_method(cMethodInfo, "base", prof_method_base, 0);
+
      rb_define_method(cMethodInfo, "parents", prof_method_parents, 0);
      rb_define_method(cMethodInfo, "children", prof_method_children, 0);
      rb_define_method(cMethodInfo, "<=>", prof_method_cmp, 1);
+     rb_define_method(cMethodInfo, "source_file", prof_method_source_file,0);
+     rb_define_method(cMethodInfo, "line", prof_method_line, 0);
+     rb_define_method(cMethodInfo, "called", prof_method_called, 0);
+     rb_define_method(cMethodInfo, "total_time", prof_method_total_time, 0);
+     rb_define_method(cMethodInfo, "self_time", prof_method_self_time, 0);
+     rb_define_method(cMethodInfo, "wait_time", prof_method_wait_time, 0);
+     rb_define_method(cMethodInfo, "children_time", prof_method_children_time, 0);

      cCallInfo = rb_define_class_under(mProf, "CallInfo", rb_cObject);
      rb_undef_method(CLASS_OF(cCallInfo), "new");
+     rb_define_method(cCallInfo, "target", call_info_target, 0);
      rb_define_method(cCallInfo, "called", call_info_called, 0);
      rb_define_method(cCallInfo, "total_time", call_info_total_time, 0);
      rb_define_method(cCallInfo, "self_time", call_info_self_time, 0);
+     rb_define_method(cCallInfo, "wait_time", call_info_wait_time, 0);
+     rb_define_method(cCallInfo, "line", call_info_line, 0);
      rb_define_method(cCallInfo, "children_time", call_info_children_time, 0);
-
-     rb_global_variable(&class_tbl);
  }

- /* vim: set filetype=c ts=8 sw=4 noexpandtab : */
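The MethodInfo and CallInfo accessors registered above are what the bundled flat and graph printers consume, but they can also be read directly. A rough sketch, assuming result.threads maps each thread id to that thread's MethodInfo collection (the container type is not pinned down by this diff, so the code normalizes it) and that MethodInfo#children yields CallInfo objects:

    result.threads.each do |thread_id, method_infos|
      infos = method_infos.respond_to?(:values) ? method_infos.values : method_infos
      infos.sort_by { |mi| -mi.self_time }.first(5).each do |mi|
        printf("%-40s calls=%d total=%.4f self=%.4f wait=%.4f\n",
               mi.full_name, mi.called, mi.total_time, mi.self_time, mi.wait_time)
        mi.children.each do |ci|
          printf("    -> %s (called %d times from line %d)\n",
                 ci.target.full_name, ci.called, ci.line)
        end
      end
    end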