ruby-prof 0.18.0-x64-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGES +500 -0
- data/LICENSE +25 -0
- data/README.rdoc +487 -0
- data/Rakefile +113 -0
- data/bin/ruby-prof +345 -0
- data/bin/ruby-prof-check-trace +45 -0
- data/examples/flat.txt +50 -0
- data/examples/graph.dot +84 -0
- data/examples/graph.html +823 -0
- data/examples/graph.txt +139 -0
- data/examples/multi.flat.txt +23 -0
- data/examples/multi.graph.html +760 -0
- data/examples/multi.grind.dat +114 -0
- data/examples/multi.stack.html +547 -0
- data/examples/stack.html +547 -0
- data/ext/ruby_prof/extconf.rb +68 -0
- data/ext/ruby_prof/rp_call_info.c +425 -0
- data/ext/ruby_prof/rp_call_info.h +53 -0
- data/ext/ruby_prof/rp_measure.c +40 -0
- data/ext/ruby_prof/rp_measure.h +45 -0
- data/ext/ruby_prof/rp_measure_allocations.c +76 -0
- data/ext/ruby_prof/rp_measure_cpu_time.c +136 -0
- data/ext/ruby_prof/rp_measure_gc_runs.c +73 -0
- data/ext/ruby_prof/rp_measure_gc_time.c +60 -0
- data/ext/ruby_prof/rp_measure_memory.c +77 -0
- data/ext/ruby_prof/rp_measure_process_time.c +71 -0
- data/ext/ruby_prof/rp_measure_wall_time.c +45 -0
- data/ext/ruby_prof/rp_method.c +630 -0
- data/ext/ruby_prof/rp_method.h +75 -0
- data/ext/ruby_prof/rp_stack.c +173 -0
- data/ext/ruby_prof/rp_stack.h +63 -0
- data/ext/ruby_prof/rp_thread.c +277 -0
- data/ext/ruby_prof/rp_thread.h +27 -0
- data/ext/ruby_prof/ruby_prof.c +794 -0
- data/ext/ruby_prof/ruby_prof.h +60 -0
- data/ext/ruby_prof/vc/ruby_prof.sln +31 -0
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +141 -0
- data/lib/2.6.3/ruby_prof.so +0 -0
- data/lib/ruby-prof.rb +68 -0
- data/lib/ruby-prof/aggregate_call_info.rb +76 -0
- data/lib/ruby-prof/assets/call_stack_printer.css.html +117 -0
- data/lib/ruby-prof/assets/call_stack_printer.js.html +385 -0
- data/lib/ruby-prof/assets/call_stack_printer.png +0 -0
- data/lib/ruby-prof/call_info.rb +115 -0
- data/lib/ruby-prof/call_info_visitor.rb +40 -0
- data/lib/ruby-prof/compatibility.rb +179 -0
- data/lib/ruby-prof/method_info.rb +121 -0
- data/lib/ruby-prof/printers/abstract_printer.rb +104 -0
- data/lib/ruby-prof/printers/call_info_printer.rb +41 -0
- data/lib/ruby-prof/printers/call_stack_printer.rb +265 -0
- data/lib/ruby-prof/printers/call_tree_printer.rb +143 -0
- data/lib/ruby-prof/printers/dot_printer.rb +132 -0
- data/lib/ruby-prof/printers/flat_printer.rb +70 -0
- data/lib/ruby-prof/printers/flat_printer_with_line_numbers.rb +83 -0
- data/lib/ruby-prof/printers/graph_html_printer.rb +249 -0
- data/lib/ruby-prof/printers/graph_printer.rb +116 -0
- data/lib/ruby-prof/printers/multi_printer.rb +84 -0
- data/lib/ruby-prof/profile.rb +26 -0
- data/lib/ruby-prof/profile/exclude_common_methods.rb +207 -0
- data/lib/ruby-prof/profile/legacy_method_elimination.rb +50 -0
- data/lib/ruby-prof/rack.rb +174 -0
- data/lib/ruby-prof/task.rb +147 -0
- data/lib/ruby-prof/thread.rb +35 -0
- data/lib/ruby-prof/version.rb +3 -0
- data/lib/unprof.rb +10 -0
- data/ruby-prof.gemspec +58 -0
- data/test/abstract_printer_test.rb +53 -0
- data/test/aggregate_test.rb +136 -0
- data/test/basic_test.rb +128 -0
- data/test/block_test.rb +74 -0
- data/test/call_info_test.rb +78 -0
- data/test/call_info_visitor_test.rb +31 -0
- data/test/duplicate_names_test.rb +32 -0
- data/test/dynamic_method_test.rb +55 -0
- data/test/enumerable_test.rb +21 -0
- data/test/exceptions_test.rb +24 -0
- data/test/exclude_methods_test.rb +146 -0
- data/test/exclude_threads_test.rb +53 -0
- data/test/fiber_test.rb +79 -0
- data/test/issue137_test.rb +63 -0
- data/test/line_number_test.rb +80 -0
- data/test/measure_allocations_test.rb +26 -0
- data/test/measure_cpu_time_test.rb +212 -0
- data/test/measure_gc_runs_test.rb +32 -0
- data/test/measure_gc_time_test.rb +36 -0
- data/test/measure_memory_test.rb +33 -0
- data/test/measure_process_time_test.rb +61 -0
- data/test/measure_wall_time_test.rb +255 -0
- data/test/method_elimination_test.rb +84 -0
- data/test/module_test.rb +45 -0
- data/test/multi_printer_test.rb +104 -0
- data/test/no_method_class_test.rb +15 -0
- data/test/pause_resume_test.rb +166 -0
- data/test/prime.rb +54 -0
- data/test/printers_test.rb +275 -0
- data/test/printing_recursive_graph_test.rb +127 -0
- data/test/rack_test.rb +157 -0
- data/test/recursive_test.rb +215 -0
- data/test/singleton_test.rb +38 -0
- data/test/stack_printer_test.rb +77 -0
- data/test/stack_test.rb +138 -0
- data/test/start_stop_test.rb +112 -0
- data/test/test_helper.rb +267 -0
- data/test/thread_test.rb +187 -0
- data/test/unique_call_path_test.rb +202 -0
- data/test/yarv_test.rb +55 -0
- metadata +199 -0
@@ -0,0 +1,75 @@
|
|
1
|
+
/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
   Please see the LICENSE file for copyright and distribution information */

#ifndef __RP_METHOD_INFO__
#define __RP_METHOD_INFO__

#include <ruby.h>

extern VALUE cMethodInfo;

/* A key used to identify each method */
typedef struct
{
    VALUE klass;                            /* The method's class. */
    ID mid;                                 /* The method id. */
    st_index_t key;                         /* Cache calculated key */
} prof_method_key_t;

/* Source relation bit offsets. */
enum {
    kModuleIncludee = 0,                    /* Included module */
    kModuleSingleton,                       /* Singleton class of a module */
    kObjectSingleton                        /* Singleton class of an object */
};

/* Forward declaration, see rp_call_info.h */
struct prof_call_infos_t;

/* Profiling information for each method. */
/* Excluded methods have no call_infos, source_klass, or source_file. */
typedef struct
{
    /* Hot */

    prof_method_key_t *key;                 /* Table key */

    struct prof_call_infos_t *call_infos;   /* Call infos */
    int visits;                             /* Current visits on the stack */

    unsigned int excluded : 1;              /* Exclude from profile? */
    unsigned int recursive : 1;             /* Recursive (direct or mutual)? */

    /* Cold */

    VALUE object;                           /* Cached ruby object */
    VALUE source_klass;                     /* Source class */
    const char *source_file;                /* Source file */
    int line;                               /* Line number */

    unsigned int resolved : 1;              /* Source resolved? */
    unsigned int relation : 3;              /* Source relation bits */
} prof_method_t;

void rp_init_method_info(void);

/* Fills in *key (and its cached hash) for the given class/method pair. */
void method_key(prof_method_key_t* key, VALUE klass, ID mid);

/* Fix: declared with (void) — an empty parameter list in C is an
   unprototyped declaration that disables argument type checking. */
st_table *method_table_create(void);
prof_method_t *method_table_lookup(st_table *table, const prof_method_key_t* key);
size_t method_table_insert(st_table *table, const prof_method_key_t *key, prof_method_t *val);
void method_table_free(st_table *table);

prof_method_t* prof_method_create(VALUE klass, ID mid, const char* source_file, int line);
prof_method_t* prof_method_create_excluded(VALUE klass, ID mid);

VALUE prof_method_wrap(prof_method_t *result);
void prof_method_mark(prof_method_t *method);

/* Setup infrastructure to use method keys as hash comparisons */
int method_table_cmp(prof_method_key_t *key1, prof_method_key_t *key2);
st_index_t method_table_hash(prof_method_key_t *key);

extern struct st_hash_type type_method_hash;

#endif
|
@@ -0,0 +1,173 @@
|
|
1
|
+
/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
|
2
|
+
Please see the LICENSE file for copyright and distribution information */
|
3
|
+
|
4
|
+
#include "rp_stack.h"
|
5
|
+
|
6
|
+
#define INITIAL_STACK_SIZE 8
|
7
|
+
|
8
|
+
void
|
9
|
+
prof_frame_pause(prof_frame_t *frame, double current_measurement)
|
10
|
+
{
|
11
|
+
if (frame && prof_frame_is_unpaused(frame))
|
12
|
+
frame->pause_time = current_measurement;
|
13
|
+
}
|
14
|
+
|
15
|
+
void
|
16
|
+
prof_frame_unpause(prof_frame_t *frame, double current_measurement)
|
17
|
+
{
|
18
|
+
if (frame && prof_frame_is_paused(frame)) {
|
19
|
+
frame->dead_time += (current_measurement - frame->pause_time);
|
20
|
+
frame->pause_time = -1;
|
21
|
+
}
|
22
|
+
}
|
23
|
+
|
24
|
+
|
25
|
+
/* Creates a stack of prof_frame_t to keep track
|
26
|
+
of timings for active methods. */
|
27
|
+
prof_stack_t *
|
28
|
+
prof_stack_create()
|
29
|
+
{
|
30
|
+
prof_stack_t *stack = ALLOC(prof_stack_t);
|
31
|
+
stack->start = ALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
|
32
|
+
stack->ptr = stack->start;
|
33
|
+
stack->end = stack->start + INITIAL_STACK_SIZE;
|
34
|
+
|
35
|
+
return stack;
|
36
|
+
}
|
37
|
+
|
38
|
+
void
|
39
|
+
prof_stack_free(prof_stack_t *stack)
|
40
|
+
{
|
41
|
+
xfree(stack->start);
|
42
|
+
xfree(stack);
|
43
|
+
}
|
44
|
+
|
45
|
+
/* Pushes a new frame for call_info onto the stack, growing the frame
   array if necessary, and returns the initialized frame. `measurement`
   becomes the frame's start time; if `paused` is non-zero the frame
   begins in the paused state. */
prof_frame_t *
prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
{
    prof_frame_t *result;
    prof_frame_t* parent_frame;
    prof_method_t *method;

    /* Is there space on the stack?  If not, double
       its size. */
    if (stack->ptr == stack->end)
    {
        size_t len = stack->ptr - stack->start;
        size_t new_capacity = (stack->end - stack->start) * 2;
        REALLOC_N(stack->start, prof_frame_t, new_capacity);
        /* Memory just got moved, reset pointers */
        stack->ptr = stack->start + len;
        stack->end = stack->start + new_capacity;
    }

    /* Peek BEFORE reserving the new slot, so we get the caller's frame. */
    parent_frame = prof_stack_peek(stack);

    // Reserve the next available frame pointer.
    result = stack->ptr++;

    result->call_info = call_info;
    result->call_info->depth = (int)(stack->ptr - stack->start); // shortening of 64 bit into 32;
    result->passes = 0;

    result->start_time = measurement;
    result->pause_time = -1; // init as not paused.
    result->switch_time = 0;
    result->wait_time = 0;
    result->child_time = 0;
    result->dead_time = 0;

    method = call_info->target;

    /* If the method was visited previously, it's recursive. */
    if (method->visits > 0)
    {
        method->recursive = 1;
        call_info->recursive = 1;
    }
    /* Enter the method. */
    method->visits++;

    // Unpause the parent frame, if it exists.
    // If currently paused then:
    // 1) The child frame will begin paused.
    // 2) The parent will inherit the child's dead time.
    prof_frame_unpause(parent_frame, measurement);

    if (paused) {
        prof_frame_pause(result, measurement);
    }

    // Return the result
    return result;
}
|
104
|
+
|
105
|
+
/* Pops the top frame at time `measurement`, folding its timings into
   its call_info and its parent frame. Returns the popped (or pass)
   frame, or NULL if the stack is empty. "Pass" frames are decremented
   and returned without consuming the real frame underneath. */
prof_frame_t *
prof_stack_pop(prof_stack_t *stack, double measurement)
{
    prof_frame_t *frame;
    prof_frame_t *parent_frame;
    prof_call_info_t *call_info;
    prof_method_t *method;

    double total_time;
    double self_time;

    frame = prof_stack_peek(stack);

    /* Frame can be null, which means the stack is empty. This can happen if
       RubyProf.start is called from a method that exits. And it can happen if an
       exception is raised in code that is being profiled and the stack unwinds
       (RubyProf is not notified of that by the ruby runtime). */
    if (!frame) {
        return NULL;
    }

    /* Match passes until we reach the frame itself. */
    if (prof_frame_is_pass(frame)) {
        frame->passes--;
        /* Additional frames can be consumed. See pop_frames(). */
        return frame;
    }

    /* Consume this frame. */
    stack->ptr--;

    /* Calculate the total time this method took */
    prof_frame_unpause(frame, measurement);
    total_time = measurement - frame->start_time - frame->dead_time;
    self_time = total_time - frame->child_time - frame->wait_time;

    /* Update information about the current method */
    call_info = frame->call_info;
    method = call_info->target;

    call_info->called++;
    call_info->total_time += total_time;
    call_info->self_time += self_time;
    call_info->wait_time += frame->wait_time;

    /* Leave the method. */
    method->visits--;

    parent_frame = prof_stack_peek(stack);
    if (parent_frame)
    {
        /* The parent's child_time grows by our total; dead time
           propagates upward so ancestors exclude it too. */
        parent_frame->child_time += total_time;
        parent_frame->dead_time += frame->dead_time;

        call_info->line = parent_frame->line;
    }

    return frame;
}
|
164
|
+
|
165
|
+
prof_frame_t *
|
166
|
+
prof_stack_pass(prof_stack_t *stack)
|
167
|
+
{
|
168
|
+
prof_frame_t *frame = prof_stack_peek(stack);
|
169
|
+
if (frame) {
|
170
|
+
frame->passes++;
|
171
|
+
}
|
172
|
+
return frame;
|
173
|
+
}
|
@@ -0,0 +1,63 @@
|
|
1
|
+
/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
   Please see the LICENSE file for copyright and distribution information */

#ifndef __RP_STACK__
#define __RP_STACK__

#include <ruby.h>

#include "rp_measure.h"
#include "rp_call_info.h"


/* Temporary object that maintains profiling information
   for active methods. They are created and destroyed
   as the program moves up and down its stack. */
typedef struct
{
    /* Caching prof_method_t values significantly
       increases performance. */
    prof_call_info_t *call_info;

    unsigned int line;
    unsigned int passes;  /* Count of "pass" frames, _after_ this one. */

    double start_time;
    double switch_time;   /* Time at switch to different thread */
    double wait_time;
    double child_time;
    double pause_time;    // Time pause() was initiated
    double dead_time;     // Time to ignore (i.e. total amount of time between pause/resume blocks)
} prof_frame_t;

#define prof_frame_is_real(f) ((f)->passes == 0)
#define prof_frame_is_pass(f) ((f)->passes > 0)

/* Fix: the argument is now parenthesized. The original macros expanded
   a bare `f`, which mis-parses for arguments such as `p + 1`. */
#define prof_frame_is_paused(f) ((f)->pause_time >= 0)
#define prof_frame_is_unpaused(f) ((f)->pause_time < 0)

void prof_frame_pause(prof_frame_t*, double current_measurement);
void prof_frame_unpause(prof_frame_t*, double current_measurement);

/* Current stack of active methods.*/
typedef struct
{
    prof_frame_t *start;  /* Base of the frame array */
    prof_frame_t *end;    /* One past the last allocated slot */
    prof_frame_t *ptr;    /* Next free slot (== start when empty) */
} prof_stack_t;

/* Fix: (void) instead of an unprototyped empty parameter list. */
prof_stack_t *prof_stack_create(void);
void prof_stack_free(prof_stack_t *stack);

prof_frame_t *prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused);
prof_frame_t *prof_stack_pop(prof_stack_t *stack, double measurement);
prof_frame_t *prof_stack_pass(prof_stack_t *stack);

/* Returns the top frame without removing it, or NULL when empty. */
static inline prof_frame_t *
prof_stack_peek(prof_stack_t *stack) {
    return stack->ptr != stack->start ? stack->ptr - 1 : NULL;
}


#endif //__RP_STACK__
|
@@ -0,0 +1,277 @@
|
|
1
|
+
/* Copyright (C) 2005-2013 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
|
2
|
+
Please see the LICENSE file for copyright and distribution information */
|
3
|
+
|
4
|
+
#include "ruby_prof.h"
|
5
|
+
|
6
|
+
VALUE cRpThread;
|
7
|
+
|
8
|
+
/* ====== thread_data_t ====== */
|
9
|
+
thread_data_t*
|
10
|
+
thread_data_create()
|
11
|
+
{
|
12
|
+
thread_data_t* result = ALLOC(thread_data_t);
|
13
|
+
result->stack = prof_stack_create();
|
14
|
+
result->method_table = method_table_create();
|
15
|
+
result->object = Qnil;
|
16
|
+
result->methods = Qnil;
|
17
|
+
return result;
|
18
|
+
}
|
19
|
+
|
20
|
+
/* The underlying c structures are freed when the parent profile is freed.
|
21
|
+
However, on shutdown the Ruby GC frees objects in any will-nilly order.
|
22
|
+
That means the ruby thread object wrapping the c thread struct may
|
23
|
+
be freed before the parent profile. Thus we add in a free function
|
24
|
+
for the garbage collector so that if it does get called will nil
|
25
|
+
out our Ruby object reference.*/
|
26
|
+
static void
|
27
|
+
thread_data_ruby_gc_free(thread_data_t* thread_data)
|
28
|
+
{
|
29
|
+
/* Has this thread object been accessed by Ruby? If
|
30
|
+
yes clean it up so to avoid a segmentation fault. */
|
31
|
+
if (thread_data->object != Qnil)
|
32
|
+
{
|
33
|
+
RDATA(thread_data->object)->data = NULL;
|
34
|
+
RDATA(thread_data->object)->dfree = NULL;
|
35
|
+
RDATA(thread_data->object)->dmark = NULL;
|
36
|
+
}
|
37
|
+
thread_data->object = Qnil;
|
38
|
+
}
|
39
|
+
|
40
|
+
static void
|
41
|
+
thread_data_free(thread_data_t* thread_data)
|
42
|
+
{
|
43
|
+
thread_data_ruby_gc_free(thread_data);
|
44
|
+
method_table_free(thread_data->method_table);
|
45
|
+
prof_stack_free(thread_data->stack);
|
46
|
+
|
47
|
+
thread_data->thread_id = Qnil;
|
48
|
+
|
49
|
+
xfree(thread_data);
|
50
|
+
}
|
51
|
+
|
52
|
+
static int
|
53
|
+
mark_methods(st_data_t key, st_data_t value, st_data_t result)
|
54
|
+
{
|
55
|
+
prof_method_t *method = (prof_method_t *) value;
|
56
|
+
prof_method_mark(method);
|
57
|
+
return ST_CONTINUE;
|
58
|
+
}
|
59
|
+
|
60
|
+
void
|
61
|
+
prof_thread_mark(thread_data_t *thread)
|
62
|
+
{
|
63
|
+
if (thread->object != Qnil)
|
64
|
+
rb_gc_mark(thread->object);
|
65
|
+
|
66
|
+
if (thread->methods != Qnil)
|
67
|
+
rb_gc_mark(thread->methods);
|
68
|
+
|
69
|
+
if (thread->thread_id != Qnil)
|
70
|
+
rb_gc_mark(thread->thread_id);
|
71
|
+
|
72
|
+
if (thread->fiber_id != Qnil)
|
73
|
+
rb_gc_mark(thread->fiber_id);
|
74
|
+
|
75
|
+
st_foreach(thread->method_table, mark_methods, 0);
|
76
|
+
}
|
77
|
+
|
78
|
+
VALUE
|
79
|
+
prof_thread_wrap(thread_data_t *thread)
|
80
|
+
{
|
81
|
+
if (thread->object == Qnil) {
|
82
|
+
thread->object = Data_Wrap_Struct(cRpThread, prof_thread_mark, thread_data_ruby_gc_free, thread);
|
83
|
+
}
|
84
|
+
return thread->object;
|
85
|
+
}
|
86
|
+
|
87
|
+
static thread_data_t*
|
88
|
+
prof_get_thread(VALUE self)
|
89
|
+
{
|
90
|
+
/* Can't use Data_Get_Struct because that triggers the event hook
|
91
|
+
ending up in endless recursion. */
|
92
|
+
thread_data_t* result = DATA_PTR(self);
|
93
|
+
if (!result)
|
94
|
+
rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
|
95
|
+
|
96
|
+
return result;
|
97
|
+
}
|
98
|
+
|
99
|
+
/* ====== Thread Table ====== */
|
100
|
+
/* The thread table is hash keyed on ruby thread_id that stores instances
|
101
|
+
of thread_data_t. */
|
102
|
+
|
103
|
+
/* Creates the thread table: a numeric-keyed st_table mapping a thread
   (or fiber) id to its thread_data_t. Fix: definition uses (void)
   instead of an unprototyped empty parameter list. */
st_table *
threads_table_create(void)
{
    return st_init_numtable();
}
|
108
|
+
|
109
|
+
static int
|
110
|
+
thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
|
111
|
+
{
|
112
|
+
thread_data_free((thread_data_t*)value);
|
113
|
+
return ST_CONTINUE;
|
114
|
+
}
|
115
|
+
|
116
|
+
/* Frees the thread table: every stored thread_data_t first, then the
   table itself. */
void
threads_table_free(st_table *table)
{
    st_foreach(table, thread_table_free_iterator, 0);
    st_free_table(table);
}
|
122
|
+
|
123
|
+
size_t
|
124
|
+
threads_table_insert(prof_profile_t* profile, VALUE key, thread_data_t *thread_data)
|
125
|
+
{
|
126
|
+
unsigned LONG_LONG key_value = NUM2ULL(key);
|
127
|
+
return st_insert(profile->threads_tbl, key_value, (st_data_t) thread_data);
|
128
|
+
}
|
129
|
+
|
130
|
+
/* Looks up (or lazily creates) the thread_data_t for the given
   thread/fiber pair. When profile->merge_fibers is set, all fibers of a
   thread share one entry keyed by thread_id; otherwise each fiber gets
   its own entry keyed by fiber_id. */
thread_data_t *
threads_table_lookup(prof_profile_t* profile, VALUE thread_id, VALUE fiber_id)
{
    thread_data_t* result;
    st_data_t val;

    /* If we should merge fibers, we use the thread_id as key, otherwise the fiber id.
       None of this is perfect, as garbage collected fiber/thread might be reused again later.
       A real solution would require integration with the garbage collector.
    */
    VALUE key = profile->merge_fibers ? thread_id : fiber_id;
    unsigned LONG_LONG key_value = NUM2ULL(key);
    if (st_lookup(profile->threads_tbl, key_value, &val))
    {
        result = (thread_data_t *) val;
    }
    else
    {
        result = thread_data_create();
        result->thread_id = thread_id;
        /* We set fiber id to 0 in the merge fiber case. Real fibers never have id 0,
           so we can identify them later during printing.
        */
        result->fiber_id = profile->merge_fibers ? INT2FIX(0) : fiber_id;
        /* Insert the table */
        threads_table_insert(profile, key, result);
    }
    return result;
}
|
159
|
+
|
160
|
+
/* Handles a context switch to (thread_id, fiber_id): accounts the time
   the incoming thread spent waiting, stamps the outgoing thread's
   switch time, and returns the incoming thread's data, which becomes
   profile->last_thread_data. `prof` is a prof_profile_t* passed as
   void* to match the caller's signature. */
thread_data_t *
switch_thread(void* prof, VALUE thread_id, VALUE fiber_id)
{
    prof_profile_t* profile = (prof_profile_t*)prof;
    double measurement = profile->measurer->measure();

    /* Get new thread information. */
    thread_data_t *thread_data = threads_table_lookup(profile, thread_id, fiber_id);

    /* Get current frame for this thread */
    prof_frame_t *frame = prof_stack_peek(thread_data->stack);

    /* Update the time this thread waited for another thread */
    if (frame)
    {
        frame->wait_time += measurement - frame->switch_time;
        frame->switch_time = measurement;
    }

    /* Save on the last thread the time of the context switch
       and reset this thread's last context switch to 0.*/
    if (profile->last_thread_data)
    {
        prof_frame_t *last_frame = prof_stack_peek(profile->last_thread_data->stack);
        if (last_frame)
            last_frame->switch_time = measurement;
    }

    profile->last_thread_data = thread_data;
    return thread_data;
}
|
191
|
+
|
192
|
+
/* st_foreach callback: pauses the top frame of one thread's stack at
   the profile's recorded pause/resume measurement. */
int pause_thread(st_data_t key, st_data_t value, st_data_t data)
{
    thread_data_t* thread_data = (thread_data_t *)value;
    prof_profile_t* profile = (prof_profile_t *)data;

    prof_frame_pause(prof_stack_peek(thread_data->stack), profile->measurement_at_pause_resume);

    return ST_CONTINUE;
}
|
202
|
+
|
203
|
+
/* st_foreach callback: unpauses the top frame of one thread's stack at
   the profile's recorded pause/resume measurement. */
int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
{
    thread_data_t* thread_data = (thread_data_t *)value;
    prof_profile_t* profile = (prof_profile_t *)data;

    prof_frame_unpause(prof_stack_peek(thread_data->stack), profile->measurement_at_pause_resume);

    return ST_CONTINUE;
}
|
213
|
+
|
214
|
+
static int
|
215
|
+
collect_methods(st_data_t key, st_data_t value, st_data_t result)
|
216
|
+
{
|
217
|
+
/* Called for each method stored in a thread's method table.
|
218
|
+
We want to store the method info information into an array.*/
|
219
|
+
VALUE methods = (VALUE) result;
|
220
|
+
prof_method_t *method = (prof_method_t *) value;
|
221
|
+
|
222
|
+
if (!method->excluded) {
|
223
|
+
rb_ary_push(methods, prof_method_wrap(method));
|
224
|
+
}
|
225
|
+
|
226
|
+
return ST_CONTINUE;
|
227
|
+
}
|
228
|
+
|
229
|
+
|
230
|
+
/* call-seq:
|
231
|
+
id -> number
|
232
|
+
|
233
|
+
Returns the id of this thread. */
|
234
|
+
static VALUE
|
235
|
+
prof_thread_id(VALUE self)
|
236
|
+
{
|
237
|
+
thread_data_t* thread = prof_get_thread(self);
|
238
|
+
return thread->thread_id;
|
239
|
+
}
|
240
|
+
|
241
|
+
/* call-seq:
|
242
|
+
fiber_id -> number
|
243
|
+
|
244
|
+
Returns the fiber id of this thread. */
|
245
|
+
static VALUE
|
246
|
+
prof_fiber_id(VALUE self)
|
247
|
+
{
|
248
|
+
thread_data_t* thread = prof_get_thread(self);
|
249
|
+
return thread->fiber_id;
|
250
|
+
}
|
251
|
+
|
252
|
+
/* call-seq:
|
253
|
+
methods -> Array of MethodInfo
|
254
|
+
|
255
|
+
Returns an array of methods that were called from this
|
256
|
+
thread during program execution. */
|
257
|
+
static VALUE
|
258
|
+
prof_thread_methods(VALUE self)
|
259
|
+
{
|
260
|
+
thread_data_t* thread = prof_get_thread(self);
|
261
|
+
if (thread->methods == Qnil)
|
262
|
+
{
|
263
|
+
thread->methods = rb_ary_new();
|
264
|
+
st_foreach(thread->method_table, collect_methods, thread->methods);
|
265
|
+
}
|
266
|
+
return thread->methods;
|
267
|
+
}
|
268
|
+
|
269
|
+
void rp_init_thread()
|
270
|
+
{
|
271
|
+
cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
|
272
|
+
rb_undef_method(CLASS_OF(cRpThread), "new");
|
273
|
+
|
274
|
+
rb_define_method(cRpThread, "id", prof_thread_id, 0);
|
275
|
+
rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
|
276
|
+
rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
|
277
|
+
}
|