ruby-prof 1.6.1-x64-mingw-ucrt → 1.6.2-x64-mingw-ucrt
- checksums.yaml +4 -4
- data/CHANGES +6 -0
- data/bin/ruby-prof +105 -87
- data/ext/ruby_prof/rp_call_tree.c +502 -464
- data/ext/ruby_prof/rp_call_tree.h +47 -44
- data/ext/ruby_prof/rp_call_trees.c +1 -1
- data/ext/ruby_prof/rp_method.c +35 -3
- data/ext/ruby_prof/rp_method.h +63 -62
- data/ext/ruby_prof/rp_profile.c +933 -933
- data/ext/ruby_prof/rp_thread.c +433 -420
- data/ext/ruby_prof/rp_thread.h +39 -39
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +5 -2
- data/lib/3.1/ruby_prof.so +0 -0
- data/lib/3.2/ruby_prof.so +0 -0
- data/lib/ruby-prof/printers/abstract_printer.rb +1 -0
- data/lib/ruby-prof/profile.rb +70 -70
- data/lib/ruby-prof/version.rb +1 -1
- data/test/call_tree_test.rb +94 -197
- data/test/fiber_test.rb +0 -51
- data/test/gc_test.rb +0 -1
- data/test/measure_process_time_test.rb +3 -2
- data/test/merge_test.rb +146 -0
- data/test/scheduler.rb +9 -0
- data/test/test_helper.rb +7 -0
- data/test/thread_test.rb +71 -4
- metadata +5 -4
data/ext/ruby_prof/rp_thread.c
CHANGED
@@ -1,420 +1,433 @@
[Old file: 420 lines, all shown as removed in the source view. Lines 1-311 are identical to the new version below; old lines 312-420 did not survive extraction and are not reproduced here.]
+/* Copyright (C) 2005-2013 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
+   Please see the LICENSE file for copyright and distribution information */
+
+/* Document-class: RubyProf::Thread
+
+The Thread class contains profile results for a single fiber (note a Ruby thread can run multiple fibers).
+You cannot create an instance of RubyProf::Thread, instead you access it from a RubyProf::Profile object.
+
+  profile = RubyProf::Profile.profile do
+    ...
+  end
+
+  profile.threads.each do |thread|
+    thread.root_methods.sort.each do |method|
+      puts method.total_time
+    end
+  end */
+
+#include "rp_thread.h"
+#include "rp_profile.h"
+
+VALUE cRpThread;
+
+// ====== thread_data_t ======
+thread_data_t* thread_data_create(void)
+{
+    thread_data_t* result = ALLOC(thread_data_t);
+    result->owner = OWNER_C;
+    result->stack = prof_stack_create();
+    result->method_table = method_table_create();
+    result->call_tree = NULL;
+    result->object = Qnil;
+    result->methods = Qnil;
+    result->fiber_id = Qnil;
+    result->thread_id = Qnil;
+    result->trace = true;
+    result->fiber = Qnil;
+    return result;
+}
+
+static int mark_methods(st_data_t key, st_data_t value, st_data_t result)
+{
+    prof_method_t* method = (prof_method_t*)value;
+    prof_method_mark(method);
+    return ST_CONTINUE;
+}
+
+size_t prof_thread_size(const void* data)
+{
+    return sizeof(thread_data_t);
+}
+
+void prof_thread_mark(void* data)
+{
+    if (!data)
+        return;
+
+    thread_data_t* thread = (thread_data_t*)data;
+
+    if (thread->object != Qnil)
+        rb_gc_mark_movable(thread->object);
+
+    rb_gc_mark(thread->fiber);
+
+    if (thread->methods != Qnil)
+        rb_gc_mark_movable(thread->methods);
+
+    if (thread->fiber_id != Qnil)
+        rb_gc_mark_movable(thread->fiber_id);
+
+    if (thread->thread_id != Qnil)
+        rb_gc_mark_movable(thread->thread_id);
+
+    if (thread->call_tree)
+        prof_call_tree_mark(thread->call_tree);
+
+    rb_st_foreach(thread->method_table, mark_methods, 0);
+}
+
+void prof_thread_compact(void* data)
+{
+    thread_data_t* thread = (thread_data_t*)data;
+    thread->object = rb_gc_location(thread->object);
+    thread->methods = rb_gc_location(thread->methods);
+    thread->fiber_id = rb_gc_location(thread->fiber_id);
+    thread->thread_id = rb_gc_location(thread->thread_id);
+}
+
+static void prof_thread_free(thread_data_t* thread_data)
+{
+    /* Has this method object been accessed by Ruby? If
+       yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
+    if (thread_data->object != Qnil)
+    {
+        RTYPEDDATA(thread_data->object)->data = NULL;
+        thread_data->object = Qnil;
+    }
+
+    method_table_free(thread_data->method_table);
+
+    if (thread_data->call_tree)
+        prof_call_tree_free(thread_data->call_tree);
+
+    prof_stack_free(thread_data->stack);
+
+    xfree(thread_data);
+}
+
+void prof_thread_ruby_gc_free(void* data)
+{
+    thread_data_t* thread_data = (thread_data_t*)data;
+
+    if (!thread_data)
+    {
+        // Object has already been freed by C code
+        return;
+    }
+    else if (thread_data->owner == OWNER_RUBY)
+    {
+        // Ruby owns this object, we need to free the underlying C struct
+        prof_thread_free(thread_data);
+    }
+    else
+    {
+        // The Ruby object is being freed, but not the underlying C structure. So unlink the two.
+        thread_data->object = Qnil;
+    }
+}
+
+static const rb_data_type_t thread_type =
+{
+    .wrap_struct_name = "ThreadInfo",
+    .function =
+    {
+        .dmark = prof_thread_mark,
+        .dfree = prof_thread_ruby_gc_free,
+        .dsize = prof_thread_size,
+        .dcompact = prof_thread_compact
+    },
+    .data = NULL,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY
+};
+
+VALUE prof_thread_wrap(thread_data_t* thread)
+{
+    if (thread->object == Qnil)
+    {
+        thread->object = TypedData_Wrap_Struct(cRpThread, &thread_type, thread);
+    }
+    return thread->object;
+}
+
+static VALUE prof_thread_allocate(VALUE klass)
+{
+    thread_data_t* thread_data = thread_data_create();
+    thread_data->owner = OWNER_RUBY;
+    thread_data->object = prof_thread_wrap(thread_data);
+    return thread_data->object;
+}
+
+thread_data_t* prof_get_thread(VALUE self)
+{
+    /* Can't use Data_Get_Struct because that triggers the event hook
+       ending up in endless recursion. */
+    thread_data_t* result = RTYPEDDATA_DATA(self);
+    if (!result)
+        rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
+
+    return result;
+}
+
+// ====== Thread Table ======
+// The thread table is hash keyed on ruby fiber_id that stores instances of thread_data_t.
+
+st_table* threads_table_create()
+{
+    return rb_st_init_numtable();
+}
+
+static int thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
+{
+    prof_thread_free((thread_data_t*)value);
+    return ST_CONTINUE;
+}
+
+void threads_table_free(st_table* table)
+{
+    rb_st_foreach(table, thread_table_free_iterator, 0);
+    rb_st_free_table(table);
+}
+
+thread_data_t* threads_table_lookup(void* prof, VALUE fiber)
+{
+    prof_profile_t* profile = prof;
+    thread_data_t* result = NULL;
+    st_data_t val;
+
+    VALUE fiber_id = rb_obj_id(fiber);
+    if (rb_st_lookup(profile->threads_tbl, fiber_id, &val))
+    {
+        result = (thread_data_t*)val;
+    }
+
+    return result;
+}
+
+thread_data_t* threads_table_insert(void* prof, VALUE fiber)
+{
+    prof_profile_t* profile = prof;
+    thread_data_t* result = thread_data_create();
+    VALUE thread = rb_thread_current();
+
+    result->fiber = fiber;
+    result->fiber_id = rb_obj_id(fiber);
+    result->thread_id = rb_obj_id(thread);
+    rb_st_insert(profile->threads_tbl, (st_data_t)result->fiber_id, (st_data_t)result);
+
+    // Are we tracing this thread?
+    if (profile->include_threads_tbl && !rb_st_lookup(profile->include_threads_tbl, thread, 0))
+    {
+        result->trace = false;
+    }
+    else if (profile->exclude_threads_tbl && rb_st_lookup(profile->exclude_threads_tbl, thread, 0))
+    {
+        result->trace = false;
+    }
+    else
+    {
+        result->trace = true;
+    }
+
+    return result;
+}
+
+// ====== Profiling Methods ======
+void switch_thread(void* prof, thread_data_t* thread_data, double measurement)
+{
+    prof_profile_t* profile = prof;
+
+    /* Get current frame for this thread */
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
+    if (frame)
+    {
+        frame->wait_time += measurement - frame->switch_time;
+        frame->switch_time = 0;
+    }
+
+    /* Save on the last thread the time of the context switch
+       and reset this thread's last context switch to 0.*/
+    if (profile->last_thread_data)
+    {
+        prof_frame_t* last_frame = prof_frame_current(profile->last_thread_data->stack);
+        if (last_frame)
+            last_frame->switch_time = measurement;
+    }
+
+    profile->last_thread_data = thread_data;
+}
+
+int pause_thread(st_data_t key, st_data_t value, st_data_t data)
+{
+    thread_data_t* thread_data = (thread_data_t*)value;
+    prof_profile_t* profile = (prof_profile_t*)data;
+
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
+    prof_frame_pause(frame, profile->measurement_at_pause_resume);
+
+    return ST_CONTINUE;
+}
+
+int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
+{
+    thread_data_t* thread_data = (thread_data_t*)value;
+    prof_profile_t* profile = (prof_profile_t*)data;
+
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
+    prof_frame_unpause(frame, profile->measurement_at_pause_resume);
+
+    return ST_CONTINUE;
+}
+
+// ====== Helper Methods ======
+static int collect_methods(st_data_t key, st_data_t value, st_data_t result)
+{
+    /* Called for each method stored in a thread's method table.
+       We want to store the method info information into an array.*/
+    VALUE methods = (VALUE)result;
+    prof_method_t* method = (prof_method_t*)value;
+    rb_ary_push(methods, prof_method_wrap(method));
+
+    return ST_CONTINUE;
+}
+
+// ====== RubyProf::Thread ======
+/* call-seq:
+   new(call_tree, thread, fiber) -> thread
+
+   Creates a new RubyProf thread instance. +call_tree+ is the root call_tree instance,
+   +thread+ is a reference to a Ruby thread and +fiber+ is a reference to a Ruby fiber.*/
+static VALUE prof_thread_initialize(VALUE self, VALUE call_tree, VALUE thread, VALUE fiber)
+{
+    thread_data_t* thread_ptr = prof_get_thread(self);
+
+    // This call tree must now be managed by C
+    thread_ptr->call_tree = prof_get_call_tree(call_tree);
+    thread_ptr->call_tree->owner = OWNER_C;
+
+    thread_ptr->fiber = fiber;
+    thread_ptr->fiber_id = rb_obj_id(fiber);
+    thread_ptr->thread_id = rb_obj_id(thread);
+
+    // Add methods from call trees into thread methods table
+    VALUE methods = prof_call_tree_methods(thread_ptr->call_tree);
+    for (int i = 0; i < rb_array_len(methods); i++)
+    {
+        VALUE method = rb_ary_entry(methods, i);
+        prof_method_t* method_ptr = prof_get_method(method);
+        method_table_insert(thread_ptr->method_table, method_ptr->key, method_ptr);
+    }
+
+    return self;
+}
+
+/* call-seq:
+   id -> number
+
+   Returns the thread id of this thread. */
+static VALUE prof_thread_id(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    return thread->thread_id;
+}
+
+/* call-seq:
+   fiber_id -> number
+
+   Returns the fiber id of this thread. */
+static VALUE prof_fiber_id(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    return thread->fiber_id;
+}
+
+/* call-seq:
+   call_tree -> CallTree
+
+   Returns the root call tree. */
+static VALUE prof_call_tree(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    return prof_call_tree_wrap(thread->call_tree);
+}
+
+/* call-seq:
+   methods -> [RubyProf::MethodInfo]
+
+   Returns an array of methods that were called from this
+   thread during program execution. */
+static VALUE prof_thread_methods(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    if (thread->methods == Qnil)
+    {
+        thread->methods = rb_ary_new();
+        rb_st_foreach(thread->method_table, collect_methods, thread->methods);
+    }
+    return thread->methods;
+}
+
+static VALUE prof_thread_merge(VALUE self, VALUE other)
+{
+    thread_data_t* self_ptr = prof_get_thread(self);
+    thread_data_t* other_ptr = prof_get_thread(other);
+    prof_method_table_merge(self_ptr->method_table, other_ptr->method_table);
+    prof_call_tree_merge_internal(self_ptr->call_tree, other_ptr->call_tree, self_ptr->method_table);
+
+    // Reset method cache since it just changed
+    self_ptr->methods = Qnil;
+
+    return other;
+}
+
+/* :nodoc: */
+static VALUE prof_thread_dump(VALUE self)
+{
+    thread_data_t* thread_data = RTYPEDDATA_DATA(self);
+
+    VALUE result = rb_hash_new();
+    rb_hash_aset(result, ID2SYM(rb_intern("owner")), INT2FIX(thread_data->owner));
+    rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
+    rb_hash_aset(result, ID2SYM(rb_intern("methods")), prof_thread_methods(self));
+    rb_hash_aset(result, ID2SYM(rb_intern("call_tree")), prof_call_tree(self));
+
+    return result;
+}
+
+/* :nodoc: */
+static VALUE prof_thread_load(VALUE self, VALUE data)
+{
+    thread_data_t* thread_data = RTYPEDDATA_DATA(self);
+
+    thread_data->owner = FIX2INT(rb_hash_aref(data, ID2SYM(rb_intern("owner"))));
+
+    VALUE call_tree = rb_hash_aref(data, ID2SYM(rb_intern("call_tree")));
+    thread_data->call_tree = prof_get_call_tree(call_tree);
+
+    thread_data->fiber_id = rb_hash_aref(data, ID2SYM(rb_intern("fiber_id")));
+
+    VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
+    for (int i = 0; i < rb_array_len(methods); i++)
+    {
+        VALUE method = rb_ary_entry(methods, i);
+        prof_method_t* method_data = RTYPEDDATA_DATA(method);
+        method_table_insert(thread_data->method_table, method_data->key, method_data);
+    }
+
+    return data;
+}
+
+void rp_init_thread(void)
+{
+    cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
+    rb_define_alloc_func(cRpThread, prof_thread_allocate);
+    rb_define_method(cRpThread, "initialize", prof_thread_initialize, 3);
+
+    rb_define_method(cRpThread, "id", prof_thread_id, 0);
+    rb_define_method(cRpThread, "call_tree", prof_call_tree, 0);
+    rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
+    rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
+    rb_define_method(cRpThread, "merge!", prof_thread_merge, 1);
+    rb_define_method(cRpThread, "_dump_data", prof_thread_dump, 0);
+    rb_define_method(cRpThread, "_load_data", prof_thread_load, 1);
+}
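The headline change in this file is the new merge!/prof_thread_merge binding registered in rp_init_thread, which folds another RubyProf::Thread's method table and call tree into the receiver and is exercised by the new data/test/merge_test.rb. A minimal usage sketch, assuming default profiler options (all threads traced); the worker code and the choice of merge target are illustrative only, not taken from the gem's documentation:

  require 'ruby-prof'

  profile = RubyProf::Profile.profile do
    2.times.map { Thread.new { 1_000.times { Object.new } } }.each(&:join)
  end

  # Merge every other fiber/thread's results into the first one.
  # Thread#merge! mutates the receiver's method table and call tree
  # and returns the thread that was merged in (see prof_thread_merge).
  main, *rest = profile.threads
  rest.each { |thread| main.merge!(thread) }

  main.methods.sort_by(&:total_time).reverse_each do |method|
    puts "#{method.full_name}: #{method.total_time}"
  end

Note that merge! resets the receiver's cached methods array, since the merge invalidates it, and that the _dump_data/_load_data pair registered alongside it is what lets Marshal serialize a thread's results (owner, fiber_id, methods, call tree) as part of a dumped profile.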