ruby-prof 1.5.0-x64-mingw-ucrt → 1.6.2-x64-mingw-ucrt

Sign up to get free protection for your applications and to get access to all the features.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/CHANGES +19 -0
  3. data/bin/ruby-prof +105 -87
  4. data/ext/ruby_prof/rp_allocation.c +136 -81
  5. data/ext/ruby_prof/rp_allocation.h +8 -6
  6. data/ext/ruby_prof/rp_call_tree.c +502 -457
  7. data/ext/ruby_prof/rp_call_tree.h +47 -44
  8. data/ext/ruby_prof/rp_call_trees.c +1 -1
  9. data/ext/ruby_prof/rp_measurement.c +10 -3
  10. data/ext/ruby_prof/rp_method.c +86 -79
  11. data/ext/ruby_prof/rp_method.h +63 -62
  12. data/ext/ruby_prof/rp_profile.c +933 -948
  13. data/ext/ruby_prof/rp_profile.h +1 -0
  14. data/ext/ruby_prof/rp_thread.c +433 -410
  15. data/ext/ruby_prof/rp_thread.h +39 -39
  16. data/ext/ruby_prof/vc/ruby_prof.vcxproj +6 -3
  17. data/lib/3.1/ruby_prof.so +0 -0
  18. data/lib/3.2/ruby_prof.so +0 -0
  19. data/lib/ruby-prof/compatibility.rb +14 -0
  20. data/lib/ruby-prof/printers/abstract_printer.rb +2 -1
  21. data/lib/ruby-prof/printers/call_tree_printer.rb +1 -1
  22. data/lib/ruby-prof/printers/multi_printer.rb +17 -17
  23. data/lib/ruby-prof/profile.rb +70 -70
  24. data/lib/ruby-prof/rack.rb +31 -21
  25. data/lib/ruby-prof/version.rb +1 -1
  26. data/test/abstract_printer_test.rb +1 -0
  27. data/test/alias_test.rb +6 -11
  28. data/test/call_tree_test.rb +94 -197
  29. data/test/call_tree_visitor_test.rb +1 -6
  30. data/test/call_trees_test.rb +2 -2
  31. data/test/{basic_test.rb → compatibility_test.rb} +8 -2
  32. data/test/duplicate_names_test.rb +1 -1
  33. data/test/dynamic_method_test.rb +1 -6
  34. data/test/enumerable_test.rb +1 -1
  35. data/test/exceptions_test.rb +2 -2
  36. data/test/exclude_methods_test.rb +3 -8
  37. data/test/exclude_threads_test.rb +4 -9
  38. data/test/fiber_test.rb +2 -58
  39. data/test/gc_test.rb +2 -2
  40. data/test/inverse_call_tree_test.rb +33 -34
  41. data/test/line_number_test.rb +1 -1
  42. data/test/marshal_test.rb +3 -3
  43. data/test/measure_allocations_test.rb +8 -17
  44. data/test/measure_memory_test.rb +3 -12
  45. data/test/measure_process_time_test.rb +32 -36
  46. data/test/measure_wall_time_test.rb +176 -181
  47. data/test/merge_test.rb +146 -0
  48. data/test/multi_printer_test.rb +0 -5
  49. data/test/no_method_class_test.rb +1 -1
  50. data/test/pause_resume_test.rb +12 -16
  51. data/test/printer_call_stack_test.rb +2 -2
  52. data/test/printer_call_tree_test.rb +2 -2
  53. data/test/printer_flat_test.rb +1 -1
  54. data/test/printer_graph_html_test.rb +2 -2
  55. data/test/printer_graph_test.rb +2 -2
  56. data/test/printers_test.rb +14 -20
  57. data/test/printing_recursive_graph_test.rb +2 -2
  58. data/test/recursive_test.rb +2 -7
  59. data/test/scheduler.rb +9 -0
  60. data/test/singleton_test.rb +1 -1
  61. data/test/stack_printer_test.rb +5 -8
  62. data/test/start_stop_test.rb +11 -14
  63. data/test/test_helper.rb +7 -0
  64. data/test/thread_test.rb +84 -19
  65. data/test/unique_call_path_test.rb +4 -4
  66. data/test/yarv_test.rb +3 -3
  67. metadata +6 -5
@@ -1,410 +1,433 @@
1
- /* Copyright (C) 2005-2013 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
2
- Please see the LICENSE file for copyright and distribution information */
3
-
4
- /* Document-class: RubyProf::Thread
5
-
6
- The Thread class contains profile results for a single fiber (note a Ruby thread can run multiple fibers).
7
- You cannot create an instance of RubyProf::Thread, instead you access it from a RubyProf::Profile object.
8
-
9
- profile = RubyProf::Profile.profile do
10
- ...
11
- end
12
-
13
- profile.threads.each do |thread|
14
- thread.root_methods.sort.each do |method|
15
- puts method.total_time
16
- end
17
- end */
18
-
19
- #include "rp_thread.h"
20
- #include "rp_profile.h"
21
-
22
- VALUE cRpThread;
23
-
24
- // ====== thread_data_t ======
25
- thread_data_t* thread_data_create(void)
26
- {
27
- thread_data_t* result = ALLOC(thread_data_t);
28
- result->owner = OWNER_C;
29
- result->stack = prof_stack_create();
30
- result->method_table = method_table_create();
31
- result->call_tree = NULL;
32
- result->object = Qnil;
33
- result->methods = Qnil;
34
- result->fiber_id = Qnil;
35
- result->thread_id = Qnil;
36
- result->trace = true;
37
- result->fiber = Qnil;
38
- return result;
39
- }
40
-
41
- static int mark_methods(st_data_t key, st_data_t value, st_data_t result)
42
- {
43
- prof_method_t* method = (prof_method_t*)value;
44
- prof_method_mark(method);
45
- return ST_CONTINUE;
46
- }
47
-
48
- size_t prof_thread_size(const void* data)
49
- {
50
- return sizeof(thread_data_t);
51
- }
52
-
53
- void prof_thread_mark(void* data)
54
- {
55
- if (!data)
56
- return;
57
-
58
- thread_data_t* thread = (thread_data_t*)data;
59
-
60
- if (thread->object != Qnil)
61
- rb_gc_mark(thread->object);
62
-
63
- rb_gc_mark(thread->fiber);
64
-
65
- if (thread->methods != Qnil)
66
- rb_gc_mark(thread->methods);
67
-
68
- if (thread->fiber_id != Qnil)
69
- rb_gc_mark(thread->fiber_id);
70
-
71
- if (thread->thread_id != Qnil)
72
- rb_gc_mark(thread->thread_id);
73
-
74
- if (thread->call_tree)
75
- prof_call_tree_mark(thread->call_tree);
76
-
77
- rb_st_foreach(thread->method_table, mark_methods, 0);
78
- }
79
-
80
- static void prof_thread_free(thread_data_t* thread_data)
81
- {
82
- /* Has this method object been accessed by Ruby? If
83
- yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
84
- if (thread_data->object != Qnil)
85
- {
86
- RTYPEDDATA(thread_data->object)->data = NULL;
87
- thread_data->object = Qnil;
88
- }
89
-
90
- method_table_free(thread_data->method_table);
91
-
92
- if (thread_data->call_tree)
93
- prof_call_tree_free(thread_data->call_tree);
94
-
95
- prof_stack_free(thread_data->stack);
96
-
97
- xfree(thread_data);
98
- }
99
-
100
- void prof_thread_ruby_gc_free(void* data)
101
- {
102
- thread_data_t* thread_data = (thread_data_t*)data;
103
-
104
- if (!thread_data)
105
- {
106
- // Object has already been freed by C code
107
- return;
108
- }
109
- else if (thread_data->owner == OWNER_RUBY)
110
- {
111
- // Ruby owns this object, we need to free the underlying C struct
112
- prof_thread_free(thread_data);
113
- }
114
- else
115
- {
116
- // The Ruby object is being freed, but not the underlying C structure. So unlink the two.
117
- thread_data->object = Qnil;
118
- }
119
- }
120
-
121
- static const rb_data_type_t thread_type =
122
- {
123
- .wrap_struct_name = "ThreadInfo",
124
- .function =
125
- {
126
- .dmark = prof_thread_mark,
127
- .dfree = prof_thread_ruby_gc_free,
128
- .dsize = prof_thread_size,
129
- },
130
- .data = NULL,
131
- .flags = RUBY_TYPED_FREE_IMMEDIATELY
132
- };
133
-
134
- VALUE prof_thread_wrap(thread_data_t* thread)
135
- {
136
- if (thread->object == Qnil)
137
- {
138
- thread->object = TypedData_Wrap_Struct(cRpThread, &thread_type, thread);
139
- }
140
- return thread->object;
141
- }
142
-
143
- static VALUE prof_thread_allocate(VALUE klass)
144
- {
145
- thread_data_t* thread_data = thread_data_create();
146
- thread_data->owner = OWNER_RUBY;
147
- thread_data->object = prof_thread_wrap(thread_data);
148
- return thread_data->object;
149
- }
150
-
151
- thread_data_t* prof_get_thread(VALUE self)
152
- {
153
- /* Can't use Data_Get_Struct because that triggers the event hook
154
- ending up in endless recursion. */
155
- thread_data_t* result = RTYPEDDATA_DATA(self);
156
- if (!result)
157
- rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
158
-
159
- return result;
160
- }
161
-
162
- // ====== Thread Table ======
163
- // The thread table is hash keyed on ruby fiber_id that stores instances of thread_data_t.
164
-
165
- st_table* threads_table_create()
166
- {
167
- return rb_st_init_numtable();
168
- }
169
-
170
- static int thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
171
- {
172
- prof_thread_free((thread_data_t*)value);
173
- return ST_CONTINUE;
174
- }
175
-
176
- void threads_table_free(st_table* table)
177
- {
178
- rb_st_foreach(table, thread_table_free_iterator, 0);
179
- rb_st_free_table(table);
180
- }
181
-
182
- thread_data_t* threads_table_lookup(void* prof, VALUE fiber)
183
- {
184
- prof_profile_t* profile = prof;
185
- thread_data_t* result = NULL;
186
- st_data_t val;
187
-
188
- VALUE fiber_id = rb_obj_id(fiber);
189
- if (rb_st_lookup(profile->threads_tbl, fiber_id, &val))
190
- {
191
- result = (thread_data_t*)val;
192
- }
193
-
194
- return result;
195
- }
196
-
197
- thread_data_t* threads_table_insert(void* prof, VALUE fiber)
198
- {
199
- prof_profile_t* profile = prof;
200
- thread_data_t* result = thread_data_create();
201
- VALUE thread = rb_thread_current();
202
-
203
- result->fiber = fiber;
204
- result->fiber_id = rb_obj_id(fiber);
205
- result->thread_id = rb_obj_id(thread);
206
- rb_st_insert(profile->threads_tbl, (st_data_t)result->fiber_id, (st_data_t)result);
207
-
208
- // Are we tracing this thread?
209
- if (profile->include_threads_tbl && !rb_st_lookup(profile->include_threads_tbl, thread, 0))
210
- {
211
- result->trace = false;
212
- }
213
- else if (profile->exclude_threads_tbl && rb_st_lookup(profile->exclude_threads_tbl, thread, 0))
214
- {
215
- result->trace = false;
216
- }
217
- else
218
- {
219
- result->trace = true;
220
- }
221
-
222
- return result;
223
- }
224
-
225
- // ====== Profiling Methods ======
226
- void switch_thread(void* prof, thread_data_t* thread_data, double measurement)
227
- {
228
- prof_profile_t* profile = prof;
229
-
230
- /* Get current frame for this thread */
231
- prof_frame_t* frame = prof_frame_current(thread_data->stack);
232
- if (frame)
233
- {
234
- frame->wait_time += measurement - frame->switch_time;
235
- frame->switch_time = 0;
236
- }
237
-
238
- /* Save on the last thread the time of the context switch
239
- and reset this thread's last context switch to 0.*/
240
- if (profile->last_thread_data)
241
- {
242
- prof_frame_t* last_frame = prof_frame_current(profile->last_thread_data->stack);
243
- if (last_frame)
244
- last_frame->switch_time = measurement;
245
- }
246
-
247
- profile->last_thread_data = thread_data;
248
- }
249
-
250
- int pause_thread(st_data_t key, st_data_t value, st_data_t data)
251
- {
252
- thread_data_t* thread_data = (thread_data_t*)value;
253
- prof_profile_t* profile = (prof_profile_t*)data;
254
-
255
- prof_frame_t* frame = prof_frame_current(thread_data->stack);
256
- prof_frame_pause(frame, profile->measurement_at_pause_resume);
257
-
258
- return ST_CONTINUE;
259
- }
260
-
261
- int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
262
- {
263
- thread_data_t* thread_data = (thread_data_t*)value;
264
- prof_profile_t* profile = (prof_profile_t*)data;
265
-
266
- prof_frame_t* frame = prof_frame_current(thread_data->stack);
267
- prof_frame_unpause(frame, profile->measurement_at_pause_resume);
268
-
269
- return ST_CONTINUE;
270
- }
271
-
272
- // ====== Helper Methods ======
273
- static int collect_methods(st_data_t key, st_data_t value, st_data_t result)
274
- {
275
- /* Called for each method stored in a thread's method table.
276
- We want to store the method info information into an array.*/
277
- VALUE methods = (VALUE)result;
278
- prof_method_t* method = (prof_method_t*)value;
279
- rb_ary_push(methods, prof_method_wrap(method));
280
-
281
- return ST_CONTINUE;
282
- }
283
-
284
- // ====== RubyProf::Thread ======
285
- /* call-seq:
286
- new(call_tree, thread, fiber) -> thread
287
-
288
- Creates a new RubyProf thread instance. +call_tree+ is the root call_tree instance,
289
- +thread+ is a reference to a Ruby thread and +fiber+ is a reference to a Ruby fiber.*/
290
- static VALUE prof_thread_initialize(VALUE self, VALUE call_tree, VALUE thread, VALUE fiber)
291
- {
292
- thread_data_t* thread_ptr = prof_get_thread(self);
293
-
294
- // This call tree must now be managed by C
295
- thread_ptr->call_tree = prof_get_call_tree(call_tree);
296
- thread_ptr->call_tree->owner = OWNER_C;
297
-
298
- thread_ptr->fiber = fiber;
299
- thread_ptr->fiber_id = rb_obj_id(fiber);
300
- thread_ptr->thread_id = rb_obj_id(thread);
301
-
302
- return self;
303
- }
304
-
305
- /* call-seq:
306
- id -> number
307
-
308
- Returns the thread id of this thread. */
309
- static VALUE prof_thread_id(VALUE self)
310
- {
311
- thread_data_t* thread = prof_get_thread(self);
312
- return thread->thread_id;
313
- }
314
-
315
- /* call-seq:
316
- fiber_id -> number
317
-
318
- Returns the fiber id of this thread. */
319
- static VALUE prof_fiber_id(VALUE self)
320
- {
321
- thread_data_t* thread = prof_get_thread(self);
322
- return thread->fiber_id;
323
- }
324
-
325
- /* call-seq:
326
- call_tree -> CallTree
327
-
328
- Returns the root call tree. */
329
- static VALUE prof_call_tree(VALUE self)
330
- {
331
- thread_data_t* thread = prof_get_thread(self);
332
- return prof_call_tree_wrap(thread->call_tree);
333
- }
334
-
335
- /* call-seq:
336
- methods -> [RubyProf::MethodInfo]
337
-
338
- Returns an array of methods that were called from this
339
- thread during program execution. */
340
- static VALUE prof_thread_methods(VALUE self)
341
- {
342
- thread_data_t* thread = prof_get_thread(self);
343
- if (thread->methods == Qnil)
344
- {
345
- thread->methods = rb_ary_new();
346
- rb_st_foreach(thread->method_table, collect_methods, thread->methods);
347
- }
348
- return thread->methods;
349
- }
350
-
351
- static VALUE prof_thread_merge(VALUE self, VALUE other)
352
- {
353
- thread_data_t* self_ptr = prof_get_thread(self);
354
- thread_data_t* other_ptr = prof_get_thread(other);
355
- prof_call_tree_merge_internal(self_ptr->call_tree, other_ptr->call_tree);
356
-
357
- return other;
358
- }
359
-
360
- /* :nodoc: */
361
- static VALUE prof_thread_dump(VALUE self)
362
- {
363
- thread_data_t* thread_data = RTYPEDDATA_DATA(self);
364
-
365
- VALUE result = rb_hash_new();
366
- rb_hash_aset(result, ID2SYM(rb_intern("owner")), INT2FIX(thread_data->owner));
367
- rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
368
- rb_hash_aset(result, ID2SYM(rb_intern("methods")), prof_thread_methods(self));
369
- rb_hash_aset(result, ID2SYM(rb_intern("call_tree")), prof_call_tree(self));
370
-
371
- return result;
372
- }
373
-
374
- /* :nodoc: */
375
- static VALUE prof_thread_load(VALUE self, VALUE data)
376
- {
377
- thread_data_t* thread_data = RTYPEDDATA_DATA(self);
378
-
379
- thread_data->owner = FIX2INT(rb_hash_aref(data, ID2SYM(rb_intern("owner"))));
380
-
381
- VALUE call_tree = rb_hash_aref(data, ID2SYM(rb_intern("call_tree")));
382
- thread_data->call_tree = prof_get_call_tree(call_tree);
383
-
384
- thread_data->fiber_id = rb_hash_aref(data, ID2SYM(rb_intern("fiber_id")));
385
-
386
- VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
387
- for (int i = 0; i < rb_array_len(methods); i++)
388
- {
389
- VALUE method = rb_ary_entry(methods, i);
390
- prof_method_t* method_data = RTYPEDDATA_DATA(method);
391
- method_table_insert(thread_data->method_table, method_data->key, method_data);
392
- }
393
-
394
- return data;
395
- }
396
-
397
- void rp_init_thread(void)
398
- {
399
- cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
400
- rb_define_alloc_func(cRpThread, prof_thread_allocate);
401
- rb_define_method(cRpThread, "initialize", prof_thread_initialize, 3);
402
-
403
- rb_define_method(cRpThread, "id", prof_thread_id, 0);
404
- rb_define_method(cRpThread, "call_tree", prof_call_tree, 0);
405
- rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
406
- rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
407
- rb_define_method(cRpThread, "merge!", prof_thread_merge, 1);
408
- rb_define_method(cRpThread, "_dump_data", prof_thread_dump, 0);
409
- rb_define_method(cRpThread, "_load_data", prof_thread_load, 1);
410
- }
1
+ /* Copyright (C) 2005-2013 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
2
+ Please see the LICENSE file for copyright and distribution information */
3
+
4
+ /* Document-class: RubyProf::Thread
5
+
6
+ The Thread class contains profile results for a single fiber (note a Ruby thread can run multiple fibers).
7
+ You cannot create an instance of RubyProf::Thread, instead you access it from a RubyProf::Profile object.
8
+
9
+ profile = RubyProf::Profile.profile do
10
+ ...
11
+ end
12
+
13
+ profile.threads.each do |thread|
14
+ thread.root_methods.sort.each do |method|
15
+ puts method.total_time
16
+ end
17
+ end */
18
+
19
+ #include "rp_thread.h"
20
+ #include "rp_profile.h"
21
+
22
+ VALUE cRpThread;
23
+
24
+ // ====== thread_data_t ======
25
+ thread_data_t* thread_data_create(void)
26
+ {
27
+ thread_data_t* result = ALLOC(thread_data_t);
28
+ result->owner = OWNER_C;
29
+ result->stack = prof_stack_create();
30
+ result->method_table = method_table_create();
31
+ result->call_tree = NULL;
32
+ result->object = Qnil;
33
+ result->methods = Qnil;
34
+ result->fiber_id = Qnil;
35
+ result->thread_id = Qnil;
36
+ result->trace = true;
37
+ result->fiber = Qnil;
38
+ return result;
39
+ }
40
+
41
+ static int mark_methods(st_data_t key, st_data_t value, st_data_t result)
42
+ {
43
+ prof_method_t* method = (prof_method_t*)value;
44
+ prof_method_mark(method);
45
+ return ST_CONTINUE;
46
+ }
47
+
48
+ size_t prof_thread_size(const void* data)
49
+ {
50
+ return sizeof(thread_data_t);
51
+ }
52
+
53
+ void prof_thread_mark(void* data)
54
+ {
55
+ if (!data)
56
+ return;
57
+
58
+ thread_data_t* thread = (thread_data_t*)data;
59
+
60
+ if (thread->object != Qnil)
61
+ rb_gc_mark_movable(thread->object);
62
+
63
+ rb_gc_mark(thread->fiber);
64
+
65
+ if (thread->methods != Qnil)
66
+ rb_gc_mark_movable(thread->methods);
67
+
68
+ if (thread->fiber_id != Qnil)
69
+ rb_gc_mark_movable(thread->fiber_id);
70
+
71
+ if (thread->thread_id != Qnil)
72
+ rb_gc_mark_movable(thread->thread_id);
73
+
74
+ if (thread->call_tree)
75
+ prof_call_tree_mark(thread->call_tree);
76
+
77
+ rb_st_foreach(thread->method_table, mark_methods, 0);
78
+ }
79
+
80
+ void prof_thread_compact(void* data)
81
+ {
82
+ thread_data_t* thread = (thread_data_t*)data;
83
+ thread->object = rb_gc_location(thread->object);
84
+ thread->methods = rb_gc_location(thread->methods);
85
+ thread->fiber_id = rb_gc_location(thread->fiber_id);
86
+ thread->thread_id = rb_gc_location(thread->thread_id);
87
+ }
88
+
89
+ static void prof_thread_free(thread_data_t* thread_data)
90
+ {
91
+ /* Has this method object been accessed by Ruby? If
92
+ yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
93
+ if (thread_data->object != Qnil)
94
+ {
95
+ RTYPEDDATA(thread_data->object)->data = NULL;
96
+ thread_data->object = Qnil;
97
+ }
98
+
99
+ method_table_free(thread_data->method_table);
100
+
101
+ if (thread_data->call_tree)
102
+ prof_call_tree_free(thread_data->call_tree);
103
+
104
+ prof_stack_free(thread_data->stack);
105
+
106
+ xfree(thread_data);
107
+ }
108
+
109
+ void prof_thread_ruby_gc_free(void* data)
110
+ {
111
+ thread_data_t* thread_data = (thread_data_t*)data;
112
+
113
+ if (!thread_data)
114
+ {
115
+ // Object has already been freed by C code
116
+ return;
117
+ }
118
+ else if (thread_data->owner == OWNER_RUBY)
119
+ {
120
+ // Ruby owns this object, we need to free the underlying C struct
121
+ prof_thread_free(thread_data);
122
+ }
123
+ else
124
+ {
125
+ // The Ruby object is being freed, but not the underlying C structure. So unlink the two.
126
+ thread_data->object = Qnil;
127
+ }
128
+ }
129
+
130
+ static const rb_data_type_t thread_type =
131
+ {
132
+ .wrap_struct_name = "ThreadInfo",
133
+ .function =
134
+ {
135
+ .dmark = prof_thread_mark,
136
+ .dfree = prof_thread_ruby_gc_free,
137
+ .dsize = prof_thread_size,
138
+ .dcompact = prof_thread_compact
139
+ },
140
+ .data = NULL,
141
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY
142
+ };
143
+
144
+ VALUE prof_thread_wrap(thread_data_t* thread)
145
+ {
146
+ if (thread->object == Qnil)
147
+ {
148
+ thread->object = TypedData_Wrap_Struct(cRpThread, &thread_type, thread);
149
+ }
150
+ return thread->object;
151
+ }
152
+
153
+ static VALUE prof_thread_allocate(VALUE klass)
154
+ {
155
+ thread_data_t* thread_data = thread_data_create();
156
+ thread_data->owner = OWNER_RUBY;
157
+ thread_data->object = prof_thread_wrap(thread_data);
158
+ return thread_data->object;
159
+ }
160
+
161
+ thread_data_t* prof_get_thread(VALUE self)
162
+ {
163
+ /* Can't use Data_Get_Struct because that triggers the event hook
164
+ ending up in endless recursion. */
165
+ thread_data_t* result = RTYPEDDATA_DATA(self);
166
+ if (!result)
167
+ rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
168
+
169
+ return result;
170
+ }
171
+
172
+ // ====== Thread Table ======
173
+ // The thread table is hash keyed on ruby fiber_id that stores instances of thread_data_t.
174
+
175
+ st_table* threads_table_create()
176
+ {
177
+ return rb_st_init_numtable();
178
+ }
179
+
180
+ static int thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
181
+ {
182
+ prof_thread_free((thread_data_t*)value);
183
+ return ST_CONTINUE;
184
+ }
185
+
186
+ void threads_table_free(st_table* table)
187
+ {
188
+ rb_st_foreach(table, thread_table_free_iterator, 0);
189
+ rb_st_free_table(table);
190
+ }
191
+
192
+ thread_data_t* threads_table_lookup(void* prof, VALUE fiber)
193
+ {
194
+ prof_profile_t* profile = prof;
195
+ thread_data_t* result = NULL;
196
+ st_data_t val;
197
+
198
+ VALUE fiber_id = rb_obj_id(fiber);
199
+ if (rb_st_lookup(profile->threads_tbl, fiber_id, &val))
200
+ {
201
+ result = (thread_data_t*)val;
202
+ }
203
+
204
+ return result;
205
+ }
206
+
207
+ thread_data_t* threads_table_insert(void* prof, VALUE fiber)
208
+ {
209
+ prof_profile_t* profile = prof;
210
+ thread_data_t* result = thread_data_create();
211
+ VALUE thread = rb_thread_current();
212
+
213
+ result->fiber = fiber;
214
+ result->fiber_id = rb_obj_id(fiber);
215
+ result->thread_id = rb_obj_id(thread);
216
+ rb_st_insert(profile->threads_tbl, (st_data_t)result->fiber_id, (st_data_t)result);
217
+
218
+ // Are we tracing this thread?
219
+ if (profile->include_threads_tbl && !rb_st_lookup(profile->include_threads_tbl, thread, 0))
220
+ {
221
+ result->trace = false;
222
+ }
223
+ else if (profile->exclude_threads_tbl && rb_st_lookup(profile->exclude_threads_tbl, thread, 0))
224
+ {
225
+ result->trace = false;
226
+ }
227
+ else
228
+ {
229
+ result->trace = true;
230
+ }
231
+
232
+ return result;
233
+ }
234
+
235
+ // ====== Profiling Methods ======
236
+ void switch_thread(void* prof, thread_data_t* thread_data, double measurement)
237
+ {
238
+ prof_profile_t* profile = prof;
239
+
240
+ /* Get current frame for this thread */
241
+ prof_frame_t* frame = prof_frame_current(thread_data->stack);
242
+ if (frame)
243
+ {
244
+ frame->wait_time += measurement - frame->switch_time;
245
+ frame->switch_time = 0;
246
+ }
247
+
248
+ /* Save on the last thread the time of the context switch
249
+ and reset this thread's last context switch to 0.*/
250
+ if (profile->last_thread_data)
251
+ {
252
+ prof_frame_t* last_frame = prof_frame_current(profile->last_thread_data->stack);
253
+ if (last_frame)
254
+ last_frame->switch_time = measurement;
255
+ }
256
+
257
+ profile->last_thread_data = thread_data;
258
+ }
259
+
260
+ int pause_thread(st_data_t key, st_data_t value, st_data_t data)
261
+ {
262
+ thread_data_t* thread_data = (thread_data_t*)value;
263
+ prof_profile_t* profile = (prof_profile_t*)data;
264
+
265
+ prof_frame_t* frame = prof_frame_current(thread_data->stack);
266
+ prof_frame_pause(frame, profile->measurement_at_pause_resume);
267
+
268
+ return ST_CONTINUE;
269
+ }
270
+
271
+ int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
272
+ {
273
+ thread_data_t* thread_data = (thread_data_t*)value;
274
+ prof_profile_t* profile = (prof_profile_t*)data;
275
+
276
+ prof_frame_t* frame = prof_frame_current(thread_data->stack);
277
+ prof_frame_unpause(frame, profile->measurement_at_pause_resume);
278
+
279
+ return ST_CONTINUE;
280
+ }
281
+
282
+ // ====== Helper Methods ======
283
+ static int collect_methods(st_data_t key, st_data_t value, st_data_t result)
284
+ {
285
+ /* Called for each method stored in a thread's method table.
286
+ We want to store the method info information into an array.*/
287
+ VALUE methods = (VALUE)result;
288
+ prof_method_t* method = (prof_method_t*)value;
289
+ rb_ary_push(methods, prof_method_wrap(method));
290
+
291
+ return ST_CONTINUE;
292
+ }
293
+
294
+ // ====== RubyProf::Thread ======
295
+ /* call-seq:
296
+ new(call_tree, thread, fiber) -> thread
297
+
298
+ Creates a new RubyProf thread instance. +call_tree+ is the root call_tree instance,
299
+ +thread+ is a reference to a Ruby thread and +fiber+ is a reference to a Ruby fiber.*/
300
+ static VALUE prof_thread_initialize(VALUE self, VALUE call_tree, VALUE thread, VALUE fiber)
301
+ {
302
+ thread_data_t* thread_ptr = prof_get_thread(self);
303
+
304
+ // This call tree must now be managed by C
305
+ thread_ptr->call_tree = prof_get_call_tree(call_tree);
306
+ thread_ptr->call_tree->owner = OWNER_C;
307
+
308
+ thread_ptr->fiber = fiber;
309
+ thread_ptr->fiber_id = rb_obj_id(fiber);
310
+ thread_ptr->thread_id = rb_obj_id(thread);
311
+
312
+ // Add methods from call trees into thread methods table
313
+ VALUE methods = prof_call_tree_methods(thread_ptr->call_tree);
314
+ for (int i = 0; i < rb_array_len(methods); i++)
315
+ {
316
+ VALUE method = rb_ary_entry(methods, i);
317
+ prof_method_t* method_ptr = prof_get_method(method);
318
+ method_table_insert(thread_ptr->method_table, method_ptr->key, method_ptr);
319
+ }
320
+
321
+ return self;
322
+ }
323
+
324
+ /* call-seq:
325
+ id -> number
326
+
327
+ Returns the thread id of this thread. */
328
+ static VALUE prof_thread_id(VALUE self)
329
+ {
330
+ thread_data_t* thread = prof_get_thread(self);
331
+ return thread->thread_id;
332
+ }
333
+
334
+ /* call-seq:
335
+ fiber_id -> number
336
+
337
+ Returns the fiber id of this thread. */
338
+ static VALUE prof_fiber_id(VALUE self)
339
+ {
340
+ thread_data_t* thread = prof_get_thread(self);
341
+ return thread->fiber_id;
342
+ }
343
+
344
+ /* call-seq:
345
+ call_tree -> CallTree
346
+
347
+ Returns the root call tree. */
348
+ static VALUE prof_call_tree(VALUE self)
349
+ {
350
+ thread_data_t* thread = prof_get_thread(self);
351
+ return prof_call_tree_wrap(thread->call_tree);
352
+ }
353
+
354
+ /* call-seq:
355
+ methods -> [RubyProf::MethodInfo]
356
+
357
+ Returns an array of methods that were called from this
358
+ thread during program execution. */
359
+ static VALUE prof_thread_methods(VALUE self)
360
+ {
361
+ thread_data_t* thread = prof_get_thread(self);
362
+ if (thread->methods == Qnil)
363
+ {
364
+ thread->methods = rb_ary_new();
365
+ rb_st_foreach(thread->method_table, collect_methods, thread->methods);
366
+ }
367
+ return thread->methods;
368
+ }
369
+
370
+ static VALUE prof_thread_merge(VALUE self, VALUE other)
371
+ {
372
+ thread_data_t* self_ptr = prof_get_thread(self);
373
+ thread_data_t* other_ptr = prof_get_thread(other);
374
+ prof_method_table_merge(self_ptr->method_table, other_ptr->method_table);
375
+ prof_call_tree_merge_internal(self_ptr->call_tree, other_ptr->call_tree, self_ptr->method_table);
376
+
377
+ // Reset method cache since it just changed
378
+ self_ptr->methods = Qnil;
379
+
380
+ return other;
381
+ }
382
+
383
+ /* :nodoc: */
384
+ static VALUE prof_thread_dump(VALUE self)
385
+ {
386
+ thread_data_t* thread_data = RTYPEDDATA_DATA(self);
387
+
388
+ VALUE result = rb_hash_new();
389
+ rb_hash_aset(result, ID2SYM(rb_intern("owner")), INT2FIX(thread_data->owner));
390
+ rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
391
+ rb_hash_aset(result, ID2SYM(rb_intern("methods")), prof_thread_methods(self));
392
+ rb_hash_aset(result, ID2SYM(rb_intern("call_tree")), prof_call_tree(self));
393
+
394
+ return result;
395
+ }
396
+
397
+ /* :nodoc: */
398
+ static VALUE prof_thread_load(VALUE self, VALUE data)
399
+ {
400
+ thread_data_t* thread_data = RTYPEDDATA_DATA(self);
401
+
402
+ thread_data->owner = FIX2INT(rb_hash_aref(data, ID2SYM(rb_intern("owner"))));
403
+
404
+ VALUE call_tree = rb_hash_aref(data, ID2SYM(rb_intern("call_tree")));
405
+ thread_data->call_tree = prof_get_call_tree(call_tree);
406
+
407
+ thread_data->fiber_id = rb_hash_aref(data, ID2SYM(rb_intern("fiber_id")));
408
+
409
+ VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
410
+ for (int i = 0; i < rb_array_len(methods); i++)
411
+ {
412
+ VALUE method = rb_ary_entry(methods, i);
413
+ prof_method_t* method_data = RTYPEDDATA_DATA(method);
414
+ method_table_insert(thread_data->method_table, method_data->key, method_data);
415
+ }
416
+
417
+ return data;
418
+ }
419
+
420
+ void rp_init_thread(void)
421
+ {
422
+ cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
423
+ rb_define_alloc_func(cRpThread, prof_thread_allocate);
424
+ rb_define_method(cRpThread, "initialize", prof_thread_initialize, 3);
425
+
426
+ rb_define_method(cRpThread, "id", prof_thread_id, 0);
427
+ rb_define_method(cRpThread, "call_tree", prof_call_tree, 0);
428
+ rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
429
+ rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
430
+ rb_define_method(cRpThread, "merge!", prof_thread_merge, 1);
431
+ rb_define_method(cRpThread, "_dump_data", prof_thread_dump, 0);
432
+ rb_define_method(cRpThread, "_load_data", prof_thread_load, 1);
433
+ }