ruby-prof 1.4.3 → 1.6.3

Sign up to get free protection for your applications and to get access to all the features.
Files changed (87) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGES +59 -9
  3. data/{README.rdoc → README.md} +2 -2
  4. data/Rakefile +4 -4
  5. data/bin/ruby-prof +100 -87
  6. data/ext/ruby_prof/rp_allocation.c +140 -85
  7. data/ext/ruby_prof/rp_allocation.h +8 -6
  8. data/ext/ruby_prof/rp_call_tree.c +502 -369
  9. data/ext/ruby_prof/rp_call_tree.h +47 -43
  10. data/ext/ruby_prof/rp_call_trees.c +16 -8
  11. data/ext/ruby_prof/rp_measure_allocations.c +10 -13
  12. data/ext/ruby_prof/rp_measure_memory.c +8 -4
  13. data/ext/ruby_prof/rp_measure_process_time.c +7 -6
  14. data/ext/ruby_prof/rp_measurement.c +147 -20
  15. data/ext/ruby_prof/rp_measurement.h +4 -1
  16. data/ext/ruby_prof/rp_method.c +142 -83
  17. data/ext/ruby_prof/rp_method.h +63 -62
  18. data/ext/ruby_prof/rp_profile.c +933 -900
  19. data/ext/ruby_prof/rp_profile.h +1 -0
  20. data/ext/ruby_prof/rp_thread.c +433 -362
  21. data/ext/ruby_prof/rp_thread.h +39 -39
  22. data/ext/ruby_prof/ruby_prof.c +0 -2
  23. data/ext/ruby_prof/ruby_prof.h +8 -0
  24. data/ext/ruby_prof/vc/ruby_prof.vcxproj +11 -8
  25. data/lib/ruby-prof/assets/call_stack_printer.html.erb +2 -1
  26. data/lib/ruby-prof/compatibility.rb +14 -0
  27. data/lib/ruby-prof/method_info.rb +8 -1
  28. data/lib/ruby-prof/printers/abstract_printer.rb +2 -1
  29. data/lib/ruby-prof/printers/call_tree_printer.rb +4 -10
  30. data/lib/ruby-prof/printers/graph_html_printer.rb +1 -1
  31. data/lib/ruby-prof/printers/multi_printer.rb +17 -17
  32. data/lib/ruby-prof/profile.rb +70 -37
  33. data/lib/ruby-prof/rack.rb +31 -21
  34. data/lib/ruby-prof/version.rb +1 -1
  35. data/lib/ruby-prof.rb +1 -1
  36. data/ruby-prof.gemspec +2 -3
  37. data/test/abstract_printer_test.rb +1 -0
  38. data/test/alias_test.rb +97 -106
  39. data/test/call_tree_builder.rb +126 -0
  40. data/test/call_tree_test.rb +94 -0
  41. data/test/call_tree_visitor_test.rb +1 -6
  42. data/test/call_trees_test.rb +6 -6
  43. data/test/{basic_test.rb → compatibility_test.rb} +8 -2
  44. data/test/duplicate_names_test.rb +5 -5
  45. data/test/dynamic_method_test.rb +24 -15
  46. data/test/enumerable_test.rb +1 -1
  47. data/test/exceptions_test.rb +2 -2
  48. data/test/exclude_methods_test.rb +3 -8
  49. data/test/exclude_threads_test.rb +4 -9
  50. data/test/fiber_test.rb +74 -8
  51. data/test/gc_test.rb +11 -9
  52. data/test/inverse_call_tree_test.rb +33 -34
  53. data/test/line_number_test.rb +37 -61
  54. data/test/marshal_test.rb +16 -3
  55. data/test/measure_allocations.rb +1 -5
  56. data/test/measure_allocations_test.rb +642 -357
  57. data/test/{measure_memory_trace_test.rb → measure_memory_test.rb} +180 -616
  58. data/test/measure_process_time_test.rb +1566 -741
  59. data/test/measure_wall_time_test.rb +179 -193
  60. data/test/measurement_test.rb +82 -0
  61. data/test/merge_test.rb +146 -0
  62. data/test/method_info_test.rb +95 -0
  63. data/test/multi_printer_test.rb +0 -5
  64. data/test/no_method_class_test.rb +1 -1
  65. data/test/pause_resume_test.rb +12 -16
  66. data/test/printer_call_stack_test.rb +2 -2
  67. data/test/printer_call_tree_test.rb +4 -4
  68. data/test/printer_flat_test.rb +1 -1
  69. data/test/printer_graph_html_test.rb +2 -2
  70. data/test/printer_graph_test.rb +2 -2
  71. data/test/printers_test.rb +14 -20
  72. data/test/printing_recursive_graph_test.rb +2 -2
  73. data/test/profile_test.rb +85 -0
  74. data/test/recursive_test.rb +374 -155
  75. data/test/scheduler.rb +363 -0
  76. data/test/singleton_test.rb +1 -1
  77. data/test/stack_printer_test.rb +5 -8
  78. data/test/start_stop_test.rb +11 -14
  79. data/test/test_helper.rb +11 -8
  80. data/test/thread_test.rb +106 -15
  81. data/test/unique_call_path_test.rb +28 -12
  82. data/test/yarv_test.rb +11 -7
  83. metadata +17 -29
  84. data/ext/ruby_prof/rp_aggregate_call_tree.c +0 -59
  85. data/ext/ruby_prof/rp_aggregate_call_tree.h +0 -13
  86. data/test/measure_allocations_trace_test.rb +0 -375
  87. data/test/temp.rb +0 -20
@@ -1,900 +1,933 @@
1
- /* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
2
- Please see the LICENSE file for copyright and distribution information */
3
-
4
- /* Document-class: RubyProf::Profile
5
-
6
- The Profile class represents a single profiling run and provides the main API for using ruby-prof.
7
- After creating a Profile instance, start profiling code by calling the Profile#start method. To finish profiling,
8
- call Profile#stop. Once profiling is completed, the Profile instance contains the results.
9
-
10
- profile = RubyProf::Profile.new
11
- profile.start
12
- ...
13
- result = profile.stop
14
-
15
- Alternatively, you can use the block syntax:
16
-
17
- profile = RubyProf::Profile.profile do
18
- ...
19
- end
20
- */
21
-
22
- #include <assert.h>
23
-
24
- #include "rp_allocation.h"
25
- #include "rp_call_trees.h"
26
- #include "rp_call_tree.h"
27
- #include "rp_profile.h"
28
- #include "rp_method.h"
29
-
30
- VALUE cProfile;
31
-
32
- /* support tracing ruby events from ruby-prof. useful for getting at
33
- what actually happens inside the ruby interpreter (and ruby-prof).
34
- set environment variable RUBY_PROF_TRACE to filename you want to
35
- find the trace in.
36
- */
37
- FILE* trace_file = NULL;
38
-
39
- static const char* get_event_name(rb_event_flag_t event)
40
- {
41
- switch (event) {
42
- case RUBY_EVENT_LINE:
43
- return "line";
44
- case RUBY_EVENT_CLASS:
45
- return "class";
46
- case RUBY_EVENT_END:
47
- return "end";
48
- case RUBY_EVENT_CALL:
49
- return "call";
50
- case RUBY_EVENT_RETURN:
51
- return "return";
52
- case RUBY_EVENT_B_CALL:
53
- return "b-call";
54
- case RUBY_EVENT_B_RETURN:
55
- return "b-return";
56
- case RUBY_EVENT_C_CALL:
57
- return "c-call";
58
- case RUBY_EVENT_C_RETURN:
59
- return "c-return";
60
- case RUBY_EVENT_THREAD_BEGIN:
61
- return "thread-begin";
62
- case RUBY_EVENT_THREAD_END:
63
- return "thread-end";
64
- case RUBY_EVENT_FIBER_SWITCH:
65
- return "fiber-switch";
66
- case RUBY_EVENT_RAISE:
67
- return "raise";
68
- case RUBY_INTERNAL_EVENT_NEWOBJ:
69
- return "newobj";
70
- default:
71
- return "unknown";
72
- }
73
- }
74
-
75
- thread_data_t* check_fiber(prof_profile_t* profile, double measurement)
76
- {
77
- thread_data_t* result = NULL;
78
-
79
- // Get the current fiber
80
- VALUE fiber = rb_fiber_current();
81
-
82
- /* We need to switch the profiling context if we either had none before,
83
- we don't merge fibers and the fiber ids differ, or the thread ids differ. */
84
- if (profile->last_thread_data->fiber != fiber)
85
- {
86
- result = threads_table_lookup(profile, fiber);
87
- if (!result)
88
- {
89
- result = threads_table_insert(profile, fiber);
90
- }
91
- switch_thread(profile, result, measurement);
92
- }
93
- else
94
- {
95
- result = profile->last_thread_data;
96
- }
97
- return result;
98
- }
99
-
100
- static int excludes_method(st_data_t key, prof_profile_t* profile)
101
- {
102
- return (profile->exclude_methods_tbl &&
103
- method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
104
- }
105
-
106
- static prof_method_t* create_method(VALUE profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
107
- {
108
- prof_method_t* result = prof_method_create(profile, klass, msym, source_file, source_line);
109
-
110
- prof_profile_t* profile_t = prof_get_profile(profile);
111
- method_table_insert(profile_t->last_thread_data->method_table, result->key, result);
112
-
113
- return result;
114
- }
115
-
116
- static prof_method_t* check_parent_method(VALUE profile, thread_data_t* thread_data)
117
- {
118
- VALUE msym = ID2SYM(rb_intern("_inserted_parent_"));
119
- st_data_t key = method_key(cProfile, msym);
120
-
121
- prof_method_t* result = method_table_lookup(thread_data->method_table, key);
122
-
123
- if (!result)
124
- {
125
- result = create_method(profile, key, cProfile, msym, Qnil, 0);
126
- }
127
-
128
- return result;
129
- }
130
-
131
- prof_method_t* check_method(VALUE profile, rb_trace_arg_t* trace_arg, rb_event_flag_t event, thread_data_t* thread_data)
132
- {
133
- VALUE klass = rb_tracearg_defined_class(trace_arg);
134
-
135
- /* Special case - skip any methods from the mProf
136
- module or cProfile class since they clutter
137
- the results but aren't important to them results. */
138
- if (klass == cProfile)
139
- return NULL;
140
-
141
- VALUE msym = rb_tracearg_callee_id(trace_arg);
142
-
143
- st_data_t key = method_key(klass, msym);
144
-
145
- prof_profile_t* profile_t = prof_get_profile(profile);
146
- if (excludes_method(key, profile_t))
147
- return NULL;
148
-
149
- prof_method_t* result = method_table_lookup(thread_data->method_table, key);
150
-
151
- if (!result)
152
- {
153
- VALUE source_file = (event != RUBY_EVENT_C_CALL ? rb_tracearg_path(trace_arg) : Qnil);
154
- int source_line = (event != RUBY_EVENT_C_CALL ? FIX2INT(rb_tracearg_lineno(trace_arg)) : 0);
155
- result = create_method(profile, key, klass, msym, source_file, source_line);
156
- }
157
-
158
- return result;
159
- }
160
-
161
- /* =========== Profiling ================= */
162
- static void prof_trace(prof_profile_t* profile, rb_trace_arg_t* trace_arg, double measurement)
163
- {
164
- static VALUE last_fiber = Qnil;
165
- VALUE fiber = rb_fiber_current();
166
-
167
- rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
168
- const char* event_name = get_event_name(event);
169
-
170
- VALUE source_file = rb_tracearg_path(trace_arg);
171
- int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
172
-
173
- VALUE msym = rb_tracearg_callee_id(trace_arg);
174
-
175
- unsigned int klass_flags;
176
- VALUE klass = rb_tracearg_defined_class(trace_arg);
177
- VALUE resolved_klass = resolve_klass(klass, &klass_flags);
178
- const char* class_name = "";
179
-
180
- if (resolved_klass != Qnil)
181
- class_name = rb_class2name(resolved_klass);
182
-
183
- if (last_fiber != fiber)
184
- {
185
- fprintf(trace_file, "\n");
186
- }
187
-
188
- const char* method_name_char = (msym != Qnil ? rb_id2name(SYM2ID(msym)) : "");
189
- const char* source_file_char = (source_file != Qnil ? StringValuePtr(source_file) : "");
190
-
191
- fprintf(trace_file, "%2lu:%2f %-8s %s#%s %s:%2d\n",
192
- FIX2ULONG(fiber), (double)measurement,
193
- event_name, class_name, method_name_char, source_file_char, source_line);
194
- fflush(trace_file);
195
- last_fiber = fiber;
196
- }
197
-
198
- static void prof_event_hook(VALUE trace_point, void* data)
199
- {
200
- VALUE profile = (VALUE)data;
201
- prof_profile_t* profile_t = prof_get_profile(profile);
202
-
203
- rb_trace_arg_t* trace_arg = rb_tracearg_from_tracepoint(trace_point);
204
- double measurement = prof_measure(profile_t->measurer, trace_arg);
205
- rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
206
- VALUE self = rb_tracearg_self(trace_arg);
207
-
208
- if (trace_file != NULL)
209
- {
210
- prof_trace(profile_t, trace_arg, measurement);
211
- }
212
-
213
- /* Special case - skip any methods from the mProf
214
- module since they clutter the results but aren't important to them results. */
215
- if (self == mProf)
216
- return;
217
-
218
- thread_data_t* thread_data = check_fiber(profile_t, measurement);
219
-
220
- if (!thread_data->trace)
221
- return;
222
-
223
- switch (event)
224
- {
225
- case RUBY_EVENT_LINE:
226
- {
227
- prof_frame_t* frame = prof_frame_current(thread_data->stack);
228
-
229
- if (!frame)
230
- {
231
- prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
232
-
233
- if (!method)
234
- break;
235
-
236
- prof_call_tree_t* call_tree = prof_call_tree_create(method, NULL, method->source_file, method->source_line);
237
- prof_add_call_tree(method->call_trees, call_tree);
238
-
239
- if (thread_data->call_tree)
240
- {
241
- prof_call_tree_add_parent(thread_data->call_tree, call_tree);
242
- frame = prof_frame_unshift(thread_data->stack, call_tree, thread_data->call_tree, measurement);
243
- }
244
- else
245
- {
246
- frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile_t->paused));
247
- }
248
-
249
- thread_data->call_tree = call_tree;
250
- }
251
-
252
- frame->source_file = rb_tracearg_path(trace_arg);
253
- frame->source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
254
-
255
- break;
256
- }
257
- case RUBY_EVENT_CALL:
258
- case RUBY_EVENT_C_CALL:
259
- {
260
- prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
261
-
262
- if (!method)
263
- break;
264
-
265
- prof_frame_t* frame = prof_frame_current(thread_data->stack);
266
- prof_call_tree_t* parent_call_tree = NULL;
267
- prof_call_tree_t* call_tree = NULL;
268
-
269
- // Frame can be NULL if we are switching from one fiber to another (see FiberTest#fiber_test)
270
- if (frame)
271
- {
272
- parent_call_tree = frame->call_tree;
273
- call_tree = call_tree_table_lookup(parent_call_tree->children, method->key);
274
- }
275
- else if (!frame && thread_data->call_tree)
276
- {
277
- // There is no current parent - likely we have returned out of the highest level method we have profiled so far.
278
- // This can happen with enumerators (see fiber_test.rb). So create a new dummy parent.
279
- prof_method_t* parent_method = check_parent_method(profile, thread_data);
280
- parent_call_tree = prof_call_tree_create(parent_method, NULL, Qnil, 0);
281
- prof_add_call_tree(parent_method->call_trees, parent_call_tree);
282
- prof_call_tree_add_parent(thread_data->call_tree, parent_call_tree);
283
- frame = prof_frame_unshift(thread_data->stack, parent_call_tree, thread_data->call_tree, measurement);
284
- thread_data->call_tree = parent_call_tree;
285
- }
286
-
287
- if (!call_tree)
288
- {
289
- // This call info does not yet exist. So create it and add it to previous CallTree's children and the current method.
290
- call_tree = prof_call_tree_create(method, parent_call_tree, frame ? frame->source_file : Qnil, frame? frame->source_line : 0);
291
- prof_add_call_tree(method->call_trees, call_tree);
292
- if (parent_call_tree)
293
- prof_call_tree_add_child(parent_call_tree, call_tree);
294
- }
295
-
296
- if (!thread_data->call_tree)
297
- thread_data->call_tree = call_tree;
298
-
299
- // Push a new frame onto the stack for a new c-call or ruby call (into a method)
300
- prof_frame_t* next_frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile_t->paused));
301
- next_frame->source_file = method->source_file;
302
- next_frame->source_line = method->source_line;
303
- break;
304
- }
305
- case RUBY_EVENT_RETURN:
306
- case RUBY_EVENT_C_RETURN:
307
- {
308
- // We need to check for excluded methods so that we don't pop them off the stack
309
- prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
310
-
311
- if (!method)
312
- break;
313
-
314
- prof_frame_pop(thread_data->stack, measurement);
315
- break;
316
- }
317
- case RUBY_INTERNAL_EVENT_NEWOBJ:
318
- {
319
- /* We want to assign the allocations lexically, not the execution context (otherwise all allocations will
320
- show up under Class#new */
321
- int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
322
- VALUE source_file = rb_tracearg_path(trace_arg);
323
-
324
- prof_method_t* method = prof_find_method(thread_data->stack, source_file, source_line);
325
- if (method)
326
- prof_allocate_increment(method, trace_arg);
327
-
328
- break;
329
- }
330
- }
331
- }
332
-
333
- void prof_install_hook(VALUE self)
334
- {
335
- prof_profile_t* profile = prof_get_profile(self);
336
-
337
- VALUE event_tracepoint = rb_tracepoint_new(Qnil,
338
- RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
339
- RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN |
340
- RUBY_EVENT_LINE,
341
- prof_event_hook, (void*)self);
342
- rb_ary_push(profile->tracepoints, event_tracepoint);
343
-
344
- if (profile->measurer->track_allocations)
345
- {
346
- VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, (void*)self);
347
- rb_ary_push(profile->tracepoints, allocation_tracepoint);
348
- }
349
-
350
- for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
351
- {
352
- rb_tracepoint_enable(rb_ary_entry(profile->tracepoints, i));
353
- }
354
- }
355
-
356
- void prof_remove_hook(VALUE self)
357
- {
358
- prof_profile_t* profile = prof_get_profile(self);
359
-
360
- for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
361
- {
362
- rb_tracepoint_disable(rb_ary_entry(profile->tracepoints, i));
363
- }
364
- rb_ary_clear(profile->tracepoints);
365
- }
366
-
367
- prof_profile_t* prof_get_profile(VALUE self)
368
- {
369
- /* Can't use Data_Get_Struct because that triggers the event hook
370
- ending up in endless recursion. */
371
- return RTYPEDDATA_DATA(self);
372
- }
373
-
374
- static int collect_threads(st_data_t key, st_data_t value, st_data_t result)
375
- {
376
- thread_data_t* thread_data = (thread_data_t*)value;
377
- if (thread_data->trace)
378
- {
379
- VALUE threads_array = (VALUE)result;
380
- rb_ary_push(threads_array, prof_thread_wrap(thread_data));
381
- }
382
- return ST_CONTINUE;
383
- }
384
-
385
- /* ======== Profile Class ====== */
386
- static int mark_threads(st_data_t key, st_data_t value, st_data_t result)
387
- {
388
- thread_data_t* thread = (thread_data_t*)value;
389
- prof_thread_mark(thread);
390
- return ST_CONTINUE;
391
- }
392
-
393
- static int prof_profile_mark_methods(st_data_t key, st_data_t value, st_data_t result)
394
- {
395
- prof_method_t* method = (prof_method_t*)value;
396
- prof_method_mark(method);
397
- return ST_CONTINUE;
398
- }
399
-
400
- static void prof_profile_mark(void* data)
401
- {
402
- prof_profile_t* profile = (prof_profile_t*)data;
403
- rb_gc_mark(profile->tracepoints);
404
- rb_gc_mark(profile->running);
405
- rb_gc_mark(profile->paused);
406
-
407
- // If GC stress is true (useful for debugging), when threads_table_create is called in the
408
- // allocate method Ruby will immediately call this mark method. Thus the threads_tbl will be NULL.
409
- if (profile->threads_tbl)
410
- rb_st_foreach(profile->threads_tbl, mark_threads, 0);
411
-
412
- if (profile->exclude_methods_tbl)
413
- rb_st_foreach(profile->exclude_methods_tbl, prof_profile_mark_methods, 0);
414
- }
415
-
416
- /* Freeing the profile creates a cascade of freeing. It frees its threads table, which frees
417
- each thread and its associated call treee and methods. */
418
- static void prof_profile_ruby_gc_free(void* data)
419
- {
420
- prof_profile_t* profile = (prof_profile_t*)data;
421
- profile->last_thread_data = NULL;
422
-
423
- threads_table_free(profile->threads_tbl);
424
- profile->threads_tbl = NULL;
425
-
426
- if (profile->exclude_threads_tbl)
427
- {
428
- rb_st_free_table(profile->exclude_threads_tbl);
429
- profile->exclude_threads_tbl = NULL;
430
- }
431
-
432
- if (profile->include_threads_tbl)
433
- {
434
- rb_st_free_table(profile->include_threads_tbl);
435
- profile->include_threads_tbl = NULL;
436
- }
437
-
438
- /* This table owns the excluded sentinels for now. */
439
- method_table_free(profile->exclude_methods_tbl);
440
- profile->exclude_methods_tbl = NULL;
441
-
442
- xfree(profile->measurer);
443
- profile->measurer = NULL;
444
-
445
- xfree(profile);
446
- }
447
-
448
- size_t prof_profile_size(const void* data)
449
- {
450
- return sizeof(prof_profile_t);
451
- }
452
-
453
- static const rb_data_type_t profile_type =
454
- {
455
- .wrap_struct_name = "Profile",
456
- .function =
457
- {
458
- .dmark = prof_profile_mark,
459
- .dfree = prof_profile_ruby_gc_free,
460
- .dsize = prof_profile_size,
461
- },
462
- .data = NULL,
463
- .flags = RUBY_TYPED_FREE_IMMEDIATELY
464
- };
465
-
466
- static VALUE prof_allocate(VALUE klass)
467
- {
468
- VALUE result;
469
- prof_profile_t* profile;
470
- result = TypedData_Make_Struct(klass, prof_profile_t, &profile_type, profile);
471
- profile->threads_tbl = threads_table_create();
472
- profile->exclude_threads_tbl = NULL;
473
- profile->include_threads_tbl = NULL;
474
- profile->running = Qfalse;
475
- profile->allow_exceptions = false;
476
- profile->exclude_methods_tbl = method_table_create();
477
- profile->running = Qfalse;
478
- profile->tracepoints = rb_ary_new();
479
- return result;
480
- }
481
-
482
- static void prof_exclude_common_methods(VALUE profile)
483
- {
484
- rb_funcall(profile, rb_intern("exclude_common_methods!"), 0);
485
- }
486
-
487
- static int pop_frames(VALUE key, st_data_t value, st_data_t data)
488
- {
489
- thread_data_t* thread_data = (thread_data_t*)value;
490
- prof_profile_t* profile = (prof_profile_t*)data;
491
- double measurement = prof_measure(profile->measurer, NULL);
492
-
493
- if (profile->last_thread_data->fiber != thread_data->fiber)
494
- switch_thread(profile, thread_data, measurement);
495
-
496
- while (prof_frame_pop(thread_data->stack, measurement));
497
-
498
- return ST_CONTINUE;
499
- }
500
-
501
- static void
502
- prof_stop_threads(prof_profile_t* profile)
503
- {
504
- rb_st_foreach(profile->threads_tbl, pop_frames, (st_data_t)profile);
505
- }
506
-
507
- /* call-seq:
508
- new()
509
- new(options)
510
-
511
- Returns a new profiler. Possible options for the options hash are:
512
-
513
- measure_mode: Measure mode. Specifies the profile measure mode.
514
- If not specified, defaults to RubyProf::WALL_TIME.
515
- allow_exceptions: Whether to raise exceptions encountered during profiling,
516
- or to suppress all exceptions during profiling
517
- track_allocations: Whether to track object allocations while profiling. True or false.
518
- exclude_common: Exclude common methods from the profile. True or false.
519
- exclude_threads: Threads to exclude from the profiling results.
520
- include_threads: Focus profiling on only the given threads. This will ignore
521
- all other threads. */
522
- static VALUE prof_initialize(int argc, VALUE* argv, VALUE self)
523
- {
524
- prof_profile_t* profile = prof_get_profile(self);
525
- VALUE mode_or_options;
526
- VALUE mode = Qnil;
527
- VALUE exclude_threads = Qnil;
528
- VALUE include_threads = Qnil;
529
- VALUE exclude_common = Qnil;
530
- VALUE allow_exceptions = Qfalse;
531
- VALUE track_allocations = Qfalse;
532
-
533
- int i;
534
-
535
- switch (rb_scan_args(argc, argv, "02", &mode_or_options, &exclude_threads))
536
- {
537
- case 0:
538
- break;
539
- case 1:
540
- if (FIXNUM_P(mode_or_options))
541
- {
542
- mode = mode_or_options;
543
- }
544
- else
545
- {
546
- Check_Type(mode_or_options, T_HASH);
547
- mode = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("measure_mode")));
548
- track_allocations = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("track_allocations")));
549
- allow_exceptions = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("allow_exceptions")));
550
- exclude_common = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_common")));
551
- exclude_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_threads")));
552
- include_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("include_threads")));
553
- }
554
- break;
555
- case 2:
556
- Check_Type(exclude_threads, T_ARRAY);
557
- break;
558
- }
559
-
560
- if (mode == Qnil)
561
- {
562
- mode = INT2NUM(MEASURE_WALL_TIME);
563
- }
564
- else
565
- {
566
- Check_Type(mode, T_FIXNUM);
567
- }
568
- profile->measurer = prof_get_measurer(NUM2INT(mode), track_allocations == Qtrue);
569
- profile->allow_exceptions = (allow_exceptions == Qtrue);
570
-
571
- if (exclude_threads != Qnil)
572
- {
573
- Check_Type(exclude_threads, T_ARRAY);
574
- assert(profile->exclude_threads_tbl == NULL);
575
- profile->exclude_threads_tbl = threads_table_create();
576
- for (i = 0; i < RARRAY_LEN(exclude_threads); i++)
577
- {
578
- VALUE thread = rb_ary_entry(exclude_threads, i);
579
- rb_st_insert(profile->exclude_threads_tbl, thread, Qtrue);
580
- }
581
- }
582
-
583
- if (include_threads != Qnil)
584
- {
585
- Check_Type(include_threads, T_ARRAY);
586
- assert(profile->include_threads_tbl == NULL);
587
- profile->include_threads_tbl = threads_table_create();
588
- for (i = 0; i < RARRAY_LEN(include_threads); i++)
589
- {
590
- VALUE thread = rb_ary_entry(include_threads, i);
591
- rb_st_insert(profile->include_threads_tbl, thread, Qtrue);
592
- }
593
- }
594
-
595
- if (RTEST(exclude_common))
596
- {
597
- prof_exclude_common_methods(self);
598
- }
599
-
600
- return self;
601
- }
602
-
603
- /* call-seq:
604
- paused? -> boolean
605
-
606
- Returns whether a profile is currently paused.*/
607
- static VALUE prof_paused(VALUE self)
608
- {
609
- prof_profile_t* profile = prof_get_profile(self);
610
- return profile->paused;
611
- }
612
-
613
- /* call-seq:
614
- running? -> boolean
615
-
616
- Returns whether a profile is currently running.*/
617
- static VALUE
618
- prof_running(VALUE self)
619
- {
620
- prof_profile_t* profile = prof_get_profile(self);
621
- return profile->running;
622
- }
623
-
624
- /* call-seq:
625
- mode -> measure_mode
626
-
627
- Returns the measure mode used in this profile.*/
628
- static VALUE prof_profile_measure_mode(VALUE self)
629
- {
630
- prof_profile_t* profile = prof_get_profile(self);
631
- return INT2NUM(profile->measurer->mode);
632
- }
633
-
634
- /* call-seq:
635
- track_allocations -> boolean
636
-
637
- Returns if object allocations were tracked in this profile.*/
638
- static VALUE prof_profile_track_allocations(VALUE self)
639
- {
640
- prof_profile_t* profile = prof_get_profile(self);
641
- return profile->measurer->track_allocations ? Qtrue : Qfalse;
642
- }
643
-
644
- /* call-seq:
645
- start -> self
646
-
647
- Starts recording profile data.*/
648
- static VALUE prof_start(VALUE self)
649
- {
650
- char* trace_file_name;
651
-
652
- prof_profile_t* profile = prof_get_profile(self);
653
-
654
- if (profile->running == Qtrue)
655
- {
656
- rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
657
- }
658
-
659
- profile->running = Qtrue;
660
- profile->paused = Qfalse;
661
- profile->last_thread_data = threads_table_insert(profile, rb_fiber_current());
662
-
663
- /* open trace file if environment wants it */
664
- trace_file_name = getenv("RUBY_PROF_TRACE");
665
-
666
- if (trace_file_name != NULL)
667
- {
668
- if (strcmp(trace_file_name, "stdout") == 0)
669
- {
670
- trace_file = stdout;
671
- }
672
- else if (strcmp(trace_file_name, "stderr") == 0)
673
- {
674
- trace_file = stderr;
675
- }
676
- else
677
- {
678
- trace_file = fopen(trace_file_name, "w");
679
- }
680
- }
681
-
682
- prof_install_hook(self);
683
- return self;
684
- }
685
-
686
- /* call-seq:
687
- pause -> self
688
-
689
- Pauses collecting profile data. */
690
- static VALUE prof_pause(VALUE self)
691
- {
692
- prof_profile_t* profile = prof_get_profile(self);
693
- if (profile->running == Qfalse)
694
- {
695
- rb_raise(rb_eRuntimeError, "RubyProf is not running.");
696
- }
697
-
698
- if (profile->paused == Qfalse)
699
- {
700
- profile->paused = Qtrue;
701
- profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
702
- rb_st_foreach(profile->threads_tbl, pause_thread, (st_data_t)profile);
703
- }
704
-
705
- return self;
706
- }
707
-
708
- /* call-seq:
709
- resume -> self
710
- resume(&block) -> self
711
-
712
- Resumes recording profile data.*/
713
- static VALUE prof_resume(VALUE self)
714
- {
715
- prof_profile_t* profile = prof_get_profile(self);
716
- if (profile->running == Qfalse)
717
- {
718
- rb_raise(rb_eRuntimeError, "RubyProf is not running.");
719
- }
720
-
721
- if (profile->paused == Qtrue)
722
- {
723
- profile->paused = Qfalse;
724
- profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
725
- rb_st_foreach(profile->threads_tbl, unpause_thread, (st_data_t)profile);
726
- }
727
-
728
- return rb_block_given_p() ? rb_ensure(rb_yield, self, prof_pause, self) : self;
729
- }
730
-
731
- /* call-seq:
732
- stop -> self
733
-
734
- Stops collecting profile data.*/
735
- static VALUE prof_stop(VALUE self)
736
- {
737
- prof_profile_t* profile = prof_get_profile(self);
738
-
739
- if (profile->running == Qfalse)
740
- {
741
- rb_raise(rb_eRuntimeError, "RubyProf.start was not yet called");
742
- }
743
-
744
- prof_remove_hook(self);
745
-
746
- /* close trace file if open */
747
- if (trace_file != NULL)
748
- {
749
- if (trace_file != stderr && trace_file != stdout)
750
- {
751
- fclose(trace_file);
752
- }
753
- trace_file = NULL;
754
- }
755
-
756
- prof_stop_threads(profile);
757
-
758
- /* Unset the last_thread_data (very important!)
759
- and the threads table */
760
- profile->running = profile->paused = Qfalse;
761
- profile->last_thread_data = NULL;
762
-
763
- return self;
764
- }
765
-
766
- /* call-seq:
767
- threads -> Array of RubyProf::Thread
768
-
769
- Returns an array of RubyProf::Thread instances that were profiled. */
770
- static VALUE prof_threads(VALUE self)
771
- {
772
- VALUE result = rb_ary_new();
773
- prof_profile_t* profile = prof_get_profile(self);
774
- rb_st_foreach(profile->threads_tbl, collect_threads, result);
775
- return result;
776
- }
777
-
778
- /* Document-method: RubyProf::Profile#Profile
779
- call-seq:
780
- profile(&block) -> self
781
-
782
- Profiles the specified block.
783
-
784
- profile = RubyProf::Profile.new
785
- profile.profile do
786
- ..
787
- end
788
- */
789
- static VALUE prof_profile_object(VALUE self)
790
- {
791
- int result;
792
- prof_profile_t* profile = prof_get_profile(self);
793
-
794
- if (!rb_block_given_p())
795
- {
796
- rb_raise(rb_eArgError, "A block must be provided to the profile method.");
797
- }
798
-
799
- prof_start(self);
800
- rb_protect(rb_yield, self, &result);
801
- self = prof_stop(self);
802
-
803
- if (profile->allow_exceptions && result != 0)
804
- {
805
- rb_jump_tag(result);
806
- }
807
-
808
- return self;
809
- }
810
-
811
- /* Document-method: RubyProf::Profile::Profile
812
- call-seq:
813
- profile(&block) -> RubyProf::Profile
814
- profile(options, &block) -> RubyProf::Profile
815
-
816
- Profiles the specified block and returns a RubyProf::Profile
817
- object. Arguments are passed to Profile initialize method.
818
-
819
- profile = RubyProf::Profile.profile do
820
- ..
821
- end
822
- */
823
- static VALUE prof_profile_class(int argc, VALUE* argv, VALUE klass)
824
- {
825
- return prof_profile_object(rb_class_new_instance(argc, argv, cProfile));
826
- }
827
-
828
- /* call-seq:
829
- exclude_method!(module, method_name) -> self
830
-
831
- Excludes the method from profiling results.
832
- */
833
- static VALUE prof_exclude_method(VALUE self, VALUE klass, VALUE msym)
834
- {
835
- prof_profile_t* profile = prof_get_profile(self);
836
-
837
- if (profile->running == Qtrue)
838
- {
839
- rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
840
- }
841
-
842
- st_data_t key = method_key(klass, msym);
843
- prof_method_t* method = method_table_lookup(profile->exclude_methods_tbl, key);
844
-
845
- if (!method)
846
- {
847
- method = prof_method_create(self, klass, msym, Qnil, 0);
848
- method_table_insert(profile->exclude_methods_tbl, method->key, method);
849
- }
850
-
851
- return self;
852
- }
853
-
854
- /* :nodoc: */
855
- VALUE prof_profile_dump(VALUE self)
856
- {
857
- VALUE result = rb_hash_new();
858
- rb_hash_aset(result, ID2SYM(rb_intern("threads")), prof_threads(self));
859
- return result;
860
- }
861
-
862
- /* :nodoc: */
863
- VALUE prof_profile_load(VALUE self, VALUE data)
864
- {
865
- prof_profile_t* profile = prof_get_profile(self);
866
-
867
- VALUE threads = rb_hash_aref(data, ID2SYM(rb_intern("threads")));
868
- for (int i = 0; i < rb_array_len(threads); i++)
869
- {
870
- VALUE thread = rb_ary_entry(threads, i);
871
- thread_data_t* thread_data = prof_get_thread(thread);
872
- rb_st_insert(profile->threads_tbl, (st_data_t)thread_data->fiber_id, (st_data_t)thread_data);
873
- }
874
-
875
- return data;
876
- }
877
-
878
- void rp_init_profile(void)
879
- {
880
- cProfile = rb_define_class_under(mProf, "Profile", rb_cObject);
881
- rb_define_alloc_func(cProfile, prof_allocate);
882
-
883
- rb_define_singleton_method(cProfile, "profile", prof_profile_class, -1);
884
- rb_define_method(cProfile, "initialize", prof_initialize, -1);
885
- rb_define_method(cProfile, "start", prof_start, 0);
886
- rb_define_method(cProfile, "stop", prof_stop, 0);
887
- rb_define_method(cProfile, "resume", prof_resume, 0);
888
- rb_define_method(cProfile, "pause", prof_pause, 0);
889
- rb_define_method(cProfile, "running?", prof_running, 0);
890
- rb_define_method(cProfile, "paused?", prof_paused, 0);
891
- rb_define_method(cProfile, "threads", prof_threads, 0);
892
- rb_define_method(cProfile, "exclude_method!", prof_exclude_method, 2);
893
- rb_define_method(cProfile, "profile", prof_profile_object, 0);
894
-
895
- rb_define_method(cProfile, "measure_mode", prof_profile_measure_mode, 0);
896
- rb_define_method(cProfile, "track_allocations?", prof_profile_track_allocations, 0);
897
-
898
- rb_define_method(cProfile, "_dump_data", prof_profile_dump, 0);
899
- rb_define_method(cProfile, "_load_data", prof_profile_load, 1);
900
- }
1
+ /* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
2
+ Please see the LICENSE file for copyright and distribution information */
3
+
4
+ /* Document-class: RubyProf::Profile
5
+
6
+ The Profile class represents a single profiling run and provides the main API for using ruby-prof.
7
+ After creating a Profile instance, start profiling code by calling the Profile#start method. To finish profiling,
8
+ call Profile#stop. Once profiling is completed, the Profile instance contains the results.
9
+
10
+ profile = RubyProf::Profile.new
11
+ profile.start
12
+ ...
13
+ result = profile.stop
14
+
15
+ Alternatively, you can use the block syntax:
16
+
17
+ profile = RubyProf::Profile.profile do
18
+ ...
19
+ end
20
+ */
21
+
22
+ #include <assert.h>
23
+
24
+ #include "rp_allocation.h"
25
+ #include "rp_call_trees.h"
26
+ #include "rp_call_tree.h"
27
+ #include "rp_profile.h"
28
+ #include "rp_method.h"
29
+
30
+ VALUE cProfile;
31
+
32
+ /* support tracing ruby events from ruby-prof. useful for getting at
33
+ what actually happens inside the ruby interpreter (and ruby-prof).
34
+ set environment variable RUBY_PROF_TRACE to filename you want to
35
+ find the trace in.
36
+ */
37
+ FILE* trace_file = NULL;
38
+
39
+ static const char* get_event_name(rb_event_flag_t event)
40
+ {
41
+ switch (event) {
42
+ case RUBY_EVENT_LINE:
43
+ return "line";
44
+ case RUBY_EVENT_CLASS:
45
+ return "class";
46
+ case RUBY_EVENT_END:
47
+ return "end";
48
+ case RUBY_EVENT_CALL:
49
+ return "call";
50
+ case RUBY_EVENT_RETURN:
51
+ return "return";
52
+ case RUBY_EVENT_B_CALL:
53
+ return "b-call";
54
+ case RUBY_EVENT_B_RETURN:
55
+ return "b-return";
56
+ case RUBY_EVENT_C_CALL:
57
+ return "c-call";
58
+ case RUBY_EVENT_C_RETURN:
59
+ return "c-return";
60
+ case RUBY_EVENT_THREAD_BEGIN:
61
+ return "thread-begin";
62
+ case RUBY_EVENT_THREAD_END:
63
+ return "thread-end";
64
+ case RUBY_EVENT_FIBER_SWITCH:
65
+ return "fiber-switch";
66
+ case RUBY_EVENT_RAISE:
67
+ return "raise";
68
+ case RUBY_INTERNAL_EVENT_NEWOBJ:
69
+ return "newobj";
70
+ default:
71
+ return "unknown";
72
+ }
73
+ }
74
+
75
+ thread_data_t* check_fiber(prof_profile_t* profile, double measurement)
76
+ {
77
+ thread_data_t* result = NULL;
78
+
79
+ // Get the current fiber
80
+ VALUE fiber = rb_fiber_current();
81
+
82
+ /* We need to switch the profiling context if we either had none before,
83
+ we don't merge fibers and the fiber ids differ, or the thread ids differ. */
84
+ if (profile->last_thread_data->fiber != fiber)
85
+ {
86
+ result = threads_table_lookup(profile, fiber);
87
+ if (!result)
88
+ {
89
+ result = threads_table_insert(profile, fiber);
90
+ }
91
+ switch_thread(profile, result, measurement);
92
+ }
93
+ else
94
+ {
95
+ result = profile->last_thread_data;
96
+ }
97
+ return result;
98
+ }
99
+
100
+ static int excludes_method(st_data_t key, prof_profile_t* profile)
101
+ {
102
+ return (profile->exclude_methods_tbl &&
103
+ method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
104
+ }
105
+
106
+ static prof_method_t* create_method(prof_profile_t* profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
107
+ {
108
+ prof_method_t* result = prof_method_create(profile->object, klass, msym, source_file, source_line);
109
+ method_table_insert(profile->last_thread_data->method_table, result->key, result);
110
+
111
+ return result;
112
+ }
113
+
114
+ static prof_method_t* check_parent_method(prof_profile_t* profile, thread_data_t* thread_data)
115
+ {
116
+ VALUE msym = ID2SYM(rb_intern("_inserted_parent_"));
117
+ st_data_t key = method_key(cProfile, msym);
118
+
119
+ prof_method_t* result = method_table_lookup(thread_data->method_table, key);
120
+
121
+ if (!result)
122
+ {
123
+ result = create_method(profile, key, cProfile, msym, Qnil, 0);
124
+ }
125
+
126
+ return result;
127
+ }
128
+
129
/* Looks up (or lazily creates) the prof_method_t for the method involved in
   the current trace event.  Returns NULL when the method should not be
   profiled: RubyProf internals or methods the user excluded. */
prof_method_t* check_method(prof_profile_t* profile, rb_trace_arg_t* trace_arg, rb_event_flag_t event, thread_data_t* thread_data)
{
    VALUE klass = rb_tracearg_defined_class(trace_arg);

    /* Special case - skip any methods from the mProf
       module or cProfile class since they clutter
       the results but aren't important to the results. */
    if (klass == cProfile)
        return NULL;

    VALUE msym = rb_tracearg_callee_id(trace_arg);

    st_data_t key = method_key(klass, msym);

    if (excludes_method(key, profile))
        return NULL;

    prof_method_t* result = method_table_lookup(thread_data->method_table, key);

    if (!result)
    {
        // C calls have no Ruby source location, so record none for them.
        VALUE source_file = (event != RUBY_EVENT_C_CALL ? rb_tracearg_path(trace_arg) : Qnil);
        int source_line = (event != RUBY_EVENT_C_CALL ? FIX2INT(rb_tracearg_lineno(trace_arg)) : 0);
        result = create_method(profile, key, klass, msym, source_file, source_line);
    }

    return result;
}
157
+
158
+ /* =========== Profiling ================= */
159
/* Writes one line per interpreter event to trace_file (enabled via the
   RUBY_PROF_TRACE environment variable).  Debugging aid only; not part of
   the collected profile data. */
static void prof_trace(prof_profile_t* profile, rb_trace_arg_t* trace_arg, double measurement)
{
    // NOTE(review): last_fiber is a static VALUE never registered with the
    // GC.  It is only compared for identity, never dereferenced, so the
    // worst case appears to be a spurious blank line after GC - confirm.
    static VALUE last_fiber = Qnil;
    VALUE fiber = rb_fiber_current();

    rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
    const char* event_name = get_event_name(event);

    VALUE source_file = rb_tracearg_path(trace_arg);
    int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));

    VALUE msym = rb_tracearg_callee_id(trace_arg);

    unsigned int klass_flags;
    VALUE klass = rb_tracearg_defined_class(trace_arg);
    VALUE resolved_klass = resolve_klass(klass, &klass_flags);
    const char* class_name = "";

    if (resolved_klass != Qnil)
        class_name = rb_class2name(resolved_klass);

    // Separate events of different fibers with a blank line.
    if (last_fiber != fiber)
    {
        fprintf(trace_file, "\n");
    }

    const char* method_name_char = (msym != Qnil ? rb_id2name(SYM2ID(msym)) : "");
    const char* source_file_char = (source_file != Qnil ? StringValuePtr(source_file) : "");

    // NOTE(review): fiber is a Fiber object, not a Fixnum, so FIX2ULONG
    // produces an opaque id derived from the VALUE bits - presumably a cheap
    // fiber identifier for the log; confirm intent.
    fprintf(trace_file, "%2lu:%2f %-8s %s#%s %s:%2d\n",
        FIX2ULONG(fiber), (double)measurement,
        event_name, class_name, method_name_char, source_file_char, source_line);
    fflush(trace_file);
    last_fiber = fiber;
}
194
+
195
/* Central tracepoint callback.  Receives every line/call/return (and, when
   allocation tracking is on, newobj) event, takes a measurement, and updates
   the per-thread frame stack and call tree.  Order of operations here is
   load-bearing: the measurement is taken first so the bookkeeping cost is
   not attributed to the profiled code. */
static void prof_event_hook(VALUE trace_point, void* data)
{
    prof_profile_t* profile = (prof_profile_t*)(data);

    rb_trace_arg_t* trace_arg = rb_tracearg_from_tracepoint(trace_point);
    double measurement = prof_measure(profile->measurer, trace_arg);
    rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
    VALUE self = rb_tracearg_self(trace_arg);

    if (trace_file != NULL)
    {
        prof_trace(profile, trace_arg, measurement);
    }

    /* Special case - skip any methods from the mProf
       module since they clutter the results but aren't important. */
    if (self == mProf)
        return;

    thread_data_t* thread_data = check_fiber(profile, measurement);

    // Thread/fiber is excluded from profiling - ignore its events.
    if (!thread_data->trace)
        return;

    switch (event)
    {
        case RUBY_EVENT_LINE:
        {
            prof_frame_t* frame = prof_frame_current(thread_data->stack);

            /* If there is no frame then this is either the first method being profiled or we have climbed the
               call stack higher than where we started. */
            if (!frame)
            {
                prof_method_t* method = check_method(profile, trace_arg, event, thread_data);

                if (!method)
                    break;

                prof_call_tree_t* call_tree = prof_call_tree_create(method, NULL, method->source_file, method->source_line);
                prof_add_call_tree(method->call_trees, call_tree);

                // We have climbed higher in the stack than where we started
                if (thread_data->call_tree)
                {
                    prof_call_tree_add_parent(thread_data->call_tree, call_tree);
                    frame = prof_frame_unshift(thread_data->stack, call_tree, thread_data->call_tree, measurement);
                }
                // This is the first method to be profiled
                else
                {
                    frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile->paused));
                }

                thread_data->call_tree = call_tree;
            }

            // Keep the current frame's source position up to date as lines execute.
            frame->source_file = rb_tracearg_path(trace_arg);
            frame->source_line = FIX2INT(rb_tracearg_lineno(trace_arg));

            break;
        }
        case RUBY_EVENT_CALL:
        case RUBY_EVENT_C_CALL:
        {
            prof_method_t* method = check_method(profile, trace_arg, event, thread_data);

            if (!method)
                break;

            prof_frame_t* frame = prof_frame_current(thread_data->stack);
            prof_call_tree_t* parent_call_tree = NULL;
            prof_call_tree_t* call_tree = NULL;

            // Frame can be NULL if we are switching from one fiber to another (see FiberTest#fiber_test)
            if (frame)
            {
                parent_call_tree = frame->call_tree;
                call_tree = call_tree_table_lookup(parent_call_tree->children, method->key);
            }
            else if (!frame && thread_data->call_tree)
            {
                // There is no current parent - likely we have returned out of the highest level method we have profiled so far.
                // This can happen with enumerators (see fiber_test.rb). So create a new dummy parent.
                prof_method_t* parent_method = check_parent_method(profile, thread_data);
                parent_call_tree = prof_call_tree_create(parent_method, NULL, Qnil, 0);
                prof_add_call_tree(parent_method->call_trees, parent_call_tree);
                prof_call_tree_add_parent(thread_data->call_tree, parent_call_tree);
                frame = prof_frame_unshift(thread_data->stack, parent_call_tree, thread_data->call_tree, measurement);
                thread_data->call_tree = parent_call_tree;
            }

            if (!call_tree)
            {
                // This call info does not yet exist. So create it and add it to previous CallTree's children and the current method.
                call_tree = prof_call_tree_create(method, parent_call_tree, frame ? frame->source_file : Qnil, frame ? frame->source_line : 0);
                prof_add_call_tree(method->call_trees, call_tree);
                if (parent_call_tree)
                    prof_call_tree_add_child(parent_call_tree, call_tree);
            }

            if (!thread_data->call_tree)
                thread_data->call_tree = call_tree;

            // Push a new frame onto the stack for a new c-call or ruby call (into a method)
            prof_frame_t* next_frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile->paused));
            next_frame->source_file = method->source_file;
            next_frame->source_line = method->source_line;
            break;
        }
        case RUBY_EVENT_RETURN:
        case RUBY_EVENT_C_RETURN:
        {
            // We need to check for excluded methods so that we don't pop them off the stack
            prof_method_t* method = check_method(profile, trace_arg, event, thread_data);

            if (!method)
                break;

            prof_frame_pop(thread_data->stack, measurement);
            break;
        }
        case RUBY_INTERNAL_EVENT_NEWOBJ:
        {
            /* We want to assign the allocations lexically, not the execution context (otherwise all allocations will
               show up under Class#new) */
            int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
            VALUE source_file = rb_tracearg_path(trace_arg);

            prof_method_t* method = prof_find_method(thread_data->stack, source_file, source_line);
            if (method)
                prof_allocate_increment(method->allocations_table, trace_arg);

            break;
        }
    }
}
332
+
333
+ void prof_install_hook(VALUE self)
334
+ {
335
+ prof_profile_t* profile = prof_get_profile(self);
336
+
337
+ VALUE event_tracepoint = rb_tracepoint_new(Qnil,
338
+ RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
339
+ RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN |
340
+ RUBY_EVENT_LINE,
341
+ prof_event_hook, profile);
342
+ rb_ary_push(profile->tracepoints, event_tracepoint);
343
+
344
+ if (profile->measurer->track_allocations)
345
+ {
346
+ VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, profile);
347
+ rb_ary_push(profile->tracepoints, allocation_tracepoint);
348
+ }
349
+
350
+ for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
351
+ {
352
+ rb_tracepoint_enable(rb_ary_entry(profile->tracepoints, i));
353
+ }
354
+ }
355
+
356
+ void prof_remove_hook(VALUE self)
357
+ {
358
+ prof_profile_t* profile = prof_get_profile(self);
359
+
360
+ for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
361
+ {
362
+ rb_tracepoint_disable(rb_ary_entry(profile->tracepoints, i));
363
+ }
364
+ rb_ary_clear(profile->tracepoints);
365
+ }
366
+
367
/* Returns the prof_profile_t wrapped by a RubyProf::Profile VALUE. */
prof_profile_t* prof_get_profile(VALUE self)
{
    /* Can't use Data_Get_Struct because that triggers the event hook
       ending up in endless recursion. */
    return RTYPEDDATA_DATA(self);
}
373
+
374
+ static int collect_threads(st_data_t key, st_data_t value, st_data_t result)
375
+ {
376
+ thread_data_t* thread_data = (thread_data_t*)value;
377
+ if (thread_data->trace && thread_data->call_tree)
378
+ {
379
+ VALUE threads_array = (VALUE)result;
380
+ rb_ary_push(threads_array, prof_thread_wrap(thread_data));
381
+ }
382
+ return ST_CONTINUE;
383
+ }
384
+
385
+ /* ======== Profile Class ====== */
386
+ static int mark_threads(st_data_t key, st_data_t value, st_data_t result)
387
+ {
388
+ thread_data_t* thread = (thread_data_t*)value;
389
+ prof_thread_mark(thread);
390
+ return ST_CONTINUE;
391
+ }
392
+
393
+ static int prof_profile_mark_methods(st_data_t key, st_data_t value, st_data_t result)
394
+ {
395
+ prof_method_t* method = (prof_method_t*)value;
396
+ prof_method_mark(method);
397
+ return ST_CONTINUE;
398
+ }
399
+
400
/* GC mark callback for the Profile TypedData object.  Marks every VALUE the
   profile owns as movable (so compaction may relocate them) and walks the
   threads and excluded-methods tables. */
static void prof_profile_mark(void* data)
{
    prof_profile_t* profile = (prof_profile_t*)data;
    rb_gc_mark_movable(profile->object);
    rb_gc_mark_movable(profile->tracepoints);
    rb_gc_mark_movable(profile->running);
    rb_gc_mark_movable(profile->paused);

    // If GC stress is true (useful for debugging), when threads_table_create is called in the
    // allocate method Ruby will immediately call this mark method. Thus the threads_tbl will be NULL.
    if (profile->threads_tbl)
        rb_st_foreach(profile->threads_tbl, mark_threads, 0);

    if (profile->exclude_methods_tbl)
        rb_st_foreach(profile->exclude_methods_tbl, prof_profile_mark_methods, 0);
}
416
+
417
+ void prof_profile_compact(void* data)
418
+ {
419
+ prof_profile_t* profile = (prof_profile_t*)data;
420
+ profile->object = rb_gc_location(profile->object);
421
+ profile->tracepoints = rb_gc_location(profile->tracepoints);
422
+ profile->running = rb_gc_location(profile->running);
423
+ profile->paused = rb_gc_location(profile->paused);
424
+ }
425
+
426
/* Freeing the profile creates a cascade of freeing. It frees its threads table, which frees
   each thread and its associated call tree and methods. */
static void prof_profile_ruby_gc_free(void* data)
{
    prof_profile_t* profile = (prof_profile_t*)data;
    // Clear before freeing the table so nothing dangles mid-teardown.
    profile->last_thread_data = NULL;

    threads_table_free(profile->threads_tbl);
    profile->threads_tbl = NULL;

    if (profile->exclude_threads_tbl)
    {
        rb_st_free_table(profile->exclude_threads_tbl);
        profile->exclude_threads_tbl = NULL;
    }

    if (profile->include_threads_tbl)
    {
        rb_st_free_table(profile->include_threads_tbl);
        profile->include_threads_tbl = NULL;
    }

    /* This table owns the excluded sentinels for now. */
    method_table_free(profile->exclude_methods_tbl);
    profile->exclude_methods_tbl = NULL;

    xfree(profile->measurer);
    profile->measurer = NULL;

    xfree(profile);
}
457
+
458
/* dsize callback: reports only the top-level struct size; memory owned
   through the internal tables is not accounted for here. */
size_t prof_profile_size(const void* data)
{
    return sizeof(prof_profile_t);
}
462
+
463
/* TypedData descriptor for RubyProf::Profile.  RUBY_TYPED_FREE_IMMEDIATELY
   allows Ruby to free the struct during GC without deferring to a later
   finalization phase. */
static const rb_data_type_t profile_type =
{
    .wrap_struct_name = "Profile",
    .function =
    {
        .dmark = prof_profile_mark,
        .dfree = prof_profile_ruby_gc_free,
        .dsize = prof_profile_size,
        .dcompact = prof_profile_compact
    },
    .data = NULL,
    .flags = RUBY_TYPED_FREE_IMMEDIATELY
};
476
+
477
+ static VALUE prof_allocate(VALUE klass)
478
+ {
479
+ VALUE result;
480
+ prof_profile_t* profile;
481
+ result = TypedData_Make_Struct(klass, prof_profile_t, &profile_type, profile);
482
+ profile->object = result;
483
+ profile->threads_tbl = threads_table_create();
484
+ profile->exclude_threads_tbl = NULL;
485
+ profile->include_threads_tbl = NULL;
486
+ profile->running = Qfalse;
487
+ profile->allow_exceptions = false;
488
+ profile->exclude_methods_tbl = method_table_create();
489
+ profile->running = Qfalse;
490
+ profile->tracepoints = rb_ary_new();
491
+ return result;
492
+ }
493
+
494
+ static void prof_exclude_common_methods(VALUE profile)
495
+ {
496
+ rb_funcall(profile, rb_intern("exclude_common_methods!"), 0);
497
+ }
498
+
499
+ static int pop_frames(VALUE key, st_data_t value, st_data_t data)
500
+ {
501
+ thread_data_t* thread_data = (thread_data_t*)value;
502
+ prof_profile_t* profile = (prof_profile_t*)data;
503
+ double measurement = prof_measure(profile->measurer, NULL);
504
+
505
+ if (profile->last_thread_data->fiber != thread_data->fiber)
506
+ switch_thread(profile, thread_data, measurement);
507
+
508
+ while (prof_frame_pop(thread_data->stack, measurement));
509
+
510
+ return ST_CONTINUE;
511
+ }
512
+
513
+ static void
514
+ prof_stop_threads(prof_profile_t* profile)
515
+ {
516
+ rb_st_foreach(profile->threads_tbl, pop_frames, (st_data_t)profile);
517
+ }
518
+
519
/* call-seq:
     new()
     new(keyword_args)

   Returns a new profiler. Possible keyword arguments include:

     measure_mode: Measure mode. Specifies the profile measure mode.
                   If not specified, defaults to RubyProf::WALL_TIME.
     allow_exceptions: Whether to raise exceptions encountered during profiling,
                   or to suppress all exceptions during profiling
     track_allocations: Whether to track object allocations while profiling. True or false.
     exclude_common: Exclude common methods from the profile. True or false.
     exclude_threads: Threads to exclude from the profiling results.
     include_threads: Focus profiling on only the given threads. This will ignore
                   all other threads. */
static VALUE prof_initialize(int argc, VALUE* argv, VALUE self)
{
    VALUE keywords;
    rb_scan_args_kw(RB_SCAN_ARGS_KEYWORDS, argc, argv, ":", &keywords);

    ID table[] = {rb_intern("measure_mode"),
                  rb_intern("track_allocations"),
                  rb_intern("allow_exceptions"),
                  rb_intern("exclude_common"),
                  rb_intern("exclude_threads"),
                  rb_intern("include_threads") };
    VALUE values[6];
    // 0 required, up to 6 optional keywords; absent entries stay Qundef.
    rb_get_kwargs(keywords, table, 0, 6, values);

    VALUE mode = values[0] == Qundef ? INT2NUM(MEASURE_WALL_TIME) : values[0];
    // NOTE(review): the boolean kwargs are coerced with `== Qtrue`, so any
    // truthy value other than exactly `true` is treated as false - confirm
    // this strictness is intended.
    VALUE track_allocations = values[1] == Qtrue ? Qtrue : Qfalse;
    VALUE allow_exceptions = values[2] == Qtrue ? Qtrue : Qfalse;
    VALUE exclude_common = values[3] == Qtrue ? Qtrue : Qfalse;
    VALUE exclude_threads = values[4];
    VALUE include_threads = values[5];

    Check_Type(mode, T_FIXNUM);
    prof_profile_t* profile = prof_get_profile(self);
    profile->measurer = prof_measurer_create(NUM2INT(mode), RB_TEST(track_allocations));
    profile->allow_exceptions = RB_TEST(allow_exceptions);

    if (exclude_threads != Qundef)
    {
        Check_Type(exclude_threads, T_ARRAY);
        assert(profile->exclude_threads_tbl == NULL);
        profile->exclude_threads_tbl = threads_table_create();
        for (int i = 0; i < RARRAY_LEN(exclude_threads); i++)
        {
            VALUE thread = rb_ary_entry(exclude_threads, i);
            rb_st_insert(profile->exclude_threads_tbl, thread, Qtrue);
        }
    }

    if (include_threads != Qundef)
    {
        Check_Type(include_threads, T_ARRAY);
        assert(profile->include_threads_tbl == NULL);
        profile->include_threads_tbl = threads_table_create();
        for (int i = 0; i < RARRAY_LEN(include_threads); i++)
        {
            VALUE thread = rb_ary_entry(include_threads, i);
            rb_st_insert(profile->include_threads_tbl, thread, Qtrue);
        }
    }

    if (RB_TEST(exclude_common))
    {
        prof_exclude_common_methods(self);
    }

    return self;
}
591
+
592
+ /* call-seq:
593
+ paused? -> boolean
594
+
595
+ Returns whether a profile is currently paused.*/
596
+ static VALUE prof_paused(VALUE self)
597
+ {
598
+ prof_profile_t* profile = prof_get_profile(self);
599
+ return profile->paused;
600
+ }
601
+
602
+ /* call-seq:
603
+ running? -> boolean
604
+
605
+ Returns whether a profile is currently running.*/
606
+ static VALUE
607
+ prof_running(VALUE self)
608
+ {
609
+ prof_profile_t* profile = prof_get_profile(self);
610
+ return profile->running;
611
+ }
612
+
613
+ /* call-seq:
614
+ mode -> measure_mode
615
+
616
+ Returns the measure mode used in this profile.*/
617
+ static VALUE prof_profile_measure_mode(VALUE self)
618
+ {
619
+ prof_profile_t* profile = prof_get_profile(self);
620
+ return INT2NUM(profile->measurer->mode);
621
+ }
622
+
623
+ /* call-seq:
624
+ track_allocations -> boolean
625
+
626
+ Returns if object allocations were tracked in this profile.*/
627
+ static VALUE prof_profile_track_allocations(VALUE self)
628
+ {
629
+ prof_profile_t* profile = prof_get_profile(self);
630
+ return profile->measurer->track_allocations ? Qtrue : Qfalse;
631
+ }
632
+
633
+ /* call-seq:
634
+ start -> self
635
+
636
+ Starts recording profile data.*/
637
+ static VALUE prof_start(VALUE self)
638
+ {
639
+ char* trace_file_name;
640
+
641
+ prof_profile_t* profile = prof_get_profile(self);
642
+
643
+ if (profile->running == Qtrue)
644
+ {
645
+ rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
646
+ }
647
+
648
+ profile->running = Qtrue;
649
+ profile->paused = Qfalse;
650
+ profile->last_thread_data = threads_table_insert(profile, rb_fiber_current());
651
+
652
+ /* open trace file if environment wants it */
653
+ trace_file_name = getenv("RUBY_PROF_TRACE");
654
+
655
+ if (trace_file_name != NULL)
656
+ {
657
+ if (strcmp(trace_file_name, "stdout") == 0)
658
+ {
659
+ trace_file = stdout;
660
+ }
661
+ else if (strcmp(trace_file_name, "stderr") == 0)
662
+ {
663
+ trace_file = stderr;
664
+ }
665
+ else
666
+ {
667
+ trace_file = fopen(trace_file_name, "w");
668
+ }
669
+ }
670
+
671
+ prof_install_hook(self);
672
+ return self;
673
+ }
674
+
675
+ /* call-seq:
676
+ pause -> self
677
+
678
+ Pauses collecting profile data. */
679
+ static VALUE prof_pause(VALUE self)
680
+ {
681
+ prof_profile_t* profile = prof_get_profile(self);
682
+ if (profile->running == Qfalse)
683
+ {
684
+ rb_raise(rb_eRuntimeError, "RubyProf is not running.");
685
+ }
686
+
687
+ if (profile->paused == Qfalse)
688
+ {
689
+ profile->paused = Qtrue;
690
+ profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
691
+ rb_st_foreach(profile->threads_tbl, pause_thread, (st_data_t)profile);
692
+ }
693
+
694
+ return self;
695
+ }
696
+
697
+ /* call-seq:
698
+ resume -> self
699
+ resume(&block) -> self
700
+
701
+ Resumes recording profile data.*/
702
+ static VALUE prof_resume(VALUE self)
703
+ {
704
+ prof_profile_t* profile = prof_get_profile(self);
705
+ if (profile->running == Qfalse)
706
+ {
707
+ rb_raise(rb_eRuntimeError, "RubyProf is not running.");
708
+ }
709
+
710
+ if (profile->paused == Qtrue)
711
+ {
712
+ profile->paused = Qfalse;
713
+ profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
714
+ rb_st_foreach(profile->threads_tbl, unpause_thread, (st_data_t)profile);
715
+ }
716
+
717
+ return rb_block_given_p() ? rb_ensure(rb_yield, self, prof_pause, self) : self;
718
+ }
719
+
720
+ /* call-seq:
721
+ stop -> self
722
+
723
+ Stops collecting profile data.*/
724
+ static VALUE prof_stop(VALUE self)
725
+ {
726
+ prof_profile_t* profile = prof_get_profile(self);
727
+
728
+ if (profile->running == Qfalse)
729
+ {
730
+ rb_raise(rb_eRuntimeError, "RubyProf.start was not yet called");
731
+ }
732
+
733
+ prof_remove_hook(self);
734
+
735
+ /* close trace file if open */
736
+ if (trace_file != NULL)
737
+ {
738
+ if (trace_file != stderr && trace_file != stdout)
739
+ {
740
+ fclose(trace_file);
741
+ }
742
+ trace_file = NULL;
743
+ }
744
+
745
+ prof_stop_threads(profile);
746
+
747
+ /* Unset the last_thread_data (very important!)
748
+ and the threads table */
749
+ profile->running = profile->paused = Qfalse;
750
+ profile->last_thread_data = NULL;
751
+
752
+ return self;
753
+ }
754
+
755
+ /* call-seq:
756
+ threads -> Array of RubyProf::Thread
757
+
758
+ Returns an array of RubyProf::Thread instances that were profiled. */
759
+ static VALUE prof_threads(VALUE self)
760
+ {
761
+ VALUE result = rb_ary_new();
762
+ prof_profile_t* profile = prof_get_profile(self);
763
+ rb_st_foreach(profile->threads_tbl, collect_threads, result);
764
+ return result;
765
+ }
766
+
767
+ /* call-seq:
768
+ add_thread(thread) -> thread
769
+
770
+ Adds the specified RubyProf thread to the profile. */
771
+ static VALUE prof_add_thread(VALUE self, VALUE thread)
772
+ {
773
+ prof_profile_t* profile_ptr = prof_get_profile(self);
774
+
775
+ // This thread is now going to be owned by C
776
+ thread_data_t* thread_ptr = prof_get_thread(thread);
777
+ thread_ptr->owner = OWNER_C;
778
+
779
+ rb_st_insert(profile_ptr->threads_tbl, thread_ptr->fiber_id, (st_data_t)thread_ptr);
780
+ return thread;
781
+ }
782
+
783
+ /* call-seq:
784
+ remove_thread(thread) -> thread
785
+
786
+ Removes the specified thread from the profile. This is used to remove threads
787
+ after they have been merged togher. Retuns the removed thread. */
788
+ static VALUE prof_remove_thread(VALUE self, VALUE thread)
789
+ {
790
+ prof_profile_t* profile_ptr = prof_get_profile(self);
791
+ thread_data_t* thread_ptr = prof_get_thread(thread);
792
+ VALUE fiber_id = thread_ptr->fiber_id;
793
+ rb_st_delete(profile_ptr->threads_tbl, (st_data_t*)&fiber_id, NULL);
794
+ return thread;
795
+ }
796
+
797
+ /* Document-method: RubyProf::Profile#Profile
798
+ call-seq:
799
+ profile(&block) -> self
800
+
801
+ Profiles the specified block.
802
+
803
+ profile = RubyProf::Profile.new
804
+ profile.profile do
805
+ ..
806
+ end
807
+ */
808
+ static VALUE prof_profile_instance(VALUE self)
809
+ {
810
+ int result;
811
+ prof_profile_t* profile = prof_get_profile(self);
812
+
813
+ if (!rb_block_given_p())
814
+ {
815
+ rb_raise(rb_eArgError, "A block must be provided to the profile method.");
816
+ }
817
+
818
+ prof_start(self);
819
+ rb_protect(rb_yield, self, &result);
820
+ self = prof_stop(self);
821
+
822
+ if (profile->allow_exceptions && result != 0)
823
+ {
824
+ rb_jump_tag(result);
825
+ }
826
+
827
+ return self;
828
+ }
829
+
830
+ /* Document-method: RubyProf::Profile::Profile
831
+ call-seq:
832
+ profile(&block) -> RubyProf::Profile
833
+ profile(options, &block) -> RubyProf::Profile
834
+
835
+ Profiles the specified block and returns a RubyProf::Profile
836
+ object. Arguments are passed to Profile initialize method.
837
+
838
+ profile = RubyProf::Profile.profile do
839
+ ..
840
+ end
841
+ */
842
+ static VALUE prof_profile_class(int argc, VALUE* argv, VALUE klass)
843
+ {
844
+ return prof_profile_instance(rb_class_new_instance(argc, argv, cProfile));
845
+ }
846
+
847
+ /* call-seq:
848
+ exclude_method!(module, method_name) -> self
849
+
850
+ Excludes the method from profiling results.
851
+ */
852
+ static VALUE prof_exclude_method(VALUE self, VALUE klass, VALUE msym)
853
+ {
854
+ prof_profile_t* profile = prof_get_profile(self);
855
+
856
+ if (profile->running == Qtrue)
857
+ {
858
+ rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
859
+ }
860
+
861
+ st_data_t key = method_key(klass, msym);
862
+ prof_method_t* method = method_table_lookup(profile->exclude_methods_tbl, key);
863
+
864
+ if (!method)
865
+ {
866
+ method = prof_method_create(self, klass, msym, Qnil, 0);
867
+ method_table_insert(profile->exclude_methods_tbl, method->key, method);
868
+ }
869
+
870
+ return self;
871
+ }
872
+
873
+ /* :nodoc: */
874
+ VALUE prof_profile_dump(VALUE self)
875
+ {
876
+ prof_profile_t* profile = prof_get_profile(self);
877
+
878
+ VALUE result = rb_hash_new();
879
+ rb_hash_aset(result, ID2SYM(rb_intern("threads")), prof_threads(self));
880
+ rb_hash_aset(result, ID2SYM(rb_intern("measurer_mode")), INT2NUM(profile->measurer->mode));
881
+ rb_hash_aset(result, ID2SYM(rb_intern("measurer_track_allocations")),
882
+ profile->measurer->track_allocations ? Qtrue : Qfalse);
883
+
884
+ return result;
885
+ }
886
+
887
+ /* :nodoc: */
888
+ VALUE prof_profile_load(VALUE self, VALUE data)
889
+ {
890
+ prof_profile_t* profile = prof_get_profile(self);
891
+
892
+ VALUE measurer_mode = rb_hash_aref(data, ID2SYM(rb_intern("measurer_mode")));
893
+ VALUE measurer_track_allocations = rb_hash_aref(data, ID2SYM(rb_intern("measurer_track_allocations")));
894
+ profile->measurer = prof_measurer_create((prof_measure_mode_t)(NUM2INT(measurer_mode)),
895
+ measurer_track_allocations == Qtrue ? true : false);
896
+
897
+ VALUE threads = rb_hash_aref(data, ID2SYM(rb_intern("threads")));
898
+ for (int i = 0; i < rb_array_len(threads); i++)
899
+ {
900
+ VALUE thread = rb_ary_entry(threads, i);
901
+ thread_data_t* thread_data = prof_get_thread(thread);
902
+ rb_st_insert(profile->threads_tbl, (st_data_t)thread_data->fiber_id, (st_data_t)thread_data);
903
+ }
904
+
905
+ return data;
906
+ }
907
+
908
/* Registers the RubyProf::Profile class and all of its methods with the Ruby
   runtime.  Called once from the extension's Init function. */
void rp_init_profile(void)
{
    cProfile = rb_define_class_under(mProf, "Profile", rb_cObject);
    rb_define_alloc_func(cProfile, prof_allocate);

    // Lifecycle
    rb_define_singleton_method(cProfile, "profile", prof_profile_class, -1);
    rb_define_method(cProfile, "initialize", prof_initialize, -1);
    rb_define_method(cProfile, "profile", prof_profile_instance, 0);
    rb_define_method(cProfile, "start", prof_start, 0);
    rb_define_method(cProfile, "stop", prof_stop, 0);
    rb_define_method(cProfile, "resume", prof_resume, 0);
    rb_define_method(cProfile, "pause", prof_pause, 0);
    rb_define_method(cProfile, "running?", prof_running, 0);
    rb_define_method(cProfile, "paused?", prof_paused, 0);

    // Configuration
    rb_define_method(cProfile, "exclude_method!", prof_exclude_method, 2);
    rb_define_method(cProfile, "measure_mode", prof_profile_measure_mode, 0);
    rb_define_method(cProfile, "track_allocations?", prof_profile_track_allocations, 0);

    // Results
    rb_define_method(cProfile, "threads", prof_threads, 0);
    rb_define_method(cProfile, "add_thread", prof_add_thread, 1);
    rb_define_method(cProfile, "remove_thread", prof_remove_thread, 1);

    // Marshal support
    rb_define_method(cProfile, "_dump_data", prof_profile_dump, 0);
    rb_define_method(cProfile, "_load_data", prof_profile_load, 1);
}