ruby-prof 1.5.0 → 1.6.2

Sign up to get free protection for your applications and to get access to all the features.
Files changed (65) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGES +19 -0
  3. data/bin/ruby-prof +105 -87
  4. data/ext/ruby_prof/rp_allocation.c +136 -81
  5. data/ext/ruby_prof/rp_allocation.h +8 -6
  6. data/ext/ruby_prof/rp_call_tree.c +502 -457
  7. data/ext/ruby_prof/rp_call_tree.h +47 -44
  8. data/ext/ruby_prof/rp_call_trees.c +1 -1
  9. data/ext/ruby_prof/rp_measurement.c +10 -3
  10. data/ext/ruby_prof/rp_method.c +86 -79
  11. data/ext/ruby_prof/rp_method.h +63 -62
  12. data/ext/ruby_prof/rp_profile.c +933 -948
  13. data/ext/ruby_prof/rp_profile.h +1 -0
  14. data/ext/ruby_prof/rp_thread.c +433 -410
  15. data/ext/ruby_prof/rp_thread.h +39 -39
  16. data/ext/ruby_prof/vc/ruby_prof.vcxproj +6 -3
  17. data/lib/ruby-prof/compatibility.rb +14 -0
  18. data/lib/ruby-prof/printers/abstract_printer.rb +2 -1
  19. data/lib/ruby-prof/printers/call_tree_printer.rb +1 -1
  20. data/lib/ruby-prof/printers/multi_printer.rb +17 -17
  21. data/lib/ruby-prof/profile.rb +70 -70
  22. data/lib/ruby-prof/rack.rb +31 -21
  23. data/lib/ruby-prof/version.rb +1 -1
  24. data/test/abstract_printer_test.rb +1 -0
  25. data/test/alias_test.rb +6 -11
  26. data/test/call_tree_test.rb +94 -197
  27. data/test/call_tree_visitor_test.rb +1 -6
  28. data/test/call_trees_test.rb +2 -2
  29. data/test/{basic_test.rb → compatibility_test.rb} +8 -2
  30. data/test/duplicate_names_test.rb +1 -1
  31. data/test/dynamic_method_test.rb +1 -6
  32. data/test/enumerable_test.rb +1 -1
  33. data/test/exceptions_test.rb +2 -2
  34. data/test/exclude_methods_test.rb +3 -8
  35. data/test/exclude_threads_test.rb +4 -9
  36. data/test/fiber_test.rb +2 -58
  37. data/test/gc_test.rb +2 -2
  38. data/test/inverse_call_tree_test.rb +33 -34
  39. data/test/line_number_test.rb +1 -1
  40. data/test/marshal_test.rb +3 -3
  41. data/test/measure_allocations_test.rb +8 -17
  42. data/test/measure_memory_test.rb +3 -12
  43. data/test/measure_process_time_test.rb +32 -36
  44. data/test/measure_wall_time_test.rb +176 -181
  45. data/test/merge_test.rb +146 -0
  46. data/test/multi_printer_test.rb +0 -5
  47. data/test/no_method_class_test.rb +1 -1
  48. data/test/pause_resume_test.rb +12 -16
  49. data/test/printer_call_stack_test.rb +2 -2
  50. data/test/printer_call_tree_test.rb +2 -2
  51. data/test/printer_flat_test.rb +1 -1
  52. data/test/printer_graph_html_test.rb +2 -2
  53. data/test/printer_graph_test.rb +2 -2
  54. data/test/printers_test.rb +14 -20
  55. data/test/printing_recursive_graph_test.rb +2 -2
  56. data/test/recursive_test.rb +2 -7
  57. data/test/scheduler.rb +9 -0
  58. data/test/singleton_test.rb +1 -1
  59. data/test/stack_printer_test.rb +5 -8
  60. data/test/start_stop_test.rb +11 -14
  61. data/test/test_helper.rb +7 -0
  62. data/test/thread_test.rb +84 -19
  63. data/test/unique_call_path_test.rb +4 -4
  64. data/test/yarv_test.rb +3 -3
  65. metadata +6 -5
@@ -1,948 +1,933 @@
1
- /* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
2
- Please see the LICENSE file for copyright and distribution information */
3
-
4
- /* Document-class: RubyProf::Profile
5
-
6
- The Profile class represents a single profiling run and provides the main API for using ruby-prof.
7
- After creating a Profile instance, start profiling code by calling the Profile#start method. To finish profiling,
8
- call Profile#stop. Once profiling is completed, the Profile instance contains the results.
9
-
10
- profile = RubyProf::Profile.new
11
- profile.start
12
- ...
13
- result = profile.stop
14
-
15
- Alternatively, you can use the block syntax:
16
-
17
- profile = RubyProf::Profile.profile do
18
- ...
19
- end
20
- */
21
-
22
- #include <assert.h>
23
-
24
- #include "rp_allocation.h"
25
- #include "rp_call_trees.h"
26
- #include "rp_call_tree.h"
27
- #include "rp_profile.h"
28
- #include "rp_method.h"
29
-
30
- VALUE cProfile;
31
-
32
- /* support tracing ruby events from ruby-prof. useful for getting at
33
- what actually happens inside the ruby interpreter (and ruby-prof).
34
- set environment variable RUBY_PROF_TRACE to filename you want to
35
- find the trace in.
36
- */
37
- FILE* trace_file = NULL;
38
-
39
- static const char* get_event_name(rb_event_flag_t event)
40
- {
41
- switch (event) {
42
- case RUBY_EVENT_LINE:
43
- return "line";
44
- case RUBY_EVENT_CLASS:
45
- return "class";
46
- case RUBY_EVENT_END:
47
- return "end";
48
- case RUBY_EVENT_CALL:
49
- return "call";
50
- case RUBY_EVENT_RETURN:
51
- return "return";
52
- case RUBY_EVENT_B_CALL:
53
- return "b-call";
54
- case RUBY_EVENT_B_RETURN:
55
- return "b-return";
56
- case RUBY_EVENT_C_CALL:
57
- return "c-call";
58
- case RUBY_EVENT_C_RETURN:
59
- return "c-return";
60
- case RUBY_EVENT_THREAD_BEGIN:
61
- return "thread-begin";
62
- case RUBY_EVENT_THREAD_END:
63
- return "thread-end";
64
- case RUBY_EVENT_FIBER_SWITCH:
65
- return "fiber-switch";
66
- case RUBY_EVENT_RAISE:
67
- return "raise";
68
- case RUBY_INTERNAL_EVENT_NEWOBJ:
69
- return "newobj";
70
- default:
71
- return "unknown";
72
- }
73
- }
74
-
75
- thread_data_t* check_fiber(prof_profile_t* profile, double measurement)
76
- {
77
- thread_data_t* result = NULL;
78
-
79
- // Get the current fiber
80
- VALUE fiber = rb_fiber_current();
81
-
82
- /* We need to switch the profiling context if we either had none before,
83
- we don't merge fibers and the fiber ids differ, or the thread ids differ. */
84
- if (profile->last_thread_data->fiber != fiber)
85
- {
86
- result = threads_table_lookup(profile, fiber);
87
- if (!result)
88
- {
89
- result = threads_table_insert(profile, fiber);
90
- }
91
- switch_thread(profile, result, measurement);
92
- }
93
- else
94
- {
95
- result = profile->last_thread_data;
96
- }
97
- return result;
98
- }
99
-
100
- static int excludes_method(st_data_t key, prof_profile_t* profile)
101
- {
102
- return (profile->exclude_methods_tbl &&
103
- method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
104
- }
105
-
106
- static prof_method_t* create_method(VALUE profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
107
- {
108
- prof_method_t* result = prof_method_create(profile, klass, msym, source_file, source_line);
109
-
110
- prof_profile_t* profile_t = prof_get_profile(profile);
111
- method_table_insert(profile_t->last_thread_data->method_table, result->key, result);
112
-
113
- return result;
114
- }
115
-
116
- static prof_method_t* check_parent_method(VALUE profile, thread_data_t* thread_data)
117
- {
118
- VALUE msym = ID2SYM(rb_intern("_inserted_parent_"));
119
- st_data_t key = method_key(cProfile, msym);
120
-
121
- prof_method_t* result = method_table_lookup(thread_data->method_table, key);
122
-
123
- if (!result)
124
- {
125
- result = create_method(profile, key, cProfile, msym, Qnil, 0);
126
- }
127
-
128
- return result;
129
- }
130
-
131
- prof_method_t* check_method(VALUE profile, rb_trace_arg_t* trace_arg, rb_event_flag_t event, thread_data_t* thread_data)
132
- {
133
- VALUE klass = rb_tracearg_defined_class(trace_arg);
134
-
135
- /* Special case - skip any methods from the mProf
136
- module or cProfile class since they clutter
137
- the results but aren't important to the results. */
138
- if (klass == cProfile)
139
- return NULL;
140
-
141
- VALUE msym = rb_tracearg_callee_id(trace_arg);
142
-
143
- st_data_t key = method_key(klass, msym);
144
-
145
- prof_profile_t* profile_t = prof_get_profile(profile);
146
- if (excludes_method(key, profile_t))
147
- return NULL;
148
-
149
- prof_method_t* result = method_table_lookup(thread_data->method_table, key);
150
-
151
- if (!result)
152
- {
153
- VALUE source_file = (event != RUBY_EVENT_C_CALL ? rb_tracearg_path(trace_arg) : Qnil);
154
- int source_line = (event != RUBY_EVENT_C_CALL ? FIX2INT(rb_tracearg_lineno(trace_arg)) : 0);
155
- result = create_method(profile, key, klass, msym, source_file, source_line);
156
- }
157
-
158
- return result;
159
- }
160
-
161
- /* =========== Profiling ================= */
162
- static void prof_trace(prof_profile_t* profile, rb_trace_arg_t* trace_arg, double measurement)
163
- {
164
- static VALUE last_fiber = Qnil;
165
- VALUE fiber = rb_fiber_current();
166
-
167
- rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
168
- const char* event_name = get_event_name(event);
169
-
170
- VALUE source_file = rb_tracearg_path(trace_arg);
171
- int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
172
-
173
- VALUE msym = rb_tracearg_callee_id(trace_arg);
174
-
175
- unsigned int klass_flags;
176
- VALUE klass = rb_tracearg_defined_class(trace_arg);
177
- VALUE resolved_klass = resolve_klass(klass, &klass_flags);
178
- const char* class_name = "";
179
-
180
- if (resolved_klass != Qnil)
181
- class_name = rb_class2name(resolved_klass);
182
-
183
- if (last_fiber != fiber)
184
- {
185
- fprintf(trace_file, "\n");
186
- }
187
-
188
- const char* method_name_char = (msym != Qnil ? rb_id2name(SYM2ID(msym)) : "");
189
- const char* source_file_char = (source_file != Qnil ? StringValuePtr(source_file) : "");
190
-
191
- fprintf(trace_file, "%2lu:%2f %-8s %s#%s %s:%2d\n",
192
- FIX2ULONG(fiber), (double)measurement,
193
- event_name, class_name, method_name_char, source_file_char, source_line);
194
- fflush(trace_file);
195
- last_fiber = fiber;
196
- }
197
-
198
- static void prof_event_hook(VALUE trace_point, void* data)
199
- {
200
- VALUE profile = (VALUE)data;
201
- prof_profile_t* profile_t = prof_get_profile(profile);
202
-
203
- rb_trace_arg_t* trace_arg = rb_tracearg_from_tracepoint(trace_point);
204
- double measurement = prof_measure(profile_t->measurer, trace_arg);
205
- rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
206
- VALUE self = rb_tracearg_self(trace_arg);
207
-
208
- if (trace_file != NULL)
209
- {
210
- prof_trace(profile_t, trace_arg, measurement);
211
- }
212
-
213
- /* Special case - skip any methods from the mProf
214
- module since they clutter the results but aren't important. */
215
- if (self == mProf)
216
- return;
217
-
218
- thread_data_t* thread_data = check_fiber(profile_t, measurement);
219
-
220
- if (!thread_data->trace)
221
- return;
222
-
223
- switch (event)
224
- {
225
- case RUBY_EVENT_LINE:
226
- {
227
- prof_frame_t* frame = prof_frame_current(thread_data->stack);
228
-
229
- /* If there is no frame then this is either the first method being profiled or we have climbed the
230
- call stack higher than where we started. */
231
- if (!frame)
232
- {
233
- prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
234
-
235
- if (!method)
236
- break;
237
-
238
- prof_call_tree_t* call_tree = prof_call_tree_create(method, NULL, method->source_file, method->source_line);
239
- prof_add_call_tree(method->call_trees, call_tree);
240
-
241
- // We have climbed higher in the stack than where we started
242
- if (thread_data->call_tree)
243
- {
244
- prof_call_tree_add_parent(thread_data->call_tree, call_tree);
245
- frame = prof_frame_unshift(thread_data->stack, call_tree, thread_data->call_tree, measurement);
246
- }
247
- // This is the first method to be profiled
248
- else
249
- {
250
- frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile_t->paused));
251
- }
252
-
253
- thread_data->call_tree = call_tree;
254
- }
255
-
256
- frame->source_file = rb_tracearg_path(trace_arg);
257
- frame->source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
258
-
259
- break;
260
- }
261
- case RUBY_EVENT_CALL:
262
- case RUBY_EVENT_C_CALL:
263
- {
264
- prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
265
-
266
- if (!method)
267
- break;
268
-
269
- prof_frame_t* frame = prof_frame_current(thread_data->stack);
270
- prof_call_tree_t* parent_call_tree = NULL;
271
- prof_call_tree_t* call_tree = NULL;
272
-
273
- // Frame can be NULL if we are switching from one fiber to another (see FiberTest#fiber_test)
274
- if (frame)
275
- {
276
- parent_call_tree = frame->call_tree;
277
- call_tree = call_tree_table_lookup(parent_call_tree->children, method->key);
278
- }
279
- else if (!frame && thread_data->call_tree)
280
- {
281
- // There is no current parent - likely we have returned out of the highest level method we have profiled so far.
282
- // This can happen with enumerators (see fiber_test.rb). So create a new dummy parent.
283
- prof_method_t* parent_method = check_parent_method(profile, thread_data);
284
- parent_call_tree = prof_call_tree_create(parent_method, NULL, Qnil, 0);
285
- prof_add_call_tree(parent_method->call_trees, parent_call_tree);
286
- prof_call_tree_add_parent(thread_data->call_tree, parent_call_tree);
287
- frame = prof_frame_unshift(thread_data->stack, parent_call_tree, thread_data->call_tree, measurement);
288
- thread_data->call_tree = parent_call_tree;
289
- }
290
-
291
- if (!call_tree)
292
- {
293
- // This call info does not yet exist. So create it and add it to previous CallTree's children and the current method.
294
- call_tree = prof_call_tree_create(method, parent_call_tree, frame ? frame->source_file : Qnil, frame? frame->source_line : 0);
295
- prof_add_call_tree(method->call_trees, call_tree);
296
- if (parent_call_tree)
297
- prof_call_tree_add_child(parent_call_tree, call_tree);
298
- }
299
-
300
- if (!thread_data->call_tree)
301
- thread_data->call_tree = call_tree;
302
-
303
- // Push a new frame onto the stack for a new c-call or ruby call (into a method)
304
- prof_frame_t* next_frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile_t->paused));
305
- next_frame->source_file = method->source_file;
306
- next_frame->source_line = method->source_line;
307
- break;
308
- }
309
- case RUBY_EVENT_RETURN:
310
- case RUBY_EVENT_C_RETURN:
311
- {
312
- // We need to check for excluded methods so that we don't pop them off the stack
313
- prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
314
-
315
- if (!method)
316
- break;
317
-
318
- prof_frame_pop(thread_data->stack, measurement);
319
- break;
320
- }
321
- case RUBY_INTERNAL_EVENT_NEWOBJ:
322
- {
323
- /* We want to assign the allocations lexically, not the execution context (otherwise all allocations will
324
- show up under Class#new */
325
- int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
326
- VALUE source_file = rb_tracearg_path(trace_arg);
327
-
328
- prof_method_t* method = prof_find_method(thread_data->stack, source_file, source_line);
329
- if (method)
330
- prof_allocate_increment(method, trace_arg);
331
-
332
- break;
333
- }
334
- }
335
- }
336
-
337
- void prof_install_hook(VALUE self)
338
- {
339
- prof_profile_t* profile = prof_get_profile(self);
340
-
341
- VALUE event_tracepoint = rb_tracepoint_new(Qnil,
342
- RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
343
- RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN |
344
- RUBY_EVENT_LINE,
345
- prof_event_hook, (void*)self);
346
- rb_ary_push(profile->tracepoints, event_tracepoint);
347
-
348
- if (profile->measurer->track_allocations)
349
- {
350
- VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, (void*)self);
351
- rb_ary_push(profile->tracepoints, allocation_tracepoint);
352
- }
353
-
354
- for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
355
- {
356
- rb_tracepoint_enable(rb_ary_entry(profile->tracepoints, i));
357
- }
358
- }
359
-
360
- void prof_remove_hook(VALUE self)
361
- {
362
- prof_profile_t* profile = prof_get_profile(self);
363
-
364
- for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
365
- {
366
- rb_tracepoint_disable(rb_ary_entry(profile->tracepoints, i));
367
- }
368
- rb_ary_clear(profile->tracepoints);
369
- }
370
-
371
- prof_profile_t* prof_get_profile(VALUE self)
372
- {
373
- /* Can't use Data_Get_Struct because that triggers the event hook
374
- ending up in endless recursion. */
375
- return RTYPEDDATA_DATA(self);
376
- }
377
-
378
- static int collect_threads(st_data_t key, st_data_t value, st_data_t result)
379
- {
380
- thread_data_t* thread_data = (thread_data_t*)value;
381
- if (thread_data->trace && thread_data->call_tree)
382
- {
383
- VALUE threads_array = (VALUE)result;
384
- rb_ary_push(threads_array, prof_thread_wrap(thread_data));
385
- }
386
- return ST_CONTINUE;
387
- }
388
-
389
- /* ======== Profile Class ====== */
390
- static int mark_threads(st_data_t key, st_data_t value, st_data_t result)
391
- {
392
- thread_data_t* thread = (thread_data_t*)value;
393
- prof_thread_mark(thread);
394
- return ST_CONTINUE;
395
- }
396
-
397
- static int prof_profile_mark_methods(st_data_t key, st_data_t value, st_data_t result)
398
- {
399
- prof_method_t* method = (prof_method_t*)value;
400
- prof_method_mark(method);
401
- return ST_CONTINUE;
402
- }
403
-
404
- static void prof_profile_mark(void* data)
405
- {
406
- prof_profile_t* profile = (prof_profile_t*)data;
407
- rb_gc_mark(profile->tracepoints);
408
- rb_gc_mark(profile->running);
409
- rb_gc_mark(profile->paused);
410
-
411
- // If GC stress is true (useful for debugging), when threads_table_create is called in the
412
- // allocate method Ruby will immediately call this mark method. Thus the threads_tbl will be NULL.
413
- if (profile->threads_tbl)
414
- rb_st_foreach(profile->threads_tbl, mark_threads, 0);
415
-
416
- if (profile->exclude_methods_tbl)
417
- rb_st_foreach(profile->exclude_methods_tbl, prof_profile_mark_methods, 0);
418
- }
419
-
420
- /* Freeing the profile creates a cascade of freeing. It frees its threads table, which frees
421
- each thread and its associated call tree and methods. */
422
- static void prof_profile_ruby_gc_free(void* data)
423
- {
424
- prof_profile_t* profile = (prof_profile_t*)data;
425
- profile->last_thread_data = NULL;
426
-
427
- threads_table_free(profile->threads_tbl);
428
- profile->threads_tbl = NULL;
429
-
430
- if (profile->exclude_threads_tbl)
431
- {
432
- rb_st_free_table(profile->exclude_threads_tbl);
433
- profile->exclude_threads_tbl = NULL;
434
- }
435
-
436
- if (profile->include_threads_tbl)
437
- {
438
- rb_st_free_table(profile->include_threads_tbl);
439
- profile->include_threads_tbl = NULL;
440
- }
441
-
442
- /* This table owns the excluded sentinels for now. */
443
- method_table_free(profile->exclude_methods_tbl);
444
- profile->exclude_methods_tbl = NULL;
445
-
446
- xfree(profile->measurer);
447
- profile->measurer = NULL;
448
-
449
- xfree(profile);
450
- }
451
-
452
- size_t prof_profile_size(const void* data)
453
- {
454
- return sizeof(prof_profile_t);
455
- }
456
-
457
- static const rb_data_type_t profile_type =
458
- {
459
- .wrap_struct_name = "Profile",
460
- .function =
461
- {
462
- .dmark = prof_profile_mark,
463
- .dfree = prof_profile_ruby_gc_free,
464
- .dsize = prof_profile_size,
465
- },
466
- .data = NULL,
467
- .flags = RUBY_TYPED_FREE_IMMEDIATELY
468
- };
469
-
470
- static VALUE prof_allocate(VALUE klass)
471
- {
472
- VALUE result;
473
- prof_profile_t* profile;
474
- result = TypedData_Make_Struct(klass, prof_profile_t, &profile_type, profile);
475
- profile->threads_tbl = threads_table_create();
476
- profile->exclude_threads_tbl = NULL;
477
- profile->include_threads_tbl = NULL;
478
- profile->running = Qfalse;
479
- profile->allow_exceptions = false;
480
- profile->exclude_methods_tbl = method_table_create();
481
- profile->running = Qfalse;
482
- profile->tracepoints = rb_ary_new();
483
- return result;
484
- }
485
-
486
- static void prof_exclude_common_methods(VALUE profile)
487
- {
488
- rb_funcall(profile, rb_intern("exclude_common_methods!"), 0);
489
- }
490
-
491
- static int pop_frames(VALUE key, st_data_t value, st_data_t data)
492
- {
493
- thread_data_t* thread_data = (thread_data_t*)value;
494
- prof_profile_t* profile = (prof_profile_t*)data;
495
- double measurement = prof_measure(profile->measurer, NULL);
496
-
497
- if (profile->last_thread_data->fiber != thread_data->fiber)
498
- switch_thread(profile, thread_data, measurement);
499
-
500
- while (prof_frame_pop(thread_data->stack, measurement));
501
-
502
- return ST_CONTINUE;
503
- }
504
-
505
- static void
506
- prof_stop_threads(prof_profile_t* profile)
507
- {
508
- rb_st_foreach(profile->threads_tbl, pop_frames, (st_data_t)profile);
509
- }
510
-
511
- /* call-seq:
512
- new()
513
- new(options)
514
-
515
- Returns a new profiler. Possible options for the options hash are:
516
-
517
- measure_mode: Measure mode. Specifies the profile measure mode.
518
- If not specified, defaults to RubyProf::WALL_TIME.
519
- allow_exceptions: Whether to raise exceptions encountered during profiling,
520
- or to suppress all exceptions during profiling
521
- track_allocations: Whether to track object allocations while profiling. True or false.
522
- exclude_common: Exclude common methods from the profile. True or false.
523
- exclude_threads: Threads to exclude from the profiling results.
524
- include_threads: Focus profiling on only the given threads. This will ignore
525
- all other threads. */
526
- static VALUE prof_initialize(int argc, VALUE* argv, VALUE self)
527
- {
528
- prof_profile_t* profile = prof_get_profile(self);
529
- VALUE mode_or_options;
530
- VALUE mode = Qnil;
531
- VALUE exclude_threads = Qnil;
532
- VALUE include_threads = Qnil;
533
- VALUE exclude_common = Qnil;
534
- VALUE allow_exceptions = Qfalse;
535
- VALUE track_allocations = Qfalse;
536
-
537
- int i;
538
-
539
- switch (rb_scan_args(argc, argv, "02", &mode_or_options, &exclude_threads))
540
- {
541
- case 0:
542
- break;
543
- case 1:
544
- if (FIXNUM_P(mode_or_options))
545
- {
546
- mode = mode_or_options;
547
- }
548
- else
549
- {
550
- Check_Type(mode_or_options, T_HASH);
551
- mode = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("measure_mode")));
552
- track_allocations = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("track_allocations")));
553
- allow_exceptions = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("allow_exceptions")));
554
- exclude_common = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_common")));
555
- exclude_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_threads")));
556
- include_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("include_threads")));
557
- }
558
- break;
559
- case 2:
560
- Check_Type(exclude_threads, T_ARRAY);
561
- break;
562
- }
563
-
564
- if (mode == Qnil)
565
- {
566
- mode = INT2NUM(MEASURE_WALL_TIME);
567
- }
568
- else
569
- {
570
- Check_Type(mode, T_FIXNUM);
571
- }
572
- profile->measurer = prof_measurer_create(NUM2INT(mode), track_allocations == Qtrue);
573
- profile->allow_exceptions = (allow_exceptions == Qtrue);
574
-
575
- if (exclude_threads != Qnil)
576
- {
577
- Check_Type(exclude_threads, T_ARRAY);
578
- assert(profile->exclude_threads_tbl == NULL);
579
- profile->exclude_threads_tbl = threads_table_create();
580
- for (i = 0; i < RARRAY_LEN(exclude_threads); i++)
581
- {
582
- VALUE thread = rb_ary_entry(exclude_threads, i);
583
- rb_st_insert(profile->exclude_threads_tbl, thread, Qtrue);
584
- }
585
- }
586
-
587
- if (include_threads != Qnil)
588
- {
589
- Check_Type(include_threads, T_ARRAY);
590
- assert(profile->include_threads_tbl == NULL);
591
- profile->include_threads_tbl = threads_table_create();
592
- for (i = 0; i < RARRAY_LEN(include_threads); i++)
593
- {
594
- VALUE thread = rb_ary_entry(include_threads, i);
595
- rb_st_insert(profile->include_threads_tbl, thread, Qtrue);
596
- }
597
- }
598
-
599
- if (RTEST(exclude_common))
600
- {
601
- prof_exclude_common_methods(self);
602
- }
603
-
604
- return self;
605
- }
606
-
607
- /* call-seq:
608
- paused? -> boolean
609
-
610
- Returns whether a profile is currently paused.*/
611
- static VALUE prof_paused(VALUE self)
612
- {
613
- prof_profile_t* profile = prof_get_profile(self);
614
- return profile->paused;
615
- }
616
-
617
- /* call-seq:
618
- running? -> boolean
619
-
620
- Returns whether a profile is currently running.*/
621
- static VALUE
622
- prof_running(VALUE self)
623
- {
624
- prof_profile_t* profile = prof_get_profile(self);
625
- return profile->running;
626
- }
627
-
628
- /* call-seq:
629
- mode -> measure_mode
630
-
631
- Returns the measure mode used in this profile.*/
632
- static VALUE prof_profile_measure_mode(VALUE self)
633
- {
634
- prof_profile_t* profile = prof_get_profile(self);
635
- return INT2NUM(profile->measurer->mode);
636
- }
637
-
638
- /* call-seq:
639
- track_allocations -> boolean
640
-
641
- Returns if object allocations were tracked in this profile.*/
642
- static VALUE prof_profile_track_allocations(VALUE self)
643
- {
644
- prof_profile_t* profile = prof_get_profile(self);
645
- return profile->measurer->track_allocations ? Qtrue : Qfalse;
646
- }
647
-
648
- /* call-seq:
649
- start -> self
650
-
651
- Starts recording profile data.*/
652
- static VALUE prof_start(VALUE self)
653
- {
654
- char* trace_file_name;
655
-
656
- prof_profile_t* profile = prof_get_profile(self);
657
-
658
- if (profile->running == Qtrue)
659
- {
660
- rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
661
- }
662
-
663
- profile->running = Qtrue;
664
- profile->paused = Qfalse;
665
- profile->last_thread_data = threads_table_insert(profile, rb_fiber_current());
666
-
667
- /* open trace file if environment wants it */
668
- trace_file_name = getenv("RUBY_PROF_TRACE");
669
-
670
- if (trace_file_name != NULL)
671
- {
672
- if (strcmp(trace_file_name, "stdout") == 0)
673
- {
674
- trace_file = stdout;
675
- }
676
- else if (strcmp(trace_file_name, "stderr") == 0)
677
- {
678
- trace_file = stderr;
679
- }
680
- else
681
- {
682
- trace_file = fopen(trace_file_name, "w");
683
- }
684
- }
685
-
686
- prof_install_hook(self);
687
- return self;
688
- }
689
-
690
- /* call-seq:
691
- pause -> self
692
-
693
- Pauses collecting profile data. */
694
- static VALUE prof_pause(VALUE self)
695
- {
696
- prof_profile_t* profile = prof_get_profile(self);
697
- if (profile->running == Qfalse)
698
- {
699
- rb_raise(rb_eRuntimeError, "RubyProf is not running.");
700
- }
701
-
702
- if (profile->paused == Qfalse)
703
- {
704
- profile->paused = Qtrue;
705
- profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
706
- rb_st_foreach(profile->threads_tbl, pause_thread, (st_data_t)profile);
707
- }
708
-
709
- return self;
710
- }
711
-
712
- /* call-seq:
713
- resume -> self
714
- resume(&block) -> self
715
-
716
- Resumes recording profile data.*/
717
- static VALUE prof_resume(VALUE self)
718
- {
719
- prof_profile_t* profile = prof_get_profile(self);
720
- if (profile->running == Qfalse)
721
- {
722
- rb_raise(rb_eRuntimeError, "RubyProf is not running.");
723
- }
724
-
725
- if (profile->paused == Qtrue)
726
- {
727
- profile->paused = Qfalse;
728
- profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
729
- rb_st_foreach(profile->threads_tbl, unpause_thread, (st_data_t)profile);
730
- }
731
-
732
- return rb_block_given_p() ? rb_ensure(rb_yield, self, prof_pause, self) : self;
733
- }
734
-
735
- /* call-seq:
736
- stop -> self
737
-
738
- Stops collecting profile data.*/
739
- static VALUE prof_stop(VALUE self)
740
- {
741
- prof_profile_t* profile = prof_get_profile(self);
742
-
743
- if (profile->running == Qfalse)
744
- {
745
- rb_raise(rb_eRuntimeError, "RubyProf.start was not yet called");
746
- }
747
-
748
- prof_remove_hook(self);
749
-
750
- /* close trace file if open */
751
- if (trace_file != NULL)
752
- {
753
- if (trace_file != stderr && trace_file != stdout)
754
- {
755
- fclose(trace_file);
756
- }
757
- trace_file = NULL;
758
- }
759
-
760
- prof_stop_threads(profile);
761
-
762
- /* Unset the last_thread_data (very important!)
763
- and the threads table */
764
- profile->running = profile->paused = Qfalse;
765
- profile->last_thread_data = NULL;
766
-
767
- return self;
768
- }
769
-
770
- /* call-seq:
771
- threads -> Array of RubyProf::Thread
772
-
773
- Returns an array of RubyProf::Thread instances that were profiled. */
774
- static VALUE prof_threads(VALUE self)
775
- {
776
- VALUE result = rb_ary_new();
777
- prof_profile_t* profile = prof_get_profile(self);
778
- rb_st_foreach(profile->threads_tbl, collect_threads, result);
779
- return result;
780
- }
781
-
782
- /* call-seq:
783
- add_thread(thread) -> thread
784
-
785
- Adds the specified RubyProf thread to the profile. */
786
- static VALUE prof_add_thread(VALUE self, VALUE thread)
787
- {
788
- prof_profile_t* profile_ptr = prof_get_profile(self);
789
-
790
- // This thread is now going to be owned by C
791
- thread_data_t* thread_ptr = prof_get_thread(thread);
792
- thread_ptr->owner = OWNER_C;
793
-
794
- rb_st_insert(profile_ptr->threads_tbl, thread_ptr->fiber_id, (st_data_t)thread_ptr);
795
- return thread;
796
- }
797
-
798
- /* call-seq:
799
- remove_thread(thread) -> thread
800
-
801
- Removes the specified thread from the profile. This is used to remove threads
802
- after they have been merged together. Returns the removed thread. */
803
- static VALUE prof_remove_thread(VALUE self, VALUE thread)
804
- {
805
- prof_profile_t* profile_ptr = prof_get_profile(self);
806
- thread_data_t* thread_ptr = prof_get_thread(thread);
807
- VALUE fiber_id = thread_ptr->fiber_id;
808
- rb_st_delete(profile_ptr->threads_tbl, (st_data_t*)&fiber_id, NULL);
809
- return thread;
810
- }
811
-
812
- /* Document-method: RubyProf::Profile#Profile
813
- call-seq:
814
- profile(&block) -> self
815
-
816
- Profiles the specified block.
817
-
818
- profile = RubyProf::Profile.new
819
- profile.profile do
820
- ..
821
- end
822
- */
823
- static VALUE prof_profile_object(VALUE self)
824
- {
825
- int result;
826
- prof_profile_t* profile = prof_get_profile(self);
827
-
828
- if (!rb_block_given_p())
829
- {
830
- rb_raise(rb_eArgError, "A block must be provided to the profile method.");
831
- }
832
-
833
- prof_start(self);
834
- rb_protect(rb_yield, self, &result);
835
- self = prof_stop(self);
836
-
837
- if (profile->allow_exceptions && result != 0)
838
- {
839
- rb_jump_tag(result);
840
- }
841
-
842
- return self;
843
- }
844
-
845
- /* Document-method: RubyProf::Profile::Profile
846
- call-seq:
847
- profile(&block) -> RubyProf::Profile
848
- profile(options, &block) -> RubyProf::Profile
849
-
850
- Profiles the specified block and returns a RubyProf::Profile
851
- object. Arguments are passed to Profile initialize method.
852
-
853
- profile = RubyProf::Profile.profile do
854
- ..
855
- end
856
- */
857
- static VALUE prof_profile_class(int argc, VALUE* argv, VALUE klass)
858
- {
859
- return prof_profile_object(rb_class_new_instance(argc, argv, cProfile));
860
- }
861
-
862
- /* call-seq:
863
- exclude_method!(module, method_name) -> self
864
-
865
- Excludes the method from profiling results.
866
- */
867
- static VALUE prof_exclude_method(VALUE self, VALUE klass, VALUE msym)
868
- {
869
- prof_profile_t* profile = prof_get_profile(self);
870
-
871
- if (profile->running == Qtrue)
872
- {
873
- rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
874
- }
875
-
876
- st_data_t key = method_key(klass, msym);
877
- prof_method_t* method = method_table_lookup(profile->exclude_methods_tbl, key);
878
-
879
- if (!method)
880
- {
881
- method = prof_method_create(self, klass, msym, Qnil, 0);
882
- method_table_insert(profile->exclude_methods_tbl, method->key, method);
883
- }
884
-
885
- return self;
886
- }
887
-
888
- /* :nodoc: */
889
- VALUE prof_profile_dump(VALUE self)
890
- {
891
- prof_profile_t* profile = prof_get_profile(self);
892
-
893
- VALUE result = rb_hash_new();
894
- rb_hash_aset(result, ID2SYM(rb_intern("threads")), prof_threads(self));
895
- rb_hash_aset(result, ID2SYM(rb_intern("measurer_mode")), INT2NUM(profile->measurer->mode));
896
- rb_hash_aset(result, ID2SYM(rb_intern("measurer_track_allocations")),
897
- profile->measurer->track_allocations ? Qtrue : Qfalse);
898
-
899
- return result;
900
- }
901
-
902
- /* :nodoc: */
903
- VALUE prof_profile_load(VALUE self, VALUE data)
904
- {
905
- prof_profile_t* profile = prof_get_profile(self);
906
-
907
- VALUE measurer_mode = rb_hash_aref(data, ID2SYM(rb_intern("measurer_mode")));
908
- VALUE measurer_track_allocations = rb_hash_aref(data, ID2SYM(rb_intern("measurer_track_allocations")));
909
- profile->measurer = prof_measurer_create((prof_measure_mode_t)(NUM2INT(measurer_mode)),
910
- measurer_track_allocations == Qtrue ? true : false);
911
-
912
- VALUE threads = rb_hash_aref(data, ID2SYM(rb_intern("threads")));
913
- for (int i = 0; i < rb_array_len(threads); i++)
914
- {
915
- VALUE thread = rb_ary_entry(threads, i);
916
- thread_data_t* thread_data = prof_get_thread(thread);
917
- rb_st_insert(profile->threads_tbl, (st_data_t)thread_data->fiber_id, (st_data_t)thread_data);
918
- }
919
-
920
- return data;
921
- }
922
-
923
- void rp_init_profile(void)
924
- {
925
- cProfile = rb_define_class_under(mProf, "Profile", rb_cObject);
926
- rb_define_alloc_func(cProfile, prof_allocate);
927
-
928
- rb_define_singleton_method(cProfile, "profile", prof_profile_class, -1);
929
- rb_define_method(cProfile, "initialize", prof_initialize, -1);
930
- rb_define_method(cProfile, "profile", prof_profile_object, 0);
931
- rb_define_method(cProfile, "start", prof_start, 0);
932
- rb_define_method(cProfile, "stop", prof_stop, 0);
933
- rb_define_method(cProfile, "resume", prof_resume, 0);
934
- rb_define_method(cProfile, "pause", prof_pause, 0);
935
- rb_define_method(cProfile, "running?", prof_running, 0);
936
- rb_define_method(cProfile, "paused?", prof_paused, 0);
937
-
938
- rb_define_method(cProfile, "exclude_method!", prof_exclude_method, 2);
939
- rb_define_method(cProfile, "measure_mode", prof_profile_measure_mode, 0);
940
- rb_define_method(cProfile, "track_allocations?", prof_profile_track_allocations, 0);
941
-
942
- rb_define_method(cProfile, "threads", prof_threads, 0);
943
- rb_define_method(cProfile, "add_thread", prof_add_thread, 1);
944
- rb_define_method(cProfile, "remove_thread", prof_remove_thread, 1);
945
-
946
- rb_define_method(cProfile, "_dump_data", prof_profile_dump, 0);
947
- rb_define_method(cProfile, "_load_data", prof_profile_load, 1);
948
- }
1
+ /* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
2
+ Please see the LICENSE file for copyright and distribution information */
3
+
4
+ /* Document-class: RubyProf::Profile
5
+
6
+ The Profile class represents a single profiling run and provides the main API for using ruby-prof.
7
+ After creating a Profile instance, start profiling code by calling the Profile#start method. To finish profiling,
8
+ call Profile#stop. Once profiling is completed, the Profile instance contains the results.
9
+
10
+ profile = RubyProf::Profile.new
11
+ profile.start
12
+ ...
13
+ result = profile.stop
14
+
15
+ Alternatively, you can use the block syntax:
16
+
17
+ profile = RubyProf::Profile.profile do
18
+ ...
19
+ end
20
+ */
21
+
22
+ #include <assert.h>
23
+
24
+ #include "rp_allocation.h"
25
+ #include "rp_call_trees.h"
26
+ #include "rp_call_tree.h"
27
+ #include "rp_profile.h"
28
+ #include "rp_method.h"
29
+
30
+ VALUE cProfile;
31
+
32
+ /* support tracing ruby events from ruby-prof. useful for getting at
33
+ what actually happens inside the ruby interpreter (and ruby-prof).
34
+ set environment variable RUBY_PROF_TRACE to filename you want to
35
+ find the trace in.
36
+ */
37
+ FILE* trace_file = NULL;
38
+
39
+ static const char* get_event_name(rb_event_flag_t event)
40
+ {
41
+ switch (event) {
42
+ case RUBY_EVENT_LINE:
43
+ return "line";
44
+ case RUBY_EVENT_CLASS:
45
+ return "class";
46
+ case RUBY_EVENT_END:
47
+ return "end";
48
+ case RUBY_EVENT_CALL:
49
+ return "call";
50
+ case RUBY_EVENT_RETURN:
51
+ return "return";
52
+ case RUBY_EVENT_B_CALL:
53
+ return "b-call";
54
+ case RUBY_EVENT_B_RETURN:
55
+ return "b-return";
56
+ case RUBY_EVENT_C_CALL:
57
+ return "c-call";
58
+ case RUBY_EVENT_C_RETURN:
59
+ return "c-return";
60
+ case RUBY_EVENT_THREAD_BEGIN:
61
+ return "thread-begin";
62
+ case RUBY_EVENT_THREAD_END:
63
+ return "thread-end";
64
+ case RUBY_EVENT_FIBER_SWITCH:
65
+ return "fiber-switch";
66
+ case RUBY_EVENT_RAISE:
67
+ return "raise";
68
+ case RUBY_INTERNAL_EVENT_NEWOBJ:
69
+ return "newobj";
70
+ default:
71
+ return "unknown";
72
+ }
73
+ }
74
+
75
/* Returns the thread_data for the currently executing fiber, switching the
   profiling context when the fiber has changed since the last event.
   `measurement` is the clock value charged to the switch. Creates and
   registers a new thread_data the first time a fiber is seen. */
thread_data_t* check_fiber(prof_profile_t* profile, double measurement)
{
    thread_data_t* result = NULL;

    // Get the current fiber
    VALUE fiber = rb_fiber_current();

    /* We need to switch the profiling context if we either had none before,
       we don't merge fibers and the fiber ids differ, or the thread ids differ. */
    if (profile->last_thread_data->fiber != fiber)
    {
        result = threads_table_lookup(profile, fiber);
        if (!result)
        {
            // First event seen on this fiber - register it.
            result = threads_table_insert(profile, fiber);
        }
        // Updates profile->last_thread_data as a side effect.
        switch_thread(profile, result, measurement);
    }
    else
    {
        // Same fiber as last time - reuse the cached context.
        result = profile->last_thread_data;
    }
    return result;
}
99
+
100
+ static int excludes_method(st_data_t key, prof_profile_t* profile)
101
+ {
102
+ return (profile->exclude_methods_tbl &&
103
+ method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
104
+ }
105
+
106
/* Creates a prof_method_t for klass#msym and registers it in the current
   thread's method table. Ownership of the new method stays with that table.
   NOTE(review): the `key` parameter is unused - prof_method_create derives
   its own key (result->key); callers pass a key they already computed. */
static prof_method_t* create_method(prof_profile_t* profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
{
    prof_method_t* result = prof_method_create(profile->object, klass, msym, source_file, source_line);
    method_table_insert(profile->last_thread_data->method_table, result->key, result);

    return result;
}
113
+
114
+ static prof_method_t* check_parent_method(prof_profile_t* profile, thread_data_t* thread_data)
115
+ {
116
+ VALUE msym = ID2SYM(rb_intern("_inserted_parent_"));
117
+ st_data_t key = method_key(cProfile, msym);
118
+
119
+ prof_method_t* result = method_table_lookup(thread_data->method_table, key);
120
+
121
+ if (!result)
122
+ {
123
+ result = create_method(profile, key, cProfile, msym, Qnil, 0);
124
+ }
125
+
126
+ return result;
127
+ }
128
+
129
/* Resolves the prof_method_t for the method referenced by the current trace
   event, creating and registering it on first sight. Returns NULL when the
   method belongs to cProfile or has been explicitly excluded, signalling the
   caller to ignore the event. */
prof_method_t* check_method(prof_profile_t* profile, rb_trace_arg_t* trace_arg, rb_event_flag_t event, thread_data_t* thread_data)
{
    VALUE klass = rb_tracearg_defined_class(trace_arg);

    /* Special case - skip any methods from the mProf
       module or cProfile class since they clutter
       the results but aren't important to the results. */
    if (klass == cProfile)
        return NULL;

    VALUE msym = rb_tracearg_callee_id(trace_arg);

    st_data_t key = method_key(klass, msym);

    // Honor exclude_method! registrations.
    if (excludes_method(key, profile))
        return NULL;

    prof_method_t* result = method_table_lookup(thread_data->method_table, key);

    if (!result)
    {
        /* C functions have no Ruby source location, so record Qnil/0 for them
           instead of querying the trace arg. */
        VALUE source_file = (event != RUBY_EVENT_C_CALL ? rb_tracearg_path(trace_arg) : Qnil);
        int source_line = (event != RUBY_EVENT_C_CALL ? FIX2INT(rb_tracearg_lineno(trace_arg)) : 0);
        result = create_method(profile, key, klass, msym, source_file, source_line);
    }

    return result;
}
157
+
158
+ /* =========== Profiling ================= */
159
/* Writes one line per trace event to trace_file (enabled via the
   RUBY_PROF_TRACE environment variable). Debug aid only - not part of
   profiling results. A blank line is emitted whenever the fiber changes. */
static void prof_trace(prof_profile_t* profile, rb_trace_arg_t* trace_arg, double measurement)
{
    /* NOTE(review): static VALUE holding a fiber across events is not
       registered with the GC; it is only compared, never dereferenced,
       so the worst case is a spurious blank line - confirm. */
    static VALUE last_fiber = Qnil;
    VALUE fiber = rb_fiber_current();

    rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
    const char* event_name = get_event_name(event);

    VALUE source_file = rb_tracearg_path(trace_arg);
    int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));

    VALUE msym = rb_tracearg_callee_id(trace_arg);

    unsigned int klass_flags;
    VALUE klass = rb_tracearg_defined_class(trace_arg);
    VALUE resolved_klass = resolve_klass(klass, &klass_flags);
    const char* class_name = "";

    if (resolved_klass != Qnil)
        class_name = rb_class2name(resolved_klass);

    // Separate output visually when execution switches fibers.
    if (last_fiber != fiber)
    {
        fprintf(trace_file, "\n");
    }

    const char* method_name_char = (msym != Qnil ? rb_id2name(SYM2ID(msym)) : "");
    const char* source_file_char = (source_file != Qnil ? StringValuePtr(source_file) : "");

    /* NOTE(review): FIX2ULONG is applied to the fiber object VALUE, not a
       Fixnum - this prints a pointer-derived tag used only to distinguish
       fibers in the trace, not a real object id; confirm intent. */
    fprintf(trace_file, "%2lu:%2f %-8s %s#%s %s:%2d\n",
        FIX2ULONG(fiber), (double)measurement,
        event_name, class_name, method_name_char, source_file_char, source_line);
    fflush(trace_file);
    last_fiber = fiber;
}
194
+
195
/* Central tracepoint callback. Dispatches on the event type to maintain the
   per-fiber frame stack and call tree. Registered for line/call/return
   events and, when allocation tracking is on, NEWOBJ (see
   prof_install_hook). Statement order here is load-bearing: the measurement
   must be taken before any bookkeeping so it reflects the event time. */
static void prof_event_hook(VALUE trace_point, void* data)
{
    prof_profile_t* profile = (prof_profile_t*)(data);

    rb_trace_arg_t* trace_arg = rb_tracearg_from_tracepoint(trace_point);
    double measurement = prof_measure(profile->measurer, trace_arg);
    rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
    VALUE self = rb_tracearg_self(trace_arg);

    if (trace_file != NULL)
    {
        prof_trace(profile, trace_arg, measurement);
    }

    /* Special case - skip any methods from the mProf
       module since they clutter the results but aren't important. */
    if (self == mProf)
        return;

    // Switches profiling context if the fiber changed since the last event.
    thread_data_t* thread_data = check_fiber(profile, measurement);

    // Thread/fiber is excluded from profiling (exclude_threads/include_threads).
    if (!thread_data->trace)
        return;

    switch (event)
    {
        case RUBY_EVENT_LINE:
        {
            prof_frame_t* frame = prof_frame_current(thread_data->stack);

            /* If there is no frame then this is either the first method being profiled or we have climbed the
               call stack higher than where we started. */
            if (!frame)
            {
                prof_method_t* method = check_method(profile, trace_arg, event, thread_data);

                if (!method)
                    break;

                prof_call_tree_t* call_tree = prof_call_tree_create(method, NULL, method->source_file, method->source_line);
                prof_add_call_tree(method->call_trees, call_tree);

                // We have climbed higher in the stack then where we started
                if (thread_data->call_tree)
                {
                    // Reparent the old root under the newly discovered caller.
                    prof_call_tree_add_parent(thread_data->call_tree, call_tree);
                    frame = prof_frame_unshift(thread_data->stack, call_tree, thread_data->call_tree, measurement);
                }
                // This is the first method to be profiled
                else
                {
                    frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile->paused));
                }

                thread_data->call_tree = call_tree;
            }

            // Keep the frame's location current so allocations attribute correctly.
            frame->source_file = rb_tracearg_path(trace_arg);
            frame->source_line = FIX2INT(rb_tracearg_lineno(trace_arg));

            break;
        }
        case RUBY_EVENT_CALL:
        case RUBY_EVENT_C_CALL:
        {
            prof_method_t* method = check_method(profile, trace_arg, event, thread_data);

            if (!method)
                break;

            prof_frame_t* frame = prof_frame_current(thread_data->stack);
            prof_call_tree_t* parent_call_tree = NULL;
            prof_call_tree_t* call_tree = NULL;

            // Frame can be NULL if we are switching from one fiber to another (see FiberTest#fiber_test)
            if (frame)
            {
                parent_call_tree = frame->call_tree;
                call_tree = call_tree_table_lookup(parent_call_tree->children, method->key);
            }
            else if (!frame && thread_data->call_tree)
            {
                // There is no current parent - likely we have returned out of the highest level method we have profiled so far.
                // This can happen with enumerators (see fiber_test.rb). So create a new dummy parent.
                prof_method_t* parent_method = check_parent_method(profile, thread_data);
                parent_call_tree = prof_call_tree_create(parent_method, NULL, Qnil, 0);
                prof_add_call_tree(parent_method->call_trees, parent_call_tree);
                prof_call_tree_add_parent(thread_data->call_tree, parent_call_tree);
                frame = prof_frame_unshift(thread_data->stack, parent_call_tree, thread_data->call_tree, measurement);
                thread_data->call_tree = parent_call_tree;
            }

            if (!call_tree)
            {
                // This call info does not yet exist. So create it and add it to previous CallTree's children and the current method.
                call_tree = prof_call_tree_create(method, parent_call_tree, frame ? frame->source_file : Qnil, frame ? frame->source_line : 0);
                prof_add_call_tree(method->call_trees, call_tree);
                if (parent_call_tree)
                    prof_call_tree_add_child(parent_call_tree, call_tree);
            }

            // First profiled call on this thread becomes its root.
            if (!thread_data->call_tree)
                thread_data->call_tree = call_tree;

            // Push a new frame onto the stack for a new c-call or ruby call (into a method)
            prof_frame_t* next_frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile->paused));
            next_frame->source_file = method->source_file;
            next_frame->source_line = method->source_line;
            break;
        }
        case RUBY_EVENT_RETURN:
        case RUBY_EVENT_C_RETURN:
        {
            // We need to check for excluded methods so that we don't pop them off the stack
            prof_method_t* method = check_method(profile, trace_arg, event, thread_data);

            if (!method)
                break;

            prof_frame_pop(thread_data->stack, measurement);
            break;
        }
        case RUBY_INTERNAL_EVENT_NEWOBJ:
        {
            /* We want to assign the allocations lexically, not the execution context (otherwise all allocations will
               show up under Class#new */
            int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
            VALUE source_file = rb_tracearg_path(trace_arg);

            // Find the stacked frame whose source location matches the allocation site.
            prof_method_t* method = prof_find_method(thread_data->stack, source_file, source_line);
            if (method)
                prof_allocate_increment(method->allocations_table, trace_arg);

            break;
        }
    }
}
332
+
333
+ void prof_install_hook(VALUE self)
334
+ {
335
+ prof_profile_t* profile = prof_get_profile(self);
336
+
337
+ VALUE event_tracepoint = rb_tracepoint_new(Qnil,
338
+ RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
339
+ RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN |
340
+ RUBY_EVENT_LINE,
341
+ prof_event_hook, profile);
342
+ rb_ary_push(profile->tracepoints, event_tracepoint);
343
+
344
+ if (profile->measurer->track_allocations)
345
+ {
346
+ VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, profile);
347
+ rb_ary_push(profile->tracepoints, allocation_tracepoint);
348
+ }
349
+
350
+ for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
351
+ {
352
+ rb_tracepoint_enable(rb_ary_entry(profile->tracepoints, i));
353
+ }
354
+ }
355
+
356
+ void prof_remove_hook(VALUE self)
357
+ {
358
+ prof_profile_t* profile = prof_get_profile(self);
359
+
360
+ for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
361
+ {
362
+ rb_tracepoint_disable(rb_ary_entry(profile->tracepoints, i));
363
+ }
364
+ rb_ary_clear(profile->tracepoints);
365
+ }
366
+
367
/* Returns the prof_profile_t wrapped by a RubyProf::Profile instance. */
prof_profile_t* prof_get_profile(VALUE self)
{
    /* Can't use Data_Get_Struct because that triggers the event hook
       ending up in endless recursion. */
    return RTYPEDDATA_DATA(self);
}
373
+
374
+ static int collect_threads(st_data_t key, st_data_t value, st_data_t result)
375
+ {
376
+ thread_data_t* thread_data = (thread_data_t*)value;
377
+ if (thread_data->trace && thread_data->call_tree)
378
+ {
379
+ VALUE threads_array = (VALUE)result;
380
+ rb_ary_push(threads_array, prof_thread_wrap(thread_data));
381
+ }
382
+ return ST_CONTINUE;
383
+ }
384
+
385
+ /* ======== Profile Class ====== */
386
+ static int mark_threads(st_data_t key, st_data_t value, st_data_t result)
387
+ {
388
+ thread_data_t* thread = (thread_data_t*)value;
389
+ prof_thread_mark(thread);
390
+ return ST_CONTINUE;
391
+ }
392
+
393
+ static int prof_profile_mark_methods(st_data_t key, st_data_t value, st_data_t result)
394
+ {
395
+ prof_method_t* method = (prof_method_t*)value;
396
+ prof_method_mark(method);
397
+ return ST_CONTINUE;
398
+ }
399
+
400
/* GC mark function for the Profile TypedData. Marks all VALUEs the struct
   owns (movable, to support GC compaction - see prof_profile_compact) plus
   everything reachable through the threads and excluded-methods tables. */
static void prof_profile_mark(void* data)
{
    prof_profile_t* profile = (prof_profile_t*)data;
    rb_gc_mark_movable(profile->object);
    rb_gc_mark_movable(profile->tracepoints);
    rb_gc_mark_movable(profile->running);
    rb_gc_mark_movable(profile->paused);

    // If GC stress is true (useful for debugging), when threads_table_create is called in the
    // allocate method Ruby will immediately call this mark method. Thus the threads_tbl will be NULL.
    if (profile->threads_tbl)
        rb_st_foreach(profile->threads_tbl, mark_threads, 0);

    if (profile->exclude_methods_tbl)
        rb_st_foreach(profile->exclude_methods_tbl, prof_profile_mark_methods, 0);
}
416
+
417
+ void prof_profile_compact(void* data)
418
+ {
419
+ prof_profile_t* profile = (prof_profile_t*)data;
420
+ profile->object = rb_gc_location(profile->object);
421
+ profile->tracepoints = rb_gc_location(profile->tracepoints);
422
+ profile->running = rb_gc_location(profile->running);
423
+ profile->paused = rb_gc_location(profile->paused);
424
+ }
425
+
426
/* Freeing the profile creates a cascade of freeing. It frees its threads table, which frees
   each thread and its associated call tree and methods. */
static void prof_profile_ruby_gc_free(void* data)
{
    prof_profile_t* profile = (prof_profile_t*)data;
    profile->last_thread_data = NULL;

    // Frees every thread_data and, transitively, its call trees and methods.
    threads_table_free(profile->threads_tbl);
    profile->threads_tbl = NULL;

    if (profile->exclude_threads_tbl)
    {
        rb_st_free_table(profile->exclude_threads_tbl);
        profile->exclude_threads_tbl = NULL;
    }

    if (profile->include_threads_tbl)
    {
        rb_st_free_table(profile->include_threads_tbl);
        profile->include_threads_tbl = NULL;
    }

    /* This table owns the excluded sentinels for now. */
    method_table_free(profile->exclude_methods_tbl);
    profile->exclude_methods_tbl = NULL;

    xfree(profile->measurer);
    profile->measurer = NULL;

    xfree(profile);
}
457
+
458
+ size_t prof_profile_size(const void* data)
459
+ {
460
+ return sizeof(prof_profile_t);
461
+ }
462
+
463
/* TypedData descriptor wiring the Profile struct into Ruby's GC:
   mark/free/size/compact callbacks defined above. RUBY_TYPED_FREE_IMMEDIATELY
   lets the free function run during GC without deferral. */
static const rb_data_type_t profile_type =
{
    .wrap_struct_name = "Profile",
    .function =
    {
        .dmark = prof_profile_mark,
        .dfree = prof_profile_ruby_gc_free,
        .dsize = prof_profile_size,
        .dcompact = prof_profile_compact
    },
    .data = NULL,
    .flags = RUBY_TYPED_FREE_IMMEDIATELY
};
476
+
477
+ static VALUE prof_allocate(VALUE klass)
478
+ {
479
+ VALUE result;
480
+ prof_profile_t* profile;
481
+ result = TypedData_Make_Struct(klass, prof_profile_t, &profile_type, profile);
482
+ profile->object = result;
483
+ profile->threads_tbl = threads_table_create();
484
+ profile->exclude_threads_tbl = NULL;
485
+ profile->include_threads_tbl = NULL;
486
+ profile->running = Qfalse;
487
+ profile->allow_exceptions = false;
488
+ profile->exclude_methods_tbl = method_table_create();
489
+ profile->running = Qfalse;
490
+ profile->tracepoints = rb_ary_new();
491
+ return result;
492
+ }
493
+
494
+ static void prof_exclude_common_methods(VALUE profile)
495
+ {
496
+ rb_funcall(profile, rb_intern("exclude_common_methods!"), 0);
497
+ }
498
+
499
/* st_foreach callback used by prof_stop_threads: drains the frame stack of
   one thread so every open frame is closed with a final measurement taken
   at stop time. Switches the profiling context first when the entry is not
   the currently active fiber. */
static int pop_frames(VALUE key, st_data_t value, st_data_t data)
{
    thread_data_t* thread_data = (thread_data_t*)value;
    prof_profile_t* profile = (prof_profile_t*)data;
    double measurement = prof_measure(profile->measurer, NULL);

    if (profile->last_thread_data->fiber != thread_data->fiber)
        switch_thread(profile, thread_data, measurement);

    // Pop until the stack is empty; prof_frame_pop returns NULL when done.
    while (prof_frame_pop(thread_data->stack, measurement));

    return ST_CONTINUE;
}
512
+
513
+ static void
514
+ prof_stop_threads(prof_profile_t* profile)
515
+ {
516
+ rb_st_foreach(profile->threads_tbl, pop_frames, (st_data_t)profile);
517
+ }
518
+
519
/* call-seq:
   new()
   new(keyword_args)

 Returns a new profiler. Possible keyword arguments include:

 measure_mode: Measure mode. Specifies the profile measure mode.
 If not specified, defaults to RubyProf::WALL_TIME.
 allow_exceptions: Whether to raise exceptions encountered during profiling,
 or to suppress all exceptions during profiling
 track_allocations: Whether to track object allocations while profiling. True or false.
 exclude_common: Exclude common methods from the profile. True or false.
 exclude_threads: Threads to exclude from the profiling results.
 include_threads: Focus profiling on only the given threads. This will ignore
 all other threads. */
static VALUE prof_initialize(int argc, VALUE* argv, VALUE self)
{
    VALUE keywords;
    // Accept keyword arguments only (":" spec = no positional args).
    rb_scan_args_kw(RB_SCAN_ARGS_KEYWORDS, argc, argv, ":", &keywords);

    ID table[] = {rb_intern("measure_mode"),
                  rb_intern("track_allocations"),
                  rb_intern("allow_exceptions"),
                  rb_intern("exclude_common"),
                  rb_intern("exclude_threads"),
                  rb_intern("include_threads") };
    VALUE values[6];
    // All 6 keywords optional; unsupplied slots come back as Qundef.
    rb_get_kwargs(keywords, table, 0, 6, values);

    VALUE mode = values[0] == Qundef ? INT2NUM(MEASURE_WALL_TIME) : values[0];
    /* NOTE(review): only the literal `true` enables these flags; any other
       truthy value is treated as false - confirm this is intentional. */
    VALUE track_allocations = values[1] == Qtrue ? Qtrue : Qfalse;
    VALUE allow_exceptions = values[2] == Qtrue ? Qtrue : Qfalse;
    VALUE exclude_common = values[3] == Qtrue ? Qtrue : Qfalse;
    VALUE exclude_threads = values[4];
    VALUE include_threads = values[5];

    Check_Type(mode, T_FIXNUM);
    prof_profile_t* profile = prof_get_profile(self);
    profile->measurer = prof_measurer_create(NUM2INT(mode), RB_TEST(track_allocations));
    profile->allow_exceptions = RB_TEST(allow_exceptions);

    if (exclude_threads != Qundef)
    {
        Check_Type(exclude_threads, T_ARRAY);
        assert(profile->exclude_threads_tbl == NULL);
        profile->exclude_threads_tbl = threads_table_create();
        for (int i = 0; i < RARRAY_LEN(exclude_threads); i++)
        {
            VALUE thread = rb_ary_entry(exclude_threads, i);
            rb_st_insert(profile->exclude_threads_tbl, thread, Qtrue);
        }
    }

    if (include_threads != Qundef)
    {
        Check_Type(include_threads, T_ARRAY);
        assert(profile->include_threads_tbl == NULL);
        profile->include_threads_tbl = threads_table_create();
        for (int i = 0; i < RARRAY_LEN(include_threads); i++)
        {
            VALUE thread = rb_ary_entry(include_threads, i);
            rb_st_insert(profile->include_threads_tbl, thread, Qtrue);
        }
    }

    if (RB_TEST(exclude_common))
    {
        // Registers a standard list of noisy methods (Ruby-side helper).
        prof_exclude_common_methods(self);
    }

    return self;
}
591
+
592
+ /* call-seq:
593
+ paused? -> boolean
594
+
595
+ Returns whether a profile is currently paused.*/
596
+ static VALUE prof_paused(VALUE self)
597
+ {
598
+ prof_profile_t* profile = prof_get_profile(self);
599
+ return profile->paused;
600
+ }
601
+
602
+ /* call-seq:
603
+ running? -> boolean
604
+
605
+ Returns whether a profile is currently running.*/
606
+ static VALUE
607
+ prof_running(VALUE self)
608
+ {
609
+ prof_profile_t* profile = prof_get_profile(self);
610
+ return profile->running;
611
+ }
612
+
613
+ /* call-seq:
614
+ mode -> measure_mode
615
+
616
+ Returns the measure mode used in this profile.*/
617
+ static VALUE prof_profile_measure_mode(VALUE self)
618
+ {
619
+ prof_profile_t* profile = prof_get_profile(self);
620
+ return INT2NUM(profile->measurer->mode);
621
+ }
622
+
623
+ /* call-seq:
624
+ track_allocations -> boolean
625
+
626
+ Returns if object allocations were tracked in this profile.*/
627
+ static VALUE prof_profile_track_allocations(VALUE self)
628
+ {
629
+ prof_profile_t* profile = prof_get_profile(self);
630
+ return profile->measurer->track_allocations ? Qtrue : Qfalse;
631
+ }
632
+
633
/* call-seq:
   start -> self

 Starts recording profile data. Raises RuntimeError when already running.
 If the RUBY_PROF_TRACE environment variable is set, a debug event trace
 is written to that file ("stdout"/"stderr" are recognized specially). */
static VALUE prof_start(VALUE self)
{
    char* trace_file_name;

    prof_profile_t* profile = prof_get_profile(self);

    if (profile->running == Qtrue)
    {
        rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
    }

    profile->running = Qtrue;
    profile->paused = Qfalse;
    // The starting fiber becomes the initial profiling context.
    profile->last_thread_data = threads_table_insert(profile, rb_fiber_current());

    /* open trace file if environment wants it */
    trace_file_name = getenv("RUBY_PROF_TRACE");

    if (trace_file_name != NULL)
    {
        if (strcmp(trace_file_name, "stdout") == 0)
        {
            trace_file = stdout;
        }
        else if (strcmp(trace_file_name, "stderr") == 0)
        {
            trace_file = stderr;
        }
        else
        {
            /* NOTE(review): fopen result is not checked; a bad path leaves
               trace_file NULL, which silently disables tracing - confirm. */
            trace_file = fopen(trace_file_name, "w");
        }
    }

    // Create and enable the tracepoints that drive prof_event_hook.
    prof_install_hook(self);
    return self;
}
674
+
675
+ /* call-seq:
676
+ pause -> self
677
+
678
+ Pauses collecting profile data. */
679
+ static VALUE prof_pause(VALUE self)
680
+ {
681
+ prof_profile_t* profile = prof_get_profile(self);
682
+ if (profile->running == Qfalse)
683
+ {
684
+ rb_raise(rb_eRuntimeError, "RubyProf is not running.");
685
+ }
686
+
687
+ if (profile->paused == Qfalse)
688
+ {
689
+ profile->paused = Qtrue;
690
+ profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
691
+ rb_st_foreach(profile->threads_tbl, pause_thread, (st_data_t)profile);
692
+ }
693
+
694
+ return self;
695
+ }
696
+
697
/* call-seq:
   resume -> self
   resume(&block) -> self

 Resumes recording profile data. When a block is given, the profile is
 automatically paused again after the block finishes (even on raise). */
static VALUE prof_resume(VALUE self)
{
    prof_profile_t* profile = prof_get_profile(self);
    if (profile->running == Qfalse)
    {
        rb_raise(rb_eRuntimeError, "RubyProf is not running.");
    }

    if (profile->paused == Qtrue)
    {
        profile->paused = Qfalse;
        profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
        rb_st_foreach(profile->threads_tbl, unpause_thread, (st_data_t)profile);
    }

    // rb_ensure guarantees prof_pause runs even if the block raises.
    return rb_block_given_p() ? rb_ensure(rb_yield, self, prof_pause, self) : self;
}
719
+
720
/* call-seq:
   stop -> self

 Stops collecting profile data. Disables the tracepoints, closes any trace
 file, drains all open frames and resets the running/paused state. Raises
 RuntimeError when start was never called. */
static VALUE prof_stop(VALUE self)
{
    prof_profile_t* profile = prof_get_profile(self);

    if (profile->running == Qfalse)
    {
        rb_raise(rb_eRuntimeError, "RubyProf.start was not yet called");
    }

    prof_remove_hook(self);

    /* close trace file if open */
    if (trace_file != NULL)
    {
        // stdout/stderr were not opened by us, so don't close them.
        if (trace_file != stderr && trace_file != stdout)
        {
            fclose(trace_file);
        }
        trace_file = NULL;
    }

    // Close every still-open frame with a final stop-time measurement.
    prof_stop_threads(profile);

    /* Unset the last_thread_data (very important!)
       and the threads table */
    profile->running = profile->paused = Qfalse;
    profile->last_thread_data = NULL;

    return self;
}
754
+
755
+ /* call-seq:
756
+ threads -> Array of RubyProf::Thread
757
+
758
+ Returns an array of RubyProf::Thread instances that were profiled. */
759
+ static VALUE prof_threads(VALUE self)
760
+ {
761
+ VALUE result = rb_ary_new();
762
+ prof_profile_t* profile = prof_get_profile(self);
763
+ rb_st_foreach(profile->threads_tbl, collect_threads, result);
764
+ return result;
765
+ }
766
+
767
/* call-seq:
   add_thread(thread) -> thread

 Adds the specified RubyProf thread to the profile. The thread's native
 data becomes owned by this profile's threads table (freed with it). */
static VALUE prof_add_thread(VALUE self, VALUE thread)
{
    prof_profile_t* profile_ptr = prof_get_profile(self);

    // This thread is now going to be owned by C
    thread_data_t* thread_ptr = prof_get_thread(thread);
    thread_ptr->owner = OWNER_C;

    rb_st_insert(profile_ptr->threads_tbl, thread_ptr->fiber_id, (st_data_t)thread_ptr);
    return thread;
}
782
+
783
+ /* call-seq:
784
+ remove_thread(thread) -> thread
785
+
786
+ Removes the specified thread from the profile. This is used to remove threads
787
+ after they have been merged togher. Retuns the removed thread. */
788
+ static VALUE prof_remove_thread(VALUE self, VALUE thread)
789
+ {
790
+ prof_profile_t* profile_ptr = prof_get_profile(self);
791
+ thread_data_t* thread_ptr = prof_get_thread(thread);
792
+ VALUE fiber_id = thread_ptr->fiber_id;
793
+ rb_st_delete(profile_ptr->threads_tbl, (st_data_t*)&fiber_id, NULL);
794
+ return thread;
795
+ }
796
+
797
/* Document-method: RubyProf::Profile#Profile
   call-seq:
   profile(&block) -> self

 Profiles the specified block.

 profile = RubyProf::Profile.new
 profile.profile do
 ..
 end
*/
static VALUE prof_profile_instance(VALUE self)
{
    int result;
    prof_profile_t* profile = prof_get_profile(self);

    if (!rb_block_given_p())
    {
        rb_raise(rb_eArgError, "A block must be provided to the profile method.");
    }

    prof_start(self);
    // rb_protect captures any raise from the block so stop always runs.
    rb_protect(rb_yield, self, &result);
    self = prof_stop(self);

    // Re-raise the captured exception only when the user opted in.
    if (profile->allow_exceptions && result != 0)
    {
        rb_jump_tag(result);
    }

    return self;
}
829
+
830
+ /* Document-method: RubyProf::Profile::Profile
831
+ call-seq:
832
+ profile(&block) -> RubyProf::Profile
833
+ profile(options, &block) -> RubyProf::Profile
834
+
835
+ Profiles the specified block and returns a RubyProf::Profile
836
+ object. Arguments are passed to Profile initialize method.
837
+
838
+ profile = RubyProf::Profile.profile do
839
+ ..
840
+ end
841
+ */
842
+ static VALUE prof_profile_class(int argc, VALUE* argv, VALUE klass)
843
+ {
844
+ return prof_profile_instance(rb_class_new_instance(argc, argv, cProfile));
845
+ }
846
+
847
/* call-seq:
   exclude_method!(module, method_name) -> self

 Excludes the method from profiling results. Must be called before the
 profile is started; raises RuntimeError otherwise.
*/
static VALUE prof_exclude_method(VALUE self, VALUE klass, VALUE msym)
{
    prof_profile_t* profile = prof_get_profile(self);

    if (profile->running == Qtrue)
    {
        rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
    }

    st_data_t key = method_key(klass, msym);
    prof_method_t* method = method_table_lookup(profile->exclude_methods_tbl, key);

    // Idempotent: only insert a sentinel if one isn't registered already.
    if (!method)
    {
        method = prof_method_create(self, klass, msym, Qnil, 0);
        method_table_insert(profile->exclude_methods_tbl, method->key, method);
    }

    return self;
}
872
+
873
+ /* :nodoc: */
874
+ VALUE prof_profile_dump(VALUE self)
875
+ {
876
+ prof_profile_t* profile = prof_get_profile(self);
877
+
878
+ VALUE result = rb_hash_new();
879
+ rb_hash_aset(result, ID2SYM(rb_intern("threads")), prof_threads(self));
880
+ rb_hash_aset(result, ID2SYM(rb_intern("measurer_mode")), INT2NUM(profile->measurer->mode));
881
+ rb_hash_aset(result, ID2SYM(rb_intern("measurer_track_allocations")),
882
+ profile->measurer->track_allocations ? Qtrue : Qfalse);
883
+
884
+ return result;
885
+ }
886
+
887
/* :nodoc: Marshal support - restores a profile from the Hash produced by
   prof_profile_dump. Rebuilds the measurer and re-registers each thread. */
VALUE prof_profile_load(VALUE self, VALUE data)
{
    prof_profile_t* profile = prof_get_profile(self);

    VALUE measurer_mode = rb_hash_aref(data, ID2SYM(rb_intern("measurer_mode")));
    VALUE measurer_track_allocations = rb_hash_aref(data, ID2SYM(rb_intern("measurer_track_allocations")));
    /* NOTE(review): overwrites profile->measurer without freeing; on a
       freshly allocated marshal-load target the field is NULL so nothing
       leaks, but calling _load_data on an initialized profile would - confirm. */
    profile->measurer = prof_measurer_create((prof_measure_mode_t)(NUM2INT(measurer_mode)),
                                             measurer_track_allocations == Qtrue ? true : false);

    VALUE threads = rb_hash_aref(data, ID2SYM(rb_intern("threads")));
    for (int i = 0; i < rb_array_len(threads); i++)
    {
        VALUE thread = rb_ary_entry(threads, i);
        thread_data_t* thread_data = prof_get_thread(thread);
        rb_st_insert(profile->threads_tbl, (st_data_t)thread_data->fiber_id, (st_data_t)thread_data);
    }

    return data;
}
907
+
908
/* Registers the RubyProf::Profile class, its allocator and all of its
   instance/singleton methods with the Ruby runtime. Called once from the
   extension's Init function. */
void rp_init_profile(void)
{
    cProfile = rb_define_class_under(mProf, "Profile", rb_cObject);
    rb_define_alloc_func(cProfile, prof_allocate);

    // Lifecycle
    rb_define_singleton_method(cProfile, "profile", prof_profile_class, -1);
    rb_define_method(cProfile, "initialize", prof_initialize, -1);
    rb_define_method(cProfile, "profile", prof_profile_instance, 0);
    rb_define_method(cProfile, "start", prof_start, 0);
    rb_define_method(cProfile, "stop", prof_stop, 0);
    rb_define_method(cProfile, "resume", prof_resume, 0);
    rb_define_method(cProfile, "pause", prof_pause, 0);
    rb_define_method(cProfile, "running?", prof_running, 0);
    rb_define_method(cProfile, "paused?", prof_paused, 0);

    // Configuration / introspection
    rb_define_method(cProfile, "exclude_method!", prof_exclude_method, 2);
    rb_define_method(cProfile, "measure_mode", prof_profile_measure_mode, 0);
    rb_define_method(cProfile, "track_allocations?", prof_profile_track_allocations, 0);

    // Results
    rb_define_method(cProfile, "threads", prof_threads, 0);
    rb_define_method(cProfile, "add_thread", prof_add_thread, 1);
    rb_define_method(cProfile, "remove_thread", prof_remove_thread, 1);

    // Marshal support
    rb_define_method(cProfile, "_dump_data", prof_profile_dump, 0);
    rb_define_method(cProfile, "_load_data", prof_profile_load, 1);
}