ruby-prof 1.1.0-x64-mingw32 → 1.4.2-x64-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. checksums.yaml +4 -4
  2. data/CHANGES +48 -1
  3. data/Rakefile +2 -14
  4. data/bin/ruby-prof +100 -152
  5. data/ext/ruby_prof/extconf.rb +8 -28
  6. data/ext/ruby_prof/rp_aggregate_call_tree.c +59 -0
  7. data/ext/ruby_prof/rp_aggregate_call_tree.h +13 -0
  8. data/ext/ruby_prof/rp_allocation.c +67 -59
  9. data/ext/ruby_prof/rp_allocation.h +3 -3
  10. data/ext/ruby_prof/rp_call_tree.c +369 -0
  11. data/ext/ruby_prof/rp_call_tree.h +43 -0
  12. data/ext/ruby_prof/rp_call_trees.c +288 -0
  13. data/ext/ruby_prof/rp_call_trees.h +28 -0
  14. data/ext/ruby_prof/rp_measure_allocations.c +12 -14
  15. data/ext/ruby_prof/rp_measure_process_time.c +12 -14
  16. data/ext/ruby_prof/rp_measure_wall_time.c +17 -15
  17. data/ext/ruby_prof/rp_measurement.c +47 -40
  18. data/ext/ruby_prof/rp_measurement.h +7 -7
  19. data/ext/ruby_prof/rp_method.c +116 -255
  20. data/ext/ruby_prof/rp_method.h +31 -39
  21. data/ext/ruby_prof/rp_profile.c +316 -303
  22. data/ext/ruby_prof/rp_profile.h +1 -3
  23. data/ext/ruby_prof/rp_stack.c +122 -106
  24. data/ext/ruby_prof/rp_stack.h +17 -20
  25. data/ext/ruby_prof/rp_thread.c +136 -111
  26. data/ext/ruby_prof/rp_thread.h +12 -9
  27. data/ext/ruby_prof/ruby_prof.c +27 -23
  28. data/ext/ruby_prof/ruby_prof.h +9 -0
  29. data/ext/ruby_prof/vc/ruby_prof.sln +8 -0
  30. data/ext/ruby_prof/vc/ruby_prof.vcxproj +22 -7
  31. data/lib/2.7/ruby_prof.so +0 -0
  32. data/lib/ruby-prof.rb +5 -5
  33. data/lib/ruby-prof/assets/call_stack_printer.html.erb +4 -7
  34. data/lib/ruby-prof/assets/graph_printer.html.erb +5 -6
  35. data/lib/ruby-prof/{call_info.rb → call_tree.rb} +6 -6
  36. data/lib/ruby-prof/call_tree_visitor.rb +36 -0
  37. data/lib/ruby-prof/compatibility.rb +0 -10
  38. data/lib/ruby-prof/measurement.rb +5 -2
  39. data/lib/ruby-prof/method_info.rb +3 -15
  40. data/lib/ruby-prof/printers/abstract_printer.rb +12 -2
  41. data/lib/ruby-prof/printers/call_info_printer.rb +12 -10
  42. data/lib/ruby-prof/printers/call_stack_printer.rb +20 -22
  43. data/lib/ruby-prof/printers/call_tree_printer.rb +1 -1
  44. data/lib/ruby-prof/printers/dot_printer.rb +3 -3
  45. data/lib/ruby-prof/printers/flat_printer.rb +3 -2
  46. data/lib/ruby-prof/printers/graph_printer.rb +4 -5
  47. data/lib/ruby-prof/printers/multi_printer.rb +2 -2
  48. data/lib/ruby-prof/profile.rb +8 -4
  49. data/lib/ruby-prof/rack.rb +51 -127
  50. data/lib/ruby-prof/thread.rb +3 -18
  51. data/lib/ruby-prof/version.rb +1 -1
  52. data/ruby-prof.gemspec +7 -0
  53. data/test/alias_test.rb +42 -45
  54. data/test/basic_test.rb +0 -86
  55. data/test/{call_info_visitor_test.rb → call_tree_visitor_test.rb} +6 -5
  56. data/test/call_trees_test.rb +66 -0
  57. data/test/exclude_methods_test.rb +17 -12
  58. data/test/fiber_test.rb +95 -39
  59. data/test/gc_test.rb +36 -42
  60. data/test/inverse_call_tree_test.rb +175 -0
  61. data/test/line_number_test.rb +67 -70
  62. data/test/marshal_test.rb +7 -13
  63. data/test/measure_allocations_test.rb +224 -234
  64. data/test/measure_allocations_trace_test.rb +224 -234
  65. data/test/measure_memory_trace_test.rb +814 -469
  66. data/test/measure_process_time_test.rb +0 -64
  67. data/test/measure_times.rb +2 -0
  68. data/test/measure_wall_time_test.rb +34 -58
  69. data/test/pause_resume_test.rb +19 -10
  70. data/test/prime.rb +1 -3
  71. data/test/prime_script.rb +6 -0
  72. data/test/printer_call_stack_test.rb +0 -1
  73. data/test/printer_call_tree_test.rb +0 -1
  74. data/test/printer_flat_test.rb +61 -30
  75. data/test/printer_graph_html_test.rb +0 -1
  76. data/test/printer_graph_test.rb +3 -4
  77. data/test/printers_test.rb +2 -2
  78. data/test/printing_recursive_graph_test.rb +1 -1
  79. data/test/profile_test.rb +16 -0
  80. data/test/rack_test.rb +0 -64
  81. data/test/recursive_test.rb +50 -54
  82. data/test/start_stop_test.rb +19 -19
  83. data/test/test_helper.rb +6 -17
  84. data/test/thread_test.rb +11 -11
  85. data/test/unique_call_path_test.rb +25 -95
  86. metadata +22 -11
  87. data/ext/ruby_prof/rp_call_info.c +0 -271
  88. data/ext/ruby_prof/rp_call_info.h +0 -35
  89. data/lib/2.6.5/ruby_prof.so +0 -0
  90. data/lib/ruby-prof/call_info_visitor.rb +0 -38
  91. data/test/parser_timings.rb +0 -24
data/ext/ruby_prof/rp_method.h
@@ -9,62 +9,54 @@
 
  extern VALUE cRpMethodInfo;
 
- /* Source relation bit offsets. */
+ // Source relation bit offsets.
  enum {
- kModuleIncludee = 0x1, /* Included in module */
- kClassSingleton = 0x2, /* Singleton of a class */
- kModuleSingleton = 0x4, /* Singleton of a module */
- kObjectSingleton = 0x8, /* Singleton of an object */
- kOtherSingleton = 0x10 /* Singleton of unkown object */
+ kModuleIncludee = 0x1, // Included in module
+ kClassSingleton = 0x2, // Singleton of a class
+ kModuleSingleton = 0x4, // Singleton of a module
+ kObjectSingleton = 0x8, // Singleton of an object
+ kOtherSingleton = 0x10 // Singleton of unkown object
  };
 
- /* Forward declaration, see rp_call_info.h */
- struct prof_call_infos_t;
-
- /* Profiling information for each method. */
- /* Excluded methods have no call_infos, source_klass, or source_file. */
- typedef struct
+ // Profiling information for each method.
+ // Excluded methods have no call_trees, source_klass, or source_file.
+ typedef struct prof_method_t
  {
- st_data_t key; /* Table key */
-
- int visits; /* Current visits on the stack */
- bool excluded; /* Exclude from profile? */
-
- st_table* parent_call_infos; /* Call infos that call this method */
- st_table* child_call_infos; /* Call infos that this method calls */
- st_table* allocations_table; /* Tracks object allocations */
+ VALUE profile; // Profile this method is associated with - needed for mark phase
+ struct prof_call_trees_t* call_trees; // Call infos that call this method
+ st_table* allocations_table; // Tracks object allocations
 
- unsigned int klass_flags; /* Information about the type of class */
- VALUE klass; /* Resolved klass */
- VALUE klass_name; /* Resolved klass name for this method */
- VALUE method_name; /* Resolved method name for this method */
+ st_data_t key; // Table key
+ unsigned int klass_flags; // Information about the type of class
+ VALUE klass; // Resolved klass
+ VALUE klass_name; // Resolved klass name for this method
+ VALUE method_name; // Resolved method name for this method
 
- VALUE object; /* Cached ruby object */
+ VALUE object; // Cached ruby object
 
- bool root; /* Is this a root method */
  bool recursive;
- VALUE source_file; /* Source file */
- int source_line; /* Line number */
+ int visits; // Current visits on the stack
+ VALUE source_file; // Source file
+ int source_line; // Line number
 
- prof_measurement_t *measurement;
+ prof_measurement_t* measurement; // Stores measurement data for this method
  } prof_method_t;
 
  void rp_init_method_info(void);
 
  st_data_t method_key(VALUE klass, VALUE msym);
 
- st_table *method_table_create(void);
- prof_method_t* prof_method_create_excluded(VALUE klass, VALUE msym);
- prof_method_t *method_table_lookup(st_table *table, st_data_t key);
- size_t method_table_insert(st_table *table, st_data_t key, prof_method_t *val);
- void method_table_free(st_table *table);
- prof_method_t *prof_method_create(VALUE klass, VALUE msym, VALUE source_file, int source_line);
- prof_method_t *prof_method_get(VALUE self);
+ st_table* method_table_create(void);
+ prof_method_t* method_table_lookup(st_table* table, st_data_t key);
+ size_t method_table_insert(st_table* table, st_data_t key, prof_method_t* val);
+ void method_table_free(st_table* table);
+ prof_method_t* prof_method_create(VALUE profile, VALUE klass, VALUE msym, VALUE source_file, int source_line);
+ prof_method_t* prof_get_method(VALUE self);
 
- VALUE prof_method_wrap(prof_method_t *result);
- void prof_method_mark(void *data);
+ VALUE prof_method_wrap(prof_method_t* result);
+ void prof_method_mark(void* data);
 
- VALUE resolve_klass(VALUE klass, unsigned int *klass_flags);
+ VALUE resolve_klass(VALUE klass, unsigned int* klass_flags);
  VALUE resolve_klass_name(VALUE klass, unsigned int* klass_flags);
 
  #endif //__RP_METHOD_INFO__
data/ext/ruby_prof/rp_profile.c
@@ -1,28 +1,29 @@
  /* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
  Please see the LICENSE file for copyright and distribution information */
 
- /* Document-class: RubyProf::Profile
+ /* Document-class: RubyProf::Profile
 
- The Profile class represents a single profiling run and provides the main API for using ruby-prof.
- After creating a Profile instance, start profiling code by calling the Profile#start method. To finish profiling,
- call Profile#stop. Once profiling is completed, the Profile instance contains the results.
+ The Profile class represents a single profiling run and provides the main API for using ruby-prof.
+ After creating a Profile instance, start profiling code by calling the Profile#start method. To finish profiling,
+ call Profile#stop. Once profiling is completed, the Profile instance contains the results.
 
- profile = RubyProf::Profile.new
- profile.start
- ...
- result = profile.stop
+ profile = RubyProf::Profile.new
+ profile.start
+ ...
+ result = profile.stop
 
- Alternatively, you can use the block syntax:
+ Alternatively, you can use the block syntax:
 
- profile = RubyProf::Profile.profile do
- ...
- end
- */
+ profile = RubyProf::Profile.profile do
+ ...
+ end
+ */
 
  #include <assert.h>
 
  #include "rp_allocation.h"
- #include "rp_call_info.h"
+ #include "rp_call_trees.h"
+ #include "rp_call_tree.h"
  #include "rp_profile.h"
  #include "rp_method.h"
 
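The Document-class comment above is the gem's primary usage documentation, and the workflow it describes is unchanged between these versions (the hunk only re-indents it). A minimal sketch of that workflow, assuming nothing beyond what the comment states; the sleep call is only a placeholder workload and FlatPrinter is one of the bundled report printers:

    require 'ruby-prof'

    # Explicit start/stop, as in the comment above
    profile = RubyProf::Profile.new
    profile.start
    sleep(0.1)                 # code being profiled
    result = profile.stop

    # Equivalent block form
    result = RubyProf::Profile.profile do
      sleep(0.1)
    end

    # Render the results with one of the bundled printers
    RubyProf::FlatPrinter.new(result).print(STDOUT)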
@@ -35,259 +36,276 @@ VALUE cProfile;
  */
  FILE* trace_file = NULL;
 
- static int
- excludes_method(st_data_t key, prof_profile_t* profile)
+ static const char* get_event_name(rb_event_flag_t event)
  {
- return (profile->exclude_methods_tbl &&
- method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
- }
-
- static prof_method_t*
- create_method(prof_profile_t* profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
+ switch (event) {
+ case RUBY_EVENT_LINE:
+ return "line";
+ case RUBY_EVENT_CLASS:
+ return "class";
+ case RUBY_EVENT_END:
+ return "end";
+ case RUBY_EVENT_CALL:
+ return "call";
+ case RUBY_EVENT_RETURN:
+ return "return";
+ case RUBY_EVENT_B_CALL:
+ return "b-call";
+ case RUBY_EVENT_B_RETURN:
+ return "b-return";
+ case RUBY_EVENT_C_CALL:
+ return "c-call";
+ case RUBY_EVENT_C_RETURN:
+ return "c-return";
+ case RUBY_EVENT_THREAD_BEGIN:
+ return "thread-begin";
+ case RUBY_EVENT_THREAD_END:
+ return "thread-end";
+ case RUBY_EVENT_FIBER_SWITCH:
+ return "fiber-switch";
+ case RUBY_EVENT_RAISE:
+ return "raise";
+ case RUBY_INTERNAL_EVENT_NEWOBJ:
+ return "newobj";
+ default:
+ return "unknown";
+ }
+ }
+
+ thread_data_t* check_fiber(prof_profile_t* profile, double measurement)
  {
- prof_method_t* result = NULL;
-
- if (excludes_method(key, profile))
+ thread_data_t* result = NULL;
+
+ // Get the current fiber
+ VALUE fiber = rb_fiber_current();
+
+ /* We need to switch the profiling context if we either had none before,
+ we don't merge fibers and the fiber ids differ, or the thread ids differ. */
+ if (profile->last_thread_data->fiber != fiber)
  {
- /* We found a exclusion sentinel so propagate it into the thread's local hash table. */
- /* TODO(nelgau): Is there a way to avoid this allocation completely so that all these
- tables share the same exclusion method struct? The first attempt failed due to my
- ignorance of the whims of the GC. */
- result = prof_method_create_excluded(klass, msym);
+ result = threads_table_lookup(profile, fiber);
+ if (!result)
+ {
+ result = threads_table_insert(profile, fiber);
+ }
+ switch_thread(profile, result, measurement);
  }
  else
  {
- result = prof_method_create(klass, msym, source_file, source_line);
+ result = profile->last_thread_data;
  }
-
- /* Insert the newly created method, or the exlcusion sentinel. */
- method_table_insert(profile->last_thread_data->method_table, result->key, result);
-
  return result;
  }
 
- static const char *
- get_event_name(rb_event_flag_t event)
+ static int excludes_method(st_data_t key, prof_profile_t* profile)
  {
- switch (event) {
- case RUBY_EVENT_LINE:
- return "line";
- case RUBY_EVENT_CLASS:
- return "class";
- case RUBY_EVENT_END:
- return "end";
- case RUBY_EVENT_CALL:
- return "call";
- case RUBY_EVENT_RETURN:
- return "return";
- case RUBY_EVENT_B_CALL:
- return "b-call";
- case RUBY_EVENT_B_RETURN:
- return "b-return";
- case RUBY_EVENT_C_CALL:
- return "c-call";
- case RUBY_EVENT_C_RETURN:
- return "c-return";
- case RUBY_EVENT_THREAD_BEGIN:
- return "thread-begin";
- case RUBY_EVENT_THREAD_END:
- return "thread-end";
- case RUBY_EVENT_FIBER_SWITCH:
- return "fiber-switch";
- case RUBY_EVENT_RAISE:
- return "raise";
- default:
- return "unknown";
- }
+ return (profile->exclude_methods_tbl &&
+ method_table_lookup(profile->exclude_methods_tbl, key) != NULL);
  }
 
- VALUE get_fiber(prof_profile_t* profile)
+ static prof_method_t* create_method(VALUE profile, st_data_t key, VALUE klass, VALUE msym, VALUE source_file, int source_line)
  {
- if (profile->merge_fibers)
- return rb_thread_current();
- else
- return rb_fiber_current();
+ prof_method_t* result = prof_method_create(profile, klass, msym, source_file, source_line);
+
+ prof_profile_t* profile_t = prof_get_profile(profile);
+ method_table_insert(profile_t->last_thread_data->method_table, result->key, result);
+
+ return result;
  }
- /* =========== Profiling ================= */
- thread_data_t* check_fiber(prof_profile_t *profile, double measurement)
+
+ static prof_method_t* check_parent_method(VALUE profile, thread_data_t* thread_data)
  {
- thread_data_t* result = NULL;
-
- /* Get the current thread and fiber information. */
- VALUE fiber = get_fiber(profile);
-
- /* We need to switch the profiling context if we either had none before,
- we don't merge fibers and the fiber ids differ, or the thread ids differ. */
- if (profile->last_thread_data->fiber != fiber)
+ VALUE msym = ID2SYM(rb_intern("_inserted_parent_"));
+ st_data_t key = method_key(cProfile, msym);
+
+ prof_method_t* result = method_table_lookup(thread_data->method_table, key);
+
+ if (!result)
  {
- result = threads_table_lookup(profile, fiber);
- if (!result)
- {
- result = threads_table_insert(profile, fiber);
- }
- switch_thread(profile, result, measurement);
+ result = create_method(profile, key, cProfile, msym, Qnil, 0);
  }
- else
+
+ return result;
+ }
+
+ prof_method_t* check_method(VALUE profile, rb_trace_arg_t* trace_arg, rb_event_flag_t event, thread_data_t* thread_data)
+ {
+ VALUE klass = rb_tracearg_defined_class(trace_arg);
+
+ /* Special case - skip any methods from the mProf
+ module or cProfile class since they clutter
+ the results but aren't important to them results. */
+ if (klass == cProfile)
+ return NULL;
+
+ #ifdef HAVE_RB_TRACEARG_CALLEE_ID
+ VALUE msym = rb_tracearg_callee_id(trace_arg);
+ #else
+ VALUE msym = rb_tracearg_method_id(trace_arg);
+ #endif
+
+ st_data_t key = method_key(klass, msym);
+
+ prof_profile_t* profile_t = prof_get_profile(profile);
+ if (excludes_method(key, profile_t))
+ return NULL;
+
+ prof_method_t* result = method_table_lookup(thread_data->method_table, key);
+
+ if (!result)
  {
- result = profile->last_thread_data;
+ VALUE source_file = (event != RUBY_EVENT_C_CALL ? rb_tracearg_path(trace_arg) : Qnil);
+ int source_line = (event != RUBY_EVENT_C_CALL ? FIX2INT(rb_tracearg_lineno(trace_arg)) : 0);
+ result = create_method(profile, key, klass, msym, source_file, source_line);
  }
+
  return result;
  }
 
- static void
- prof_trace(prof_profile_t* profile, rb_trace_arg_t *trace_arg, double measurement)
+ /* =========== Profiling ================= */
+ static void prof_trace(prof_profile_t* profile, rb_trace_arg_t* trace_arg, double measurement)
  {
  static VALUE last_fiber = Qnil;
- VALUE fiber = get_fiber(profile);
-
+ VALUE fiber = rb_fiber_current();
+
  rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
  const char* event_name = get_event_name(event);
-
+
  VALUE source_file = rb_tracearg_path(trace_arg);
  int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
-
- #ifdef HAVE_RB_TRACEARG_CALLEE_ID
+
+ #ifdef HAVE_RB_TRACEARG_CALLEE_ID
  VALUE msym = rb_tracearg_callee_id(trace_arg);
- #else
+ #else
  VALUE msym = rb_tracearg_method_id(trace_arg);
- #endif
-
+ #endif
+
  unsigned int klass_flags;
  VALUE klass = rb_tracearg_defined_class(trace_arg);
  VALUE resolved_klass = resolve_klass(klass, &klass_flags);
  const char* class_name = "";
-
+
  if (resolved_klass != Qnil)
  class_name = rb_class2name(resolved_klass);
-
+
  if (last_fiber != fiber)
  {
  fprintf(trace_file, "\n");
  }
-
+
  const char* method_name_char = (msym != Qnil ? rb_id2name(SYM2ID(msym)) : "");
  const char* source_file_char = (source_file != Qnil ? StringValuePtr(source_file) : "");
-
+
  fprintf(trace_file, "%2lu:%2f %-8s %s#%s %s:%2d\n",
- FIX2ULONG(fiber), (double) measurement,
+ FIX2ULONG(fiber), (double)measurement,
  event_name, class_name, method_name_char, source_file_char, source_line);
  fflush(trace_file);
  last_fiber = fiber;
  }
 
- static void
- prof_event_hook(VALUE trace_point, void* data)
+ static void prof_event_hook(VALUE trace_point, void* data)
  {
- prof_profile_t* profile = (prof_profile_t*)data;
- thread_data_t* thread_data = NULL;
- prof_frame_t *frame = NULL;
+ VALUE profile = (VALUE)data;
+ prof_profile_t* profile_t = prof_get_profile(profile);
+
  rb_trace_arg_t* trace_arg = rb_tracearg_from_tracepoint(trace_point);
- double measurement = prof_measure(profile->measurer, trace_arg);
+ double measurement = prof_measure(profile_t->measurer, trace_arg);
  rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
  VALUE self = rb_tracearg_self(trace_arg);
-
+
  if (trace_file != NULL)
  {
- prof_trace(profile, trace_arg, measurement);
+ prof_trace(profile_t, trace_arg, measurement);
  }
-
+
  /* Special case - skip any methods from the mProf
  module since they clutter the results but aren't important to them results. */
  if (self == mProf)
  return;
-
- thread_data = check_fiber(profile, measurement);
-
+
+ thread_data_t* thread_data = check_fiber(profile_t, measurement);
+
  if (!thread_data->trace)
  return;
-
- /* Get the current frame for the current thread. */
- frame = thread_data->stack->ptr;
-
+
  switch (event)
  {
  case RUBY_EVENT_LINE:
  {
- /* Keep track of the current line number in this method. When
- a new method is called, we know what line number it was
- called from. */
- if (frame->call_info)
+ prof_frame_t* frame = prof_frame_current(thread_data->stack);
+
+ if (!frame)
  {
- if (prof_frame_is_real(frame))
+ prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
+
+ if (!method)
+ break;
+
+ prof_call_tree_t* call_tree = prof_call_tree_create(method, NULL, method->source_file, method->source_line);
+ prof_add_call_tree(method->call_trees, call_tree);
+
+ if (thread_data->call_tree)
  {
- frame->source_file = rb_tracearg_path(trace_arg);
- frame->source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
+ prof_call_tree_add_parent(thread_data->call_tree, call_tree);
+ frame = prof_frame_unshift(thread_data->stack, call_tree, thread_data->call_tree, measurement);
  }
- break;
+ else
+ {
+ frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile_t->paused));
+ }
+
+ thread_data->call_tree = call_tree;
  }
 
- /* If we get here there was no frame, which means this is
- the first method seen for this thread, so fall through
- to below to create it. */
+ frame->source_file = rb_tracearg_path(trace_arg);
+ frame->source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
+
+ break;
  }
  case RUBY_EVENT_CALL:
  case RUBY_EVENT_C_CALL:
  {
- prof_frame_t* next_frame;
- prof_call_info_t* call_info;
- prof_method_t* method;
+ prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
 
- /* Get current measurement */
- measurement = prof_measure(profile->measurer, trace_arg);
+ if (!method)
+ break;
 
- VALUE klass = rb_tracearg_defined_class(trace_arg);
-
- /* Special case - skip any methods from the mProf
- module or cProfile class since they clutter
- the results but aren't important to them results. */
- if (klass == cProfile)
- return;
-
- #ifdef HAVE_RB_TRACEARG_CALLEE_ID
- VALUE msym = rb_tracearg_callee_id(trace_arg);
- #else
- VALUE msym = rb_tracearg_method_id(trace_arg);
- #endif
+ prof_frame_t* frame = prof_frame_current(thread_data->stack);
+ prof_call_tree_t* parent_call_tree = NULL;
+ prof_call_tree_t* call_tree = NULL;
 
- st_data_t key = method_key(klass, msym);
-
- method = method_table_lookup(thread_data->method_table, key);
-
- if (!method)
- {
- VALUE source_file = (event != RUBY_EVENT_C_CALL ? rb_tracearg_path(trace_arg) : Qnil);
- int source_line = (event != RUBY_EVENT_C_CALL ? FIX2INT(rb_tracearg_lineno(trace_arg)) : 0);
- method = create_method(profile, key, klass, msym, source_file, source_line);
- }
-
- if (method->excluded)
+ // Frame can be NULL if we are switching from one fiber to another (see FiberTest#fiber_test)
+ if (frame)
  {
- prof_stack_pass(thread_data->stack);
- break;
+ parent_call_tree = frame->call_tree;
+ call_tree = call_tree_table_lookup(parent_call_tree->children, method->key);
  }
-
- if (!frame->call_info)
+ else if (!frame && thread_data->call_tree)
  {
- method->root = true;
- call_info = prof_call_info_create(method, NULL, method->source_file, method->source_line);
- st_insert(method->parent_call_infos, (st_data_t)&key, (st_data_t)call_info);
+ // There is no current parent - likely we have returned out of the highest level method we have profiled so far.
+ // This can happen with enumerators (see fiber_test.rb). So create a new dummy parent.
+ prof_method_t* parent_method = check_parent_method(profile, thread_data);
+ parent_call_tree = prof_call_tree_create(parent_method, NULL, Qnil, 0);
+ prof_add_call_tree(parent_method->call_trees, parent_call_tree);
+ prof_call_tree_add_parent(thread_data->call_tree, parent_call_tree);
+ frame = prof_frame_unshift(thread_data->stack, parent_call_tree, thread_data->call_tree, measurement);
+ thread_data->call_tree = parent_call_tree;
  }
- else
+
+ if (!call_tree)
  {
- call_info = call_info_table_lookup(method->parent_call_infos, frame->call_info->method->key);
-
- if (!call_info)
- {
- /* This call info does not yet exist. So create it, then add
- it to previous callinfo's children and to the current method .*/
- call_info = prof_call_info_create(method, frame->call_info->method, frame->source_file, frame->source_line);
- call_info_table_insert(method->parent_call_infos, frame->call_info->method->key, call_info);
- call_info_table_insert(frame->call_info->method->child_call_infos, method->key, call_info);
- }
+ // This call info does not yet exist. So create it and add it to previous CallTree's children and the current method.
+ call_tree = prof_call_tree_create(method, parent_call_tree, frame ? frame->source_file : Qnil, frame? frame->source_line : 0);
+ prof_add_call_tree(method->call_trees, call_tree);
+ if (parent_call_tree)
+ prof_call_tree_add_child(parent_call_tree, call_tree);
  }
 
- /* Push a new frame onto the stack for a new c-call or ruby call (into a method) */
- next_frame = prof_stack_push(thread_data->stack, call_info, measurement, RTEST(profile->paused));
+ if (!thread_data->call_tree)
+ thread_data->call_tree = call_tree;
+
+ // Push a new frame onto the stack for a new c-call or ruby call (into a method)
+ prof_frame_t* next_frame = prof_frame_push(thread_data->stack, call_tree, measurement, RTEST(profile_t->paused));
  next_frame->source_file = method->source_file;
  next_frame->source_line = method->source_line;
  break;
@@ -295,8 +313,13 @@ prof_event_hook(VALUE trace_point, void* data)
  case RUBY_EVENT_RETURN:
  case RUBY_EVENT_C_RETURN:
  {
- /* Get current measurement */
- prof_stack_pop(thread_data->stack, measurement);
+ // We need to check for excluded methods so that we don't pop them off the stack
+ prof_method_t* method = check_method(profile, trace_arg, event, thread_data);
+
+ if (!method)
+ break;
+
+ prof_frame_pop(thread_data->stack, measurement);
  break;
  }
  case RUBY_INTERNAL_EVENT_NEWOBJ:
@@ -315,35 +338,33 @@ prof_event_hook(VALUE trace_point, void* data)
  }
  }
 
- void
- prof_install_hook(VALUE self)
+ void prof_install_hook(VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
-
+
  VALUE event_tracepoint = rb_tracepoint_new(Qnil,
  RUBY_EVENT_CALL | RUBY_EVENT_RETURN |
  RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN |
  RUBY_EVENT_LINE,
- prof_event_hook, profile);
+ prof_event_hook, (void*)self);
  rb_ary_push(profile->tracepoints, event_tracepoint);
-
+
  if (profile->measurer->track_allocations)
  {
- VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, profile);
+ VALUE allocation_tracepoint = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ, prof_event_hook, (void*)self);
  rb_ary_push(profile->tracepoints, allocation_tracepoint);
  }
-
+
  for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
  {
  rb_tracepoint_enable(rb_ary_entry(profile->tracepoints, i));
  }
  }
 
- void
- prof_remove_hook(VALUE self)
+ void prof_remove_hook(VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
-
+
  for (int i = 0; i < RARRAY_LEN(profile->tracepoints); i++)
  {
  rb_tracepoint_disable(rb_ary_entry(profile->tracepoints, i));
@@ -351,18 +372,16 @@ prof_remove_hook(VALUE self)
  rb_ary_clear(profile->tracepoints);
  }
 
- prof_profile_t*
- prof_get_profile(VALUE self)
+ prof_profile_t* prof_get_profile(VALUE self)
  {
  /* Can't use Data_Get_Struct because that triggers the event hook
  ending up in endless recursion. */
- return DATA_PTR(self);
+ return RTYPEDDATA_DATA(self);
  }
 
- static int
- collect_threads(st_data_t key, st_data_t value, st_data_t result)
+ static int collect_threads(st_data_t key, st_data_t value, st_data_t result)
  {
- thread_data_t* thread_data = (thread_data_t*) value;
+ thread_data_t* thread_data = (thread_data_t*)value;
  if (thread_data->trace)
  {
  VALUE threads_array = (VALUE)result;
@@ -372,36 +391,41 @@ collect_threads(st_data_t key, st_data_t value, st_data_t result)
  }
 
  /* ======== Profile Class ====== */
- static int
- mark_threads(st_data_t key, st_data_t value, st_data_t result)
+ static int mark_threads(st_data_t key, st_data_t value, st_data_t result)
  {
- thread_data_t *thread = (thread_data_t *) value;
+ thread_data_t* thread = (thread_data_t*)value;
  prof_thread_mark(thread);
  return ST_CONTINUE;
  }
 
- static int
- mark_methods(st_data_t key, st_data_t value, st_data_t result)
+ static int prof_profile_mark_methods(st_data_t key, st_data_t value, st_data_t result)
  {
- prof_method_t *method = (prof_method_t *) value;
+ prof_method_t* method = (prof_method_t*)value;
  prof_method_mark(method);
  return ST_CONTINUE;
  }
 
- static void
- prof_mark(prof_profile_t *profile)
+ static void prof_profile_mark(void* data)
  {
+ prof_profile_t* profile = (prof_profile_t*)data;
  rb_gc_mark(profile->tracepoints);
- st_foreach(profile->threads_tbl, mark_threads, 0);
- st_foreach(profile->exclude_methods_tbl, mark_methods, 0);
+ rb_gc_mark(profile->running);
+ rb_gc_mark(profile->paused);
+
+ // If GC stress is true (useful for debugging), when threads_table_create is called in the
+ // allocate method Ruby will immediately call this mark method. Thus the threads_tbl will be NULL.
+ if (profile->threads_tbl)
+ rb_st_foreach(profile->threads_tbl, mark_threads, 0);
+
+ if (profile->exclude_methods_tbl)
+ rb_st_foreach(profile->exclude_methods_tbl, prof_profile_mark_methods, 0);
  }
 
- /* Freeing the profile creates a cascade of freeing.
- It fress the thread table, which frees its methods,
- which frees its call infos. */
- static void
- prof_free(prof_profile_t *profile)
+ /* Freeing the profile creates a cascade of freeing. It frees its threads table, which frees
+ each thread and its associated call treee and methods. */
+ static void prof_profile_ruby_gc_free(void* data)
  {
+ prof_profile_t* profile = (prof_profile_t*)data;
  profile->last_thread_data = NULL;
 
  threads_table_free(profile->threads_tbl);
@@ -409,13 +433,13 @@ prof_free(prof_profile_t *profile)
 
  if (profile->exclude_threads_tbl)
  {
- st_free_table(profile->exclude_threads_tbl);
+ rb_st_free_table(profile->exclude_threads_tbl);
  profile->exclude_threads_tbl = NULL;
  }
 
  if (profile->include_threads_tbl)
  {
- st_free_table(profile->include_threads_tbl);
+ rb_st_free_table(profile->include_threads_tbl);
  profile->include_threads_tbl = NULL;
  }
 
@@ -429,32 +453,46 @@ prof_free(prof_profile_t *profile)
  xfree(profile);
  }
 
- static VALUE
- prof_allocate(VALUE klass)
+ size_t prof_profile_size(const void* data)
+ {
+ return sizeof(prof_profile_t);
+ }
+
+ static const rb_data_type_t profile_type =
+ {
+ .wrap_struct_name = "Profile",
+ .function =
+ {
+ .dmark = prof_profile_mark,
+ .dfree = prof_profile_ruby_gc_free,
+ .dsize = prof_profile_size,
+ },
+ .data = NULL,
+ .flags = RUBY_TYPED_FREE_IMMEDIATELY
+ };
+
+ static VALUE prof_allocate(VALUE klass)
  {
  VALUE result;
  prof_profile_t* profile;
- result = Data_Make_Struct(klass, prof_profile_t, prof_mark, prof_free, profile);
+ result = TypedData_Make_Struct(klass, prof_profile_t, &profile_type, profile);
  profile->threads_tbl = threads_table_create();
  profile->exclude_threads_tbl = NULL;
  profile->include_threads_tbl = NULL;
  profile->running = Qfalse;
  profile->allow_exceptions = false;
- profile->merge_fibers = false;
  profile->exclude_methods_tbl = method_table_create();
  profile->running = Qfalse;
  profile->tracepoints = rb_ary_new();
  return result;
  }
 
- static void
- prof_exclude_common_methods(VALUE profile)
+ static void prof_exclude_common_methods(VALUE profile)
  {
  rb_funcall(profile, rb_intern("exclude_common_methods!"), 0);
  }
 
- static int
- pop_frames(VALUE key, st_data_t value, st_data_t data)
+ static int pop_frames(VALUE key, st_data_t value, st_data_t data)
  {
  thread_data_t* thread_data = (thread_data_t*)value;
  prof_profile_t* profile = (prof_profile_t*)data;
@@ -463,7 +501,7 @@ pop_frames(VALUE key, st_data_t value, st_data_t data)
  if (profile->last_thread_data->fiber != thread_data->fiber)
  switch_thread(profile, thread_data, measurement);
 
- while (prof_stack_pop(thread_data->stack, measurement));
+ while (prof_frame_pop(thread_data->stack, measurement));
 
  return ST_CONTINUE;
  }
@@ -471,7 +509,7 @@ pop_frames(VALUE key, st_data_t value, st_data_t data)
  static void
  prof_stop_threads(prof_profile_t* profile)
  {
- st_foreach(profile->threads_tbl, pop_frames, (st_data_t)profile);
+ rb_st_foreach(profile->threads_tbl, pop_frames, (st_data_t)profile);
  }
 
  /* call-seq:
@@ -480,19 +518,16 @@ prof_stop_threads(prof_profile_t* profile)
 
  Returns a new profiler. Possible options for the options hash are:
 
- measure_mode:: Measure mode. Specifies the profile measure mode.
+ measure_mode: Measure mode. Specifies the profile measure mode.
  If not specified, defaults to RubyProf::WALL_TIME.
- exclude_threads:: Threads to exclude from the profiling results.
- include_threads:: Focus profiling on only the given threads. This will ignore
- all other threads.
- allow_exceptions:: Whether to raise exceptions encountered during profiling,
+ allow_exceptions: Whether to raise exceptions encountered during profiling,
  or to suppress all exceptions during profiling
- merge_fibers:: Whether profiling data for a given thread's fibers should all be
- subsumed under a single entry. Basically only useful to produce
- callgrind profiles.
- */
- static VALUE
- prof_initialize(int argc, VALUE *argv, VALUE self)
+ track_allocations: Whether to track object allocations while profiling. True or false.
+ exclude_common: Exclude common methods from the profile. True or false.
+ exclude_threads: Threads to exclude from the profiling results.
+ include_threads: Focus profiling on only the given threads. This will ignore
+ all other threads. */
+ static VALUE prof_initialize(int argc, VALUE* argv, VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
  VALUE mode_or_options;
@@ -501,7 +536,6 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
  VALUE include_threads = Qnil;
  VALUE exclude_common = Qnil;
  VALUE allow_exceptions = Qfalse;
- VALUE merge_fibers = Qfalse;
  VALUE track_allocations = Qfalse;
 
  int i;
@@ -521,7 +555,6 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
  mode = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("measure_mode")));
  track_allocations = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("track_allocations")));
  allow_exceptions = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("allow_exceptions")));
- merge_fibers = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("merge_fibers")));
  exclude_common = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_common")));
  exclude_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("exclude_threads")));
  include_threads = rb_hash_aref(mode_or_options, ID2SYM(rb_intern("include_threads")));
@@ -542,7 +575,6 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
  }
  profile->measurer = prof_get_measurer(NUM2INT(mode), track_allocations == Qtrue);
  profile->allow_exceptions = (allow_exceptions == Qtrue);
- profile->merge_fibers = (merge_fibers == Qtrue);
 
  if (exclude_threads != Qnil)
  {
@@ -552,7 +584,7 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
  for (i = 0; i < RARRAY_LEN(exclude_threads); i++)
  {
  VALUE thread = rb_ary_entry(exclude_threads, i);
- st_insert(profile->exclude_threads_tbl, thread, Qtrue);
+ rb_st_insert(profile->exclude_threads_tbl, thread, Qtrue);
  }
  }
 
@@ -564,7 +596,7 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
  for (i = 0; i < RARRAY_LEN(include_threads); i++)
  {
  VALUE thread = rb_ary_entry(include_threads, i);
- st_insert(profile->include_threads_tbl, thread, Qtrue);
+ rb_st_insert(profile->include_threads_tbl, thread, Qtrue);
  }
  }
 
@@ -580,8 +612,7 @@ prof_initialize(int argc, VALUE *argv, VALUE self)
  paused? -> boolean
 
  Returns whether a profile is currently paused.*/
- static VALUE
- prof_paused(VALUE self)
+ static VALUE prof_paused(VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
  return profile->paused;
@@ -602,8 +633,7 @@ prof_running(VALUE self)
  mode -> measure_mode
 
  Returns the measure mode used in this profile.*/
- static VALUE
- prof_profile_measure_mode(VALUE self)
+ static VALUE prof_profile_measure_mode(VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
  return INT2NUM(profile->measurer->mode);
@@ -613,8 +643,7 @@ prof_profile_measure_mode(VALUE self)
  track_allocations -> boolean
 
  Returns if object allocations were tracked in this profile.*/
- static VALUE
- prof_profile_track_allocations(VALUE self)
+ static VALUE prof_profile_track_allocations(VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
  return profile->measurer->track_allocations ? Qtrue : Qfalse;
@@ -624,8 +653,7 @@ prof_profile_track_allocations(VALUE self)
  start -> self
 
  Starts recording profile data.*/
- static VALUE
- prof_start(VALUE self)
+ static VALUE prof_start(VALUE self)
  {
  char* trace_file_name;
 
@@ -638,24 +666,25 @@ prof_start(VALUE self)
 
  profile->running = Qtrue;
  profile->paused = Qfalse;
- profile->last_thread_data = threads_table_insert(profile, get_fiber(profile));
+ profile->last_thread_data = threads_table_insert(profile, rb_fiber_current());
 
  /* open trace file if environment wants it */
  trace_file_name = getenv("RUBY_PROF_TRACE");
- if (trace_file_name != NULL)
- {
- if (strcmp(trace_file_name, "stdout") == 0)
- {
- trace_file = stdout;
- }
- else if (strcmp(trace_file_name, "stderr") == 0)
- {
- trace_file = stderr;
- }
- else
- {
- trace_file = fopen(trace_file_name, "w");
- }
+
+ if (trace_file_name != NULL)
+ {
+ if (strcmp(trace_file_name, "stdout") == 0)
+ {
+ trace_file = stdout;
+ }
+ else if (strcmp(trace_file_name, "stderr") == 0)
+ {
+ trace_file = stderr;
+ }
+ else
+ {
+ trace_file = fopen(trace_file_name, "w");
+ }
  }
 
  prof_install_hook(self);
@@ -666,8 +695,7 @@ prof_start(VALUE self)
  pause -> self
 
  Pauses collecting profile data. */
- static VALUE
- prof_pause(VALUE self)
+ static VALUE prof_pause(VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
  if (profile->running == Qfalse)
@@ -679,7 +707,7 @@ prof_pause(VALUE self)
  {
  profile->paused = Qtrue;
  profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
- st_foreach(profile->threads_tbl, pause_thread, (st_data_t) profile);
+ rb_st_foreach(profile->threads_tbl, pause_thread, (st_data_t)profile);
  }
 
  return self;
@@ -690,8 +718,7 @@ prof_pause(VALUE self)
  resume(&block) -> self
 
  Resumes recording profile data.*/
- static VALUE
- prof_resume(VALUE self)
+ static VALUE prof_resume(VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
  if (profile->running == Qfalse)
@@ -703,7 +730,7 @@ prof_resume(VALUE self)
  {
  profile->paused = Qfalse;
  profile->measurement_at_pause_resume = prof_measure(profile->measurer, NULL);
- st_foreach(profile->threads_tbl, unpause_thread, (st_data_t) profile);
+ rb_st_foreach(profile->threads_tbl, unpause_thread, (st_data_t)profile);
  }
 
  return rb_block_given_p() ? rb_ensure(rb_yield, self, prof_pause, self) : self;
@@ -713,8 +740,7 @@ prof_resume(VALUE self)
  stop -> self
 
  Stops collecting profile data.*/
- static VALUE
- prof_stop(VALUE self)
+ static VALUE prof_stop(VALUE self)
  {
  prof_profile_t* profile = prof_get_profile(self);
 
@@ -728,15 +754,11 @@ prof_stop(VALUE self)
  /* close trace file if open */
  if (trace_file != NULL)
  {
- if (trace_file !=stderr && trace_file != stdout)
- {
- #ifdef _MSC_VER
- _fcloseall();
- #else
- fclose(trace_file);
- #endif
- }
- trace_file = NULL;
+ if (trace_file != stderr && trace_file != stdout)
+ {
+ fclose(trace_file);
+ }
+ trace_file = NULL;
  }
 
  prof_stop_threads(profile);
@@ -753,12 +775,11 @@ prof_stop(VALUE self)
  threads -> Array of RubyProf::Thread
 
  Returns an array of RubyProf::Thread instances that were profiled. */
- static VALUE
- prof_threads(VALUE self)
+ static VALUE prof_threads(VALUE self)
  {
  VALUE result = rb_ary_new();
  prof_profile_t* profile = prof_get_profile(self);
- st_foreach(profile->threads_tbl, collect_threads, result);
+ rb_st_foreach(profile->threads_tbl, collect_threads, result);
  return result;
  }
 
@@ -773,8 +794,7 @@ prof_threads(VALUE self)
  ..
  end
  */
- static VALUE
- prof_profile_object(VALUE self)
+ static VALUE prof_profile_object(VALUE self)
  {
  int result;
  prof_profile_t* profile = prof_get_profile(self);
@@ -794,7 +814,6 @@ prof_profile_object(VALUE self)
  }
 
  return self;
-
  }
 
  /* Document-method: RubyProf::Profile::Profile
@@ -809,8 +828,7 @@ prof_profile_object(VALUE self)
  ..
  end
  */
- static VALUE
- prof_profile_class(int argc, VALUE *argv, VALUE klass)
+ static VALUE prof_profile_class(int argc, VALUE* argv, VALUE klass)
  {
  return prof_profile_object(rb_class_new_instance(argc, argv, cProfile));
  }
@@ -820,25 +838,22 @@ prof_profile_class(int argc, VALUE *argv, VALUE klass)
 
  Excludes the method from profiling results.
  */
- static VALUE
- prof_exclude_method(VALUE self, VALUE klass, VALUE msym)
+ static VALUE prof_exclude_method(VALUE self, VALUE klass, VALUE msym)
  {
  prof_profile_t* profile = prof_get_profile(self);
 
- st_data_t key = method_key(klass, msym);
- prof_method_t *method;
-
  if (profile->running == Qtrue)
  {
  rb_raise(rb_eRuntimeError, "RubyProf.start was already called");
  }
 
- method = method_table_lookup(profile->exclude_methods_tbl, key);
+ st_data_t key = method_key(klass, msym);
+ prof_method_t* method = method_table_lookup(profile->exclude_methods_tbl, key);
 
  if (!method)
  {
- method = prof_method_create_excluded(klass, msym);
- method_table_insert(profile->exclude_methods_tbl, method->key, method);
+ method = prof_method_create(self, klass, msym, Qnil, 0);
+ method_table_insert(profile->exclude_methods_tbl, method->key, method);
  }
 
  return self;
@@ -861,8 +876,8 @@ VALUE prof_profile_load(VALUE self, VALUE data)
  for (int i = 0; i < rb_array_len(threads); i++)
  {
  VALUE thread = rb_ary_entry(threads, i);
- thread_data_t* thread_data = DATA_PTR(thread);
- st_insert(profile->threads_tbl, (st_data_t)thread_data->fiber_id, (st_data_t)thread_data);
+ thread_data_t* thread_data = prof_get_thread(thread);
+ rb_st_insert(profile->threads_tbl, (st_data_t)thread_data->fiber_id, (st_data_t)thread_data);
  }
 
  return data;
@@ -870,8 +885,6 @@ VALUE prof_profile_load(VALUE self, VALUE data)
 
  void rp_init_profile(void)
  {
- mProf = rb_define_module("RubyProf");
-
  cProfile = rb_define_class_under(mProf, "Profile", rb_cObject);
  rb_define_alloc_func(cProfile, prof_allocate);
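Related to the prof_initialize documentation rewritten earlier in this diff (merge_fibers is removed, while track_allocations and exclude_common are now listed alongside the existing options), a small illustrative sketch of constructing a profile with those options; the option values are arbitrary examples, not recommendations:

    require 'ruby-prof'

    profile = RubyProf::Profile.new(
      measure_mode:      RubyProf::WALL_TIME,
      track_allocations: false,
      exclude_common:    true,
      allow_exceptions:  false,
      exclude_threads:   [],
      include_threads:   [Thread.current]
    )

    profile.start
    # ... code to profile ...
    result = profile.stop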