ruby-prof 1.1.0-x64-mingw32 → 1.3.0-x64-mingw32

Files changed (75)
  1. checksums.yaml +4 -4
  2. data/CHANGES +19 -1
  3. data/bin/ruby-prof +100 -152
  4. data/ext/ruby_prof/rp_aggregate_call_tree.c +59 -0
  5. data/ext/ruby_prof/rp_aggregate_call_tree.h +13 -0
  6. data/ext/ruby_prof/rp_allocation.c +67 -59
  7. data/ext/ruby_prof/rp_allocation.h +3 -3
  8. data/ext/ruby_prof/rp_call_tree.c +369 -0
  9. data/ext/ruby_prof/rp_call_tree.h +43 -0
  10. data/ext/ruby_prof/rp_call_trees.c +288 -0
  11. data/ext/ruby_prof/rp_call_trees.h +28 -0
  12. data/ext/ruby_prof/rp_measure_allocations.c +11 -13
  13. data/ext/ruby_prof/rp_measure_process_time.c +11 -13
  14. data/ext/ruby_prof/rp_measure_wall_time.c +17 -15
  15. data/ext/ruby_prof/rp_measurement.c +47 -40
  16. data/ext/ruby_prof/rp_measurement.h +7 -7
  17. data/ext/ruby_prof/rp_method.c +116 -255
  18. data/ext/ruby_prof/rp_method.h +31 -39
  19. data/ext/ruby_prof/rp_profile.c +311 -281
  20. data/ext/ruby_prof/rp_profile.h +1 -2
  21. data/ext/ruby_prof/rp_stack.c +113 -105
  22. data/ext/ruby_prof/rp_stack.h +17 -20
  23. data/ext/ruby_prof/rp_thread.c +136 -111
  24. data/ext/ruby_prof/rp_thread.h +12 -9
  25. data/ext/ruby_prof/ruby_prof.c +27 -23
  26. data/ext/ruby_prof/ruby_prof.h +9 -0
  27. data/ext/ruby_prof/vc/ruby_prof.vcxproj +11 -7
  28. data/lib/ruby-prof.rb +2 -3
  29. data/lib/ruby-prof/assets/call_stack_printer.html.erb +4 -7
  30. data/lib/ruby-prof/assets/graph_printer.html.erb +5 -6
  31. data/lib/ruby-prof/{call_info.rb → call_tree.rb} +6 -6
  32. data/lib/ruby-prof/call_tree_visitor.rb +36 -0
  33. data/lib/ruby-prof/measurement.rb +5 -2
  34. data/lib/ruby-prof/method_info.rb +3 -15
  35. data/lib/ruby-prof/printers/call_info_printer.rb +12 -10
  36. data/lib/ruby-prof/printers/call_stack_printer.rb +19 -22
  37. data/lib/ruby-prof/printers/call_tree_printer.rb +1 -1
  38. data/lib/ruby-prof/printers/dot_printer.rb +3 -3
  39. data/lib/ruby-prof/printers/graph_printer.rb +3 -4
  40. data/lib/ruby-prof/printers/multi_printer.rb +2 -2
  41. data/lib/ruby-prof/rack.rb +3 -0
  42. data/lib/ruby-prof/thread.rb +3 -18
  43. data/lib/ruby-prof/version.rb +1 -1
  44. data/ruby-prof.gemspec +7 -0
  45. data/test/alias_test.rb +42 -45
  46. data/test/basic_test.rb +0 -86
  47. data/test/{call_info_visitor_test.rb → call_tree_visitor_test.rb} +6 -5
  48. data/test/call_trees_test.rb +66 -0
  49. data/test/exclude_methods_test.rb +17 -12
  50. data/test/fiber_test.rb +197 -9
  51. data/test/gc_test.rb +36 -42
  52. data/test/inverse_call_tree_test.rb +175 -0
  53. data/test/line_number_test.rb +67 -70
  54. data/test/marshal_test.rb +7 -11
  55. data/test/measure_allocations_test.rb +224 -234
  56. data/test/measure_allocations_trace_test.rb +224 -234
  57. data/test/measure_memory_trace_test.rb +814 -469
  58. data/test/measure_process_time_test.rb +0 -64
  59. data/test/measure_times.rb +2 -0
  60. data/test/measure_wall_time_test.rb +34 -58
  61. data/test/pause_resume_test.rb +19 -10
  62. data/test/prime.rb +1 -3
  63. data/test/prime_script.rb +6 -0
  64. data/test/printers_test.rb +1 -1
  65. data/test/recursive_test.rb +50 -54
  66. data/test/start_stop_test.rb +19 -19
  67. data/test/test_helper.rb +3 -15
  68. data/test/thread_test.rb +11 -11
  69. data/test/unique_call_path_test.rb +25 -95
  70. metadata +19 -10
  71. data/ext/ruby_prof/rp_call_info.c +0 -271
  72. data/ext/ruby_prof/rp_call_info.h +0 -35
  73. data/lib/2.6.5/ruby_prof.so +0 -0
  74. data/lib/ruby-prof/call_info_visitor.rb +0 -38
  75. data/test/parser_timings.rb +0 -24
data/ext/ruby_prof/rp_profile.h
@@ -10,13 +10,12 @@
 
 extern VALUE cProfile;
 
-typedef struct
+typedef struct prof_profile_t
 {
     VALUE running;
     VALUE paused;
 
     prof_measurer_t* measurer;
-    VALUE threads;
 
     VALUE tracepoints;
 
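Note: giving the typedef a struct tag (typedef struct prof_profile_t) lets other headers refer to the profile through a forward declaration instead of including rp_profile.h. A minimal sketch of the pattern; the helper name below is hypothetical, not part of the gem:

    // Sketch: forward-declare the tagged struct; rp_profile.h is not needed.
    struct prof_profile_t;

    // A function can now accept the profile opaquely through a pointer.
    void example_report(struct prof_profile_t* profile); // hypothetical helper
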
data/ext/ruby_prof/rp_stack.c
@@ -3,31 +3,12 @@
 
 #include "rp_stack.h"
 
-#define INITIAL_STACK_SIZE 32
+#define INITIAL_STACK_SIZE 16
 
-void
-prof_frame_pause(prof_frame_t *frame, double current_measurement)
+// Creates a stack of prof_frame_t to keep track of timings for active methods.
+prof_stack_t* prof_stack_create()
 {
-    if (frame && prof_frame_is_unpaused(frame))
-        frame->pause_time = current_measurement;
-}
-
-void
-prof_frame_unpause(prof_frame_t *frame, double current_measurement)
-{
-    if (frame && prof_frame_is_paused(frame))
-    {
-        frame->dead_time += (current_measurement - frame->pause_time);
-        frame->pause_time = -1;
-    }
-}
-
-/* Creates a stack of prof_frame_t to keep track
-   of timings for active methods. */
-prof_stack_t *
-prof_stack_create()
-{
-    prof_stack_t *stack = ALLOC(prof_stack_t);
+    prof_stack_t* stack = ALLOC(prof_stack_t);
     stack->start = ZALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
     stack->ptr = stack->start;
     stack->end = stack->start + INITIAL_STACK_SIZE;
@@ -35,22 +16,24 @@ prof_stack_create()
     return stack;
 }
 
-void
-prof_stack_free(prof_stack_t *stack)
+void prof_stack_free(prof_stack_t* stack)
 {
     xfree(stack->start);
     xfree(stack);
 }
 
-prof_frame_t *
-prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
+prof_frame_t* prof_stack_last(prof_stack_t* stack)
 {
-    prof_frame_t *result;
-    prof_frame_t* parent_frame;
+    if (stack->ptr == stack->start)
+        return NULL;
+    else
+        return stack->ptr - 1;
+}
 
-    /* Is there space on the stack? If not, double
-       its size. */
-    if (stack->ptr == stack->end - 1)
+void prof_stack_verify_size(prof_stack_t* stack)
+{
+    // Is there space on the stack? If not, double its size.
+    if (stack->ptr == stack->end)
     {
         size_t len = stack->ptr - stack->start;
         size_t new_capacity = (stack->end - stack->start) * 2;
@@ -60,14 +43,53 @@ prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measure
         stack->ptr = stack->start + len;
         stack->end = stack->start + new_capacity;
     }
+}
+
+prof_frame_t* prof_stack_push(prof_stack_t* stack)
+{
+    prof_stack_verify_size(stack);
 
-    parent_frame = stack->ptr;
+    prof_frame_t* result = stack->ptr;
     stack->ptr++;
+    return result;
+}
+
+prof_frame_t* prof_stack_pop(prof_stack_t* stack)
+{
+    prof_frame_t* result = prof_stack_last(stack);
+    if (result)
+        stack->ptr--;
+
+    return result;
+}
+
+// ---------------- Frame Methods ----------------------------
+void prof_frame_pause(prof_frame_t* frame, double current_measurement)
+{
+    if (frame && prof_frame_is_unpaused(frame))
+        frame->pause_time = current_measurement;
+}
+
+void prof_frame_unpause(prof_frame_t* frame, double current_measurement)
+{
+    if (prof_frame_is_paused(frame))
+    {
+        frame->dead_time += (current_measurement - frame->pause_time);
+        frame->pause_time = -1;
+    }
+}
+
+prof_frame_t* prof_frame_current(prof_stack_t* stack)
+{
+    return prof_stack_last(stack);
+}
 
-    result = stack->ptr;
-    result->call_info = call_info;
-    result->call_info->depth = (int)(stack->ptr - stack->start); // shortening of 64 bit into 32;
-    result->passes = 0;
+prof_frame_t* prof_frame_push(prof_stack_t* stack, prof_call_tree_t* call_tree, double measurement, bool paused)
+{
+    prof_frame_t* parent_frame = prof_stack_last(stack);
+    prof_frame_t* result = prof_stack_push(stack);
+
+    result->call_tree = call_tree;
 
     result->start_time = measurement;
     result->pause_time = -1; // init as not paused.
@@ -78,21 +100,22 @@ prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measure
     result->source_file = Qnil;
     result->source_line = 0;
 
-    call_info->measurement->called++;
-    call_info->visits++;
+    call_tree->measurement->called++;
+    call_tree->visits++;
 
-    if (call_info->method->visits > 0)
+    if (call_tree->method->visits > 0)
     {
-        call_info->method->recursive = true;
+        call_tree->method->recursive = true;
     }
-    call_info->method->measurement->called++;
-    call_info->method->visits++;
+    call_tree->method->measurement->called++;
+    call_tree->method->visits++;
 
     // Unpause the parent frame, if it exists.
     // If currently paused then:
     //   1) The child frame will begin paused.
     //   2) The parent will inherit the child's dead time.
-    prof_frame_unpause(parent_frame, measurement);
+    if (parent_frame)
+        prof_frame_unpause(parent_frame, measurement);
 
     if (paused)
     {
@@ -103,92 +126,77 @@ prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measure
     return result;
 }
 
-prof_frame_t *
-prof_stack_pop(prof_stack_t *stack, double measurement)
+prof_frame_t* prof_frame_unshift(prof_stack_t* stack, prof_call_tree_t* parent_call_tree, prof_call_tree_t* call_tree, double measurement)
 {
-    prof_frame_t *frame;
-    prof_frame_t *parent_frame;
-    prof_call_info_t *call_info;
+    if (prof_stack_last(stack))
+        rb_raise(rb_eRuntimeError, "Stach unshift can only be called with an empty stack");
 
-    double total_time;
-    double self_time;
+    parent_call_tree->measurement->total_time = call_tree->measurement->total_time;
+    parent_call_tree->measurement->self_time = 0;
+    parent_call_tree->measurement->wait_time = call_tree->measurement->wait_time;
 
-    if (stack->ptr == stack->start)
-        return NULL;
+    parent_call_tree->method->measurement->total_time += call_tree->measurement->total_time;
+    parent_call_tree->method->measurement->wait_time += call_tree->measurement->wait_time;
 
-    frame = stack->ptr;
-
-    /* Match passes until we reach the frame itself. */
-    if (prof_frame_is_pass(frame))
-    {
-        frame->passes--;
-        /* Additional frames can be consumed. See pop_frames(). */
-        return frame;
-    }
-
-    /* Consume this frame. */
-    stack->ptr--;
-
-    parent_frame = stack->ptr;
+    return prof_frame_push(stack, parent_call_tree, measurement, false);
+}
 
-    /* Calculate the total time this method took */
-    prof_frame_unpause(frame, measurement);
-
-    total_time = measurement - frame->start_time - frame->dead_time;
-    self_time = total_time - frame->child_time - frame->wait_time;
+prof_frame_t* prof_frame_pop(prof_stack_t* stack, double measurement)
+{
+    prof_frame_t* frame = prof_stack_pop(stack);
 
-    /* Update information about the current method */
-    call_info = frame->call_info;
+    if (!frame)
+        return NULL;
 
-    // Update method measurement
-    call_info->method->measurement->self_time += self_time;
-    call_info->method->measurement->wait_time += frame->wait_time;
-    if (call_info->method->visits == 1)
-        call_info->method->measurement->total_time += total_time;
+    /* Calculate the total time this method took */
+    prof_frame_unpause(frame, measurement);
 
-    call_info->method->visits--;
+    double total_time = measurement - frame->start_time - frame->dead_time;
+    double self_time = total_time - frame->child_time - frame->wait_time;
 
-    // Update method measurement
-    call_info->measurement->self_time += self_time;
-    call_info->measurement->wait_time += frame->wait_time;
-    if (call_info->visits == 1)
-        call_info->measurement->total_time += total_time;
+    /* Update information about the current method */
+    prof_call_tree_t* call_tree = frame->call_tree;
 
-    call_info->visits--;
+    // Update method measurement
+    call_tree->method->measurement->self_time += self_time;
+    call_tree->method->measurement->wait_time += frame->wait_time;
+    if (call_tree->method->visits == 1)
+        call_tree->method->measurement->total_time += total_time;
 
-    if (parent_frame)
-    {
-        parent_frame->child_time += total_time;
-        parent_frame->dead_time += frame->dead_time;
-    }
+    call_tree->method->visits--;
 
-    return frame;
-}
+    // Update method measurement
+    call_tree->measurement->self_time += self_time;
+    call_tree->measurement->wait_time += frame->wait_time;
+    if (call_tree->visits == 1)
+        call_tree->measurement->total_time += total_time;
 
-prof_frame_t *
-prof_stack_pass(prof_stack_t *stack)
-{
-    prof_frame_t* frame = stack->ptr;
-    if (frame)
+    call_tree->visits--;
+
+    prof_frame_t* parent_frame = prof_stack_last(stack);
+    if (parent_frame)
     {
-        frame->passes++;
+        parent_frame->child_time += total_time;
+        parent_frame->dead_time += frame->dead_time;
     }
+
+    frame->source_file = Qnil;
+
     return frame;
 }
 
-prof_method_t*
-prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line)
+prof_method_t* prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line)
 {
     prof_frame_t* frame = stack->ptr;
     while (frame >= stack->start)
     {
-        if (!frame->call_info)
+        if (!frame->call_tree)
             return NULL;
 
-        if (rb_str_equal(source_file, frame->call_info->method->source_file) &&
-            source_line >= frame->call_info->method->source_line)
+        if (rb_str_equal(source_file, frame->call_tree->method->source_file) &&
+            source_line >= frame->call_tree->method->source_line)
         {
-            return frame->call_info->method;
+            return frame->call_tree->method;
         }
         frame--;
    }
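Note: the timing arithmetic in prof_frame_pop is unchanged in substance. Paused time (dead_time) is excluded from a frame's total, and self time is whatever remains after subtracting child and wait time. A worked sketch of the two formulas above, with made-up numbers:

    // Sketch of the prof_frame_pop accounting; all values are illustrative.
    double measurement = 10.0; // clock reading when the method returned
    double start_time = 2.0;   // clock reading when the method was entered
    double dead_time = 1.0;    // time spent paused, to be ignored
    double child_time = 4.0;   // time attributed to callees
    double wait_time = 0.5;    // time spent waiting on other threads

    double total_time = measurement - start_time - dead_time; // 7.0
    double self_time = total_time - child_time - wait_time;   // 2.5
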
data/ext/ruby_prof/rp_stack.h
@@ -5,20 +5,19 @@
 #define __RP_STACK__
 
 #include "ruby_prof.h"
-#include "rp_call_info.h"
+#include "rp_call_tree.h"
 
-/* Temporary object that maintains profiling information
-   for active methods. They are created and destroyed
-   as the program moves up and down its stack. */
-typedef struct
+/* Temporary object that maintains profiling information
+   for active methods. They are created and destroyed
+   as the program moves up and down its stack. */
+typedef struct prof_frame_t
 {
     /* Caching prof_method_t values significantly
        increases performance. */
-    prof_call_info_t *call_info;
+    prof_call_tree_t* call_tree;
 
     VALUE source_file;
     unsigned int source_line;
-    unsigned int passes; /* Count of "pass" frames, _after_ this one. */
 
     double start_time;
     double switch_time; /* Time at switch to different thread */
@@ -28,9 +27,6 @@ typedef struct
     double dead_time; // Time to ignore (i.e. total amount of time between pause/resume blocks)
 } prof_frame_t;
 
-#define prof_frame_is_real(f) ((f)->passes == 0)
-#define prof_frame_is_pass(f) ((f)->passes > 0)
-
 #define prof_frame_is_paused(f) (f->pause_time >= 0)
 #define prof_frame_is_unpaused(f) (f->pause_time < 0)
 
@@ -38,19 +34,20 @@ void prof_frame_pause(prof_frame_t*, double current_measurement);
 void prof_frame_unpause(prof_frame_t*, double current_measurement);
 
 /* Current stack of active methods.*/
-typedef struct
+typedef struct prof_stack_t
 {
-    prof_frame_t *start;
-    prof_frame_t *end;
-    prof_frame_t *ptr;
+    prof_frame_t* start;
+    prof_frame_t* end;
+    prof_frame_t* ptr;
 } prof_stack_t;
 
-prof_stack_t *prof_stack_create(void);
-void prof_stack_free(prof_stack_t *stack);
+prof_stack_t* prof_stack_create(void);
+void prof_stack_free(prof_stack_t* stack);
 
-prof_frame_t *prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused);
-prof_frame_t *prof_stack_pop(prof_stack_t *stack, double measurement);
-prof_frame_t *prof_stack_pass(prof_stack_t *stack);
-prof_method_t *prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line);
+prof_frame_t* prof_frame_current(prof_stack_t* stack);
+prof_frame_t* prof_frame_push(prof_stack_t* stack, prof_call_tree_t* call_tree, double measurement, bool paused);
+prof_frame_t* prof_frame_unshift(prof_stack_t* stack, prof_call_tree_t* parent_call_tree, prof_call_tree_t* call_tree, double measurement);
+prof_frame_t* prof_frame_pop(prof_stack_t* stack, double measurement);
+prof_method_t* prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line);
 
 #endif //__RP_STACK__
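Note: the header now separates raw stack storage (prof_stack_create/prof_stack_free) from frame lifecycle helpers (prof_frame_push, prof_frame_pop, prof_frame_unshift, prof_frame_current). A minimal driver sketch, assuming only the declarations above; now_seconds() and tree are stand-ins for a real clock and a real prof_call_tree_t node, not part of the gem:

    // Sketch: how a tracing hook might drive the reworked API.
    prof_stack_t* stack = prof_stack_create();

    // Call event: push a frame for the invoked method (not paused).
    prof_frame_t* frame = prof_frame_push(stack, tree, now_seconds(), false);

    // Return event: pop the frame; its self/total times roll up into
    // the call tree node and the parent frame's child_time.
    prof_frame_pop(stack, now_seconds());

    prof_stack_free(stack);
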
data/ext/ruby_prof/rp_thread.c
@@ -14,21 +14,20 @@ You cannot create an instance of RubyProf::Thread, instead you access it from a
     thread.root_methods.sort.each do |method|
       puts method.total_time
     end
-  end
-*/
+  end */
 
 #include "rp_thread.h"
 #include "rp_profile.h"
 
 VALUE cRpThread;
 
-/* ====== thread_data_t ====== */
-thread_data_t*
-thread_data_create(void)
+// ====== thread_data_t ======
+thread_data_t* thread_data_create(void)
 {
     thread_data_t* result = ALLOC(thread_data_t);
     result->stack = prof_stack_create();
     result->method_table = method_table_create();
+    result->call_tree = NULL;
     result->object = Qnil;
     result->methods = Qnil;
     result->fiber_id = Qnil;
@@ -38,123 +37,141 @@ thread_data_create(void)
     return result;
 }
 
-static void
-prof_thread_free(thread_data_t* thread_data)
+static int mark_methods(st_data_t key, st_data_t value, st_data_t result)
 {
-    /* Has this method object been accessed by Ruby? If
-       yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
-    if (thread_data->object != Qnil)
-    {
-        RDATA(thread_data->object)->dmark = NULL;
-        RDATA(thread_data->object)->dfree = NULL;
-        RDATA(thread_data->object)->data = NULL;
-        thread_data->object = Qnil;
-    }
-
-    method_table_free(thread_data->method_table);
-    prof_stack_free(thread_data->stack);
-
-
-    xfree(thread_data);
-}
-
-static int
-mark_methods(st_data_t key, st_data_t value, st_data_t result)
-{
-    prof_method_t *method = (prof_method_t *) value;
+    prof_method_t* method = (prof_method_t*)value;
     prof_method_mark(method);
     return ST_CONTINUE;
 }
 
-size_t
-prof_thread_size(const void *data)
+size_t prof_thread_size(const void* data)
 {
-    return sizeof(prof_call_info_t);
+    return sizeof(thread_data_t);
 }
 
-void
-prof_thread_mark(void *data)
+void prof_thread_mark(void* data)
 {
-    thread_data_t *thread = (thread_data_t*)data;
-
+    if (!data)
+        return;
+
+    thread_data_t* thread = (thread_data_t*)data;
+
     if (thread->object != Qnil)
         rb_gc_mark(thread->object);
-
+
+    rb_gc_mark(thread->fiber);
+
     if (thread->methods != Qnil)
         rb_gc_mark(thread->methods);
-
+
     if (thread->fiber_id != Qnil)
         rb_gc_mark(thread->fiber_id);
-
+
     if (thread->thread_id != Qnil)
         rb_gc_mark(thread->thread_id);
-
-    st_foreach(thread->method_table, mark_methods, 0);
+
+    if (thread->call_tree)
+        prof_call_tree_mark(thread->call_tree);
+
+    rb_st_foreach(thread->method_table, mark_methods, 0);
 }
 
+void prof_thread_ruby_gc_free(void* data)
+{
+    if (data)
+    {
+        thread_data_t* thread_data = (thread_data_t*)data;
+        thread_data->object = Qnil;
+    }
+}
+
+static void prof_thread_free(thread_data_t* thread_data)
+{
+    /* Has this method object been accessed by Ruby? If
+       yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
+    if (thread_data->object != Qnil)
+    {
+        RTYPEDDATA(thread_data->object)->data = NULL;
+        thread_data->object = Qnil;
+    }
+
+    method_table_free(thread_data->method_table);
+
+    if (thread_data->call_tree)
+        prof_call_tree_free(thread_data->call_tree);
+
+    prof_stack_free(thread_data->stack);
+
+    xfree(thread_data);
+}
 
-VALUE
-prof_thread_wrap(thread_data_t *thread)
+static const rb_data_type_t thread_type =
+{
+    .wrap_struct_name = "ThreadInfo",
+    .function =
+    {
+        .dmark = prof_thread_mark,
+        .dfree = prof_thread_ruby_gc_free,
+        .dsize = prof_thread_size,
+    },
+    .data = NULL,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY
+};
+
+VALUE prof_thread_wrap(thread_data_t* thread)
 {
     if (thread->object == Qnil)
     {
-        thread->object = Data_Wrap_Struct(cRpThread, prof_thread_mark, NULL, thread);
+        thread->object = TypedData_Wrap_Struct(cRpThread, &thread_type, thread);
     }
     return thread->object;
 }
 
-static VALUE
-prof_thread_allocate(VALUE klass)
+static VALUE prof_thread_allocate(VALUE klass)
 {
     thread_data_t* thread_data = thread_data_create();
     thread_data->object = prof_thread_wrap(thread_data);
     return thread_data->object;
 }
 
-static thread_data_t*
-prof_get_thread(VALUE self)
+thread_data_t* prof_get_thread(VALUE self)
 {
     /* Can't use Data_Get_Struct because that triggers the event hook
        ending up in endless recursion. */
-    thread_data_t* result = DATA_PTR(self);
+    thread_data_t* result = RTYPEDDATA_DATA(self);
     if (!result)
         rb_raise(rb_eRuntimeError, "This RubyProf::Thread instance has already been freed, likely because its profile has been freed.");
 
     return result;
 }
 
-/* ====== Thread Table ====== */
-/* The thread table is hash keyed on ruby thread_id that stores instances
-   of thread_data_t. */
+// ====== Thread Table ======
+// The thread table is hash keyed on ruby fiber_id that stores instances of thread_data_t.
 
-st_table *
-threads_table_create()
+st_table* threads_table_create()
 {
-    return st_init_numtable();
+    return rb_st_init_numtable();
 }
 
-static int
-thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
+static int thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
 {
     prof_thread_free((thread_data_t*)value);
     return ST_CONTINUE;
 }
 
-void
-threads_table_free(st_table *table)
+void threads_table_free(st_table* table)
 {
-    st_foreach(table, thread_table_free_iterator, 0);
-    st_free_table(table);
+    rb_st_foreach(table, thread_table_free_iterator, 0);
+    rb_st_free_table(table);
 }
 
-thread_data_t *
-threads_table_lookup(void *prof, VALUE fiber)
+thread_data_t* threads_table_lookup(void* prof, VALUE fiber)
 {
-    prof_profile_t *profile = prof;
+    prof_profile_t* profile = prof;
     thread_data_t* result = NULL;
     st_data_t val;
 
-    if (st_lookup(profile->threads_tbl, fiber, &val))
+    if (rb_st_lookup(profile->threads_tbl, fiber, &val))
     {
         result = (thread_data_t*)val;
     }
@@ -162,24 +179,23 @@ threads_table_lookup(void *prof, VALUE fiber)
     return result;
 }
 
-thread_data_t*
-threads_table_insert(void *prof, VALUE fiber)
+thread_data_t* threads_table_insert(void* prof, VALUE fiber)
 {
-    prof_profile_t *profile = prof;
-    thread_data_t *result = thread_data_create();
+    prof_profile_t* profile = prof;
+    thread_data_t* result = thread_data_create();
     VALUE thread = rb_thread_current();
 
     result->fiber = fiber;
     result->fiber_id = rb_obj_id(fiber);
     result->thread_id = rb_obj_id(thread);
-    st_insert(profile->threads_tbl, (st_data_t)fiber, (st_data_t)result);
+    rb_st_insert(profile->threads_tbl, (st_data_t)fiber, (st_data_t)result);
 
     // Are we tracing this thread?
-    if (profile->include_threads_tbl && !st_lookup(profile->include_threads_tbl, thread, 0))
+    if (profile->include_threads_tbl && !rb_st_lookup(profile->include_threads_tbl, thread, 0))
     {
-        result->trace= false;
+        result->trace = false;
     }
-    else if (profile->exclude_threads_tbl && st_lookup(profile->exclude_threads_tbl, thread, 0))
+    else if (profile->exclude_threads_tbl && rb_st_lookup(profile->exclude_threads_tbl, thread, 0))
    {
         result->trace = false;
    }
@@ -187,25 +203,28 @@ threads_table_insert(void *prof, VALUE fiber)
     {
         result->trace = true;
     }
-
+
     return result;
 }
 
-void
-switch_thread(void *prof, thread_data_t *thread_data, double measurement)
+// ====== Profiling Methods ======
+void switch_thread(void* prof, thread_data_t* thread_data, double measurement)
 {
-    prof_profile_t *profile = prof;
+    prof_profile_t* profile = prof;
 
     /* Get current frame for this thread */
-    prof_frame_t* frame = thread_data->stack->ptr;
-    frame->wait_time += measurement - frame->switch_time;
-    frame->switch_time = measurement;
-
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
+    if (frame)
+    {
+        frame->wait_time += measurement - frame->switch_time;
+        frame->switch_time = 0;
+    }
+
     /* Save on the last thread the time of the context switch
        and reset this thread's last context switch to 0.*/
     if (profile->last_thread_data)
     {
-        prof_frame_t* last_frame = profile->last_thread_data->stack->ptr;
+        prof_frame_t* last_frame = prof_frame_current(profile->last_thread_data->stack);
         if (last_frame)
             last_frame->switch_time = measurement;
     }
@@ -215,10 +234,10 @@ switch_thread(void *prof, thread_data_t *thread_data, double measurement)
 
 int pause_thread(st_data_t key, st_data_t value, st_data_t data)
 {
-    thread_data_t* thread_data = (thread_data_t *) value;
+    thread_data_t* thread_data = (thread_data_t*)value;
     prof_profile_t* profile = (prof_profile_t*)data;
 
-    prof_frame_t* frame = thread_data->stack->ptr;
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
     prof_frame_pause(frame, profile->measurement_at_pause_resume);
 
     return ST_CONTINUE;
@@ -226,37 +245,33 @@ int pause_thread(st_data_t key, st_data_t value, st_data_t data)
 
 int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
 {
-    thread_data_t* thread_data = (thread_data_t *) value;
+    thread_data_t* thread_data = (thread_data_t*)value;
     prof_profile_t* profile = (prof_profile_t*)data;
 
-    prof_frame_t* frame = thread_data->stack->ptr;
+    prof_frame_t* frame = prof_frame_current(thread_data->stack);
     prof_frame_unpause(frame, profile->measurement_at_pause_resume);
 
     return ST_CONTINUE;
 }
 
-static int
-collect_methods(st_data_t key, st_data_t value, st_data_t result)
+// ====== Helper Methods ======
+static int collect_methods(st_data_t key, st_data_t value, st_data_t result)
 {
     /* Called for each method stored in a thread's method table.
       We want to store the method info information into an array.*/
-    VALUE methods = (VALUE) result;
-    prof_method_t *method = (prof_method_t *) value;
-
-    if (!method->excluded)
-    {
-        rb_ary_push(methods, prof_method_wrap(method));
-    }
+    VALUE methods = (VALUE)result;
+    prof_method_t* method = (prof_method_t*)value;
+    rb_ary_push(methods, prof_method_wrap(method));
 
     return ST_CONTINUE;
 }
 
+// ====== RubyProf::Thread ======
 /* call-seq:
    id -> number
 
   Returns the thread id of this thread. */
-static VALUE
-prof_thread_id(VALUE self)
+static VALUE prof_thread_id(VALUE self)
 {
     thread_data_t* thread = prof_get_thread(self);
     return thread->thread_id;
@@ -266,57 +281,66 @@ prof_thread_id(VALUE self)
    fiber_id -> number
 
   Returns the fiber id of this thread. */
-static VALUE
-prof_fiber_id(VALUE self)
+static VALUE prof_fiber_id(VALUE self)
 {
     thread_data_t* thread = prof_get_thread(self);
     return thread->fiber_id;
 }
 
+/* call-seq:
+   call_tree -> CallTree
+
+  Returns the root of the call tree. */
+static VALUE prof_call_tree(VALUE self)
+{
+    thread_data_t* thread = prof_get_thread(self);
+    return prof_call_tree_wrap(thread->call_tree);
+}
+
 /* call-seq:
    methods -> [RubyProf::MethodInfo]
 
   Returns an array of methods that were called from this
   thread during program execution. */
-static VALUE
-prof_thread_methods(VALUE self)
+static VALUE prof_thread_methods(VALUE self)
 {
     thread_data_t* thread = prof_get_thread(self);
     if (thread->methods == Qnil)
     {
         thread->methods = rb_ary_new();
-        st_foreach(thread->method_table, collect_methods, thread->methods);
+        rb_st_foreach(thread->method_table, collect_methods, thread->methods);
     }
     return thread->methods;
 }
 
 /* :nodoc: */
-static VALUE
-prof_thread_dump(VALUE self)
+static VALUE prof_thread_dump(VALUE self)
 {
-    thread_data_t* thread_data = DATA_PTR(self);
+    thread_data_t* thread_data = RTYPEDDATA_DATA(self);
 
     VALUE result = rb_hash_new();
     rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
     rb_hash_aset(result, ID2SYM(rb_intern("methods")), prof_thread_methods(self));
+    rb_hash_aset(result, ID2SYM(rb_intern("call_tree")), prof_call_tree(self));
 
     return result;
 }
 
 /* :nodoc: */
-static VALUE
-prof_thread_load(VALUE self, VALUE data)
+static VALUE prof_thread_load(VALUE self, VALUE data)
 {
-    thread_data_t* thread_data = DATA_PTR(self);
-    thread_data->object = self;
+    thread_data_t* thread_data = RTYPEDDATA_DATA(self);
+
+    VALUE call_tree = rb_hash_aref(data, ID2SYM(rb_intern("call_tree")));
+    thread_data->call_tree = prof_get_call_tree(call_tree);
 
     thread_data->fiber_id = rb_hash_aref(data, ID2SYM(rb_intern("fiber_id")));
-    VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
 
+    VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
     for (int i = 0; i < rb_array_len(methods); i++)
     {
         VALUE method = rb_ary_entry(methods, i);
-        prof_method_t *method_data = DATA_PTR(method);
+        prof_method_t* method_data = RTYPEDDATA_DATA(method);
         method_table_insert(thread_data->method_table, method_data->key, method_data);
     }
 
@@ -325,11 +349,12 @@ prof_thread_load(VALUE self, VALUE data)
 
 void rp_init_thread(void)
 {
-    cRpThread = rb_define_class_under(mProf, "Thread", rb_cData);
+    cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
     rb_undef_method(CLASS_OF(cRpThread), "new");
     rb_define_alloc_func(cRpThread, prof_thread_allocate);
 
     rb_define_method(cRpThread, "id", prof_thread_id, 0);
+    rb_define_method(cRpThread, "call_tree", prof_call_tree, 0);
     rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
     rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
     rb_define_method(cRpThread, "_dump_data", prof_thread_dump, 0);
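Note: the move from Data_Wrap_Struct/DATA_PTR to TypedData_Wrap_Struct/RTYPEDDATA_DATA (and from rb_cData to rb_cObject) follows Ruby's typed-data C API, where a static rb_data_type_t descriptor tells the GC how to mark, free, and size the wrapped struct. A generic sketch of the pattern, independent of ruby-prof's structs; all names below are hypothetical:

    #include <ruby.h>

    typedef struct { int value; } example_t; // hypothetical payload

    static void example_free(void* data) { xfree(data); }
    static size_t example_size(const void* data) { return sizeof(example_t); }

    static const rb_data_type_t example_type = {
        .wrap_struct_name = "Example",
        .function = { .dmark = NULL, .dfree = example_free, .dsize = example_size },
        .data = NULL,
        .flags = RUBY_TYPED_FREE_IMMEDIATELY
    };

    static VALUE example_allocate(VALUE klass)
    {
        example_t* example = ALLOC(example_t);
        example->value = 0;
        // Wrap with the type descriptor; read back later with RTYPEDDATA_DATA().
        return TypedData_Wrap_Struct(klass, &example_type, example);
    }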