ruby-prof 0.18.0-x64-mingw32 → 1.1.0-x64-mingw32

Files changed (119)
  1. checksums.yaml +4 -4
  2. data/CHANGES +32 -0
  3. data/LICENSE +2 -2
  4. data/README.rdoc +1 -483
  5. data/Rakefile +3 -6
  6. data/bin/ruby-prof +65 -30
  7. data/ext/ruby_prof/extconf.rb +6 -38
  8. data/ext/ruby_prof/rp_allocation.c +279 -0
  9. data/ext/ruby_prof/rp_allocation.h +31 -0
  10. data/ext/ruby_prof/rp_call_info.c +129 -283
  11. data/ext/ruby_prof/rp_call_info.h +16 -34
  12. data/ext/ruby_prof/rp_measure_allocations.c +25 -49
  13. data/ext/ruby_prof/rp_measure_memory.c +21 -56
  14. data/ext/ruby_prof/rp_measure_process_time.c +35 -39
  15. data/ext/ruby_prof/rp_measure_wall_time.c +36 -19
  16. data/ext/ruby_prof/rp_measurement.c +230 -0
  17. data/ext/ruby_prof/rp_measurement.h +50 -0
  18. data/ext/ruby_prof/rp_method.c +389 -389
  19. data/ext/ruby_prof/rp_method.h +34 -39
  20. data/ext/ruby_prof/rp_profile.c +895 -0
  21. data/ext/ruby_prof/rp_profile.h +37 -0
  22. data/ext/ruby_prof/rp_stack.c +103 -80
  23. data/ext/ruby_prof/rp_stack.h +5 -12
  24. data/ext/ruby_prof/rp_thread.c +143 -83
  25. data/ext/ruby_prof/rp_thread.h +15 -6
  26. data/ext/ruby_prof/ruby_prof.c +11 -757
  27. data/ext/ruby_prof/ruby_prof.h +4 -47
  28. data/ext/ruby_prof/vc/ruby_prof.vcxproj +10 -8
  29. data/lib/{2.6.3 → 2.6.5}/ruby_prof.so +0 -0
  30. data/lib/ruby-prof.rb +2 -18
  31. data/lib/ruby-prof/assets/call_stack_printer.html.erb +713 -0
  32. data/lib/ruby-prof/assets/call_stack_printer.png +0 -0
  33. data/lib/ruby-prof/assets/graph_printer.html.erb +356 -0
  34. data/lib/ruby-prof/call_info.rb +35 -93
  35. data/lib/ruby-prof/call_info_visitor.rb +19 -21
  36. data/lib/ruby-prof/compatibility.rb +37 -107
  37. data/lib/ruby-prof/exclude_common_methods.rb +198 -0
  38. data/lib/ruby-prof/measurement.rb +14 -0
  39. data/lib/ruby-prof/method_info.rb +52 -83
  40. data/lib/ruby-prof/printers/abstract_printer.rb +73 -50
  41. data/lib/ruby-prof/printers/call_info_printer.rb +13 -3
  42. data/lib/ruby-prof/printers/call_stack_printer.rb +62 -145
  43. data/lib/ruby-prof/printers/call_tree_printer.rb +20 -12
  44. data/lib/ruby-prof/printers/dot_printer.rb +5 -5
  45. data/lib/ruby-prof/printers/flat_printer.rb +6 -24
  46. data/lib/ruby-prof/printers/graph_html_printer.rb +6 -192
  47. data/lib/ruby-prof/printers/graph_printer.rb +13 -15
  48. data/lib/ruby-prof/printers/multi_printer.rb +66 -23
  49. data/lib/ruby-prof/profile.rb +10 -3
  50. data/lib/ruby-prof/rack.rb +0 -3
  51. data/lib/ruby-prof/thread.rb +12 -12
  52. data/lib/ruby-prof/version.rb +1 -1
  53. data/ruby-prof.gemspec +2 -2
  54. data/test/abstract_printer_test.rb +0 -27
  55. data/test/alias_test.rb +129 -0
  56. data/test/basic_test.rb +41 -40
  57. data/test/call_info_visitor_test.rb +3 -3
  58. data/test/dynamic_method_test.rb +0 -2
  59. data/test/fiber_test.rb +11 -17
  60. data/test/gc_test.rb +96 -0
  61. data/test/line_number_test.rb +120 -39
  62. data/test/marshal_test.rb +119 -0
  63. data/test/measure_allocations.rb +30 -0
  64. data/test/measure_allocations_test.rb +371 -12
  65. data/test/measure_allocations_trace_test.rb +385 -0
  66. data/test/measure_memory_trace_test.rb +756 -0
  67. data/test/measure_process_time_test.rb +821 -33
  68. data/test/measure_times.rb +54 -0
  69. data/test/measure_wall_time_test.rb +349 -145
  70. data/test/multi_printer_test.rb +1 -34
  71. data/test/parser_timings.rb +24 -0
  72. data/test/pause_resume_test.rb +5 -5
  73. data/test/prime.rb +2 -0
  74. data/test/printer_call_stack_test.rb +28 -0
  75. data/test/printer_call_tree_test.rb +31 -0
  76. data/test/printer_flat_test.rb +68 -0
  77. data/test/printer_graph_html_test.rb +60 -0
  78. data/test/printer_graph_test.rb +41 -0
  79. data/test/printers_test.rb +32 -166
  80. data/test/printing_recursive_graph_test.rb +26 -72
  81. data/test/recursive_test.rb +72 -77
  82. data/test/stack_printer_test.rb +2 -15
  83. data/test/start_stop_test.rb +22 -25
  84. data/test/test_helper.rb +5 -248
  85. data/test/thread_test.rb +11 -54
  86. data/test/unique_call_path_test.rb +16 -28
  87. data/test/yarv_test.rb +1 -0
  88. metadata +28 -36
  89. data/examples/flat.txt +0 -50
  90. data/examples/graph.dot +0 -84
  91. data/examples/graph.html +0 -823
  92. data/examples/graph.txt +0 -139
  93. data/examples/multi.flat.txt +0 -23
  94. data/examples/multi.graph.html +0 -760
  95. data/examples/multi.grind.dat +0 -114
  96. data/examples/multi.stack.html +0 -547
  97. data/examples/stack.html +0 -547
  98. data/ext/ruby_prof/rp_measure.c +0 -40
  99. data/ext/ruby_prof/rp_measure.h +0 -45
  100. data/ext/ruby_prof/rp_measure_cpu_time.c +0 -136
  101. data/ext/ruby_prof/rp_measure_gc_runs.c +0 -73
  102. data/ext/ruby_prof/rp_measure_gc_time.c +0 -60
  103. data/lib/ruby-prof/aggregate_call_info.rb +0 -76
  104. data/lib/ruby-prof/assets/call_stack_printer.css.html +0 -117
  105. data/lib/ruby-prof/assets/call_stack_printer.js.html +0 -385
  106. data/lib/ruby-prof/printers/flat_printer_with_line_numbers.rb +0 -83
  107. data/lib/ruby-prof/profile/exclude_common_methods.rb +0 -207
  108. data/lib/ruby-prof/profile/legacy_method_elimination.rb +0 -50
  109. data/test/aggregate_test.rb +0 -136
  110. data/test/block_test.rb +0 -74
  111. data/test/call_info_test.rb +0 -78
  112. data/test/issue137_test.rb +0 -63
  113. data/test/measure_cpu_time_test.rb +0 -212
  114. data/test/measure_gc_runs_test.rb +0 -32
  115. data/test/measure_gc_time_test.rb +0 -36
  116. data/test/measure_memory_test.rb +0 -33
  117. data/test/method_elimination_test.rb +0 -84
  118. data/test/module_test.rb +0 -45
  119. data/test/stack_test.rb +0 -138
data/ext/ruby_prof/rp_profile.h (new file)

@@ -0,0 +1,37 @@
+ /* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
+ Please see the LICENSE file for copyright and distribution information */
+
+ #ifndef __RP_PROFILE_H__
+ #define __RP_PROFILE_H__
+
+ #include "ruby_prof.h"
+ #include "rp_measurement.h"
+ #include "rp_thread.h"
+
+ extern VALUE cProfile;
+
+ typedef struct
+ {
+ VALUE running;
+ VALUE paused;
+
+ prof_measurer_t* measurer;
+ VALUE threads;
+
+ VALUE tracepoints;
+
+ st_table* threads_tbl;
+ st_table* exclude_threads_tbl;
+ st_table* include_threads_tbl;
+ st_table* exclude_methods_tbl;
+ thread_data_t* last_thread_data;
+ double measurement_at_pause_resume;
+ bool allow_exceptions;
+ bool merge_fibers;
+ } prof_profile_t;
+
+ void rp_init_profile(void);
+ prof_profile_t* prof_get_profile(VALUE self);
+
+
+ #endif //__RP_PROFILE_H__
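The new rp_profile.h gathers the profiler's state into a single prof_profile_t struct and exposes prof_get_profile() so other translation units can unwrap it from a Ruby Profile object. A minimal sketch of how another source file in the extension might use that accessor (illustrative only, compiles as part of the extension rather than standalone; the function name prof_profile_running_p is hypothetical):

#include "rp_profile.h"

/* Returns Qtrue when the profile is running and not paused. */
static VALUE
prof_profile_running_p(VALUE self)
{
    prof_profile_t* profile = prof_get_profile(self);  /* unwrap the C struct */
    /* running and paused are stored as Ruby VALUEs (Qtrue/Qfalse), not C booleans */
    return (profile->running == Qtrue && profile->paused != Qtrue) ? Qtrue : Qfalse;
}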
data/ext/ruby_prof/rp_stack.c

@@ -3,7 +3,7 @@
 
  #include "rp_stack.h"
 
- #define INITIAL_STACK_SIZE 8
+ #define INITIAL_STACK_SIZE 32
 
  void
  prof_frame_pause(prof_frame_t *frame, double current_measurement)
@@ -15,20 +15,20 @@ prof_frame_pause(prof_frame_t *frame, double current_measurement)
  void
  prof_frame_unpause(prof_frame_t *frame, double current_measurement)
  {
- if (frame && prof_frame_is_paused(frame)) {
+ if (frame && prof_frame_is_paused(frame))
+ {
  frame->dead_time += (current_measurement - frame->pause_time);
  frame->pause_time = -1;
  }
  }
 
-
  /* Creates a stack of prof_frame_t to keep track
  of timings for active methods. */
  prof_stack_t *
  prof_stack_create()
  {
  prof_stack_t *stack = ALLOC(prof_stack_t);
- stack->start = ALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
+ stack->start = ZALLOC_N(prof_frame_t, INITIAL_STACK_SIZE);
  stack->ptr = stack->start;
  stack->end = stack->start + INITIAL_STACK_SIZE;
 
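prof_frame_unpause folds the time a frame spent paused into its dead_time, which the pop path later subtracts from the measured total; the stack itself now starts zero-initialized (ZALLOC_N) at 32 slots instead of 8. A standalone sketch of the pause/dead-time bookkeeping, using a simplified stand-in for prof_frame_t and a hypothetical harness:

/* Standalone sketch of the pause/dead-time bookkeeping used above.
   The frame struct here is a simplified stand-in, not the real prof_frame_t. */
#include <stdio.h>

typedef struct
{
    double start_time;
    double pause_time;   /* -1 means "not paused" */
    double dead_time;    /* total time spent paused */
} frame_t;

static void frame_pause(frame_t* frame, double now)
{
    if (frame && frame->pause_time == -1)
        frame->pause_time = now;
}

static void frame_unpause(frame_t* frame, double now)
{
    if (frame && frame->pause_time != -1)
    {
        frame->dead_time += now - frame->pause_time;
        frame->pause_time = -1;
    }
}

int main(void)
{
    frame_t frame = { 0.0, -1, 0.0 };
    frame_pause(&frame, 2.0);    /* paused at t=2 */
    frame_unpause(&frame, 5.0);  /* resumed at t=5, so 3 units of dead time */
    /* measured time at t=10 excludes the paused span: 10 - 0 - 3 = 7 */
    printf("dead time: %.1f, measured time: %.1f\n",
           frame.dead_time, 10.0 - frame.start_time - frame.dead_time);
    return 0;
}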
@@ -45,61 +45,62 @@ prof_stack_free(prof_stack_t *stack)
  prof_frame_t *
  prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused)
  {
- prof_frame_t *result;
- prof_frame_t* parent_frame;
- prof_method_t *method;
+ prof_frame_t *result;
+ prof_frame_t* parent_frame;
 
- /* Is there space on the stack? If not, double
+ /* Is there space on the stack? If not, double
  its size. */
- if (stack->ptr == stack->end)
- {
- size_t len = stack->ptr - stack->start;
- size_t new_capacity = (stack->end - stack->start) * 2;
- REALLOC_N(stack->start, prof_frame_t, new_capacity);
- /* Memory just got moved, reset pointers */
- stack->ptr = stack->start + len;
- stack->end = stack->start + new_capacity;
- }
-
- parent_frame = prof_stack_peek(stack);
-
- // Reserve the next available frame pointer.
- result = stack->ptr++;
-
- result->call_info = call_info;
- result->call_info->depth = (int)(stack->ptr - stack->start); // shortening of 64 bit into 32;
- result->passes = 0;
-
- result->start_time = measurement;
- result->pause_time = -1; // init as not paused.
- result->switch_time = 0;
- result->wait_time = 0;
- result->child_time = 0;
- result->dead_time = 0;
-
- method = call_info->target;
-
- /* If the method was visited previously, it's recursive. */
- if (method->visits > 0)
- {
- method->recursive = 1;
- call_info->recursive = 1;
- }
- /* Enter the method. */
- method->visits++;
-
- // Unpause the parent frame, if it exists.
- // If currently paused then:
- // 1) The child frame will begin paused.
- // 2) The parent will inherit the child's dead time.
- prof_frame_unpause(parent_frame, measurement);
+ if (stack->ptr == stack->end - 1)
+ {
+ size_t len = stack->ptr - stack->start;
+ size_t new_capacity = (stack->end - stack->start) * 2;
+ REALLOC_N(stack->start, prof_frame_t, new_capacity);
+
+ /* Memory just got moved, reset pointers */
+ stack->ptr = stack->start + len;
+ stack->end = stack->start + new_capacity;
+ }
 
- if (paused) {
- prof_frame_pause(result, measurement);
- }
+ parent_frame = stack->ptr;
+ stack->ptr++;
+
+ result = stack->ptr;
+ result->call_info = call_info;
+ result->call_info->depth = (int)(stack->ptr - stack->start); // shortening of 64 bit into 32;
+ result->passes = 0;
+
+ result->start_time = measurement;
+ result->pause_time = -1; // init as not paused.
+ result->switch_time = 0;
+ result->wait_time = 0;
+ result->child_time = 0;
+ result->dead_time = 0;
+ result->source_file = Qnil;
+ result->source_line = 0;
+
+ call_info->measurement->called++;
+ call_info->visits++;
+
+ if (call_info->method->visits > 0)
+ {
+ call_info->method->recursive = true;
+ }
+ call_info->method->measurement->called++;
+ call_info->method->visits++;
+
+ // Unpause the parent frame, if it exists.
+ // If currently paused then:
+ // 1) The child frame will begin paused.
+ // 2) The parent will inherit the child's dead time.
+ prof_frame_unpause(parent_frame, measurement);
+
+ if (paused)
+ {
+ prof_frame_pause(result, measurement);
+ }
 
- // Return the result
- return result;
+ // Return the result
+ return result;
  }
 
  prof_frame_t *
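prof_stack_push grows the frame array by doubling and then rebases ptr and end, since REALLOC_N may move the block; and because frames are now taken with a pre-increment of ptr (which appears to leave the zero-initialized slot 0 as a root frame), the growth check fires one slot early (stack->ptr == stack->end - 1). A standalone sketch of that growth pattern using plain realloc (the frame struct and harness are simplified stand-ins, error handling omitted):

/* Standalone sketch of the doubling frame stack used by prof_stack_push.
   Plain malloc/realloc stand in for Ruby's ALLOC_N/REALLOC_N macros. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int depth; } frame_t;   /* simplified stand-in for prof_frame_t */

typedef struct
{
    frame_t* start;
    frame_t* end;      /* one past the last allocated slot */
    frame_t* ptr;      /* current top frame; slot 0 acts as the root frame */
} frame_stack_t;

static frame_t* stack_push(frame_stack_t* stack)
{
    if (stack->ptr == stack->end - 1)    /* same growth trigger as the diff */
    {
        size_t len = stack->ptr - stack->start;
        size_t new_capacity = (stack->end - stack->start) * 2;
        stack->start = realloc(stack->start, new_capacity * sizeof(frame_t));
        /* the block may have moved, so rebase the cursor and end pointers */
        stack->ptr = stack->start + len;
        stack->end = stack->start + new_capacity;
    }
    stack->ptr++;                        /* reserve the next frame */
    stack->ptr->depth = (int)(stack->ptr - stack->start);
    return stack->ptr;
}

int main(void)
{
    frame_stack_t stack;
    stack.start = calloc(4, sizeof(frame_t));   /* slot 0 = zeroed root frame */
    stack.ptr = stack.start;
    stack.end = stack.start + 4;

    for (int i = 0; i < 10; i++)
        printf("pushed frame at depth %d\n", stack_push(&stack)->depth);

    free(stack.start);
    return 0;
}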
@@ -108,23 +109,18 @@ prof_stack_pop(prof_stack_t *stack, double measurement)
  prof_frame_t *frame;
  prof_frame_t *parent_frame;
  prof_call_info_t *call_info;
- prof_method_t *method;
 
  double total_time;
  double self_time;
 
- frame = prof_stack_peek(stack);
+ if (stack->ptr == stack->start)
+ return NULL;
 
- /* Frame can be null, which means the stack is empty. This can happen if
- RubProf.start is called from a method that exits. And it can happen if an
- exception is raised in code that is being profiled and the stack unwinds
- (RubyProf is not notified of that by the ruby runtime. */
- if (!frame) {
- return NULL;
- }
+ frame = stack->ptr;
 
  /* Match passes until we reach the frame itself. */
- if (prof_frame_is_pass(frame)) {
+ if (prof_frame_is_pass(frame))
+ {
  frame->passes--;
  /* Additional frames can be consumed. See pop_frames(). */
  return frame;
@@ -133,30 +129,37 @@ prof_stack_pop(prof_stack_t *stack, double measurement)
  /* Consume this frame. */
  stack->ptr--;
 
+ parent_frame = stack->ptr;
+
  /* Calculate the total time this method took */
  prof_frame_unpause(frame, measurement);
+
  total_time = measurement - frame->start_time - frame->dead_time;
  self_time = total_time - frame->child_time - frame->wait_time;
 
  /* Update information about the current method */
  call_info = frame->call_info;
- method = call_info->target;
 
- call_info->called++;
- call_info->total_time += total_time;
- call_info->self_time += self_time;
- call_info->wait_time += frame->wait_time;
+ // Update method measurement
+ call_info->method->measurement->self_time += self_time;
+ call_info->method->measurement->wait_time += frame->wait_time;
+ if (call_info->method->visits == 1)
+ call_info->method->measurement->total_time += total_time;
 
- /* Leave the method. */
- method->visits--;
+ call_info->method->visits--;
+
+ // Update method measurement
+ call_info->measurement->self_time += self_time;
+ call_info->measurement->wait_time += frame->wait_time;
+ if (call_info->visits == 1)
+ call_info->measurement->total_time += total_time;
+
+ call_info->visits--;
 
- parent_frame = prof_stack_peek(stack);
  if (parent_frame)
  {
  parent_frame->child_time += total_time;
  parent_frame->dead_time += frame->dead_time;
-
- call_info->line = parent_frame->line;
  }
 
  return frame;
@@ -165,9 +168,29 @@ prof_stack_pop(prof_stack_t *stack, double measurement)
  prof_frame_t *
  prof_stack_pass(prof_stack_t *stack)
  {
- prof_frame_t *frame = prof_stack_peek(stack);
- if (frame) {
- frame->passes++;
- }
- return frame;
+ prof_frame_t* frame = stack->ptr;
+ if (frame)
+ {
+ frame->passes++;
+ }
+ return frame;
+ }
+
+ prof_method_t*
+ prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line)
+ {
+ prof_frame_t* frame = stack->ptr;
+ while (frame >= stack->start)
+ {
+ if (!frame->call_info)
+ return NULL;
+
+ if (rb_str_equal(source_file, frame->call_info->method->source_file) &&
+ source_line >= frame->call_info->method->source_line)
+ {
+ return frame->call_info->method;
+ }
+ frame--;
+ }
+ return NULL;
  }
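The new prof_find_method walks the frame stack from the newest frame downward and returns the first method whose source file matches and whose definition starts at or before the probed line. A standalone sketch of that search, with plain C strings standing in for the Ruby VALUEs the real code compares via rb_str_equal (types and harness are simplified stand-ins):

/* Standalone sketch of the search prof_find_method performs. */
#include <stdio.h>
#include <string.h>

typedef struct
{
    const char* source_file;
    int source_line;          /* line where the method definition starts */
    const char* name;
} method_t;

static const method_t* find_method(const method_t** frames, int top,
                                   const char* source_file, int source_line)
{
    for (int i = top; i >= 0; i--)
    {
        if (!frames[i])
            return NULL;      /* hit the empty root frame at the bottom of the stack */

        if (strcmp(source_file, frames[i]->source_file) == 0 &&
            source_line >= frames[i]->source_line)
        {
            return frames[i];
        }
    }
    return NULL;
}

int main(void)
{
    method_t outer = { "foo.rb", 1,  "outer" };
    method_t inner = { "foo.rb", 10, "inner" };
    const method_t* frames[] = { NULL, &outer, &inner };   /* index 0 is the root */

    const method_t* found = find_method(frames, 2, "foo.rb", 12);
    printf("line 12 attributed to: %s\n", found ? found->name : "(none)");
    return 0;
}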
data/ext/ruby_prof/rp_stack.h

@@ -4,12 +4,9 @@
  #ifndef __RP_STACK__
  #define __RP_STACK__
 
- #include <ruby.h>
-
- #include "rp_measure.h"
+ #include "ruby_prof.h"
  #include "rp_call_info.h"
 
-
  /* Temporary object that maintains profiling information
  for active methods. They are created and destroyed
  as the program moves up and down its stack. */
@@ -19,7 +16,8 @@ typedef struct
  increases performance. */
  prof_call_info_t *call_info;
 
- unsigned int line;
+ VALUE source_file;
+ unsigned int source_line;
  unsigned int passes; /* Count of "pass" frames, _after_ this one. */
 
  double start_time;
@@ -47,17 +45,12 @@
  prof_frame_t *ptr;
  } prof_stack_t;
 
- prof_stack_t *prof_stack_create();
+ prof_stack_t *prof_stack_create(void);
  void prof_stack_free(prof_stack_t *stack);
 
  prof_frame_t *prof_stack_push(prof_stack_t *stack, prof_call_info_t *call_info, double measurement, int paused);
  prof_frame_t *prof_stack_pop(prof_stack_t *stack, double measurement);
  prof_frame_t *prof_stack_pass(prof_stack_t *stack);
-
- static inline prof_frame_t *
- prof_stack_peek(prof_stack_t *stack) {
- return stack->ptr != stack->start ? stack->ptr - 1 : NULL;
- }
-
+ prof_method_t *prof_find_method(prof_stack_t* stack, VALUE source_file, int source_line);
 
  #endif //__RP_STACK__
data/ext/ruby_prof/rp_thread.c

@@ -1,50 +1,59 @@
  /* Copyright (C) 2005-2013 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
  Please see the LICENSE file for copyright and distribution information */
 
- #include "ruby_prof.h"
+ /* Document-class: RubyProf::Thread
+
+ The Thread class contains profile results for a single fiber (note a Ruby thread can run multiple fibers).
+ You cannot create an instance of RubyProf::Thread, instead you access it from a RubyProf::Profile object.
+
+ profile = RubyProf::Profile.profile do
+ ...
+ end
+
+ profile.threads.each do |thread|
+ thread.root_methods.sort.each do |method|
+ puts method.total_time
+ end
+ end
+ */
+
+ #include "rp_thread.h"
+ #include "rp_profile.h"
 
  VALUE cRpThread;
 
  /* ====== thread_data_t ====== */
  thread_data_t*
- thread_data_create()
+ thread_data_create(void)
  {
  thread_data_t* result = ALLOC(thread_data_t);
  result->stack = prof_stack_create();
  result->method_table = method_table_create();
  result->object = Qnil;
  result->methods = Qnil;
+ result->fiber_id = Qnil;
+ result->thread_id = Qnil;
+ result->trace = true;
+ result->fiber = Qnil;
  return result;
  }
 
- /* The underlying c structures are freed when the parent profile is freed.
- However, on shutdown the Ruby GC frees objects in any will-nilly order.
- That means the ruby thread object wrapping the c thread struct may
- be freed before the parent profile. Thus we add in a free function
- for the garbage collector so that if it does get called will nil
- out our Ruby object reference.*/
  static void
- thread_data_ruby_gc_free(thread_data_t* thread_data)
+ prof_thread_free(thread_data_t* thread_data)
  {
- /* Has this thread object been accessed by Ruby? If
- yes clean it up so to avoid a segmentation fault. */
+ /* Has this method object been accessed by Ruby? If
+ yes then set its data to nil to avoid a segmentation fault on the next mark and sweep. */
  if (thread_data->object != Qnil)
  {
- RDATA(thread_data->object)->data = NULL;
- RDATA(thread_data->object)->dfree = NULL;
  RDATA(thread_data->object)->dmark = NULL;
+ RDATA(thread_data->object)->dfree = NULL;
+ RDATA(thread_data->object)->data = NULL;
+ thread_data->object = Qnil;
  }
- thread_data->object = Qnil;
- }
-
- static void
- thread_data_free(thread_data_t* thread_data)
- {
- thread_data_ruby_gc_free(thread_data);
+
  method_table_free(thread_data->method_table);
  prof_stack_free(thread_data->stack);
 
- thread_data->thread_id = Qnil;
 
  xfree(thread_data);
  }
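prof_thread_free (which replaces thread_data_ruby_gc_free/thread_data_free) detaches the Ruby wrapper before freeing the C struct, and prof_thread_wrap now passes NULL as the dfree function, so the GC never frees thread data that the profile still owns. A sketch of the same wrap/free pattern for a generic struct (compiles only inside a Ruby C extension; the names my_thing_t, cMyThing, and the functions are hypothetical):

#include <ruby.h>

typedef struct { VALUE object; } my_thing_t;

extern VALUE cMyThing;   /* class defined elsewhere in the extension */

static void my_thing_mark(void* data)
{
    (void)data;  /* rb_gc_mark() any VALUEs held by the struct here */
}

/* Called when the owning container is freed, not by the GC (dfree is NULL below). */
static void my_thing_free(my_thing_t* thing)
{
    if (thing->object != Qnil)
    {
        /* Detach the Ruby wrapper so a later GC pass does not touch freed memory. */
        RDATA(thing->object)->dmark = NULL;
        RDATA(thing->object)->dfree = NULL;
        RDATA(thing->object)->data = NULL;
        thing->object = Qnil;
    }
    xfree(thing);
}

static VALUE my_thing_wrap(my_thing_t* thing)
{
    if (thing->object == Qnil)
        thing->object = Data_Wrap_Struct(cMyThing, my_thing_mark, NULL, thing);
    return thing->object;
}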
@@ -57,33 +66,51 @@ mark_methods(st_data_t key, st_data_t value, st_data_t result)
  return ST_CONTINUE;
  }
 
+ size_t
+ prof_thread_size(const void *data)
+ {
+ return sizeof(prof_call_info_t);
+ }
+
  void
- prof_thread_mark(thread_data_t *thread)
+ prof_thread_mark(void *data)
  {
+ thread_data_t *thread = (thread_data_t*)data;
+
  if (thread->object != Qnil)
  rb_gc_mark(thread->object);
-
+
  if (thread->methods != Qnil)
  rb_gc_mark(thread->methods);
-
- if (thread->thread_id != Qnil)
- rb_gc_mark(thread->thread_id);
-
+
  if (thread->fiber_id != Qnil)
  rb_gc_mark(thread->fiber_id);
-
+
+ if (thread->thread_id != Qnil)
+ rb_gc_mark(thread->thread_id);
+
  st_foreach(thread->method_table, mark_methods, 0);
  }
 
+
  VALUE
  prof_thread_wrap(thread_data_t *thread)
  {
- if (thread->object == Qnil) {
- thread->object = Data_Wrap_Struct(cRpThread, prof_thread_mark, thread_data_ruby_gc_free, thread);
+ if (thread->object == Qnil)
+ {
+ thread->object = Data_Wrap_Struct(cRpThread, prof_thread_mark, NULL, thread);
  }
  return thread->object;
  }
 
+ static VALUE
+ prof_thread_allocate(VALUE klass)
+ {
+ thread_data_t* thread_data = thread_data_create();
+ thread_data->object = prof_thread_wrap(thread_data);
+ return thread_data->object;
+ }
+
  static thread_data_t*
  prof_get_thread(VALUE self)
  {
@@ -109,7 +136,7 @@ threads_table_create()
  static int
  thread_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
  {
- thread_data_free((thread_data_t*)value);
+ prof_thread_free((thread_data_t*)value);
  return ST_CONTINUE;
  }
 
@@ -120,73 +147,70 @@ threads_table_free(st_table *table)
  st_free_table(table);
  }
 
- size_t
- threads_table_insert(prof_profile_t* profile, VALUE key, thread_data_t *thread_data)
+ thread_data_t *
+ threads_table_lookup(void *prof, VALUE fiber)
  {
- unsigned LONG_LONG key_value = NUM2ULL(key);
- return st_insert(profile->threads_tbl, key_value, (st_data_t) thread_data);
+ prof_profile_t *profile = prof;
+ thread_data_t* result = NULL;
+ st_data_t val;
+
+ if (st_lookup(profile->threads_tbl, fiber, &val))
+ {
+ result = (thread_data_t*)val;
+ }
+
+ return result;
  }
 
- thread_data_t *
- threads_table_lookup(prof_profile_t* profile, VALUE thread_id, VALUE fiber_id)
+ thread_data_t*
+ threads_table_insert(void *prof, VALUE fiber)
  {
- thread_data_t* result;
- st_data_t val;
+ prof_profile_t *profile = prof;
+ thread_data_t *result = thread_data_create();
+ VALUE thread = rb_thread_current();
+
+ result->fiber = fiber;
+ result->fiber_id = rb_obj_id(fiber);
+ result->thread_id = rb_obj_id(thread);
+ st_insert(profile->threads_tbl, (st_data_t)fiber, (st_data_t)result);
 
- /* If we should merge fibers, we use the thread_id as key, otherwise the fiber id.
- None of this is perfect, as garbage collected fiber/thread might be reused again later.
- A real solution would require integration with the garbage collector.
- */
- VALUE key = profile->merge_fibers ? thread_id : fiber_id;
- unsigned LONG_LONG key_value = NUM2ULL(key);
- if (st_lookup(profile->threads_tbl, key_value, &val))
+ // Are we tracing this thread?
+ if (profile->include_threads_tbl && !st_lookup(profile->include_threads_tbl, thread, 0))
+ {
+ result->trace= false;
+ }
+ else if (profile->exclude_threads_tbl && st_lookup(profile->exclude_threads_tbl, thread, 0))
  {
- result = (thread_data_t *) val;
+ result->trace = false;
  }
  else
  {
- result = thread_data_create();
- result->thread_id = thread_id;
- /* We set fiber id to 0 in the merge fiber case. Real fibers never have id 0,
- so we can identify them later during printing.
- */
- result->fiber_id = profile->merge_fibers ? INT2FIX(0) : fiber_id;
- /* Insert the table */
- threads_table_insert(profile, key, result);
+ result->trace = true;
  }
+
  return result;
  }
 
- thread_data_t *
- switch_thread(void* prof, VALUE thread_id, VALUE fiber_id)
+ void
+ switch_thread(void *prof, thread_data_t *thread_data, double measurement)
  {
- prof_profile_t* profile = (prof_profile_t*)prof;
- double measurement = profile->measurer->measure();
-
- /* Get new thread information. */
- thread_data_t *thread_data = threads_table_lookup(profile, thread_id, fiber_id);
+ prof_profile_t *profile = prof;
 
  /* Get current frame for this thread */
- prof_frame_t *frame = prof_stack_peek(thread_data->stack);
-
- /* Update the time this thread waited for another thread */
- if (frame)
- {
- frame->wait_time += measurement - frame->switch_time;
- frame->switch_time = measurement;
- }
-
+ prof_frame_t* frame = thread_data->stack->ptr;
+ frame->wait_time += measurement - frame->switch_time;
+ frame->switch_time = measurement;
+
  /* Save on the last thread the time of the context switch
  and reset this thread's last context switch to 0.*/
  if (profile->last_thread_data)
  {
- prof_frame_t *last_frame = prof_stack_peek(profile->last_thread_data->stack);
- if (last_frame)
- last_frame->switch_time = measurement;
+ prof_frame_t* last_frame = profile->last_thread_data->stack->ptr;
+ if (last_frame)
+ last_frame->switch_time = measurement;
  }
 
  profile->last_thread_data = thread_data;
- return thread_data;
  }
 
  int pause_thread(st_data_t key, st_data_t value, st_data_t data)
@@ -194,7 +218,7 @@ int pause_thread(st_data_t key, st_data_t value, st_data_t data)
  thread_data_t* thread_data = (thread_data_t *) value;
  prof_profile_t* profile = (prof_profile_t*)data;
 
- prof_frame_t* frame = prof_stack_peek(thread_data->stack);
+ prof_frame_t* frame = thread_data->stack->ptr;
  prof_frame_pause(frame, profile->measurement_at_pause_resume);
 
  return ST_CONTINUE;
@@ -205,7 +229,7 @@ int unpause_thread(st_data_t key, st_data_t value, st_data_t data)
  thread_data_t* thread_data = (thread_data_t *) value;
  prof_profile_t* profile = (prof_profile_t*)data;
 
- prof_frame_t* frame = prof_stack_peek(thread_data->stack);
+ prof_frame_t* frame = thread_data->stack->ptr;
  prof_frame_unpause(frame, profile->measurement_at_pause_resume);
 
  return ST_CONTINUE;
@@ -219,18 +243,18 @@ collect_methods(st_data_t key, st_data_t value, st_data_t result)
  VALUE methods = (VALUE) result;
  prof_method_t *method = (prof_method_t *) value;
 
- if (!method->excluded) {
+ if (!method->excluded)
+ {
  rb_ary_push(methods, prof_method_wrap(method));
  }
 
  return ST_CONTINUE;
  }
 
-
  /* call-seq:
  id -> number
 
- Returns the id of this thread. */
+ Returns the thread id of this thread. */
  static VALUE
  prof_thread_id(VALUE self)
  {
@@ -250,7 +274,7 @@ prof_fiber_id(VALUE self)
  }
 
  /* call-seq:
- methods -> Array of MethodInfo
+ methods -> [RubyProf::MethodInfo]
 
  Returns an array of methods that were called from this
  thread during program execution. */
@@ -266,12 +290,48 @@ prof_thread_methods(VALUE self)
  return thread->methods;
  }
 
- void rp_init_thread()
+ /* :nodoc: */
+ static VALUE
+ prof_thread_dump(VALUE self)
+ {
+ thread_data_t* thread_data = DATA_PTR(self);
+
+ VALUE result = rb_hash_new();
+ rb_hash_aset(result, ID2SYM(rb_intern("fiber_id")), thread_data->fiber_id);
+ rb_hash_aset(result, ID2SYM(rb_intern("methods")), prof_thread_methods(self));
+
+ return result;
+ }
+
+ /* :nodoc: */
+ static VALUE
+ prof_thread_load(VALUE self, VALUE data)
+ {
+ thread_data_t* thread_data = DATA_PTR(self);
+ thread_data->object = self;
+
+ thread_data->fiber_id = rb_hash_aref(data, ID2SYM(rb_intern("fiber_id")));
+ VALUE methods = rb_hash_aref(data, ID2SYM(rb_intern("methods")));
+
+ for (int i = 0; i < rb_array_len(methods); i++)
+ {
+ VALUE method = rb_ary_entry(methods, i);
+ prof_method_t *method_data = DATA_PTR(method);
+ method_table_insert(thread_data->method_table, method_data->key, method_data);
+ }
+
+ return data;
+ }
+
+ void rp_init_thread(void)
  {
- cRpThread = rb_define_class_under(mProf, "Thread", rb_cObject);
+ cRpThread = rb_define_class_under(mProf, "Thread", rb_cData);
  rb_undef_method(CLASS_OF(cRpThread), "new");
+ rb_define_alloc_func(cRpThread, prof_thread_allocate);
 
  rb_define_method(cRpThread, "id", prof_thread_id, 0);
  rb_define_method(cRpThread, "fiber_id", prof_fiber_id, 0);
  rb_define_method(cRpThread, "methods", prof_thread_methods, 0);
+ rb_define_method(cRpThread, "_dump_data", prof_thread_dump, 0);
+ rb_define_method(cRpThread, "_load_data", prof_thread_load, 1);
  }