ruby-prof 1.4.3 → 1.6.3

Files changed (87)
  1. checksums.yaml +4 -4
  2. data/CHANGES +59 -9
  3. data/{README.rdoc → README.md} +2 -2
  4. data/Rakefile +4 -4
  5. data/bin/ruby-prof +100 -87
  6. data/ext/ruby_prof/rp_allocation.c +140 -85
  7. data/ext/ruby_prof/rp_allocation.h +8 -6
  8. data/ext/ruby_prof/rp_call_tree.c +502 -369
  9. data/ext/ruby_prof/rp_call_tree.h +47 -43
  10. data/ext/ruby_prof/rp_call_trees.c +16 -8
  11. data/ext/ruby_prof/rp_measure_allocations.c +10 -13
  12. data/ext/ruby_prof/rp_measure_memory.c +8 -4
  13. data/ext/ruby_prof/rp_measure_process_time.c +7 -6
  14. data/ext/ruby_prof/rp_measurement.c +147 -20
  15. data/ext/ruby_prof/rp_measurement.h +4 -1
  16. data/ext/ruby_prof/rp_method.c +142 -83
  17. data/ext/ruby_prof/rp_method.h +63 -62
  18. data/ext/ruby_prof/rp_profile.c +933 -900
  19. data/ext/ruby_prof/rp_profile.h +1 -0
  20. data/ext/ruby_prof/rp_thread.c +433 -362
  21. data/ext/ruby_prof/rp_thread.h +39 -39
  22. data/ext/ruby_prof/ruby_prof.c +0 -2
  23. data/ext/ruby_prof/ruby_prof.h +8 -0
  24. data/ext/ruby_prof/vc/ruby_prof.vcxproj +11 -8
  25. data/lib/ruby-prof/assets/call_stack_printer.html.erb +2 -1
  26. data/lib/ruby-prof/compatibility.rb +14 -0
  27. data/lib/ruby-prof/method_info.rb +8 -1
  28. data/lib/ruby-prof/printers/abstract_printer.rb +2 -1
  29. data/lib/ruby-prof/printers/call_tree_printer.rb +4 -10
  30. data/lib/ruby-prof/printers/graph_html_printer.rb +1 -1
  31. data/lib/ruby-prof/printers/multi_printer.rb +17 -17
  32. data/lib/ruby-prof/profile.rb +70 -37
  33. data/lib/ruby-prof/rack.rb +31 -21
  34. data/lib/ruby-prof/version.rb +1 -1
  35. data/lib/ruby-prof.rb +1 -1
  36. data/ruby-prof.gemspec +2 -3
  37. data/test/abstract_printer_test.rb +1 -0
  38. data/test/alias_test.rb +97 -106
  39. data/test/call_tree_builder.rb +126 -0
  40. data/test/call_tree_test.rb +94 -0
  41. data/test/call_tree_visitor_test.rb +1 -6
  42. data/test/call_trees_test.rb +6 -6
  43. data/test/{basic_test.rb → compatibility_test.rb} +8 -2
  44. data/test/duplicate_names_test.rb +5 -5
  45. data/test/dynamic_method_test.rb +24 -15
  46. data/test/enumerable_test.rb +1 -1
  47. data/test/exceptions_test.rb +2 -2
  48. data/test/exclude_methods_test.rb +3 -8
  49. data/test/exclude_threads_test.rb +4 -9
  50. data/test/fiber_test.rb +74 -8
  51. data/test/gc_test.rb +11 -9
  52. data/test/inverse_call_tree_test.rb +33 -34
  53. data/test/line_number_test.rb +37 -61
  54. data/test/marshal_test.rb +16 -3
  55. data/test/measure_allocations.rb +1 -5
  56. data/test/measure_allocations_test.rb +642 -357
  57. data/test/{measure_memory_trace_test.rb → measure_memory_test.rb} +180 -616
  58. data/test/measure_process_time_test.rb +1566 -741
  59. data/test/measure_wall_time_test.rb +179 -193
  60. data/test/measurement_test.rb +82 -0
  61. data/test/merge_test.rb +146 -0
  62. data/test/method_info_test.rb +95 -0
  63. data/test/multi_printer_test.rb +0 -5
  64. data/test/no_method_class_test.rb +1 -1
  65. data/test/pause_resume_test.rb +12 -16
  66. data/test/printer_call_stack_test.rb +2 -2
  67. data/test/printer_call_tree_test.rb +4 -4
  68. data/test/printer_flat_test.rb +1 -1
  69. data/test/printer_graph_html_test.rb +2 -2
  70. data/test/printer_graph_test.rb +2 -2
  71. data/test/printers_test.rb +14 -20
  72. data/test/printing_recursive_graph_test.rb +2 -2
  73. data/test/profile_test.rb +85 -0
  74. data/test/recursive_test.rb +374 -155
  75. data/test/scheduler.rb +363 -0
  76. data/test/singleton_test.rb +1 -1
  77. data/test/stack_printer_test.rb +5 -8
  78. data/test/start_stop_test.rb +11 -14
  79. data/test/test_helper.rb +11 -8
  80. data/test/thread_test.rb +106 -15
  81. data/test/unique_call_path_test.rb +28 -12
  82. data/test/yarv_test.rb +11 -7
  83. metadata +17 -29
  84. data/ext/ruby_prof/rp_aggregate_call_tree.c +0 -59
  85. data/ext/ruby_prof/rp_aggregate_call_tree.h +0 -13
  86. data/test/measure_allocations_trace_test.rb +0 -375
  87. data/test/temp.rb +0 -20
data/ext/ruby_prof/rp_call_tree.h
@@ -1,43 +1,47 @@
-/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
-   Please see the LICENSE file for copyright and distribution information */
-
-#ifndef __RP_CALL_TREE_H__
-#define __RP_CALL_TREE_H__
-
-#include "ruby_prof.h"
-#include "rp_measurement.h"
-#include "rp_method.h"
-
-extern VALUE cRpCallTree;
-
-/* Callers and callee information for a method. */
-typedef struct prof_call_tree_t
-{
-    prof_method_t* method;
-    struct prof_call_tree_t* parent;
-    st_table* children; /* Call infos that this call info calls */
-    prof_measurement_t* measurement;
-    VALUE object;
-
-    int visits; /* Current visits on the stack */
-
-    unsigned int source_line;
-    VALUE source_file;
-} prof_call_tree_t;
-
-prof_call_tree_t* prof_call_tree_create(prof_method_t* method, prof_call_tree_t* parent, VALUE source_file, int source_line);
-prof_call_tree_t* prof_call_tree_copy(prof_call_tree_t* other);
-void prof_call_tree_merge(prof_call_tree_t* result, prof_call_tree_t* other);
-void prof_call_tree_mark(void* data);
-prof_call_tree_t* call_tree_table_lookup(st_table* table, st_data_t key);
-
-void prof_call_tree_add_parent(prof_call_tree_t* self, prof_call_tree_t* parent);
-void prof_call_tree_add_child(prof_call_tree_t* self, prof_call_tree_t* child);
-
-uint32_t prof_call_figure_depth(prof_call_tree_t* call_tree_data);
-prof_call_tree_t* prof_get_call_tree(VALUE self);
-VALUE prof_call_tree_wrap(prof_call_tree_t* call_tree);
-void prof_call_tree_free(prof_call_tree_t* call_tree);
-void rp_init_call_tree(void);
-
-#endif //__RP_CALL_TREE_H__
+/* Copyright (C) 2005-2019 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
+   Please see the LICENSE file for copyright and distribution information */
+
+#ifndef __RP_CALL_TREE_H__
+#define __RP_CALL_TREE_H__
+
+#include "ruby_prof.h"
+#include "rp_measurement.h"
+#include "rp_method.h"
+
+extern VALUE cRpCallTree;
+
+/* Callers and callee information for a method. */
+typedef struct prof_call_tree_t
+{
+    prof_owner_t owner;
+    prof_method_t* method;
+    struct prof_call_tree_t* parent;
+    st_table* children; /* Call infos that this call info calls */
+    prof_measurement_t* measurement;
+    VALUE object;
+
+    int visits; /* Current visits on the stack */
+
+    unsigned int source_line;
+    VALUE source_file;
+} prof_call_tree_t;
+
+prof_call_tree_t* prof_call_tree_create(prof_method_t* method, prof_call_tree_t* parent, VALUE source_file, int source_line);
+prof_call_tree_t* prof_call_tree_copy(prof_call_tree_t* other);
+void prof_call_tree_merge_internal(prof_call_tree_t* destination, prof_call_tree_t* other, st_table* method_table);
+void prof_call_tree_mark(void* data);
+prof_call_tree_t* call_tree_table_lookup(st_table* table, st_data_t key);
+
+void prof_call_tree_add_parent(prof_call_tree_t* self, prof_call_tree_t* parent);
+void prof_call_tree_add_child(prof_call_tree_t* self, prof_call_tree_t* child);
+
+uint32_t prof_call_tree_figure_depth(prof_call_tree_t* call_tree);
+VALUE prof_call_tree_methods(prof_call_tree_t* call_tree);
+
+prof_call_tree_t* prof_get_call_tree(VALUE self);
+VALUE prof_call_tree_wrap(prof_call_tree_t* call_tree);
+void prof_call_tree_free(prof_call_tree_t* call_tree);
+
+void rp_init_call_tree();
+
+#endif //__RP_CALL_TREE_H__
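The header now carries an ownership flag on each call tree node and replaces prof_call_tree_merge with an internal merge helper that works against a method table. These nodes surface in Ruby as RubyProf::CallTree objects. The sketch below walks a profile's call tree; it assumes the ruby-prof 1.x Ruby API (Profile.profile, Thread#call_tree, CallTree#target, #children and #measurement), which is not shown in this diff.

  require 'ruby-prof'

  profile = RubyProf::Profile.profile { 100.times { "a" * 10 } }
  root = profile.threads.first.call_tree   # a RubyProf::CallTree node

  # Recursively print each node's method name and total time
  def print_tree(node, indent = 0)
    puts "#{' ' * indent}#{node.target.full_name}: #{node.measurement.total_time.round(6)}s"
    node.children.each { |child| print_tree(child, indent + 2) }
  end

  print_tree(root)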
data/ext/ruby_prof/rp_call_trees.c
@@ -1,7 +1,6 @@
 /* Copyright (C) 2005-2013 Shugo Maeda <shugo@ruby-lang.org> and Charlie Savage <cfis@savagexi.com>
    Please see the LICENSE file for copyright and distribution information */
 
-#include "rp_aggregate_call_tree.h"
 #include "rp_call_trees.h"
 #include "rp_measurement.h"
 
@@ -68,11 +67,11 @@ void prof_call_trees_ruby_gc_free(void* data)
     }
 }
 
-static int prof_call_trees_collect_aggregates(st_data_t key, st_data_t value, st_data_t data)
+static int prof_call_trees_collect(st_data_t key, st_data_t value, st_data_t data)
 {
     VALUE result = (VALUE)data;
     prof_call_tree_t* call_tree_data = (prof_call_tree_t*)value;
-    VALUE aggregate_call_tree = prof_aggregate_call_tree_wrap(call_tree_data);
+    VALUE aggregate_call_tree = prof_call_tree_wrap(call_tree_data);
     rb_ary_push(result, aggregate_call_tree);
 
     return ST_CONTINUE;
@@ -87,11 +86,16 @@ static int prof_call_trees_collect_callees(st_data_t key, st_data_t value, st_da
 
     if (rb_st_lookup(callers, call_tree_data->method->key, (st_data_t*)&aggregate_call_tree_data))
     {
-        prof_call_tree_merge(aggregate_call_tree_data, call_tree_data);
+        prof_measurement_merge_internal(aggregate_call_tree_data->measurement, call_tree_data->measurement);
     }
     else
     {
+        // Copy the call tree so we don't touch the original and give Ruby ownership
+        // of it so that it is freed on GC
        aggregate_call_tree_data = prof_call_tree_copy(call_tree_data);
+        aggregate_call_tree_data->owner = OWNER_RUBY;
+
+
        rb_st_insert(callers, call_tree_data->method->key, (st_data_t)aggregate_call_tree_data);
     }
 
@@ -166,7 +170,7 @@ VALUE prof_call_trees_min_depth(VALUE self)
     prof_call_trees_t* call_trees = prof_get_call_trees(self);
     for (prof_call_tree_t** p_call_tree = call_trees->start; p_call_tree < call_trees->ptr; p_call_tree++)
     {
-        unsigned int call_tree_depth = prof_call_figure_depth(*p_call_tree);
+        unsigned int call_tree_depth = prof_call_tree_figure_depth(*p_call_tree);
         if (call_tree_depth < depth)
             depth = call_tree_depth;
     }
@@ -210,17 +214,21 @@ VALUE prof_call_trees_callers(VALUE self)
 
         if (rb_st_lookup(callers, parent->method->key, (st_data_t*)&aggregate_call_tree_data))
         {
-            prof_call_tree_merge(aggregate_call_tree_data, *p_call_tree);
+            prof_measurement_merge_internal(aggregate_call_tree_data->measurement, (*p_call_tree)->measurement);
         }
         else
         {
+            // Copy the call tree so we don't touch the original and give Ruby ownership
+            // of it so that it is freed on GC
            aggregate_call_tree_data = prof_call_tree_copy(*p_call_tree);
+            aggregate_call_tree_data->owner = OWNER_RUBY;
+
            rb_st_insert(callers, parent->method->key, (st_data_t)aggregate_call_tree_data);
         }
     }
 
     VALUE result = rb_ary_new_capa((long)callers->num_entries);
-    rb_st_foreach(callers, prof_call_trees_collect_aggregates, result);
+    rb_st_foreach(callers, prof_call_trees_collect, result);
     rb_st_free_table(callers);
     return result;
 }
@@ -240,7 +248,7 @@ VALUE prof_call_trees_callees(VALUE self)
     }
 
     VALUE result = rb_ary_new_capa((long)callees->num_entries);
-    rb_st_foreach(callees, prof_call_trees_collect_aggregates, result);
+    rb_st_foreach(callees, prof_call_trees_collect, result);
     rb_st_free_table(callees);
     return result;
 }
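With this change the caller/callee aggregation no longer produces AggregateCallTree wrappers: each aggregate is a plain CallTree copy whose owner is set to OWNER_RUBY so the GC can free it. A hedged Ruby sketch of the API this backs (MethodInfo#call_trees with #callers and #callees, which are part of the ruby-prof 1.x API rather than this diff) might look like:

  profile = RubyProf::Profile.profile { [1, 2, 3].map(&:to_s) }

  method_info = profile.threads.first.methods.find { |m| m.full_name == 'Array#map' }
  if method_info
    method_info.call_trees.callers.each do |call_tree|
      # Each entry is an aggregated CallTree copy owned by Ruby and freed on GC
      puts "#{call_tree.target.full_name}: called #{call_tree.measurement.called} time(s)"
    end
  end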
data/ext/ruby_prof/rp_measure_allocations.c
@@ -8,20 +8,20 @@
 static VALUE cMeasureAllocations;
 VALUE total_allocated_objects_key;
 
-static double measure_allocations_via_gc_stats(rb_trace_arg_t* trace_arg)
-{
-    return (double)rb_gc_stat(total_allocated_objects_key);
-}
-
-static double measure_allocations_via_tracing(rb_trace_arg_t* trace_arg)
+static double measure_allocations(rb_trace_arg_t* trace_arg)
 {
     static double result = 0;
 
     if (trace_arg)
     {
+        // Only process creation of new objects
         rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
-        if (event == RUBY_INTERNAL_EVENT_NEWOBJ)
-            result++;
+        if (event == RUBY_INTERNAL_EVENT_NEWOBJ) {
+            // Don't count allocations of internal IMemo objects
+            VALUE object = rb_tracearg_object(trace_arg);
+            if (BUILTIN_TYPE(object) != T_IMEMO)
+                result++;
+        }
     }
     return result;
 }
@@ -30,14 +30,11 @@ prof_measurer_t* prof_measurer_allocations(bool track_allocations)
 {
     prof_measurer_t* measure = ALLOC(prof_measurer_t);
     measure->mode = MEASURE_ALLOCATIONS;
+    measure->measure = measure_allocations;
     measure->multiplier = 1;
+    // Need to track allocations to get RUBY_INTERNAL_EVENT_NEWOBJ event
     measure->track_allocations = track_allocations;
 
-    if (track_allocations)
-        measure->measure = measure_allocations_via_tracing;
-    else
-        measure->measure = measure_allocations_via_gc_stats;
-
     return measure;
 }
 
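In allocation mode the measurer now always counts objects through the RUBY_INTERNAL_EVENT_NEWOBJ tracepoint and skips Ruby's internal T_IMEMO objects, rather than falling back to GC.stat. A hedged sketch of requesting this mode from Ruby (measure_mode and RubyProf::ALLOCATIONS are standard ruby-prof options, not part of this diff):

  require 'ruby-prof'

  profile = RubyProf::Profile.new(measure_mode: RubyProf::ALLOCATIONS)
  profile.profile { 10.times { Object.new } }

  # In this mode measurement values are allocation counts, not seconds
  puts "objects allocated: #{profile.threads.first.call_tree.measurement.total_time.to_i}"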
data/ext/ruby_prof/rp_measure_memory.c
@@ -7,20 +7,23 @@
 
 static VALUE cMeasureMemory;
 
-static double
-measure_memory_via_tracing(rb_trace_arg_t* trace_arg)
+static double measure_memory(rb_trace_arg_t* trace_arg)
 {
     static double result = 0;
 
     if (trace_arg)
     {
+        // Only process creation of new objects
         rb_event_flag_t event = rb_tracearg_event_flag(trace_arg);
         if (event == RUBY_INTERNAL_EVENT_NEWOBJ)
         {
+            // Don't count allocations of internal IMemo objects
             VALUE object = rb_tracearg_object(trace_arg);
-            result += rb_obj_memsize_of(object);
+            if (BUILTIN_TYPE(object) != T_IMEMO)
+                result += rb_obj_memsize_of(object);
         }
     }
+
     return result;
 }
 
@@ -28,8 +31,9 @@ prof_measurer_t* prof_measurer_memory(bool track_allocations)
 {
     prof_measurer_t* measure = ALLOC(prof_measurer_t);
     measure->mode = MEASURE_MEMORY;
-    measure->measure = measure_memory_via_tracing;
+    measure->measure = measure_memory;
     measure->multiplier = 1;
+    // Need to track allocations to get RUBY_INTERNAL_EVENT_NEWOBJ event
     measure->track_allocations = true;
     return measure;
 }
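The memory measurer follows the same pattern: each newly created object (excluding T_IMEMO internals) contributes its rb_obj_memsize_of size. A hedged Ruby sketch using the MEMORY measure mode (again assuming the standard ruby-prof option names):

  require 'ruby-prof'

  profile = RubyProf::Profile.new(measure_mode: RubyProf::MEMORY)
  profile.profile { Array.new(1_000) { 'x' * 64 } }

  # In this mode measurement values are bytes allocated
  puts "memory allocated: #{profile.threads.first.call_tree.measurement.total_time.to_i} bytes"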
data/ext/ruby_prof/rp_measure_process_time.c
@@ -11,21 +11,22 @@ static double measure_process_time(rb_trace_arg_t* trace_arg)
 #if defined(_WIN32)
     FILETIME createTime;
     FILETIME exitTime;
-    FILETIME sysTime;
+    FILETIME kernelTime;
     FILETIME userTime;
 
-    ULARGE_INTEGER sysTimeInt;
+    ULARGE_INTEGER kernelTimeInt;
     ULARGE_INTEGER userTimeInt;
 
-    GetProcessTimes(GetCurrentProcess(), &createTime, &exitTime, &sysTime, &userTime);
+    GetProcessTimes(GetCurrentProcess(), &createTime, &exitTime, &kernelTime, &userTime);
 
-    sysTimeInt.LowPart = sysTime.dwLowDateTime;
-    sysTimeInt.HighPart = sysTime.dwHighDateTime;
+    kernelTimeInt.LowPart = kernelTime.dwLowDateTime;
+    kernelTimeInt.HighPart = kernelTime.dwHighDateTime;
     userTimeInt.LowPart = userTime.dwLowDateTime;
     userTimeInt.HighPart = userTime.dwHighDateTime;
 
-    return (double)(sysTimeInt.QuadPart + userTimeInt.QuadPart);
+    return (double)(kernelTimeInt.QuadPart + userTimeInt.QuadPart);
 #elif !defined(CLOCK_PROCESS_CPUTIME_ID)
+    #include <sys/resource.h>
     struct rusage usage;
     getrusage(RUSAGE_SELF, &usage);
     return usage.ru_stime.tv_sec + usage.ru_utime.tv_sec + ((usage.ru_stime.tv_usec + usage.ru_utime.tv_usec) / 1000000.0);
data/ext/ruby_prof/rp_measurement.c
@@ -16,7 +16,7 @@ void rp_init_measure_memory(void);
 void rp_init_measure_process_time(void);
 void rp_init_measure_wall_time(void);
 
-prof_measurer_t* prof_get_measurer(prof_measure_mode_t measure, bool track_allocations)
+prof_measurer_t* prof_measurer_create(prof_measure_mode_t measure, bool track_allocations)
 {
     switch (measure)
     {
@@ -43,6 +43,7 @@ double prof_measure(prof_measurer_t* measurer, rb_trace_arg_t* trace_arg)
 prof_measurement_t* prof_measurement_create(void)
 {
     prof_measurement_t* result = ALLOC(prof_measurement_t);
+    result->owner = OWNER_C;
     result->total_time = 0;
     result->self_time = 0;
     result->wait_time = 0;
@@ -51,24 +52,66 @@ prof_measurement_t* prof_measurement_create(void)
     return result;
 }
 
+/* call-seq:
+   new(total_time, self_time, wait_time, called) -> Measurement
+
+   Creates a new measurement instance. */
+static VALUE prof_measurement_initialize(VALUE self, VALUE total_time, VALUE self_time, VALUE wait_time, VALUE called)
+{
+    prof_measurement_t* result = prof_get_measurement(self);
+
+    result->total_time = NUM2DBL(total_time);
+    result->self_time = NUM2DBL(self_time);
+    result->wait_time = NUM2DBL(wait_time);
+    result->called = NUM2INT(called);
+    result->object = self;
+    return self;
+}
+
+prof_measurement_t* prof_measurement_copy(prof_measurement_t* other)
+{
+    prof_measurement_t* result = prof_measurement_create();
+    result->called = other->called;
+    result->total_time = other->total_time;
+    result->self_time = other->self_time;
+    result->wait_time = other->wait_time;
+
+    return result;
+}
+
+static VALUE prof_measurement_initialize_copy(VALUE self, VALUE other)
+{
+    // This object was created by Ruby either via Measurement#clone or Measurement#dup
+    // and thus prof_measurement_allocate was called so the object is owned by Ruby
+
+    if (self == other)
+        return self;
+
+    prof_measurement_t* self_ptr = prof_get_measurement(self);
+    prof_measurement_t* other_ptr = prof_get_measurement(other);
+
+    self_ptr->called = other_ptr->called;
+    self_ptr->total_time = other_ptr->total_time;
+    self_ptr->self_time = other_ptr->self_time;
+    self_ptr->wait_time = other_ptr->wait_time;
+
+    return self;
+}
+
 void prof_measurement_mark(void* data)
 {
     if (!data) return;
 
-    prof_measurement_t* measurement_data = (prof_measurement_t*)data;
+    prof_measurement_t* measurement = (prof_measurement_t*)data;
 
-    if (measurement_data->object != Qnil)
-        rb_gc_mark(measurement_data->object);
+    if (measurement->object != Qnil)
+        rb_gc_mark_movable(measurement->object);
 }
 
-static void prof_measurement_ruby_gc_free(void* data)
+void prof_measurement_compact(void* data)
 {
-    if (data)
-    {
-        // Measurements are freed by their owning object (call info or method)
-        prof_measurement_t* measurement = (prof_measurement_t*)data;
-        measurement->object = Qnil;
-    }
+    prof_measurement_t* measurement = (prof_measurement_t*)data;
+    measurement->object = rb_gc_location(measurement->object);
 }
 
 void prof_measurement_free(prof_measurement_t* measurement)
@@ -84,6 +127,27 @@ void prof_measurement_free(prof_measurement_t* measurement)
     xfree(measurement);
 }
 
+static void prof_measurement_ruby_gc_free(void* data)
+{
+    prof_measurement_t* measurement = (prof_measurement_t*)data;
+
+    if (!measurement)
+    {
+        // Object has already been freed by C code
+        return;
+    }
+    else if (measurement->owner == OWNER_RUBY)
+    {
+        // Ruby owns this object, we need to free the underlying C struct
+        prof_measurement_free(measurement);
+    }
+    else
+    {
+        // The Ruby object is being freed, but not the underlying C structure. So unlink the two.
+        measurement->object = Qnil;
+    }
+}
+
 size_t prof_measurement_size(const void* data)
 {
     return sizeof(prof_measurement_t);
@@ -97,6 +161,7 @@ static const rb_data_type_t measurement_type =
     .dmark = prof_measurement_mark,
     .dfree = prof_measurement_ruby_gc_free,
     .dsize = prof_measurement_size,
+    .dcompact = prof_measurement_compact
     },
     .data = NULL,
     .flags = RUBY_TYPED_FREE_IMMEDIATELY
@@ -114,6 +179,8 @@ VALUE prof_measurement_wrap(prof_measurement_t* measurement)
 static VALUE prof_measurement_allocate(VALUE klass)
 {
     prof_measurement_t* measurement = prof_measurement_create();
+    // This object is being created by Ruby
+    measurement->owner = OWNER_RUBY;
     measurement->object = prof_measurement_wrap(measurement);
     return measurement->object;
 }
@@ -140,6 +207,17 @@ static VALUE prof_measurement_total_time(VALUE self)
     return rb_float_new(result->total_time);
 }
 
+/* call-seq:
+   total_time=value -> value
+
+   Sets the total time to value. */
+static VALUE prof_measurement_set_total_time(VALUE self, VALUE value)
+{
+    prof_measurement_t* result = prof_get_measurement(self);
+    result->total_time = NUM2DBL(value);
+    return value;
+}
+
 /* call-seq:
    self_time -> float
 
@@ -152,6 +230,17 @@ prof_measurement_self_time(VALUE self)
     return rb_float_new(result->self_time);
 }
 
+/* call-seq:
+   self_time=value -> value
+
+   Sets the self time to value. */
+static VALUE prof_measurement_set_self_time(VALUE self, VALUE value)
+{
+    prof_measurement_t* result = prof_get_measurement(self);
+    result->self_time = NUM2DBL(value);
+    return value;
+}
+
 /* call-seq:
    wait_time -> float
 
@@ -163,6 +252,17 @@ static VALUE prof_measurement_wait_time(VALUE self)
     return rb_float_new(result->wait_time);
 }
 
+/* call-seq:
+   wait_time=value -> value
+
+   Sets the wait time to value. */
+static VALUE prof_measurement_set_wait_time(VALUE self, VALUE value)
+{
+    prof_measurement_t* result = prof_get_measurement(self);
+    result->wait_time = NUM2DBL(value);
+    return value;
+}
+
 /* call-seq:
    called -> int
 
@@ -174,23 +274,44 @@ static VALUE prof_measurement_called(VALUE self)
 }
 
 /* call-seq:
-   called=n -> n
+   called=value -> value
 
-   Sets the call count to n. */
-static VALUE prof_measurement_set_called(VALUE self, VALUE called)
+   Sets the call count to value. */
+static VALUE prof_measurement_set_called(VALUE self, VALUE value)
 {
-    prof_measurement_t* result = prof_get_measurement(self);
-    result->called = NUM2INT(called);
-    return called;
+    prof_measurement_t* result = prof_get_measurement(self);
+    result->called = NUM2INT(value);
+    return value;
 }
 
 /* :nodoc: */
-static VALUE
-prof_measurement_dump(VALUE self)
+void prof_measurement_merge_internal(prof_measurement_t* self, prof_measurement_t* other)
+{
+    self->called += other->called;
+    self->total_time += other->total_time;
+    self->self_time += other->self_time;
+    self->wait_time += other->wait_time;
+}
+
+/* call-seq:
+   merge(other)
+
+   Adds the content of other measurement to this measurement */
+VALUE prof_measurement_merge(VALUE self, VALUE other)
+{
+    prof_measurement_t* self_ptr = prof_get_measurement(self);
+    prof_measurement_t* other_ptr = prof_get_measurement(other);
+    prof_measurement_merge_internal(self_ptr, other_ptr);
+    return self;
+}
+
+/* :nodoc: */
+static VALUE prof_measurement_dump(VALUE self)
 {
     prof_measurement_t* measurement_data = prof_get_measurement(self);
     VALUE result = rb_hash_new();
 
+    rb_hash_aset(result, ID2SYM(rb_intern("owner")), INT2FIX(measurement_data->owner));
     rb_hash_aset(result, ID2SYM(rb_intern("total_time")), rb_float_new(measurement_data->total_time));
     rb_hash_aset(result, ID2SYM(rb_intern("self_time")), rb_float_new(measurement_data->self_time));
     rb_hash_aset(result, ID2SYM(rb_intern("wait_time")), rb_float_new(measurement_data->wait_time));
@@ -206,6 +327,7 @@ prof_measurement_load(VALUE self, VALUE data)
     prof_measurement_t* measurement = prof_get_measurement(self);
     measurement->object = self;
 
+    measurement->owner = FIX2INT(rb_hash_aref(data, ID2SYM(rb_intern("owner"))));
     measurement->total_time = rb_num2dbl(rb_hash_aref(data, ID2SYM(rb_intern("total_time"))));
     measurement->self_time = rb_num2dbl(rb_hash_aref(data, ID2SYM(rb_intern("self_time"))));
     measurement->wait_time = rb_num2dbl(rb_hash_aref(data, ID2SYM(rb_intern("wait_time"))));
@@ -223,14 +345,19 @@ void rp_init_measure()
     rp_init_measure_memory();
 
     cRpMeasurement = rb_define_class_under(mProf, "Measurement", rb_cObject);
-    rb_undef_method(CLASS_OF(cRpMeasurement), "new");
     rb_define_alloc_func(cRpMeasurement, prof_measurement_allocate);
 
+    rb_define_method(cRpMeasurement, "initialize", prof_measurement_initialize, 4);
+    rb_define_method(cRpMeasurement, "initialize_copy", prof_measurement_initialize_copy, 1);
+    rb_define_method(cRpMeasurement, "merge!", prof_measurement_merge, 1);
     rb_define_method(cRpMeasurement, "called", prof_measurement_called, 0);
     rb_define_method(cRpMeasurement, "called=", prof_measurement_set_called, 1);
     rb_define_method(cRpMeasurement, "total_time", prof_measurement_total_time, 0);
+    rb_define_method(cRpMeasurement, "total_time=", prof_measurement_set_total_time, 1);
     rb_define_method(cRpMeasurement, "self_time", prof_measurement_self_time, 0);
+    rb_define_method(cRpMeasurement, "self_time=", prof_measurement_set_self_time, 1);
     rb_define_method(cRpMeasurement, "wait_time", prof_measurement_wait_time, 0);
+    rb_define_method(cRpMeasurement, "wait_time=", prof_measurement_set_wait_time, 1);
 
     rb_define_method(cRpMeasurement, "_dump_data", prof_measurement_dump, 0);
     rb_define_method(cRpMeasurement, "_load_data", prof_measurement_load, 1);
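Taken together, these changes make RubyProf::Measurement a first-class value object: it can be constructed, duplicated and merged from Ruby. A short sketch using only the methods registered above (initialize, initialize_copy, merge! and the new setters); the numeric results follow directly from prof_measurement_merge_internal:

  m1 = RubyProf::Measurement.new(1.5, 1.0, 0.5, 2)  # total_time, self_time, wait_time, called
  m2 = m1.dup                                        # initialize_copy: the copy is owned by Ruby
  m2.total_time = 3.0
  m2.called = 1

  m1.merge!(m2)        # merge! adds the other measurement's times and call count
  m1.total_time        # => 4.5
  m1.called            # => 3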
data/ext/ruby_prof/rp_measurement.h
@@ -29,6 +29,7 @@ typedef struct prof_measurer_t
 /* Callers and callee information for a method. */
 typedef struct prof_measurement_t
 {
+    prof_owner_t owner;
     double total_time;
     double self_time;
     double wait_time;
@@ -36,14 +37,16 @@ typedef struct prof_measurement_t
     VALUE object;
 } prof_measurement_t;
 
-prof_measurer_t* prof_get_measurer(prof_measure_mode_t measure, bool track_allocations);
+prof_measurer_t* prof_measurer_create(prof_measure_mode_t measure, bool track_allocations);
 double prof_measure(prof_measurer_t* measurer, rb_trace_arg_t* trace_arg);
 
 prof_measurement_t* prof_measurement_create(void);
+prof_measurement_t* prof_measurement_copy(prof_measurement_t* other);
 void prof_measurement_free(prof_measurement_t* measurement);
 VALUE prof_measurement_wrap(prof_measurement_t* measurement);
 prof_measurement_t* prof_get_measurement(VALUE self);
 void prof_measurement_mark(void* data);
+void prof_measurement_merge_internal(prof_measurement_t* destination, prof_measurement_t* other);
 
 void rp_init_measure(void);