ruby-prof 1.5.0-x64-mingw-ucrt → 1.6.1-x64-mingw-ucrt
- checksums.yaml +4 -4
- data/CHANGES +13 -0
- data/ext/ruby_prof/rp_allocation.c +136 -81
- data/ext/ruby_prof/rp_allocation.h +8 -6
- data/ext/ruby_prof/rp_call_tree.c +8 -1
- data/ext/ruby_prof/rp_measurement.c +10 -3
- data/ext/ruby_prof/rp_method.c +51 -76
- data/ext/ruby_prof/rp_profile.c +62 -77
- data/ext/ruby_prof/rp_profile.h +1 -0
- data/ext/ruby_prof/rp_thread.c +14 -4
- data/ext/ruby_prof/vc/ruby_prof.vcxproj +1 -1
- data/lib/3.1/ruby_prof.so +0 -0
- data/lib/3.2/ruby_prof.so +0 -0
- data/lib/ruby-prof/compatibility.rb +14 -0
- data/lib/ruby-prof/printers/abstract_printer.rb +1 -1
- data/lib/ruby-prof/printers/call_tree_printer.rb +1 -1
- data/lib/ruby-prof/printers/multi_printer.rb +17 -17
- data/lib/ruby-prof/profile.rb +5 -5
- data/lib/ruby-prof/rack.rb +31 -21
- data/lib/ruby-prof/version.rb +1 -1
- data/test/abstract_printer_test.rb +1 -0
- data/test/alias_test.rb +6 -11
- data/test/call_tree_visitor_test.rb +1 -6
- data/test/call_trees_test.rb +2 -2
- data/test/{basic_test.rb → compatibility_test.rb} +8 -2
- data/test/duplicate_names_test.rb +1 -1
- data/test/dynamic_method_test.rb +1 -6
- data/test/enumerable_test.rb +1 -1
- data/test/exceptions_test.rb +2 -2
- data/test/exclude_methods_test.rb +3 -8
- data/test/exclude_threads_test.rb +4 -9
- data/test/fiber_test.rb +4 -9
- data/test/gc_test.rb +2 -1
- data/test/inverse_call_tree_test.rb +33 -34
- data/test/line_number_test.rb +1 -1
- data/test/marshal_test.rb +3 -3
- data/test/measure_allocations_test.rb +8 -17
- data/test/measure_memory_test.rb +3 -12
- data/test/measure_process_time_test.rb +29 -34
- data/test/measure_wall_time_test.rb +176 -181
- data/test/multi_printer_test.rb +0 -5
- data/test/no_method_class_test.rb +1 -1
- data/test/pause_resume_test.rb +12 -16
- data/test/printer_call_stack_test.rb +2 -2
- data/test/printer_call_tree_test.rb +2 -2
- data/test/printer_flat_test.rb +1 -1
- data/test/printer_graph_html_test.rb +2 -2
- data/test/printer_graph_test.rb +2 -2
- data/test/printers_test.rb +14 -20
- data/test/printing_recursive_graph_test.rb +2 -2
- data/test/recursive_test.rb +2 -7
- data/test/singleton_test.rb +1 -1
- data/test/stack_printer_test.rb +5 -8
- data/test/start_stop_test.rb +11 -14
- data/test/thread_test.rb +13 -15
- data/test/unique_call_path_test.rb +4 -4
- data/test/yarv_test.rb +3 -3
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b366b0cde0e477d0b06b95b8bcb7f2d0c8747216b9cc8fcdf2c202e89a3106d6
+  data.tar.gz: 21239104377ef6a1479c4110dcd4cd2e2b086d75cd577a3531102799f19b8061
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6aa08fee0a9d16b9276867db6f7465286cae2d118738ff20e78bca8eae764bf9a6657bf93678b0f3430f2998c11117022e6bd6d732d29021bc4fb3b5e6f34615
+  data.tar.gz: b34eb2eb7e9bfb6567292ead8d625a3fd71533d3d2ed87be0798ab9e4e5def42a86768b02605f64048b25fbc90c2b736c5b1c4add494900c8250bfcf480e728d
data/CHANGES
CHANGED
@@ -1,3 +1,16 @@
+1.6.1 (2023-02-21)
+=====================
+* Fix loading C extension for MacOS (Charlie Savage)
+
+1.6.0 (2023-02-20)
+=====================
+* Add support for Ruby's compacting garbage collector (Charlie Savage)
+* Add rbs signature files (Charlie Savage)
+* Update rack adapter used for profiling Rails to include latest ruby-prof features (Charlie Savage)
+* Add warnings for deprecated methods (Charlie Savage)
+* Update tests to not use deprecated methods (Charlie Savage)
+* Improve tests on OSX (Charlie Savage)
+
 1.5.0 (2023-02-06)
 =====================
 * Add new Profile#merge! method that merges results for threads/fibers that share the same root method (Charlie Savage)
data/ext/ruby_prof/rp_allocation.c
CHANGED
@@ -2,32 +2,11 @@
    Please see the LICENSE file for copyright and distribution information */
 
 #include "rp_allocation.h"
+#include "rp_method.h"
 
 VALUE cRpAllocation;
 
-prof_allocation_t* allocations_table_lookup(st_table* table, st_data_t key)
-{
-    prof_allocation_t* result = NULL;
-    st_data_t value;
-    if (rb_st_lookup(table, key, &value))
-    {
-        result = (prof_allocation_t*)value;
-    }
-
-    return result;
-}
-
-void allocations_table_insert(st_table* table, st_data_t key, prof_allocation_t* allocation)
-{
-    rb_st_insert(table, (st_data_t)key, (st_data_t)allocation);
-}
-
-st_data_t allocations_key(VALUE klass, int source_line)
-{
-    return (klass << 4) + source_line;
-}
-
-/* ====== prof_allocation_t ====== */
+// ------ prof_allocation_t ------
 prof_allocation_t* prof_allocation_create(void)
 {
     prof_allocation_t* result = ALLOC(prof_allocation_t);
@@ -43,48 +22,17 @@ prof_allocation_t* prof_allocation_create(void)
     return result;
 }
 
-prof_allocation_t*
+prof_allocation_t* prof_allocation_get(VALUE self)
 {
     /* Can't use Data_Get_Struct because that triggers the event hook
        ending up in endless recursion. */
    prof_allocation_t* result = RTYPEDDATA_DATA(self);
-
     if (!result)
         rb_raise(rb_eRuntimeError, "This RubyProf::Allocation instance has already been freed, likely because its profile has been freed.");
 
     return result;
 }
 
-prof_allocation_t* prof_allocate_increment(prof_method_t* method, rb_trace_arg_t* trace_arg)
-{
-    VALUE object = rb_tracearg_object(trace_arg);
-    if (BUILTIN_TYPE(object) == T_IMEMO)
-        return NULL;
-
-    VALUE klass = rb_obj_class(object);
-
-    int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
-    st_data_t key = allocations_key(klass, source_line);
-
-    prof_allocation_t* allocation = allocations_table_lookup(method->allocations_table, key);
-    if (!allocation)
-    {
-        allocation = prof_allocation_create();
-        allocation->source_line = source_line;
-        allocation->source_file = rb_tracearg_path(trace_arg);
-        allocation->klass_flags = 0;
-        allocation->klass = resolve_klass(klass, &allocation->klass_flags);
-
-        allocation->key = key;
-        allocations_table_insert(method->allocations_table, key, allocation);
-    }
-
-    allocation->count++;
-    allocation->memory += rb_obj_memsize_of(object);
-
-    return allocation;
-}
-
 static void prof_allocation_ruby_gc_free(void* data)
 {
     if (data)
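As the surviving comment explains, prof_allocation_get reads RTYPEDDATA_DATA directly rather than going through Data_Get_Struct, whose checks can re-enter the allocation event hook and recurse; a NULL data pointer then means the owning profile has already freed the struct. The same guard pattern in isolation (my_thing_t and my_thing_get are illustrative names, not ruby-prof API):

#include <ruby.h>

typedef struct my_thing_t
{
    int payload;
} my_thing_t;

static my_thing_t* my_thing_get(VALUE self)
{
    // Read the TypedData pointer directly; no type check, so no risk of
    // re-entering event hooks while an allocation is being traced.
    my_thing_t* result = RTYPEDDATA_DATA(self);
    if (!result)
        rb_raise(rb_eRuntimeError, "This instance has already been freed.");
    return result;
}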
@@ -118,30 +66,39 @@ void prof_allocation_mark(void* data)
 
     prof_allocation_t* allocation = (prof_allocation_t*)data;
     if (allocation->object != Qnil)
-        rb_gc_mark(allocation->object);
+        rb_gc_mark_movable(allocation->object);
 
     if (allocation->klass != Qnil)
-        rb_gc_mark(allocation->klass);
+        rb_gc_mark_movable(allocation->klass);
 
     if (allocation->klass_name != Qnil)
-        rb_gc_mark(allocation->klass_name);
+        rb_gc_mark_movable(allocation->klass_name);
 
     if (allocation->source_file != Qnil)
         rb_gc_mark(allocation->source_file);
 }
 
-static const rb_data_type_t allocation_type =
+void prof_allocation_compact(void* data)
 {
-    .wrap_struct_name = "Allocation",
-    .function =
-    {
-        .dmark = prof_allocation_mark,
-        .dfree = prof_allocation_ruby_gc_free,
-        .dsize = prof_allocation_size,
-    },
-    .data = NULL,
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY
-};
+    prof_allocation_t* allocation = (prof_allocation_t*)data;
+    allocation->object = rb_gc_location(allocation->object);
+    allocation->klass = rb_gc_location(allocation->klass);
+    allocation->klass_name = rb_gc_location(allocation->klass_name);
+}
+
+static const rb_data_type_t allocation_type =
+{
+    .wrap_struct_name = "Allocation",
+    .function =
+    {
+        .dmark = prof_allocation_mark,
+        .dfree = prof_allocation_ruby_gc_free,
+        .dsize = prof_allocation_size,
+        .dcompact = prof_allocation_compact
+    },
+    .data = NULL,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY
+};
 
 VALUE prof_allocation_wrap(prof_allocation_t* allocation)
 {
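Note the split inside prof_allocation_mark: object, klass and klass_name switch to rb_gc_mark_movable and get matching rb_gc_location updates in prof_allocation_compact, while source_file keeps plain rb_gc_mark. rb_gc_mark pins an object so the compactor will not move it, which is why pinned references need no fix-up. An illustrative contrast (example_t is hypothetical, not ruby-prof code):

#include <ruby.h>

typedef struct example_t
{
    VALUE movable_ref;
    VALUE pinned_ref;
} example_t;

static void example_mark(void* data)
{
    example_t* ex = (example_t*)data;
    rb_gc_mark_movable(ex->movable_ref); // compactor may relocate this
    rb_gc_mark(ex->pinned_ref);          // pinned: address stays stable
}

static void example_compact(void* data)
{
    example_t* ex = (example_t*)data;
    ex->movable_ref = rb_gc_location(ex->movable_ref);
    // pinned_ref needs no update; rb_gc_mark prevented it from moving.
}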
@@ -152,24 +109,122 @@ VALUE prof_allocation_wrap(prof_allocation_t* allocation)
     return allocation->object;
 }
 
-static VALUE prof_allocation_allocate(VALUE klass)
+/* ====== Allocation Table ====== */
+st_table* prof_allocations_create()
 {
-    prof_allocation_t* allocation = prof_allocation_create();
-    allocation->object = prof_allocation_wrap(allocation);
-    return allocation->object;
+    return rb_st_init_numtable();
 }
 
-
+static int allocations_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
 {
-
-
-
-
-
+    prof_allocation_free((prof_allocation_t*)value);
+    return ST_CONTINUE;
+}
+
+st_data_t allocations_key(VALUE klass, int source_line)
+{
+    return (klass << 4) + source_line;
+}
+
+static int prof_allocations_collect(st_data_t key, st_data_t value, st_data_t result)
+{
+    prof_allocation_t* allocation = (prof_allocation_t*)value;
+    VALUE arr = (VALUE)result;
+    rb_ary_push(arr, prof_allocation_wrap(allocation));
+    return ST_CONTINUE;
+}
+
+static int prof_allocations_mark_each(st_data_t key, st_data_t value, st_data_t data)
+{
+    prof_allocation_t* allocation = (prof_allocation_t*)value;
+    prof_allocation_mark(allocation);
+    return ST_CONTINUE;
+}
+
+void prof_allocations_mark(st_table* allocations_table)
+{
+    rb_st_foreach(allocations_table, prof_allocations_mark_each, 0);
+}
+
+void prof_allocations_free(st_table* table)
+{
+    rb_st_foreach(table, allocations_table_free_iterator, 0);
+    rb_st_free_table(table);
+}
+
+prof_allocation_t* allocations_table_lookup(st_table* table, st_data_t key)
+{
+    prof_allocation_t* result = NULL;
+    st_data_t value;
+    if (rb_st_lookup(table, key, &value))
+    {
+        result = (prof_allocation_t*)value;
+    }
 
     return result;
 }
 
+void allocations_table_insert(st_table* table, st_data_t key, prof_allocation_t* allocation)
+{
+    rb_st_insert(table, (st_data_t)key, (st_data_t)allocation);
+}
+
+prof_allocation_t* prof_allocate_increment(st_table* allocations_table, rb_trace_arg_t* trace_arg)
+{
+    VALUE object = rb_tracearg_object(trace_arg);
+    if (BUILTIN_TYPE(object) == T_IMEMO)
+        return NULL;
+
+    VALUE klass = rb_obj_class(object);
+
+    int source_line = FIX2INT(rb_tracearg_lineno(trace_arg));
+    st_data_t key = allocations_key(klass, source_line);
+
+    prof_allocation_t* allocation = allocations_table_lookup(allocations_table, key);
+    if (!allocation)
+    {
+        allocation = prof_allocation_create();
+        allocation->source_line = source_line;
+        allocation->source_file = rb_tracearg_path(trace_arg);
+        allocation->klass_flags = 0;
+        allocation->klass = resolve_klass(klass, &allocation->klass_flags);
+
+        allocation->key = key;
+        allocations_table_insert(allocations_table, key, allocation);
+    }
+
+    allocation->count++;
+    allocation->memory += rb_obj_memsize_of(object);
+
+    return allocation;
+}
+
+// Returns an array of allocations
+VALUE prof_allocations_wrap(st_table* allocations_table)
+{
+    VALUE result = rb_ary_new();
+    rb_st_foreach(allocations_table, prof_allocations_collect, result);
+    return result;
+}
+
+void prof_allocations_unwrap(st_table* allocations_table, VALUE allocations)
+{
+    for (int i = 0; i < rb_array_len(allocations); i++)
+    {
+        VALUE allocation = rb_ary_entry(allocations, i);
+        prof_allocation_t* allocation_data = prof_allocation_get(allocation);
+        rb_st_insert(allocations_table, allocation_data->key, (st_data_t)allocation_data);
+    }
+}
+
+/* ====== prof_allocation_t ====== */
+static VALUE prof_allocation_allocate(VALUE klass)
+{
+    prof_allocation_t* allocation = prof_allocation_create();
+    allocation->object = prof_allocation_wrap(allocation);
+    return allocation->object;
+}
+
 /* call-seq:
    klass -> Class
 
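prof_allocate_increment now takes the st_table directly instead of a prof_method_t, decoupling allocation bookkeeping from method internals. For context, the function is built to be driven from a RUBY_INTERNAL_EVENT_NEWOBJ hook, which only C code can register. A hedged sketch of such a driver (newobj_hook and start_allocation_tracing are hypothetical names, not ruby-prof code; it assumes ruby-prof's headers are on the include path):

#include <ruby.h>
#include <ruby/debug.h>
#include "rp_allocation.h"

static void newobj_hook(VALUE tpval, void* data)
{
    st_table* allocations_table = (st_table*)data;
    rb_trace_arg_t* trace_arg = rb_tracearg_from_tracepoint(tpval);
    // Groups the allocation by (class, source line) and bumps count/memory.
    prof_allocate_increment(allocations_table, trace_arg);
}

static VALUE start_allocation_tracing(st_table* allocations_table)
{
    VALUE tp = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ,
                                 newobj_hook, (void*)allocations_table);
    rb_tracepoint_enable(tp);
    return tp;
}

The T_IMEMO early return matters in this path: IMEMO objects are internal interpreter allocations and would otherwise swamp the per-line counts.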
@@ -238,7 +293,7 @@ static VALUE prof_allocation_memory(VALUE self)
 /* :nodoc: */
 static VALUE prof_allocation_dump(VALUE self)
 {
-    prof_allocation_t* allocation =
+    prof_allocation_t* allocation = prof_allocation_get(self);
 
     VALUE result = rb_hash_new();
 
@@ -256,7 +311,7 @@ static VALUE prof_allocation_dump(VALUE self)
 /* :nodoc: */
 static VALUE prof_allocation_load(VALUE self, VALUE data)
 {
-    prof_allocation_t* allocation =
+    prof_allocation_t* allocation = prof_allocation_get(self);
     allocation->object = self;
 
     allocation->key = RB_NUM2ULL(rb_hash_aref(data, ID2SYM(rb_intern("key"))));
data/ext/ruby_prof/rp_allocation.h
CHANGED
@@ -5,7 +5,6 @@
 #define _RP_ALLOCATION_
 
 #include "ruby_prof.h"
-#include "rp_method.h"
 
 typedef struct prof_allocation_t
 {
@@ -20,12 +19,15 @@ typedef struct prof_allocation_t
     VALUE object; /* Cache to wrapped object */
 } prof_allocation_t;
 
+// Allocation (prof_allocation_t*)
 void rp_init_allocation(void);
-
-void prof_allocation_mark(void* data);
-VALUE prof_allocation_wrap(prof_allocation_t* allocation);
-prof_allocation_t* prof_allocation_get(VALUE self);
-prof_allocation_t* prof_allocate_increment(prof_method_t* method, rb_trace_arg_t* trace_arg);
+prof_allocation_t* prof_allocate_increment(st_table* allocations_table, rb_trace_arg_t* trace_arg);
 
+// Allocations (st_table*)
+st_table* prof_allocations_create(void);
+VALUE prof_allocations_wrap(st_table* allocations_table);
+void prof_allocations_unwrap(st_table* allocations_table, VALUE allocations);
+void prof_allocations_mark(st_table* allocations_table);
+void prof_allocations_free(st_table* table);
 
 #endif //_RP_ALLOCATION_
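The header now separates the single-allocation API (prof_allocation_t*) from the whole-table API (st_table*). A hedged sketch of the lifecycle the table API implies; demo_allocations_lifecycle is hypothetical, and in ruby-prof itself the table is owned by prof_method_t:

#include "rp_allocation.h" // assumed on the include path

static VALUE demo_allocations_lifecycle(rb_trace_arg_t* trace_arg)
{
    st_table* table = prof_allocations_create();

    // Record one allocation event, keyed by (class, source line).
    prof_allocate_increment(table, trace_arg);

    // Expose the entries to Ruby as an array of RubyProf::Allocation.
    VALUE allocations = prof_allocations_wrap(table);

    // Freeing the table frees its entries too; wrappers that outlive it
    // raise "has already been freed" on access, per prof_allocation_get.
    prof_allocations_free(table);
    return allocations;
}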
data/ext/ruby_prof/rp_call_tree.c
CHANGED
@@ -54,7 +54,7 @@ void prof_call_tree_mark(void* data)
     prof_call_tree_t* call_tree = (prof_call_tree_t*)data;
 
     if (call_tree->object != Qnil)
-        rb_gc_mark(call_tree->object);
+        rb_gc_mark_movable(call_tree->object);
 
     if (call_tree->source_file != Qnil)
         rb_gc_mark(call_tree->source_file);
@@ -68,6 +68,12 @@ void prof_call_tree_mark(void* data)
     rb_st_foreach(call_tree->children, prof_call_tree_mark_children, 0);
 }
 
+void prof_call_tree_compact(void* data)
+{
+    prof_call_tree_t* call_tree = (prof_call_tree_t*)data;
+    call_tree->object = rb_gc_location(call_tree->object);
+}
+
 static int prof_call_tree_free_children(st_data_t key, st_data_t value, st_data_t data)
 {
     prof_call_tree_t* call_tree = (prof_call_tree_t*)value;
@@ -130,6 +136,7 @@ static const rb_data_type_t call_tree_type =
         .dmark = prof_call_tree_mark,
         .dfree = prof_call_tree_ruby_gc_free,
         .dsize = prof_call_tree_size,
+        .dcompact = prof_call_tree_compact
     },
     .data = NULL,
     .flags = RUBY_TYPED_FREE_IMMEDIATELY
data/ext/ruby_prof/rp_measurement.c
CHANGED
@@ -102,10 +102,16 @@ void prof_measurement_mark(void* data)
 {
     if (!data) return;
 
-    prof_measurement_t*
+    prof_measurement_t* measurement = (prof_measurement_t*)data;
 
-    if (
-
+    if (measurement->object != Qnil)
+        rb_gc_mark_movable(measurement->object);
+}
+
+void prof_measurement_compact(void* data)
+{
+    prof_measurement_t* measurement = (prof_measurement_t*)data;
+    measurement->object = rb_gc_location(measurement->object);
 }
 
 void prof_measurement_free(prof_measurement_t* measurement)
@@ -155,6 +161,7 @@ static const rb_data_type_t measurement_type =
         .dmark = prof_measurement_mark,
         .dfree = prof_measurement_ruby_gc_free,
         .dsize = prof_measurement_size,
+        .dcompact = prof_measurement_compact
     },
     .data = NULL,
     .flags = RUBY_TYPED_FREE_IMMEDIATELY
data/ext/ruby_prof/rp_method.c
CHANGED
@@ -22,31 +22,38 @@ VALUE resolve_klass(VALUE klass, unsigned int* klass_flags)
            figure out what it is attached to.*/
         VALUE attached = rb_iv_get(klass, "__attached__");
 
-        /* Is this a singleton class acting as a metaclass? */
-        if (BUILTIN_TYPE(attached) == T_CLASS)
+        switch (BUILTIN_TYPE(attached))
         {
-            *klass_flags |= kClassSingleton;
-            result = attached;
-        }
-        /* Is this for singleton methods on a module? */
-        else if (BUILTIN_TYPE(attached) == T_MODULE)
-        {
-            *klass_flags |= kModuleSingleton;
-            result = attached;
-        }
-        /* Is this for singleton methods on an object? */
-        else if (BUILTIN_TYPE(attached) == T_OBJECT)
-        {
-            *klass_flags |= kObjectSingleton;
-            result = rb_class_superclass(klass);
-        }
-        /* Ok, this could be other things like an array put onto
-           a singleton object (yeah, it happens, see the singleton
-           objects test case). */
-        else
-        {
-            *klass_flags |= kOtherSingleton;
-            result = klass;
+            /* Is this a singleton class acting as a metaclass? */
+            case T_CLASS:
+            {
+                *klass_flags |= kClassSingleton;
+                result = attached;
+                break;
+            }
+            /* Is this for singleton methods on a module? */
+            case T_MODULE:
+            {
+                *klass_flags |= kModuleSingleton;
+                result = attached;
+                break;
+            }
+            /* Is this for singleton methods on an object? */
+            case T_OBJECT:
+            {
+                *klass_flags |= kObjectSingleton;
+                result = rb_class_superclass(klass);
+                break;
+            }
+            /* Ok, this could be other things like an array put onto
+               a singleton object (yeah, it happens, see the singleton
+               objects test case). */
+            default:
+            {
+                *klass_flags |= kOtherSingleton;
+                result = klass;
+                break;
+            }
         }
     }
     /* Is this an include for a module? If so get the actual
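The rewrite replaces an if/else chain over BUILTIN_TYPE(attached) with a switch and makes the fall-through case explicit. A hedged usage sketch (classify_receiver is hypothetical; resolve_klass and the k*Singleton flags come from rp_method):

#include "rp_method.h" // assumed on the include path

static void classify_receiver(VALUE klass)
{
    unsigned int flags = 0;
    VALUE resolved = resolve_klass(klass, &flags);
    // For `def Foo.bar; end` the profiled klass is Foo's singleton class:
    // its __attached__ object is a T_CLASS, so flags gains kClassSingleton
    // and resolved is Foo itself. For obj.define_singleton_method the
    // attached object is a T_OBJECT and resolved is obj's actual class
    // (the superclass of the singleton class).
    (void)resolved;
}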
@@ -56,7 +63,7 @@ VALUE resolve_klass(VALUE klass, unsigned int* klass_flags)
     {
         unsigned int dummy;
         *klass_flags |= kModuleIncludee;
-        result = resolve_klass(
+        result = resolve_klass(RBASIC_CLASS(klass), &dummy);
     }
     return result;
 }
@@ -94,7 +101,7 @@ st_data_t method_key(VALUE klass, VALUE msym)
     }
     else if (BUILTIN_TYPE(klass) == T_ICLASS)
     {
-        resolved_klass =
+        resolved_klass = RBASIC_CLASS(klass);
     }
 
     st_data_t hash = rb_hash_start(0);
@@ -105,39 +112,6 @@ st_data_t method_key(VALUE klass, VALUE msym)
     return hash;
 }
 
-/* ====== Allocation Table ====== */
-st_table* allocations_table_create()
-{
-    return rb_st_init_numtable();
-}
-
-static int allocations_table_free_iterator(st_data_t key, st_data_t value, st_data_t dummy)
-{
-    prof_allocation_free((prof_allocation_t*)value);
-    return ST_CONTINUE;
-}
-
-static int prof_method_collect_allocations(st_data_t key, st_data_t value, st_data_t result)
-{
-    prof_allocation_t* allocation = (prof_allocation_t*)value;
-    VALUE arr = (VALUE)result;
-    rb_ary_push(arr, prof_allocation_wrap(allocation));
-    return ST_CONTINUE;
-}
-
-static int prof_method_mark_allocations(st_data_t key, st_data_t value, st_data_t data)
-{
-    prof_allocation_t* allocation = (prof_allocation_t*)value;
-    prof_allocation_mark(allocation);
-    return ST_CONTINUE;
-}
-
-void allocations_table_free(st_table* table)
-{
-    rb_st_foreach(table, allocations_table_free_iterator, 0);
-    rb_st_free_table(table);
-}
-
 /* ================ prof_method_t =================*/
 prof_method_t* prof_get_method(VALUE self)
 {
@@ -167,7 +141,7 @@ prof_method_t* prof_method_create(VALUE profile, VALUE klass, VALUE msym, VALUE
     result->measurement = prof_measurement_create();
 
     result->call_trees = prof_call_trees_create();
-    result->allocations_table =
+    result->allocations_table = prof_allocations_create();
 
     result->visits = 0;
     result->recursive = false;
@@ -205,7 +179,7 @@ static void prof_method_free(prof_method_t* method)
         method->object = Qnil;
     }
 
-    allocations_table_free(method->allocations_table);
+    prof_allocations_free(method->allocations_table);
     prof_call_trees_free(method->call_trees);
     prof_measurement_free(method->measurement);
     xfree(method);
@@ -223,21 +197,29 @@ void prof_method_mark(void* data)
     prof_method_t* method = (prof_method_t*)data;
 
     if (method->profile != Qnil)
-        rb_gc_mark(method->profile);
+        rb_gc_mark_movable(method->profile);
 
     if (method->object != Qnil)
-        rb_gc_mark(method->object);
+        rb_gc_mark_movable(method->object);
 
-    rb_gc_mark(method->klass_name);
-    rb_gc_mark(method->method_name);
+    rb_gc_mark_movable(method->klass_name);
+    rb_gc_mark_movable(method->method_name);
     rb_gc_mark(method->source_file);
 
     if (method->klass != Qnil)
         rb_gc_mark(method->klass);
 
     prof_measurement_mark(method->measurement);
+    prof_allocations_mark(method->allocations_table);
+}
 
-    rb_st_foreach(method->allocations_table, prof_method_mark_allocations, 0);
+void prof_method_compact(void* data)
+{
+    prof_method_t* method = (prof_method_t*)data;
+    method->object = rb_gc_location(method->object);
+    method->profile = rb_gc_location(method->profile);
+    method->klass_name = rb_gc_location(method->klass_name);
+    method->method_name = rb_gc_location(method->method_name);
 }
 
 static VALUE prof_method_allocate(VALUE klass)
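One asymmetry worth flagging: klass_name and method_name become movable, but klass itself keeps the pinning rb_gc_mark. A plausible reason, inferred rather than stated in the diff, is that table keys are derived from the klass VALUE (method_key hashes it; allocations_key shifts it into the key), so a relocated klass would no longer match entries already stored. A self-contained sketch of that dependency (stable_key is hypothetical; it repeats the allocations_key arithmetic shown earlier):

#include <ruby.h>

static st_data_t stable_key(VALUE klass, int source_line)
{
    // (klass << 4) + source_line bakes the klass address into the key, so
    // a key stays valid only while klass never moves. Pinning klass with
    // rb_gc_mark (instead of rb_gc_mark_movable) keeps it stable across
    // GC.compact.
    return ((st_data_t)klass << 4) + source_line;
}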
@@ -255,6 +237,7 @@ static const rb_data_type_t method_info_type =
         .dmark = prof_method_mark,
         .dfree = prof_method_ruby_gc_free,
         .dsize = prof_method_size,
+        .dcompact = prof_method_compact
     },
     .data = NULL,
     .flags = RUBY_TYPED_FREE_IMMEDIATELY
@@ -360,9 +343,7 @@ Returns an array of allocation information.*/
 static VALUE prof_method_allocations(VALUE self)
 {
     prof_method_t* method = prof_get_method(self);
-    VALUE result = rb_ary_new();
-    rb_st_foreach(method->allocations_table, prof_method_collect_allocations, result);
-    return result;
+    return prof_allocations_wrap(method->allocations_table);
 }
 
 /* call-seq:
@@ -499,13 +480,7 @@
     method_data->measurement = prof_get_measurement(measurement);
 
     VALUE allocations = rb_hash_aref(data, ID2SYM(rb_intern("allocations")));
-    for (int i = 0; i < rb_array_len(allocations); i++)
-    {
-        VALUE allocation = rb_ary_entry(allocations, i);
-        prof_allocation_t* allocation_data = prof_allocation_get(allocation);
-
-        rb_st_insert(method_data->allocations_table, allocation_data->key, (st_data_t)allocation_data);
-    }
+    prof_allocations_unwrap(method_data->allocations_table, allocations);
 
     return data;
 }
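prof_method_load now delegates to prof_allocations_unwrap, the exact inverse of the prof_allocations_wrap call used on the dump side. A hedged sketch of the round-trip symmetry (rebuild_table is hypothetical, not ruby-prof code):

#include "rp_allocation.h" // assumed on the include path

static st_table* rebuild_table(VALUE dumped_allocations)
{
    // Load side: re-index previously wrapped RubyProf::Allocation objects
    // by their saved keys, mirroring prof_method_load above. The table
    // holds borrowed pointers here, so it would be released with
    // rb_st_free_table rather than prof_allocations_free.
    st_table* table = prof_allocations_create();
    prof_allocations_unwrap(table, dumped_allocations);
    return table;
}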