allocations 1.0.4 → 1.0.5
- checksums.yaml +4 -4
- data/ext/liballocations/liballocations.c +41 -25
- data/ext/liballocations/liballocations.h +2 -0
- data/ext/liballocations/state.c +65 -0
- data/ext/liballocations/state.h +17 -0
- data/lib/allocations/version.rb +1 -1
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a46f508510123c761b991e64889ac068ed815bd8
+  data.tar.gz: d68c81c787976c85a8887cc7bdab6968d4edf279
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2d3b904496f79e974224f08a32ccd5063f15246947a9f74d29278b1205c2c6329b00302865534561dedf666601622ab994b5559b8dd7536a12b31e7ba8142107
+  data.tar.gz: c2ea1a53ac53a20e4b95e59d7ace5ef05890b23885a7af8fd8f9a85a923d499c9c8bccf5cc7d0a609fbddb82b5b325d1cbbf4f598582465927f1d4e62902f2d6
data/ext/liballocations/liballocations.c
CHANGED
@@ -1,12 +1,14 @@
 #include "liballocations.h"
 
-st_table *object_counts;
-
 VALUE allocation_tracer;
 VALUE free_tracer;
+VALUE state_const;
 
 ID id_enabled;
 
+#define IS_SINGLETON(klass) \
+    RB_TYPE_P(klass, T_CLASS) && FL_TEST(klass, FL_SINGLETON)
+
 /**
  * Called whenever a new Ruby object is allocated.
  */
@@ -15,16 +17,23 @@ void newobj_callback(VALUE tracepoint, void* data) {
 
     st_data_t count = 0;
 
-
-
+    AllocationState *state = allocation_state_get_struct(state_const);
+
+    VALUE klass = rb_tracearg_defined_class(trace_arg);
 
     /* These aren't actually allocated so there's no point in tracking them. */
     if ( klass == Qtrue || klass == Qfalse || klass == Qnil ) {
         return;
     }
 
-
-
+    // We don't care about singleton classes since only one of them exists at a
+    // time. The logic here is stolen from MRI's implementation of
+    // Class#singleton_class? as MRI sadly provides no public C function for
+    // this method.
+    if ( IS_SINGLETON(klass) ) return;
+
+    st_lookup(state->object_counts, (st_data_t) klass, &count);
+    st_insert(state->object_counts, (st_data_t) klass, count + 1);
 }
 
 /**
@@ -34,20 +43,22 @@ void newobj_callback(VALUE tracepoint, void* data) {
  * segfault.
  */
 void freeobj_callback(VALUE tracepoint, void* data) {
+    st_data_t count;
+
     rb_trace_arg_t *trace_arg = rb_tracearg_from_tracepoint(tracepoint);
+    AllocationState *state = allocation_state_get_struct(state_const);
 
-
+    VALUE klass = rb_tracearg_defined_class(trace_arg);
 
-
-    VALUE klass = RBASIC_CLASS(obj);
+    if ( IS_SINGLETON(klass) ) return;
 
-    if ( st_lookup(object_counts, (st_data_t) klass, &count) ) {
-        if ( count > 0 && (count - 1) > 0) {
-            st_insert(object_counts, (st_data_t) klass, count - 1);
+    if ( st_lookup(state->object_counts, (st_data_t) klass, &count) ) {
+        if ( count > 0 && (count - 1) > 0 ) {
+            st_insert(state->object_counts, (st_data_t) klass, count - 1);
         }
         /* Remove the entry if the count is now 0 */
         else {
-            st_delete(object_counts, (st_data_t*) &klass, NULL);
+            st_delete(state->object_counts, (st_data_t*) &klass, NULL);
         }
     }
 }
@@ -58,9 +69,7 @@ void freeobj_callback(VALUE tracepoint, void* data) {
 static int each_count(st_data_t key, st_data_t value, st_data_t hash_ptr) {
     VALUE vkey = (VALUE) key;
 
-
-        rb_hash_aset((VALUE) hash_ptr, vkey, INT2NUM(value));
-    }
+    rb_hash_aset((VALUE) hash_ptr, vkey, INT2NUM(value));
 
     return ST_CONTINUE;
 }
@@ -71,19 +80,22 @@ static int each_count(st_data_t key, st_data_t value, st_data_t hash_ptr) {
  * The returned Hash contains its own copy of the statistics, any further object
  * allocations/frees will not modify said Hash.
  *
+ * This method ignores singleton classes.
+ *
  * call-seq:
  *   Allocations.to_hash -> Hash
  */
 VALUE allocations_to_hash(VALUE self) {
+    AllocationState *state = allocation_state_get_struct(state_const);
+
     st_table *local_counts;
     VALUE hash;
 
-    if ( !object_counts ) {
+    if ( !state->object_counts ) {
         return rb_hash_new();
     }
 
-    local_counts =
-
+    local_counts = allocation_state_copy_table(state);
     hash = rb_hash_new();
 
     st_foreach(local_counts, each_count, (st_data_t) hash);
@@ -100,11 +112,13 @@ VALUE allocations_to_hash(VALUE self) {
  *   Allocations.start -> nil
  */
 VALUE allocations_start(VALUE self) {
+    AllocationState *state = allocation_state_get_struct(state_const);
+
     if ( rb_ivar_get(self, id_enabled) == Qtrue ) {
         return Qnil;
     }
 
-
+    allocation_state_allocate_counts(state);
 
     rb_ivar_set(self, id_enabled, Qtrue);
 
@@ -121,6 +135,8 @@ VALUE allocations_start(VALUE self) {
  *   Allocations.stop -> nil
  */
 VALUE allocations_stop(VALUE self) {
+    AllocationState *state = allocation_state_get_struct(state_const);
+
     if ( rb_ivar_get(self, id_enabled) != Qtrue ) {
         return Qnil;
     }
@@ -128,11 +144,7 @@ VALUE allocations_stop(VALUE self) {
     rb_tracepoint_disable(allocation_tracer);
     rb_tracepoint_disable(free_tracer);
 
-
-        st_free_table(object_counts);
-    }
-
-    object_counts = NULL;
+    allocation_state_reset_counts(state);
 
     rb_ivar_set(self, id_enabled, Qfalse);
 
@@ -173,4 +185,8 @@ void Init_liballocations() {
 
     rb_define_const(mAllocations, "ALLOCATION_TRACER", allocation_tracer);
     rb_define_const(mAllocations, "FREE_TRACER", free_tracer);
+
+    Init_allocations_state();
+
+    state_const = rb_const_get(mAllocations, rb_intern("STATE"));
 }
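The public API implemented by this file is unchanged by the diff; only the storage of the counts moves from the old `object_counts` global into the wrapped state object. A minimal usage sketch of that API, based on the call-seq comments above (assuming lib/allocations.rb loads the compiled extension as usual; exact counts depend on what else the process allocates):

    require 'allocations'

    Allocations.start             # enables the allocation/free tracepoints

    strings = Array.new(100) { String.new }

    counts = Allocations.to_hash  # a fresh Hash mapping classes to counts,
                                  # e.g. counts[String]; as of 1.0.5 singleton
                                  # classes never appear as keys
    Allocations.stop              # disables the tracepoints and resets the counts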
data/ext/liballocations/state.c
ADDED
@@ -0,0 +1,65 @@
+#include "state.h"
+
+static int mark_each_entry(st_data_t key, st_data_t value, st_data_t data) {
+    rb_gc_mark((VALUE) key);
+
+    return ST_CONTINUE;
+}
+
+// Initializes the table used for storing object counts.
+void allocation_state_allocate_counts(AllocationState *state) {
+    state->object_counts = st_init_numtable();
+}
+
+// Resets the table used for storing object counts.
+void allocation_state_reset_counts(AllocationState *state) {
+    if ( state->object_counts ) {
+        st_free_table(state->object_counts);
+    }
+
+    state->object_counts = NULL;
+}
+
+// Returns the AllocationState wrapped by `object`
+AllocationState *allocation_state_get_struct(VALUE object) {
+    AllocationState *state;
+
+    Data_Get_Struct(object, AllocationState, state);
+
+    return state;
+}
+
+st_table *allocation_state_copy_table(AllocationState *state) {
+    return st_copy(state->object_counts);
+}
+
+void allocation_state_mark(AllocationState *state) {
+    if ( state->object_counts ) {
+        st_foreach(state->object_counts, mark_each_entry, (st_data_t) NULL);
+    }
+}
+
+void allocation_state_free(AllocationState *state) {
+    allocation_state_reset_counts(state);
+
+    free(state);
+}
+
+VALUE allocation_state_allocate(VALUE klass) {
+    AllocationState *state = ALLOC(AllocationState);
+
+    state->object_counts = NULL;
+
+    return Data_Wrap_Struct(klass, allocation_state_mark, allocation_state_free,
+                            state);
+}
+
+void Init_allocations_state() {
+    VALUE mAllocations = rb_const_get(rb_cObject, rb_intern("Allocations"));
+    VALUE cState = rb_define_class_under(mAllocations, "State", rb_cObject);
+
+    rb_define_alloc_func(cState, allocation_state_allocate);
+
+    rb_define_const(mAllocations, "STATE",
+                    rb_funcall(cState, rb_intern("new"), 0));
+}
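Compared to the previous bare `st_table *object_counts` global, wrapping the table in a Data_Wrap_Struct object gives the GC a mark function for the class keys (mark_each_entry via allocation_state_mark) and a free function for the table itself (allocation_state_free). The only Ruby-visible artifact is the new constant; a rough sketch of what this looks like from Ruby, assuming the extension is loaded:

    require 'allocations'

    # The per-process state object created by Init_allocations_state:
    Allocations::STATE.class                      # => Allocations::State
    Allocations::STATE.is_a?(Allocations::State)  # => true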
data/ext/liballocations/state.h
ADDED
@@ -0,0 +1,17 @@
+#ifndef ALLOCATIONS_STATE_H
+#define ALLOCATIONS_STATE_H
+
+#include "liballocations.h"
+
+typedef struct {
+    st_table *object_counts;
+} AllocationState;
+
+extern void allocation_state_reset_counts(AllocationState*);
+extern void allocation_state_allocate_counts(AllocationState*);
+extern AllocationState *allocation_state_get_struct(VALUE object);
+extern st_table *allocation_state_copy_table(AllocationState*);
+
+extern void Init_allocations_state();
+
+#endif
data/lib/allocations/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: allocations
 version: !ruby/object:Gem::Version
-  version: 1.0.4
+  version: 1.0.5
 platform: ruby
 authors:
 - Yorick Peterse
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-
+date: 2016-06-14 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake
@@ -79,6 +79,8 @@ files:
 - ext/liballocations/extconf.rb
 - ext/liballocations/liballocations.c
 - ext/liballocations/liballocations.h
+- ext/liballocations/state.c
+- ext/liballocations/state.h
 - lib/allocations.rb
 - lib/allocations/version.rb
 homepage: https://gitlab.com/gitlab-org/allocations