heap_dump 0.0.32 → 0.0.33

Sign up to get free protection for your applications and to get access to all the features.
data/README.md CHANGED
@@ -4,7 +4,7 @@ Low-level ruby heap memory dump - including data and code references, useful for
4
4
  Has no performance overhead while not active, so can be used in production environment.
5
5
 
6
6
  Originally written across ruby 1.9.2-p290 data structures.
7
- Does work on other 1.9.2s, 1.9.3 and 2.0.0-preview1, but not well-tested yet(output is not proven to be as full etc.).
7
+ Does work on other 1.9.2s, 1.9.3, 2.0.0-preview1 and 2.0.0-preview2, but is not well-tested yet (output is not proven to be complete, etc.).
8
8
 
9
9
  Currently is under development and output format may differ.
10
10
 
@@ -69,7 +69,7 @@ Also heap_dump now includes an object counter, like `ObjectSpace.count_objects`,
69
69
 
70
70
  ```ruby
71
71
 
72
- HeapDump.count_objects :YourNameSpace # => json string
72
+ HeapDump.count_objects [YourNameSpace, "SomeClass"] # => json string
73
73
  ```
74
74
 
75
75
  which results in something like:
@@ -100,7 +100,8 @@ which results in something like:
100
100
  },
101
101
  "user_types": {
102
102
  "YourNameSpace::B": 2,
103
- "YourNameSpace::A": 3
103
+ "YourNameSpace::A": 3,
104
+ "SomeClass": 1
104
105
  }
105
106
  }
106
107
  ```
@@ -131,6 +132,9 @@ or `call heapdump_dump(0)`, filename defaults to dump.json.
131
132
  Note that yajl-ruby gem (and heap_dump itself) should be available to process this being injected into.
132
133
  Also on rare occasions the process (for example if gdb is attached during a signal/gc) may crash after or even during dumping, so a safer way is to embed it in advance; there's no performance overhead.
133
134
 
135
+ Object count from gdb: `call (void)heapdump_count_objects_print("Object", "")` (null or empty string terminated list of namespace/class names)
136
+ or `call (char*)heapdump_count_objects_return("Object", "")`; note that you should then free the memory - `call (void)free($$)`
137
+
134
138
  ### Importing dump in MongoDB
135
139
 
136
140
  Dump can be imported in mongo for some map-reduce, easy script access etc.
@@ -178,6 +182,21 @@ Then make a full dump and inspect references to those objects.
178
182
 
179
183
  Note that heapdump operates only on the process it has been called on, so if you have multiple workers (Unicorn/Rainbows/Passenger spawned processes etc.) you may run into a situation where the request for a dump is not routed to the process you're interested in.
180
184
 
185
+ Also it may be a good idea to run the dump in a forked process and/or on a signal:
186
+
187
+ ```ruby
188
+
189
+ Signal.trap('USR2') do
190
+ old_pid = Process.pid
191
+ fork {
192
+ puts "Dumping worker #{old_pid}"
193
+ require 'heap_dump'
194
+ HeapDump.dump "dump_#{old_pid}.json"
195
+ exit
196
+ }
197
+ end
198
+ ```
199
+
181
200
  ## Contributing
182
201
 
183
202
  1. Fork it
data/Rakefile CHANGED
@@ -42,6 +42,9 @@ task :test => :compile do
42
42
  HeapDump.verbose = true
43
43
  HeapDump.dump
44
44
  puts "Done"
45
+ class A; end
46
+ A.new
47
+ puts HeapDump.count_objects [A]
45
48
  end
46
49
 
47
50
  task :default => :test
@@ -13,6 +13,7 @@ unless ARGV.any? {|arg| arg.include?('--with-ruby-include') }
13
13
  ruby_include = "#{ENV['rvm_path']}/src/#{$2}" unless File.exist?(ruby_include)
14
14
  ARGV << "--with-ruby-include=#{ruby_include}"
15
15
  end
16
+ puts "Using ruby source from #{ruby_include}"
16
17
  end
17
18
 
18
19
  require 'mkmf'
@@ -45,8 +46,14 @@ spec = instance_eval(File.read(gemspec), gemspec).dependencies.find{|d|d.name ==
45
46
  yajl = find_gem_dir(spec.name, spec.requirement)
46
47
  find_header('api/yajl_gen.h', File.join(yajl, 'ext', 'yajl'))
47
48
 
48
- #TODO: inject ruby version
49
- unless find_header("gc_internal.h", File.join(File.dirname(__FILE__),'specific', "ruby-#{RUBY_VERSION}")) && have_header("gc_internal.h")
49
+ specific = RUBY_VERSION
50
+
51
+ # here other specific headers may be injected, for example - patched releases etc.
52
+ if RUBY_VERSION == '2.0.0' && RUBY_REVISION == 37411
53
+ specific = "2.0.0_preview1"
54
+ end
55
+
56
+ unless find_header("gc_internal.h", File.join(File.dirname(__FILE__),'specific', "ruby-#{specific}")) && have_header("gc_internal.h")
50
57
  raise "Do not have internal structs for your ruby version"
51
58
  end
52
59
 
@@ -85,6 +92,10 @@ hdrs = proc {
85
92
 
86
93
  dir_config("ruby") # allow user to pass in non-standard core include directory
87
94
 
95
+ if ENV['DEBUG']
96
+ CONFIG['debugflags'] << ' -ggdb3 -O0'
97
+ end
98
+
88
99
  if !Debugger::RubyCoreSource::create_makefile_with_core(hdrs, "heap_dump")
89
100
  STDERR.print("Makefile creation failed\n")
90
101
  STDERR.print("*************************************************************\n\n")
@@ -390,7 +390,8 @@ static void dump_node_refs(NODE* obj, walk_ctx_t* ctx){
390
390
  return;
391
391
 
392
392
  case NODE_MEMO:
393
- yg_id((VALUE)obj->u1.node);
393
+ if(is_in_heap(NULL, obj->u1.node))
394
+ yg_id((VALUE)obj->u1.node);
394
395
  break;
395
396
 
396
397
  //not implemented:
@@ -1718,26 +1719,16 @@ iterate_user_type_counts(VALUE key, VALUE value, yajl_gen yajl){
1718
1719
  }
1719
1720
 
1720
1721
  static VALUE
1721
- rb_heapdump_count_objects(VALUE self, VALUE string_prefixes, VALUE do_gc){
1722
- yajl_gen_config cfg;
1723
- yajl_gen yajl;
1722
+ heapdump_count_objects_core(yajl_gen yajl, VALUE string_prefixes, int do_gc){
1724
1723
  VALUE cls, class_name, prefix;
1725
1724
  size_t counts[T_MASK+1];
1726
1725
  size_t freed = 0;
1727
1726
  size_t total = 0;
1728
1727
  size_t i;
1729
1728
  long int n;
1730
- const unsigned char* buf;
1731
- unsigned int len;
1732
1729
  VALUE hash = rb_hash_new();
1733
1730
  rb_objspace_t *objspace = GET_THREAD()->vm->objspace;
1734
1731
 
1735
- rb_check_array_type(string_prefixes);
1736
- memset(&cfg, 0, sizeof(cfg));
1737
- cfg.beautify = true;
1738
- cfg.htmlSafe = true;
1739
- cfg.indentString = " ";
1740
- yajl = yajl_gen_alloc(&cfg,NULL);
1741
1732
  yg_map();
1742
1733
  if(do_gc){
1743
1734
  yg_cstring("gc_ran");
@@ -1799,6 +1790,26 @@ rb_heapdump_count_objects(VALUE self, VALUE string_prefixes, VALUE do_gc){
1799
1790
  yg_map_end();
1800
1791
 
1801
1792
  yg_map_end(); //all document
1793
+ return hash;
1794
+ #undef YAJL
1795
+ }
1796
+
1797
+ static VALUE
1798
+ rb_heapdump_count_objects(VALUE self, VALUE string_prefixes, VALUE do_gc){
1799
+ yajl_gen_config cfg;
1800
+ yajl_gen yajl;
1801
+ const unsigned char* buf;
1802
+ unsigned int len;
1803
+
1804
+ rb_check_array_type(string_prefixes);
1805
+
1806
+ memset(&cfg, 0, sizeof(cfg));
1807
+ cfg.beautify = true;
1808
+ cfg.htmlSafe = true;
1809
+ cfg.indentString = " ";
1810
+ yajl = yajl_gen_alloc(&cfg,NULL);
1811
+
1812
+ heapdump_count_objects_core(yajl, string_prefixes, RTEST(do_gc));
1802
1813
 
1803
1814
  //flush yajl:
1804
1815
  if(yajl_gen_get_buf(yajl, &buf, &len) == yajl_gen_status_ok){
@@ -1810,7 +1821,68 @@ rb_heapdump_count_objects(VALUE self, VALUE string_prefixes, VALUE do_gc){
1810
1821
  } else {
1811
1822
  return Qnil;
1812
1823
  }
1813
- #undef YAJL
1824
+ }
1825
+
1826
+ //NOTE: return value must be freed if not null
1827
+ static const char* heapdump_count_objects_ex(int return_string, char* first_name, va_list args){
1828
+ yajl_gen_config cfg;
1829
+ yajl_gen yajl;
1830
+ const unsigned char* buf;
1831
+ unsigned int len;
1832
+ VALUE string_prefixes;
1833
+
1834
+
1835
+ memset(&cfg, 0, sizeof(cfg));
1836
+ cfg.beautify = !return_string;
1837
+ // cfg.htmlSafe = true;
1838
+ cfg.indentString = " ";
1839
+ yajl = yajl_gen_alloc(&cfg,NULL);
1840
+
1841
+ string_prefixes = rb_ary_new();
1842
+ while(first_name && first_name[0]){
1843
+ rb_ary_push(string_prefixes, rb_str_new2(first_name));
1844
+ first_name = va_arg(args, char*);
1845
+ }
1846
+
1847
+ heapdump_count_objects_core(yajl, string_prefixes, false);
1848
+
1849
+ //flush yajl:
1850
+ if(yajl_gen_get_buf(yajl, &buf, &len) == yajl_gen_status_ok){
1851
+ char* result = NULL;
1852
+ if(!return_string){
1853
+ fwrite(buf, len, 1, stderr);
1854
+ fprintf(stderr, "\n");
1855
+ } else {
1856
+ result = malloc(len);
1857
+ memcpy(result, buf, len);
1858
+ }
1859
+ yajl_gen_clear(yajl);
1860
+ yajl_gen_free(yajl);
1861
+ return result; //NOTE: that memory is already freed! (but it's usually ok for gdb)
1862
+ }
1863
+ return NULL;
1864
+ }
1865
+
1866
+ void heapdump_count_objects_print(char* first_name, ...){
1867
+ va_list args;
1868
+ va_start(args, first_name);
1869
+ heapdump_count_objects_ex(false, first_name, args);
1870
+ va_end(args);
1871
+ }
1872
+
1873
+ //NOTE: return value must be freed if not null
1874
+ const char* heapdump_count_objects_return(char* first_name, ...){
1875
+ va_list args;
1876
+ va_start(args, first_name);
1877
+ const char* res = heapdump_count_objects_ex(true, first_name, args);
1878
+ va_end(args);
1879
+ return res;
1880
+ }
1881
+
1882
+
1883
+ static VALUE rb_heapdump_trigger_int_3(VALUE self){
1884
+ __asm__("int $0x3;");
1885
+ return Qnil;
1814
1886
  }
1815
1887
 
1816
1888
  void Init_heap_dump(){
@@ -1827,6 +1899,7 @@ void Init_heap_dump(){
1827
1899
  rb_mHeapDumpModule = rb_define_module("HeapDump");
1828
1900
  rb_define_singleton_method(rb_mHeapDumpModule, "dump_ext", rb_heapdump_dump, 1);
1829
1901
  rb_define_singleton_method(rb_mHeapDumpModule, "count_objects_ext", rb_heapdump_count_objects, 2);
1902
+ rb_define_singleton_method(rb_mHeapDumpModule, "int3", rb_heapdump_trigger_int_3, 0);
1830
1903
 
1831
1904
  rb_define_singleton_method(rb_mHeapDumpModule, "verbose", heapdump_verbose, 0);
1832
1905
  rb_define_singleton_method(rb_mHeapDumpModule, "verbose=", heapdump_verbose_setter, 1);
@@ -29,7 +29,6 @@ typedef struct gc_profile_record {
29
29
  #endif
30
30
  } gc_profile_record;
31
31
 
32
-
33
32
  #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
34
33
  #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
35
34
  #endif
@@ -69,9 +68,7 @@ typedef struct RVALUE {
69
68
  #endif
70
69
 
71
70
  struct heaps_slot {
72
- void *membase;
73
- RVALUE *slot;
74
- size_t limit;
71
+ struct heaps_header *header;
75
72
  uintptr_t *bits;
76
73
  RVALUE *freelist;
77
74
  struct heaps_slot *next;
@@ -82,12 +79,9 @@ struct heaps_slot {
82
79
  struct heaps_header {
83
80
  struct heaps_slot *base;
84
81
  uintptr_t *bits;
85
- };
86
-
87
- struct sorted_heaps_slot {
88
82
  RVALUE *start;
89
83
  RVALUE *end;
90
- struct heaps_slot *slot;
84
+ size_t limit;
91
85
  };
92
86
 
93
87
  struct heaps_free_bitmap {
@@ -133,13 +127,12 @@ typedef struct rb_objspace {
133
127
  struct heaps_slot *ptr;
134
128
  struct heaps_slot *sweep_slots;
135
129
  struct heaps_slot *free_slots;
136
- struct sorted_heaps_slot *sorted;
130
+ struct heaps_header **sorted;
137
131
  size_t length;
138
132
  size_t used;
139
133
  struct heaps_free_bitmap *free_bitmap;
140
134
  RVALUE *range[2];
141
- RVALUE *freed;
142
- size_t live_num;
135
+ struct heaps_header *freed;
143
136
  size_t free_num;
144
137
  size_t free_min;
145
138
  size_t final_num;
@@ -165,6 +158,8 @@ typedef struct rb_objspace {
165
158
  } profile;
166
159
  struct gc_list *global_list;
167
160
  size_t count;
161
+ size_t total_allocated_object_num;
162
+ size_t total_freed_object_num;
168
163
  int gc_stress;
169
164
 
170
165
  struct mark_func_data_struct {
@@ -208,7 +203,6 @@ typedef struct rb_objspace {
208
203
  #define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * CHAR_BIT))
209
204
  #define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * CHAR_BIT)-1))
210
205
  #define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
211
-
212
206
  //
213
207
  #define RANY(o) ((RVALUE*)(o))
214
208
  //
@@ -217,7 +211,7 @@ static inline int
217
211
  is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
218
212
  {
219
213
  register RVALUE *p = RANY(ptr);
220
- register struct sorted_heaps_slot *heap;
214
+ register struct heaps_header *heap;
221
215
  register size_t hi, lo, mid;
222
216
 
223
217
  if (p < lomem || p > himem) return FALSE;
@@ -228,7 +222,7 @@ is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
228
222
  hi = heaps_used;
229
223
  while (lo < hi) {
230
224
  mid = (lo + hi) / 2;
231
- heap = &objspace->heap.sorted[mid];
225
+ heap = objspace->heap.sorted[mid];
232
226
  if (heap->start <= p) {
233
227
  if (p < heap->end)
234
228
  return TRUE;
@@ -242,10 +236,10 @@ is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
242
236
  }
243
237
 
244
238
  //from count_objects:
245
- #define FOR_EACH_HEAP_SLOT(p) for (i = 0; i < heaps_used; i++) {\
246
- RVALUE *p = objspace->heap.sorted[i].start, *pend = objspace->heap.sorted[i].end;\
247
- if(!p) continue;\
248
- for (; p < pend; p++) {
249
- #define FOR_EACH_HEAP_SLOT_END(total) } total += objspace->heap.sorted[i].slot->limit; }
239
+ #define FOR_EACH_HEAP_SLOT(p) for (i = 0; i < heaps_used; i++) { RVALUE *p, *pend;\
240
+ p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;\
241
+ for (;p < pend; p++) {
242
+
243
+ #define FOR_EACH_HEAP_SLOT_END(total) } total += objspace->heap.sorted[i]->limit; }
250
244
 
251
245
  #define NODE_OPTBLOCK 1000000 //FIXME
@@ -0,0 +1,67 @@
1
+ #define CAPTURE_JUST_VALID_VM_STACK 1
2
+
3
+ enum context_type {
4
+ CONTINUATION_CONTEXT = 0,
5
+ FIBER_CONTEXT = 1,
6
+ ROOT_FIBER_CONTEXT = 2
7
+ };
8
+
9
+ typedef struct rb_context_struct {
10
+ enum context_type type;
11
+ VALUE self;
12
+ int argc;
13
+ VALUE value;
14
+ VALUE *vm_stack;
15
+ #ifdef CAPTURE_JUST_VALID_VM_STACK
16
+ size_t vm_stack_slen; /* length of stack (head of th->stack) */
17
+ size_t vm_stack_clen; /* length of control frames (tail of th->stack) */
18
+ #endif
19
+ VALUE *machine_stack;
20
+ VALUE *machine_stack_src;
21
+ #ifdef __ia64
22
+ VALUE *machine_register_stack;
23
+ VALUE *machine_register_stack_src;
24
+ int machine_register_stack_size;
25
+ #endif
26
+ rb_thread_t saved_thread;
27
+ rb_jmpbuf_t jmpbuf;
28
+ size_t machine_stack_size;
29
+ } rb_context_t;
30
+
31
+ enum fiber_status {
32
+ CREATED,
33
+ RUNNING,
34
+ TERMINATED
35
+ };
36
+
37
+ #if FIBER_USE_NATIVE && !defined(_WIN32)
38
+ #define MAX_MAHINE_STACK_CACHE 10
39
+ static int machine_stack_cache_index = 0;
40
+ typedef struct machine_stack_cache_struct {
41
+ void *ptr;
42
+ size_t size;
43
+ } machine_stack_cache_t;
44
+ static machine_stack_cache_t machine_stack_cache[MAX_MAHINE_STACK_CACHE];
45
+ static machine_stack_cache_t terminated_machine_stack;
46
+ #endif
47
+
48
+ typedef struct rb_fiber_struct {
49
+ rb_context_t cont;
50
+ VALUE prev;
51
+ enum fiber_status status;
52
+ struct rb_fiber_struct *prev_fiber;
53
+ struct rb_fiber_struct *next_fiber;
54
+ /* If a fiber invokes "transfer",
55
+ * then this fiber can't "resume" any more after that.
56
+ * You shouldn't mix "transfer" and "resume".
57
+ */
58
+ int transfered;
59
+
60
+ #if FIBER_USE_NATIVE
61
+ #ifdef _WIN32
62
+ void *fib_handle;
63
+ #else
64
+ ucontext_t context;
65
+ #endif
66
+ #endif
67
+ } rb_fiber_t;
@@ -0,0 +1,251 @@
1
+ #include "ruby/re.h"
2
+
3
+ #ifndef GC_PROFILE_MORE_DETAIL
4
+ #define GC_PROFILE_MORE_DETAIL 0
5
+ #endif
6
+
7
+ typedef struct gc_profile_record {
8
+ double gc_time;
9
+ double gc_invoke_time;
10
+
11
+ size_t heap_total_objects;
12
+ size_t heap_use_size;
13
+ size_t heap_total_size;
14
+
15
+ int is_marked;
16
+
17
+ #if GC_PROFILE_MORE_DETAIL
18
+ double gc_mark_time;
19
+ double gc_sweep_time;
20
+
21
+ size_t heap_use_slots;
22
+ size_t heap_live_objects;
23
+ size_t heap_free_objects;
24
+
25
+ int have_finalize;
26
+
27
+ size_t allocate_increase;
28
+ size_t allocate_limit;
29
+ #endif
30
+ } gc_profile_record;
31
+
32
+
33
+ #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
34
+ #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
35
+ #endif
36
+
37
+ typedef struct RVALUE {
38
+ union {
39
+ struct {
40
+ VALUE flags; /* always 0 for freed obj */
41
+ struct RVALUE *next;
42
+ } free;
43
+ struct RBasic basic;
44
+ struct RObject object;
45
+ struct RClass klass;
46
+ struct RFloat flonum;
47
+ struct RString string;
48
+ struct RArray array;
49
+ struct RRegexp regexp;
50
+ struct RHash hash;
51
+ struct RData data;
52
+ struct RTypedData typeddata;
53
+ struct RStruct rstruct;
54
+ struct RBignum bignum;
55
+ struct RFile file;
56
+ struct RNode node;
57
+ struct RMatch match;
58
+ struct RRational rational;
59
+ struct RComplex complex;
60
+ } as;
61
+ #ifdef GC_DEBUG
62
+ const char *file;
63
+ int line;
64
+ #endif
65
+ } RVALUE;
66
+
67
+ #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
68
+ #pragma pack(pop)
69
+ #endif
70
+
71
+ struct heaps_slot {
72
+ void *membase;
73
+ RVALUE *slot;
74
+ size_t limit;
75
+ uintptr_t *bits;
76
+ RVALUE *freelist;
77
+ struct heaps_slot *next;
78
+ struct heaps_slot *prev;
79
+ struct heaps_slot *free_next;
80
+ };
81
+
82
+ struct heaps_header {
83
+ struct heaps_slot *base;
84
+ uintptr_t *bits;
85
+ };
86
+
87
+ struct sorted_heaps_slot {
88
+ RVALUE *start;
89
+ RVALUE *end;
90
+ struct heaps_slot *slot;
91
+ };
92
+
93
+ struct heaps_free_bitmap {
94
+ struct heaps_free_bitmap *next;
95
+ };
96
+
97
+ struct gc_list {
98
+ VALUE *varptr;
99
+ struct gc_list *next;
100
+ };
101
+
102
+ #define STACK_CHUNK_SIZE 500
103
+
104
+ typedef struct stack_chunk {
105
+ VALUE data[STACK_CHUNK_SIZE];
106
+ struct stack_chunk *next;
107
+ } stack_chunk_t;
108
+
109
+ typedef struct mark_stack {
110
+ stack_chunk_t *chunk;
111
+ stack_chunk_t *cache;
112
+ size_t index;
113
+ size_t limit;
114
+ size_t cache_size;
115
+ size_t unused_cache_size;
116
+ } mark_stack_t;
117
+
118
+ #ifndef CALC_EXACT_MALLOC_SIZE
119
+ #define CALC_EXACT_MALLOC_SIZE 0
120
+ #endif
121
+
122
+ typedef struct rb_objspace {
123
+ struct {
124
+ size_t limit;
125
+ size_t increase;
126
+ #if CALC_EXACT_MALLOC_SIZE
127
+ size_t allocated_size;
128
+ size_t allocations;
129
+ #endif
130
+ } malloc_params;
131
+ struct {
132
+ size_t increment;
133
+ struct heaps_slot *ptr;
134
+ struct heaps_slot *sweep_slots;
135
+ struct heaps_slot *free_slots;
136
+ struct sorted_heaps_slot *sorted;
137
+ size_t length;
138
+ size_t used;
139
+ struct heaps_free_bitmap *free_bitmap;
140
+ RVALUE *range[2];
141
+ RVALUE *freed;
142
+ size_t live_num;
143
+ size_t free_num;
144
+ size_t free_min;
145
+ size_t final_num;
146
+ size_t do_heap_free;
147
+ } heap;
148
+ struct {
149
+ int dont_gc;
150
+ int dont_lazy_sweep;
151
+ int during_gc;
152
+ rb_atomic_t finalizing;
153
+ } flags;
154
+ struct {
155
+ st_table *table;
156
+ RVALUE *deferred;
157
+ } final;
158
+ mark_stack_t mark_stack;
159
+ struct {
160
+ int run;
161
+ gc_profile_record *record;
162
+ size_t count;
163
+ size_t size;
164
+ double invoke_time;
165
+ } profile;
166
+ struct gc_list *global_list;
167
+ size_t count;
168
+ int gc_stress;
169
+
170
+ struct mark_func_data_struct {
171
+ void *data;
172
+ void (*mark_func)(VALUE v, void *data);
173
+ } *mark_func_data;
174
+ } rb_objspace_t;
175
+
176
+ #define malloc_limit objspace->malloc_params.limit
177
+ #define malloc_increase objspace->malloc_params.increase
178
+ #define heaps objspace->heap.ptr
179
+ #define heaps_length objspace->heap.length
180
+ #define heaps_used objspace->heap.used
181
+ #define lomem objspace->heap.range[0]
182
+ #define himem objspace->heap.range[1]
183
+ #define heaps_inc objspace->heap.increment
184
+ #define heaps_freed objspace->heap.freed
185
+ #define dont_gc objspace->flags.dont_gc
186
+ #define during_gc objspace->flags.during_gc
187
+ #define finalizing objspace->flags.finalizing
188
+ #define finalizer_table objspace->final.table
189
+ #define deferred_final_list objspace->final.deferred
190
+ #define global_List objspace->global_list
191
+ #define ruby_gc_stress objspace->gc_stress
192
+ #define initial_malloc_limit initial_params.initial_malloc_limit
193
+ #define initial_heap_min_slots initial_params.initial_heap_min_slots
194
+ #define initial_free_min initial_params.initial_free_min
195
+
196
+ #define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
197
+
198
+ #define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
199
+
200
+ #define RANY(o) ((RVALUE*)(o))
201
+ #define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
202
+
203
+ #define HEAP_HEADER(p) ((struct heaps_header *)(p))
204
+ #define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
205
+ #define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
206
+ #define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
207
+ #define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
208
+ #define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * CHAR_BIT))
209
+ #define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * CHAR_BIT)-1))
210
+ #define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
211
+
212
+ //
213
+ #define RANY(o) ((RVALUE*)(o))
214
+ //
215
+
216
+ static inline int
217
+ is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
218
+ {
219
+ register RVALUE *p = RANY(ptr);
220
+ register struct sorted_heaps_slot *heap;
221
+ register size_t hi, lo, mid;
222
+
223
+ if (p < lomem || p > himem) return FALSE;
224
+ if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
225
+
226
+ /* check if p looks like a pointer using bsearch*/
227
+ lo = 0;
228
+ hi = heaps_used;
229
+ while (lo < hi) {
230
+ mid = (lo + hi) / 2;
231
+ heap = &objspace->heap.sorted[mid];
232
+ if (heap->start <= p) {
233
+ if (p < heap->end)
234
+ return TRUE;
235
+ lo = mid + 1;
236
+ }
237
+ else {
238
+ hi = mid;
239
+ }
240
+ }
241
+ return FALSE;
242
+ }
243
+
244
+ //from count_objects:
245
+ #define FOR_EACH_HEAP_SLOT(p) for (i = 0; i < heaps_used; i++) {\
246
+ RVALUE *p = objspace->heap.sorted[i].start, *pend = objspace->heap.sorted[i].end;\
247
+ if(!p) continue;\
248
+ for (; p < pend; p++) {
249
+ #define FOR_EACH_HEAP_SLOT_END(total) } total += objspace->heap.sorted[i].slot->limit; }
250
+
251
+ #define NODE_OPTBLOCK 1000000 //FIXME
@@ -0,0 +1,109 @@
1
+ //FIXME: autogen this from ruby (this copied from 1.9.3p194)
2
+
3
+ // thread.c:
4
+ struct thgroup {
5
+ int enclosed;
6
+ VALUE group;
7
+ };
8
+
9
+ // enumerator.c:
10
+ struct enumerator {
11
+ VALUE obj;
12
+ ID meth;
13
+ VALUE args;
14
+ VALUE fib;
15
+ VALUE dst;
16
+ VALUE lookahead;
17
+ VALUE feedvalue;
18
+ VALUE stop_exc;
19
+ };
20
+
21
+ //
22
+ struct generator {
23
+ VALUE proc;
24
+ };
25
+
26
+ struct yielder {
27
+ VALUE proc;
28
+ };
29
+
30
+
31
+ // proc.c:
32
+ struct METHOD {
33
+ VALUE recv;
34
+ VALUE rclass;
35
+ VALUE defined_class; //FIXME: dump this
36
+ ID id;
37
+ rb_method_entry_t *me;
38
+ struct unlinked_method_entry_list_entry *ume;
39
+ };
40
+
41
+ //
42
+ #define METHOD_DEFINITIONP(m) (m->me ? m->me->def : NULL)
43
+
44
+ //class.c:
45
+ #define HAVE_RB_CLASS_TBL 1
46
+ //For some reason this fails to link directly on 1.9.3 :(
47
+
48
+ //HACK:
49
+ #include <dlfcn.h>
50
+
51
+ inline st_table * rb_get_class_tbl(){
52
+ Dl_info info;
53
+ void* image;
54
+ if(!dladdr(rb_intern, &info) || !info.dli_fname){
55
+ return NULL;
56
+ }
57
+ image = dlopen(info.dli_fname, RTLD_NOLOAD | RTLD_GLOBAL);
58
+ // printf("Image is %p, addr is %p (%p rel)\n", image, rb_intern, ((void*)rb_intern - image));
59
+ if(image)
60
+ {
61
+ void* tbl = dlsym(image, "_rb_class_tbl");
62
+ dlclose(image);
63
+ if(tbl)
64
+ return tbl;
65
+ }
66
+
67
+ //TODO: parse sym table and calculate address?
68
+
69
+ return NULL;
70
+ }
71
+
72
+ #define ruby_current_thread ((rb_thread_t *)RTYPEDDATA_DATA(rb_thread_current()))
73
+ #define GET_THREAD() ruby_current_thread
74
+
75
+ //FIXME: get global const for it: rb_define_global_const("RUBY_ENGINE", ruby_engine_name = MKSTR(engine));
76
+ #define ruby_engine_name Qnil
77
+
78
+ #define ID_ALLOCATOR 0
79
+
80
+ //vm_trace.c
81
+ typedef enum {
82
+ RUBY_HOOK_FLAG_SAFE = 0x01,
83
+ RUBY_HOOK_FLAG_DELETED = 0x02,
84
+ RUBY_HOOK_FLAG_RAW_ARG = 0x04
85
+ } rb_hook_flag_t;
86
+ typedef struct rb_event_hook_struct {
87
+ rb_hook_flag_t hook_flags;
88
+ rb_event_flag_t events;
89
+ rb_event_hook_func_t func;
90
+ VALUE data;
91
+ struct rb_event_hook_struct *next;
92
+ } rb_event_hook_t;
93
+
94
+ //vm_backtrace.c
95
+ inline static int
96
+ calc_lineno(const rb_iseq_t *iseq, const VALUE *pc)
97
+ {
98
+ return rb_iseq_line_no(iseq, pc - iseq->iseq_encoded);
99
+ }
100
+
101
+ int rb_vm_get_sourceline(const rb_control_frame_t * cfp){
102
+ int lineno = 0;
103
+ const rb_iseq_t *iseq = cfp->iseq;
104
+
105
+ if (RUBY_VM_NORMAL_ISEQ_P(iseq)) {
106
+ lineno = calc_lineno(cfp->iseq, cfp->pc);
107
+ }
108
+ return lineno;
109
+ }
@@ -1,3 +1,3 @@
1
1
  module HeapDump
2
- VERSION = "0.0.32"
2
+ VERSION = "0.0.33"
3
3
  end
data/lib/heap_dump.rb CHANGED
@@ -11,13 +11,13 @@ module HeapDump
11
11
  end
12
12
 
13
13
  # provides an object count - like ObjectSpace.count_objects, but also for user classes
14
- def self.count_objects namespaces_array, gc=false
15
- unless namespaces_array.is_a?(Array) && namespaces_array.all?{|v|v.is_a? Symbol}
16
- if namespaces_array.is_a? Symbol
17
- namespaces_array = [namespaces_array]
14
+ def self.count_objects namespaces_array=[], gc=false
15
+ unless namespaces_array.is_a?(Array) && namespaces_array.all?{|v|v.respond_to? :to_s}
16
+ if namespaces_array.respond_to? :to_s
17
+ namespaces_array = [namespaces_array.to_s]
18
18
  else
19
19
  #TODO: actually, better way is to accept anything convertable, even module itself
20
- raise ArgumentError.new("namespaces_array must be a symbol or array of symbols")
20
+ raise ArgumentError.new("namespaces_array must be a symbol/string or array of strings/symbols")
21
21
  end
22
22
  end
23
23
  prefixes_array = namespaces_array.map{|c| c.to_s}
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: heap_dump
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.32
4
+ version: 0.0.33
5
5
  prerelease:
6
6
  platform: ruby
7
7
  authors:
@@ -9,7 +9,7 @@ authors:
9
9
  autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
- date: 2012-11-21 00:00:00.000000000 Z
12
+ date: 2012-12-14 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
15
  name: debugger-ruby_core_source
@@ -85,6 +85,9 @@ files:
85
85
  - ext/heap_dump/specific/ruby-2.0.0/fiber.h
86
86
  - ext/heap_dump/specific/ruby-2.0.0/gc_internal.h
87
87
  - ext/heap_dump/specific/ruby-2.0.0/internal_typed_data.h
88
+ - ext/heap_dump/specific/ruby-2.0.0_preview1/fiber.h
89
+ - ext/heap_dump/specific/ruby-2.0.0_preview1/gc_internal.h
90
+ - ext/heap_dump/specific/ruby-2.0.0_preview1/internal_typed_data.h
88
91
  - heap_dump.gemspec
89
92
  - lib/heap_dump.rb
90
93
  - lib/heap_dump/version.rb
@@ -106,6 +109,9 @@ required_rubygems_version: !ruby/object:Gem::Requirement
106
109
  - - ! '>='
107
110
  - !ruby/object:Gem::Version
108
111
  version: '0'
112
+ segments:
113
+ - 0
114
+ hash: -3329195941644190960
109
115
  requirements: []
110
116
  rubyforge_project:
111
117
  rubygems_version: 1.8.24
@@ -114,4 +120,3 @@ specification_version: 3
114
120
  summary: Allows to dump heap to track reference leaks, including leaks in proc contexts
115
121
  and fibers
116
122
  test_files: []
117
- has_rdoc: