allocation_tracer 0.1.1 → 0.2.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 434fc304eec817d33faa852fed47fe0313b5ac3a
4
- data.tar.gz: ec7b3a9e1f126cfaf7cbff508b9613eb93547334
3
+ metadata.gz: 79049581513cae857637ce2a806116bb7f4f409a
4
+ data.tar.gz: 0d24ddb1dd4328def9f76afce88761364fcea0e8
5
5
  SHA512:
6
- metadata.gz: eeb8744fbad790fada8a84f78cb83c6d58c1dd62044125a21f6770f77392c22a9d2400fa13814eaf7f6a8f76fbc8da125bb56942500a78c1e474c3333aaac6c5
7
- data.tar.gz: 5c7baa58e8f347acd5d873112d1f9fe6ee3ce84e55b626cfbcd2d61d48c7cf10a49e10a39d6692044d549ae168ac36c70005b1108a9c1a43a947d8cdbca328f9
6
+ metadata.gz: 6e5d80fbdfd2e5bde9c13d05ccebc4ec3bd87425b0599e83d277fe690366d428352f8acaa2892a92b9dc6d4f7d23a440978412692e5c245b3bc5633b661fec69
7
+ data.tar.gz: 81f1ab724b85bb73c079490c065b1649a4ef721a6e84ccd7e5e2a3b1f97c023628e82b2eddd4c5517c433b89dee017cb6603190931b291ff68dcd26244116316
@@ -7,6 +7,7 @@
7
7
 
8
8
  #include "ruby/ruby.h"
9
9
  #include "ruby/debug.h"
10
+ #include <assert.h>
10
11
 
11
12
  size_t rb_obj_memsize_of(VALUE obj); /* in gc.c */
12
13
 
@@ -223,7 +224,7 @@ newobj_i(VALUE tpval, void *data)
223
224
  info->flags = RBASIC(obj)->flags;
224
225
  info->living = 1;
225
226
  info->memsize = 0;
226
- info->klass = RTEST(klass) ? rb_class_real(klass) : Qnil;
227
+ info->klass = (RTEST(klass) && !RB_TYPE_P(obj, T_NODE)) ? rb_class_real(klass) : Qnil;
227
228
  info->generation = rb_gc_count();
228
229
 
229
230
  info->path = path_cstr;
@@ -235,69 +236,73 @@ newobj_i(VALUE tpval, void *data)
235
236
  /* file, line, type, klass */
236
237
  #define MAX_KEY_SIZE 4
237
238
 
238
- void
239
- aggregator_i(void *data)
239
+ static void
240
+ aggregate_each_info(struct traceobj_arg *arg, struct allocation_info *info, size_t gc_count)
240
241
  {
241
- size_t gc_count = rb_gc_count();
242
- struct traceobj_arg *arg = (struct traceobj_arg *)data;
243
- struct allocation_info *info = arg->freed_allocation_info;
242
+ st_data_t key, val;
243
+ struct memcmp_key_data key_data;
244
+ size_t *val_buff;
245
+ size_t age = (int)(gc_count - info->generation);
246
+ int i = 0;
244
247
 
245
- arg->freed_allocation_info = NULL;
248
+ if (arg->keys & KEY_PATH) {
249
+ key_data.data[i++] = (st_data_t)info->path;
250
+ }
251
+ if (arg->keys & KEY_LINE) {
252
+ key_data.data[i++] = (st_data_t)info->line;
253
+ }
254
+ if (arg->keys & KEY_TYPE) {
255
+ key_data.data[i++] = (st_data_t)(info->flags & T_MASK);
256
+ }
257
+ if (arg->keys & KEY_CLASS) {
258
+ key_data.data[i++] = info->klass;
259
+ }
260
+ key_data.n = i;
261
+ key = (st_data_t)&key_data;
246
262
 
247
- while (info) {
248
- struct allocation_info *next_info = info->next;
249
- st_data_t key, val;
250
- struct memcmp_key_data key_data;
251
- size_t *val_buff;
252
- size_t age = (int)(gc_count - info->generation);
253
- int i;
263
+ if (st_lookup(arg->aggregate_table, key, &val) == 0) {
264
+ struct memcmp_key_data *key_buff = ruby_xmalloc(sizeof(int) + sizeof(st_data_t) * key_data.n);
265
+ key_buff->n = key_data.n;
254
266
 
255
- i = 0;
256
- if (arg->keys & KEY_PATH) {
257
- key_data.data[i++] = (st_data_t)info->path;
258
- }
259
- if (arg->keys & KEY_LINE) {
260
- key_data.data[i++] = (st_data_t)info->line;
261
- }
262
- if (arg->keys & KEY_TYPE) {
263
- key_data.data[i++] = (st_data_t)(info->flags & T_MASK);
264
- }
265
- if (arg->keys & KEY_CLASS) {
266
- key_data.data[i++] = info->klass;
267
+ for (i=0; i<key_data.n; i++) {
268
+ key_buff->data[i] = key_data.data[i];
267
269
  }
268
- key_data.n = i;
269
- key = (st_data_t)&key_data;
270
+ key = (st_data_t)key_buff;
270
271
 
271
- if (st_lookup(arg->aggregate_table, key, &val) == 0) {
272
- struct memcmp_key_data *key_buff = ruby_xmalloc(sizeof(int) + sizeof(st_data_t) * key_data.n);
273
- key_buff->n = key_data.n;
272
+ /* count, total age, max age, min age */
273
+ val_buff = ALLOC_N(size_t, 6);
274
+ val_buff[0] = val_buff[1] = val_buff[2] = 0;
275
+ val_buff[3] = val_buff[4] = age;
276
+ val_buff[5] = 0;
274
277
 
275
- for (i=0; i<key_data.n; i++) {
276
- key_buff->data[i] = key_data.data[i];
277
- }
278
- key = (st_data_t)key_buff;
278
+ if (arg->keys & KEY_PATH) keep_unique_str(arg->str_table, info->path);
279
279
 
280
- /* count, total age, max age, min age */
281
- val_buff = ALLOC_N(size_t, 6);
282
- val_buff[0] = val_buff[1] = val_buff[2] = 0;
283
- val_buff[3] = val_buff[4] = age;
284
- val_buff[5] = 0;
280
+ st_insert(arg->aggregate_table, (st_data_t)key_buff, (st_data_t)val_buff);
281
+ }
282
+ else {
283
+ val_buff = (size_t *)val;
284
+ }
285
285
 
286
- if (arg->keys & KEY_PATH) keep_unique_str(arg->str_table, info->path);
286
+ val_buff[0] += 1;
287
+ if (info->flags & FL_PROMOTED) val_buff[1] += 1;
288
+ val_buff[2] += age;
289
+ if (val_buff[3] > age) val_buff[3] = age; /* min */
290
+ if (val_buff[4] < age) val_buff[4] = age; /* max */
291
+ val_buff[5] += info->memsize;
292
+ }
287
293
 
288
- st_insert(arg->aggregate_table, (st_data_t)key_buff, (st_data_t)val_buff);
289
- }
290
- else {
291
- val_buff = (size_t *)val;
292
- }
294
+ static void
295
+ aggregate_freed_info(void *data)
296
+ {
297
+ size_t gc_count = rb_gc_count();
298
+ struct traceobj_arg *arg = (struct traceobj_arg *)data;
299
+ struct allocation_info *info = arg->freed_allocation_info;
293
300
 
294
- val_buff[0] += 1;
295
- if (info->flags & FL_PROMOTED) val_buff[1] += 1;
296
- val_buff[2] += age;
297
- if (val_buff[3] > age) val_buff[3] = age; /* min */
298
- if (val_buff[4] < age) val_buff[4] = age; /* max */
299
- val_buff[5] += info->memsize;
301
+ arg->freed_allocation_info = NULL;
300
302
 
303
+ while (info) {
304
+ struct allocation_info *next_info = info->next;
305
+ aggregate_each_info(arg, info, gc_count);
301
306
  free_allocation_info(arg, info);
302
307
  info = next_info;
303
308
  }
@@ -318,15 +323,54 @@ freeobj_i(VALUE tpval, void *data)
318
323
  VALUE obj = rb_tracearg_object(tparg);
319
324
  struct allocation_info *info;
320
325
 
321
- if (arg->freed_allocation_info == NULL) {
322
- rb_postponed_job_register_one(0, aggregator_i, arg);
323
- }
324
-
325
326
  if (st_lookup(arg->object_table, (st_data_t)obj, (st_data_t *)&info)) {
326
327
  info->flags = RBASIC(obj)->flags;
327
328
  info->memsize = rb_obj_memsize_of(obj);
328
329
  move_to_freed_list(arg, info);
329
330
  st_delete(arg->object_table, (st_data_t *)&obj, (st_data_t *)&info);
331
+ if (arg->freed_allocation_info == NULL) {
332
+ rb_postponed_job_register_one(0, aggregate_freed_info, arg);
333
+ }
334
+ }
335
+ }
336
+
337
+ static void
338
+ enable_newobj_hook(void)
339
+ {
340
+ VALUE newobj_hook;
341
+
342
+ if ((newobj_hook = rb_ivar_get(rb_mAllocationTracer, rb_intern("newobj_hook"))) == Qnil) {
343
+ rb_raise(rb_eRuntimeError, "not started.");
344
+ }
345
+ if (rb_tracepoint_enabled_p(newobj_hook)) {
346
+ rb_raise(rb_eRuntimeError, "newobj hooks is already enabled.");
347
+ }
348
+
349
+ rb_tracepoint_enable(newobj_hook);
350
+ }
351
+
352
+ static void
353
+ disable_newobj_hook(void)
354
+ {
355
+ VALUE newobj_hook;
356
+
357
+ if ((newobj_hook = rb_ivar_get(rb_mAllocationTracer, rb_intern("newobj_hook"))) == Qnil) {
358
+ rb_raise(rb_eRuntimeError, "not started.");
359
+ }
360
+ if (rb_tracepoint_enabled_p(newobj_hook) == Qfalse) {
361
+ rb_raise(rb_eRuntimeError, "newobj hooks is already disabled.");
362
+ }
363
+
364
+ rb_tracepoint_disable(newobj_hook);
365
+ }
366
+
367
+ static void
368
+ check_tracer_running(void)
369
+ {
370
+ struct traceobj_arg * arg = get_traceobj_arg();
371
+
372
+ if (!arg->running) {
373
+ rb_raise(rb_eRuntimeError, "not started yet");
330
374
  }
331
375
  }
332
376
 
@@ -348,6 +392,26 @@ start_alloc_hooks(VALUE mod)
348
392
  rb_tracepoint_enable(freeobj_hook);
349
393
  }
350
394
 
395
+ static VALUE
396
+ stop_alloc_hooks(VALUE self)
397
+ {
398
+ struct traceobj_arg * arg = get_traceobj_arg();
399
+ check_tracer_running();
400
+
401
+ {
402
+ VALUE newobj_hook = rb_ivar_get(rb_mAllocationTracer, rb_intern("newobj_hook"));
403
+ VALUE freeobj_hook = rb_ivar_get(rb_mAllocationTracer, rb_intern("freeobj_hook"));
404
+ rb_tracepoint_disable(newobj_hook);
405
+ rb_tracepoint_disable(freeobj_hook);
406
+
407
+ clear_traceobj_arg();
408
+
409
+ arg->running = 0;
410
+ }
411
+
412
+ return Qnil;
413
+ }
414
+
351
415
  static const char *
352
416
  type_name(int type)
353
417
  {
@@ -427,7 +491,9 @@ aggregate_result_i(st_data_t key, st_data_t val, void *data)
427
491
  rb_ary_push(k, INT2FIX((int)key_buff->data[i++]));
428
492
  }
429
493
  if (arg->keys & KEY_TYPE) {
430
- rb_ary_push(k, type_symbols[key_buff->data[i++]]);
494
+ int sym_index = key_buff->data[i++];
495
+ assert(T_MASK > sym_index);
496
+ rb_ary_push(k, type_symbols[sym_index]);
431
497
  }
432
498
  if (arg->keys & KEY_CLASS) {
433
499
  VALUE klass = key_buff->data[i++];
@@ -448,17 +514,20 @@ aggregate_result_i(st_data_t key, st_data_t val, void *data)
448
514
  }
449
515
 
450
516
  static int
451
- aggregate_rest_object_i(st_data_t key, st_data_t val, void *data)
517
+ aggregate_live_object_i(st_data_t key, st_data_t val, void *data)
452
518
  {
453
- struct traceobj_arg *arg = (struct traceobj_arg *)data;
454
- struct allocation_info *info = (struct allocation_info *)val;
455
519
  VALUE obj = (VALUE)key;
520
+ struct allocation_info *info = (struct allocation_info *)val;
521
+ size_t gc_count = rb_gc_count();
522
+ struct traceobj_arg *arg = (struct traceobj_arg *)data;
456
523
 
457
- if ((info->flags & T_MASK) == (RBASIC(obj)->flags & T_MASK)) {
524
+ if (BUILTIN_TYPE(obj) == (info->flags & T_MASK)) {
525
+ VALUE klass = RBASIC_CLASS(obj);
458
526
  info->flags = RBASIC(obj)->flags;
527
+ info->klass = (RTEST(klass) && !RB_TYPE_P(obj, T_NODE)) ? rb_class_real(klass) : Qnil;
459
528
  }
460
- /* danger operation because obj can be collected. */
461
- move_to_freed_list(arg, info);
529
+
530
+ aggregate_each_info(arg, info, gc_count);
462
531
 
463
532
  return ST_CONTINUE;
464
533
  }
@@ -470,39 +539,64 @@ aggregate_result(struct traceobj_arg *arg)
470
539
  aar.result = rb_hash_new();
471
540
  aar.arg = arg;
472
541
 
473
- st_foreach(arg->object_table, aggregate_rest_object_i, (st_data_t)arg);
474
- st_clear(arg->object_table);
475
- aggregator_i(arg);
542
+ /* aggregated table -> Ruby hash */
543
+ aggregate_freed_info(arg);
476
544
  st_foreach(arg->aggregate_table, aggregate_result_i, (st_data_t)&aar);
477
- clear_traceobj_arg();
545
+
546
+ /* collect live aggregate table */
547
+ {
548
+ st_table *dead_object_aggregate_table = arg->aggregate_table;
549
+
550
+ /* make live object aggregate table */
551
+ arg->aggregate_table = st_init_table(&memcmp_hash_type);
552
+ st_foreach(arg->object_table, aggregate_live_object_i, (st_data_t)arg);
553
+
554
+ /* aggregate table -> Ruby hash */
555
+ st_foreach(arg->aggregate_table, aggregate_result_i, (st_data_t)&aar);
556
+
557
+ /* remove live object aggregate table */
558
+ st_foreach(arg->aggregate_table, free_key_values_i, 0);
559
+ st_free_table(arg->aggregate_table);
560
+
561
+ arg->aggregate_table = dead_object_aggregate_table;
562
+ }
478
563
  return aar.result;
479
564
  }
480
565
 
481
566
  static VALUE
482
- stop_allocation_tracing(VALUE self)
567
+ allocation_tracer_stop(VALUE self)
568
+ {
569
+ VALUE result = aggregate_result(get_traceobj_arg());
570
+ stop_alloc_hooks(self);
571
+ return result;
572
+ }
573
+
574
+ static VALUE
575
+ allocation_tracer_result(VALUE self)
483
576
  {
484
- struct traceobj_arg * arg = get_traceobj_arg();
577
+ VALUE result;
578
+ struct traceobj_arg *arg = get_traceobj_arg();
485
579
 
486
- if (arg->running) {
487
- VALUE newobj_hook = rb_ivar_get(rb_mAllocationTracer, rb_intern("newobj_hook"));
488
- VALUE freeobj_hook = rb_ivar_get(rb_mAllocationTracer, rb_intern("freeobj_hook"));
489
- rb_tracepoint_disable(newobj_hook);
490
- rb_tracepoint_disable(freeobj_hook);
580
+ check_tracer_running();
491
581
 
492
- arg->running = 0;
493
- }
494
- else {
495
- rb_raise(rb_eRuntimeError, "not started yet.");
496
- }
582
+ disable_newobj_hook();
583
+ result = aggregate_result(arg);
584
+ enable_newobj_hook();
585
+ return result;
586
+ }
497
587
 
588
+ static VALUE
589
+ allocation_tracer_clear(VALUE self)
590
+ {
591
+ clear_traceobj_arg();
498
592
  return Qnil;
499
593
  }
500
594
 
501
595
  static VALUE
502
- allocation_tracer_stop(VALUE self)
596
+ allocation_tracer_trace_i(VALUE self)
503
597
  {
504
- stop_allocation_tracing(self);
505
- return aggregate_result(get_traceobj_arg());
598
+ rb_yield(Qnil);
599
+ return allocation_tracer_result(self);
506
600
  }
507
601
 
508
602
  static VALUE
@@ -519,14 +613,29 @@ allocation_tracer_trace(VALUE self)
519
613
  start_alloc_hooks(rb_mAllocationTracer);
520
614
 
521
615
  if (rb_block_given_p()) {
522
- rb_ensure(rb_yield, Qnil, stop_allocation_tracing, Qnil);
523
- return aggregate_result(get_traceobj_arg());
616
+ return rb_ensure(allocation_tracer_trace_i, self, stop_alloc_hooks, Qnil);
524
617
  }
525
618
  }
526
619
 
527
620
  return Qnil;
528
621
  }
529
622
 
623
+ static VALUE
624
+ allocation_tracer_pause(VALUE self)
625
+ {
626
+ check_tracer_running();
627
+ disable_newobj_hook();
628
+ return Qnil;
629
+ }
630
+
631
+ static VALUE
632
+ allocation_tracer_resume(VALUE self)
633
+ {
634
+ check_tracer_running();
635
+ enable_newobj_hook();
636
+ return Qnil;
637
+ }
638
+
530
639
  static VALUE
531
640
  allocation_tracer_setup(int argc, VALUE *argv, VALUE self)
532
641
  {
@@ -588,7 +697,13 @@ Init_allocation_tracer(void)
588
697
 
589
698
  /* allocation tracer methods */
590
699
  rb_define_module_function(mod, "trace", allocation_tracer_trace, 0);
700
+ rb_define_module_function(mod, "start", allocation_tracer_trace, 0);
591
701
  rb_define_module_function(mod, "stop", allocation_tracer_stop, 0);
702
+ rb_define_module_function(mod, "pause", allocation_tracer_pause, 0);
703
+ rb_define_module_function(mod, "resume", allocation_tracer_resume, 0);
704
+
705
+ rb_define_module_function(mod, "result", allocation_tracer_result, 0);
706
+ rb_define_module_function(mod, "clear", allocation_tracer_clear, 0);
592
707
  rb_define_module_function(mod, "setup", allocation_tracer_setup, -1);
593
708
  rb_define_module_function(mod, "header", allocation_tracer_header, 0);
594
709
  }
@@ -1,3 +1,3 @@
1
1
  module ObjectSpace::AllocationTracer
2
- VERSION = "0.1.1"
2
+ VERSION = "0.2.0"
3
3
  end
@@ -50,6 +50,72 @@ describe ObjectSpace::AllocationTracer do
50
50
  expect(size).to be > 1234 if size > 0
51
51
  end
52
52
 
53
  # pause/resume: allocations made while paused must not be recorded.
  it 'can be paused and resumed' do
    line = __LINE__ + 2
    result = ObjectSpace::AllocationTracer.trace do
      Object.new
      ObjectSpace::AllocationTracer.pause
      Object.new # ignore tracing
      ObjectSpace::AllocationTracer.resume
      Object.new
    end

    # only the two unpaused allocations appear
    expect(result.length).to be 2
    expect(result[[__FILE__, line    ]]).to eq [1, 0, 0, 0, 0, 0]
    expect(result[[__FILE__, line + 4]]).to eq [1, 0, 0, 0, 0, 0]
  end

  # .result: a mid-trace snapshot must not disturb the final result.
  it 'can be get middle result' do
    middle_result = nil
    line = __LINE__ + 2
    result = ObjectSpace::AllocationTracer.trace do
      Object.new
      middle_result = ObjectSpace::AllocationTracer.result
      Object.new
    end

    expect(result.length).to be 2
    expect(result[[__FILE__, line    ]]).to eq [1, 0, 0, 0, 0, 0]
    expect(result[[__FILE__, line + 2]]).to eq [1, 0, 0, 0, 0, 0]

    # snapshot only sees the first allocation
    expect(middle_result.length).to be 1
    expect(middle_result[[__FILE__, line ]]).to eq [1, 0, 0, 0, 0, 0]
  end

  describe 'stop when not started yet' do
    it 'should raise RuntimeError' do
      expect do
        ObjectSpace::AllocationTracer.stop
      end.to raise_error(RuntimeError)
    end
  end

  describe 'pause when not started yet' do
    it 'should raise RuntimeError' do
      expect do
        ObjectSpace::AllocationTracer.pause
      end.to raise_error(RuntimeError)
    end
  end

  describe 'resume when not started yet' do
    it 'should raise RuntimeError' do
      expect do
        ObjectSpace::AllocationTracer.resume
      end.to raise_error(RuntimeError)
    end
  end

  # nested .trace is rejected rather than silently re-entered
  describe 'when starting recursively' do
    it 'should raise RuntimeError' do
      expect do
        ObjectSpace::AllocationTracer.trace{
          ObjectSpace::AllocationTracer.trace{}
        }
      end.to raise_error(RuntimeError)
    end
  end
118
+
53
119
  describe 'with different setup' do
54
120
  it 'should work with type' do
55
121
  line = __LINE__ + 3
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: allocation_tracer
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.1
4
+ version: 0.2.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Koichi Sasada
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2014-04-19 00:00:00.000000000 Z
11
+ date: 2014-04-21 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: bundler