mwrap 2.0.0.4.gd1ea → 2.1.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: a0c1d38278467de929383b9a6415bd0364aa176c18e480c68553978536d37f2b
-  data.tar.gz: 96ae575c69d8dfa9204434acd5fd3cc2aebe64d6ef2ef51cf37ad6e924aa84a5
+  metadata.gz: abf683546f4fb8c006364863d126489983a232239dce12e98ab006a667a24220
+  data.tar.gz: 3d23cf41c24d12f9e0422179c92389d63f90219307f9c7dfe71104cfd67e6060
 SHA512:
-  metadata.gz: facf03ed2e533fc8fa3faa14069101ed218e922b94cab228f5e651f89ebbd590972d44ff479374ccff9c0ed666426975fb3386df6a2eb4d2e174fb9268b8a18b
-  data.tar.gz: '0393004af3b3f22d6cc8581f239b838067764d48cfab0eb5d759b614edfb97a9f30c7e89bae658486f970d692951b4adb15ba08d12d326ccfd7191c60fff38e1'
+  metadata.gz: 2617b3932aeb6b7b3cd72fba57b0d87513b45dee6b79c5a9c3bdf9b331e9e06757cc1c896840c8111a6f6d223cbf4fdad4d3ef4b64d8e97b3502f96e48864ed9
+  data.tar.gz: 6eaab1b74b63c5800e0fd151a5662e3095d12f56ad228302aa8d0d733634e34e0ce3aa824003a06cc09414820660414cc21032afbf38a9eaa1c8805bd65a6000
ext/mwrap/mwrap.c CHANGED
@@ -23,13 +23,13 @@
 #include "jhash.h"
 
 static ID id_uminus;
-static unsigned int track_memalign;
 const char *rb_source_location_cstr(int *line); /* requires 2.6.0dev */
 extern int __attribute__((weak)) ruby_thread_has_gvl_p(void);
 extern void * __attribute__((weak)) ruby_current_execution_context_ptr;
 extern void * __attribute__((weak)) ruby_current_vm_ptr; /* for rb_gc_count */
 extern size_t __attribute__((weak)) rb_gc_count(void);
 extern VALUE __attribute__((weak)) rb_cObject;
+extern VALUE __attribute__((weak)) rb_eTypeError;
 extern VALUE __attribute__((weak)) rb_yield(VALUE);
 
 static size_t total_bytes_inc, total_bytes_dec;
@@ -37,6 +37,16 @@ static size_t total_bytes_inc, total_bytes_dec;
 /* true for glibc/dlmalloc/ptmalloc, not sure about jemalloc */
 #define ASSUMED_MALLOC_ALIGNMENT (sizeof(void *) * 2)
 
+/* match values in Ruby gc.c */
+#define HEAP_PAGE_ALIGN_LOG 14
+enum {
+	HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
+	REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
+	HEAP_PAGE_SIZE = (HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC)
+};
+
+#define IS_HEAP_PAGE_BODY ((struct src_loc *)-1)
+
 int __attribute__((weak)) ruby_thread_has_gvl_p(void)
 {
 	return 0;
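For reference, on a 64-bit platform these constants work out as below (a quick sketch assuming sizeof(size_t) == 8; the values mirror Ruby's gc.c, per the comment above). The internal_memalign() change later in this diff matches on exactly this (alignment, size) pair to recognize heap page body allocations:

    # the C enum above, evaluated for a 64-bit platform
    heap_page_align = 1 << 14                   # => 16384 (16 KiB)
    required_size_by_malloc = 8 * 5             # => 40
    heap_page_size = heap_page_align - required_size_by_malloc # => 16344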
@@ -96,7 +106,6 @@ lfht_new(void)
 __attribute__((constructor)) static void resolve_malloc(void)
 {
 	int err;
-	const char *opt;
 	++locating;
 
 #ifdef __FreeBSD__
@@ -140,11 +149,6 @@ __attribute__((constructor)) static void resolve_malloc(void)
 	if (err)
 		fprintf(stderr, "pthread_atfork failed: %s\n", strerror(err));
 	page_size = sysconf(_SC_PAGESIZE);
-	opt = getenv("MWRAP");
-	if (opt && (opt = strstr(opt, "memalign:"))) {
-		if (!sscanf(opt, "memalign:%u", &track_memalign))
-			fprintf(stderr, "not an unsigned int: %s\n", opt);
-	}
 	--locating;
 }
 
@@ -213,6 +217,32 @@ static int has_ec_p(void)
 		ruby_current_execution_context_ptr);
 }
 
+struct acc {
+	uint64_t nr;
+	int64_t min;
+	int64_t max;
+	double m2;
+	double mean;
+};
+
+#define ACC_INIT(name) { .nr=0, .min=INT64_MAX, .max=-1, .m2=0, .mean=0 }
+
+/* for tracking 16K-aligned heap page bodies (protected by GVL) */
+struct {
+	pthread_mutex_t lock;
+	struct cds_list_head bodies;
+	struct cds_list_head freed;
+
+	struct acc alive;
+	struct acc reborn;
+} hpb_stats = {
+	.lock = PTHREAD_MUTEX_INITIALIZER,
+	.bodies = CDS_LIST_HEAD_INIT(hpb_stats.bodies),
+	.freed = CDS_LIST_HEAD_INIT(hpb_stats.freed),
+	.alive = ACC_INIT(hpb_stats.alive),
+	.reborn = ACC_INIT(hpb_stats.reborn)
+};
+
 /* allocated via real_malloc/real_free */
 struct src_loc {
 	pthread_mutex_t *mtx;
@@ -237,6 +267,9 @@ struct alloc_hdr {
 			struct src_loc *loc;
 		} live;
 		struct rcu_head dead;
+		struct {
+			size_t at; /* rb_gc_count() */
+		} hpb_freed;
 	} as;
 	void *real; /* what to call real_free on */
 	size_t size;
@@ -276,6 +309,64 @@ static int loc_eq(struct cds_lfht_node *node, const void *key)
 		memcmp(k->k, existing->k, loc_size(k)) == 0);
 }
 
+/* note: not atomic */
+static void
+acc_add(struct acc *acc, size_t val)
+{
+	double delta = val - acc->mean;
+	uint64_t nr = ++acc->nr;
+
+	/* just don't divide-by-zero if we ever hit this (unlikely :P) */
+	if (nr)
+		acc->mean += delta / nr;
+
+	acc->m2 += delta * (val - acc->mean);
+	if ((int64_t)val < acc->min)
+		acc->min = (int64_t)val;
+	if ((int64_t)val > acc->max)
+		acc->max = (int64_t)val;
+}
+
+#if SIZEOF_LONG == 8
+# define INT64toNUM(x) LONG2NUM((long)x)
+#elif defined(HAVE_LONG_LONG) && SIZEOF_LONG_LONG == 8
+# define INT64toNUM(x) LL2NUM((LONG_LONG)x)
+#endif
+
+static VALUE
+acc_max(const struct acc *acc)
+{
+	return INT64toNUM(acc->max);
+}
+
+static VALUE
+acc_min(const struct acc *acc)
+{
+	return acc->min == INT64_MAX ? INT2FIX(-1) : INT64toNUM(acc->min);
+}
+
+static VALUE
+acc_mean(const struct acc *acc)
+{
+	return DBL2NUM(acc->nr ? acc->mean : HUGE_VAL);
+}
+
+static double
+acc_stddev_dbl(const struct acc *acc)
+{
+	if (acc->nr > 1) {
+		double variance = acc->m2 / (acc->nr - 1);
+		return sqrt(variance);
+	}
+	return 0.0;
+}
+
+static VALUE
+acc_stddev(const struct acc *acc)
+{
+	return DBL2NUM(acc_stddev_dbl(acc));
+}
+
 static struct src_loc *totals_add_rcu(struct src_loc *k)
 {
 	struct cds_lfht_iter iter;
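The acc_add/acc_stddev_dbl pair above is Welford's online algorithm for a running mean and sample variance, with min/max tracking bolted on. A minimal Ruby sketch of the same update rule (the Acc class name is illustrative, not part of mwrap's API):

    # Welford's online mean/variance, mirroring acc_add/acc_stddev_dbl above
    class Acc
      attr_reader :nr, :min, :max, :mean

      def initialize
        @nr = 0
        @min = Float::INFINITY # stands in for the C INT64_MAX sentinel
        @max = -1
        @m2 = 0.0
        @mean = 0.0
      end

      def add(val)
        @nr += 1
        delta = val - @mean
        @mean += delta / @nr
        @m2 += delta * (val - @mean) # uses the updated mean, as Welford requires
        @min = val if val < @min
        @max = val if val > @max
      end

      def stddev
        @nr > 1 ? Math.sqrt(@m2 / (@nr - 1)) : 0.0 # sample stddev (n - 1)
      end
    end

    acc = Acc.new
    [3, 5, 7].each { |v| acc.add(v) }
    acc.mean   # => 5.0
    acc.stddev # => 2.0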
@@ -391,7 +482,7 @@ void free(void *p)
 	struct src_loc *l = h->as.live.loc;
 
 	if (!real_free) return; /* oh well, leak a little */
-	if (l) {
+	if (l && l != IS_HEAP_PAGE_BODY) {
 		size_t age = generation - h->as.live.gen;
 
 		uatomic_add(&total_bytes_dec, h->size);
@@ -406,8 +497,20 @@ void free(void *p)
 		mutex_unlock(l->mtx);
 
 		call_rcu(&h->as.dead, free_hdr_rcu);
-	}
-	else {
+	} else if (l == IS_HEAP_PAGE_BODY) {
+		size_t gen = generation;
+		size_t age = gen - h->as.live.gen;
+
+		h->as.hpb_freed.at = gen;
+
+		mutex_lock(&hpb_stats.lock);
+		acc_add(&hpb_stats.alive, age);
+
+		/* hpb_stats.bodies => hpb_stats.freed */
+		cds_list_move(&h->anode, &hpb_stats.freed);
+
+		mutex_unlock(&hpb_stats.lock);
+	} else {
 		real_free(h->real);
 	}
 }
@@ -434,7 +537,7 @@ static size_t size_align(size_t size, size_t alignment)
 	return ((size + (alignment - 1)) & ~(alignment - 1));
 }
 
-static bool ptr_is_aligned(void *ptr, size_t alignment)
+static bool ptr_is_aligned(const void *ptr, size_t alignment)
 {
 	return ((uintptr_t)ptr & (alignment - 1)) == 0;
 }
@@ -473,18 +576,66 @@ internal_memalign(void **pp, size_t alignment, size_t size, uintptr_t caller)
 	    __builtin_add_overflow(asize, sizeof(struct alloc_hdr), &asize))
 		return ENOMEM;
 
-	/* assert(asize == (alignment + size + sizeof(struct alloc_hdr))); */
-	l = track_memalign ? update_stats_rcu_lock(size, caller) : 0;
-	real = real_malloc(asize);
-	if (real) {
-		void *p = hdr2ptr(real);
-		if (!ptr_is_aligned(p, alignment))
-			p = ptr_align(p, alignment);
-		h = ptr2hdr(p);
-		alloc_insert_rcu(l, h, size, real);
+
+	if (alignment == HEAP_PAGE_ALIGN && size == HEAP_PAGE_SIZE) {
+		if (has_ec_p()) generation = rb_gc_count();
+		l = IS_HEAP_PAGE_BODY;
+	} else {
+		l = update_stats_rcu_lock(size, caller);
+	}
+
+	if (l == IS_HEAP_PAGE_BODY) {
+		void *p;
+		size_t gen = generation;
+
+		mutex_lock(&hpb_stats.lock);
+
+		/* reuse existing entry */
+		if (!cds_list_empty(&hpb_stats.freed)) {
+			size_t deathspan;
+
+			h = cds_list_first_entry(&hpb_stats.freed,
+						 struct alloc_hdr, anode);
+			/* hpb_stats.freed => hpb_stats.bodies */
+			cds_list_move(&h->anode, &hpb_stats.bodies);
+			assert(h->size == size);
+			assert(h->real);
+			real = h->real;
+			p = hdr2ptr(h);
+			assert(ptr_is_aligned(p, alignment));
+
+			deathspan = gen - h->as.hpb_freed.at;
+			acc_add(&hpb_stats.reborn, deathspan);
+		}
+		else {
+			real = real_malloc(asize);
+			if (!real) return ENOMEM;
+
+			p = hdr2ptr(real);
+			if (!ptr_is_aligned(p, alignment))
+				p = ptr_align(p, alignment);
+			h = ptr2hdr(p);
+			h->size = size;
+			h->real = real;
+			cds_list_add(&h->anode, &hpb_stats.bodies);
+		}
+		mutex_unlock(&hpb_stats.lock);
+		h->as.live.loc = l;
+		h->as.live.gen = gen;
 		*pp = p;
 	}
-	update_stats_rcu_unlock(l);
+	else {
+		real = real_malloc(asize);
+		if (real) {
+			void *p = hdr2ptr(real);
+			if (!ptr_is_aligned(p, alignment))
+				p = ptr_align(p, alignment);
+			h = ptr2hdr(p);
+			alloc_insert_rcu(l, h, size, real);
+			update_stats_rcu_unlock(l);
+			*pp = p;
+		}
+	}
 
 	return real ? 0 : ENOMEM;
 }
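To make the flow above concrete, here is an illustrative Ruby model (not mwrap's API) of the bookkeeping shared by internal_memalign() and free(): live page bodies sit on hpb_stats.bodies, freed ones are parked on hpb_stats.freed for possible reuse, lifespans feed the `alive` accumulator, and reuse gaps feed `reborn`:

    # toy model of the bodies/freed lists; Body and the lambdas are
    # illustrative names, the real accounting lives in the C above
    Body = Struct.new(:gen, :freed_at)
    bodies, freed = [], []
    gc_count = 0 # stands in for rb_gc_count()

    alloc = lambda do
      if (h = freed.shift)                          # reuse: freed => bodies
        puts "deathspan: #{gc_count - h.freed_at}"  # feeds hpb_stats.reborn
      else
        h = Body.new                                # fresh real_malloc
      end
      h.gen = gc_count
      bodies << h
      h
    end

    free = lambda do |h|                            # bodies => freed
      puts "lifespan: #{gc_count - h.gen}"          # feeds hpb_stats.alive
      h.freed_at = gc_count
      freed << bodies.delete(h)
    end

    h = alloc.call
    gc_count += 3
    free.call(h)   # prints "lifespan: 3"
    gc_count += 2
    alloc.call     # prints "deathspan: 2"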
@@ -1042,16 +1193,89 @@ static VALUE mwrap_quiet(VALUE mod)
 	return rb_ensure(rb_yield, SIZET2NUM(cur), reset_locating, 0);
 }
 
+/*
+ * total bytes allocated as tracked by mwrap
+ */
 static VALUE total_inc(VALUE mod)
 {
 	return SIZET2NUM(total_bytes_inc);
 }
 
+/*
+ * total bytes freed as tracked by mwrap
+ */
 static VALUE total_dec(VALUE mod)
 {
 	return SIZET2NUM(total_bytes_dec);
 }
 
+static VALUE hpb_each_yield(VALUE ignore)
+{
+	struct alloc_hdr *h, *next;
+
+	cds_list_for_each_entry_safe(h, next, &hpb_stats.bodies, anode) {
+		VALUE v[2]; /* [ generation, address ] */
+		void *addr = hdr2ptr(h);
+		assert(ptr_is_aligned(addr, HEAP_PAGE_ALIGN));
+		v[0] = LONG2NUM((long)addr);
+		v[1] = SIZET2NUM(h->as.live.gen);
+		rb_yield_values2(2, v);
+	}
+	return Qnil;
+}
+
+/*
+ * call-seq:
+ *
+ *	Mwrap::HeapPageBody.each { |gen, addr| } -> Integer
+ *
+ * Yields the generation (GC.count) the heap page body was created
+ * and address of the heap page body as an Integer.  Returns the
+ * number of allocated pages as an Integer.  This return value should
+ * match the result of GC.stat(:heap_allocated_pages)
+ */
+static VALUE hpb_each(VALUE mod)
+{
+	++locating;
+	return rb_ensure(hpb_each_yield, Qfalse, reset_locating, 0);
+}
+
+/*
+ * call-seq:
+ *
+ *	Mwrap::HeapPageBody.stat -> Hash
+ *	Mwrap::HeapPageBody.stat(hash) -> hash
+ *
+ * The maximum lifespan of a heap page body in the Ruby VM.
+ * This may be Infinity if no heap page bodies were ever freed.
+ */
+static VALUE hpb_stat(int argc, VALUE *argv, VALUE hpb)
+{
+	VALUE h;
+
+	rb_scan_args(argc, argv, "01", &h);
+	if (NIL_P(h))
+		h = rb_hash_new();
+	else if (!RB_TYPE_P(h, T_HASH))
+		rb_raise(rb_eTypeError, "not a hash %+"PRIsVALUE, h);
+
+	++locating;
+#define S(x) ID2SYM(rb_intern(#x))
+	rb_hash_aset(h, S(lifespan_max), acc_max(&hpb_stats.alive));
+	rb_hash_aset(h, S(lifespan_min), acc_min(&hpb_stats.alive));
+	rb_hash_aset(h, S(lifespan_mean), acc_mean(&hpb_stats.alive));
+	rb_hash_aset(h, S(lifespan_stddev), acc_stddev(&hpb_stats.alive));
+	rb_hash_aset(h, S(deathspan_max), acc_max(&hpb_stats.reborn));
+	rb_hash_aset(h, S(deathspan_min), acc_min(&hpb_stats.reborn));
+	rb_hash_aset(h, S(deathspan_mean), acc_mean(&hpb_stats.reborn));
+	rb_hash_aset(h, S(deathspan_stddev), acc_stddev(&hpb_stats.reborn));
+	rb_hash_aset(h, S(resurrects), SIZET2NUM(hpb_stats.reborn.nr));
+#undef S
+	--locating;
+
+	return h;
+}
+
 /*
  * Document-module: Mwrap
  *
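A short usage sketch of the two new singleton methods (this assumes the process is running with mwrap's malloc wrappers preloaded; the `|addr, gen|` yield order follows the gem's own tests):

    require 'mwrap'

    stat = Mwrap::HeapPageBody.stat
    stat[:lifespan_max]  # => Integer (-1 until a heap page body is freed)
    stat[:lifespan_mean] # => Float (Infinity until a heap page body is freed)
    stat[:resurrects]    # => Integer: how often a freed body was reused

    nr = 0
    Mwrap::HeapPageBody.each do |addr, gen|
      nr += 1 # addr is 16K-aligned; gen is GC.count at allocation time
    end
    nr == GC.stat(:heap_allocated_pages) # => expected to be true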
@@ -1070,20 +1294,19 @@ static VALUE total_dec(VALUE mod)
  * * dump_fd: a writable FD to dump to
  * * dump_path: a path to dump to, the file is opened in O_APPEND mode
  * * dump_min: the minimum allocation size (total) to dump
- * * memalign: use `1' to enable tracking the memalign family
+ * * dump_heap: mask of heap_page_body statistics to dump
  *
  * If both `dump_fd' and `dump_path' are specified, dump_path takes
  * precedence.
  *
- * Tracking the memalign family of functions is misleading for Ruby
- * applications, as heap page allocations can happen anywhere a
- * Ruby object is allocated, even in the coldest code paths.
- * Furthermore, it is rarely-used outside of the Ruby object allocator.
- * Thus tracking memalign functions is disabled by default.
+ * dump_heap bitmask
+ * * 0x01 - summary stats (same info as HeapPageBody.stat)
+ * * 0x02 - all live heaps (similar to HeapPageBody.each)
+ * * 0x04 - skip non-heap_page_body-related output
  */
 void Init_mwrap(void)
 {
-	VALUE mod;
+	VALUE mod, hpb;
 
 	++locating;
 	mod = rb_define_module("Mwrap");
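For example, `dump_heap` combines with the existing options in the MWRAP environment variable; a sketch, assuming the gem's `mwrap` LD_PRELOAD wrapper script is on PATH and using an illustrative dump path (5 == 0x1 | 0x4: summary stats only, skipping the regular allocation dump, the same combination the test suite uses):

    env = { 'MWRAP' => 'dump_path:/tmp/mwrap.heap,dump_heap:5' }
    system(env, 'mwrap', 'ruby', '-e', '(1..10_000).map(&:to_s)')
    puts File.read('/tmp/mwrap.heap') # lifespan_*/deathspan_*/gc_count lines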
@@ -1105,6 +1328,8 @@ void Init_mwrap(void)
 	rb_define_singleton_method(mod, "quiet", mwrap_quiet, 0);
 	rb_define_singleton_method(mod, "total_bytes_allocated", total_inc, 0);
 	rb_define_singleton_method(mod, "total_bytes_freed", total_dec, 0);
+
+
 	rb_define_method(cSrcLoc, "each", src_loc_each, 0);
 	rb_define_method(cSrcLoc, "frees", src_loc_frees, 0);
 	rb_define_method(cSrcLoc, "allocations", src_loc_allocations, 0);
@@ -1112,9 +1337,68 @@ void Init_mwrap(void)
 	rb_define_method(cSrcLoc, "mean_lifespan", src_loc_mean_lifespan, 0);
 	rb_define_method(cSrcLoc, "max_lifespan", src_loc_max_lifespan, 0);
 	rb_define_method(cSrcLoc, "name", src_loc_name, 0);
+
+	/*
+	 * Information about "struct heap_page_body" allocations from
+	 * Ruby gc.c.  This can be useful for tracking fragmentation
+	 * from posix_memalign(3) use in mainline Ruby:
+	 *
+	 *	https://sourceware.org/bugzilla/show_bug.cgi?id=14581
+	 *
+	 * These statistics are never reset by Mwrap.reset or
+	 * any other method.  They only make sense in the context
+	 * of an entire program lifetime.
+	 */
+	hpb = rb_define_class_under(mod, "HeapPageBody", rb_cObject);
+	rb_define_singleton_method(hpb, "stat", hpb_stat, -1);
+	rb_define_singleton_method(hpb, "each", hpb_each, 0);
+
 	--locating;
 }
 
+enum {
+	DUMP_HPB_STATS = 0x1,
+	DUMP_HPB_EACH = 0x2,
+	DUMP_HPB_EXCL = 0x4,
+};
+
+static void dump_hpb(FILE *fp, unsigned flags)
+{
+	if (flags & DUMP_HPB_STATS) {
+		fprintf(fp,
+			"lifespan_max: %"PRId64"\n"
+			"lifespan_min:%s%"PRId64"\n"
+			"lifespan_mean: %0.3f\n"
+			"lifespan_stddev: %0.3f\n"
+			"deathspan_max: %"PRId64"\n"
+			"deathspan_min:%s%"PRId64"\n"
+			"deathspan_mean: %0.3f\n"
+			"deathspan_stddev: %0.3f\n"
+			"gc_count: %zu\n",
+			hpb_stats.alive.max,
+			hpb_stats.alive.min == INT64_MAX ? " -" : " ",
+			hpb_stats.alive.min,
+			hpb_stats.alive.mean,
+			acc_stddev_dbl(&hpb_stats.alive),
+			hpb_stats.reborn.max,
+			hpb_stats.reborn.min == INT64_MAX ? " -" : " ",
+			hpb_stats.reborn.min,
+			hpb_stats.reborn.mean,
+			acc_stddev_dbl(&hpb_stats.reborn),
+			/* n.b.: unsafe to call rb_gc_count() in destructor */
+			generation);
+	}
+	if (flags & DUMP_HPB_EACH) {
+		struct alloc_hdr *h;
+
+		cds_list_for_each_entry(h, &hpb_stats.bodies, anode) {
+			void *addr = hdr2ptr(h);
+
+			fprintf(fp, "%p\t%zu\n", addr, h->as.live.gen);
+		}
+	}
+}
+
 /* rb_cloexec_open isn't usable by non-Ruby processes */
 #ifndef O_CLOEXEC
 # define O_CLOEXEC 0
@@ -1125,10 +1409,12 @@ static void mwrap_dump_destructor(void)
 {
 	const char *opt = getenv("MWRAP");
 	const char *modes[] = { "a", "a+", "w", "w+", "r+" };
-	struct dump_arg a;
+	struct dump_arg a = { .min = 0 };
 	size_t i;
 	int dump_fd;
+	unsigned dump_heap = 0;
 	char *dump_path;
+	char *s;
 
 	if (!opt)
 		return;
@@ -1155,8 +1441,11 @@ static void mwrap_dump_destructor(void)
 	else if (!sscanf(opt, "dump_fd:%d", &dump_fd))
 		goto out;
 
-	if (!sscanf(opt, "dump_min:%zu", &a.min))
-		a.min = 0;
+	if ((s = strstr(opt, "dump_min:")))
+		sscanf(s, "dump_min:%zu", &a.min);
+
+	if ((s = strstr(opt, "dump_heap:")))
+		sscanf(s, "dump_heap:%u", &dump_heap);
 
 	switch (dump_fd) {
 	case 0: goto out;
@@ -1177,7 +1466,9 @@ static void mwrap_dump_destructor(void)
 		}
 		/* we'll leak some memory here, but this is a destructor */
 	}
-	dump_to_file(&a);
+	if ((dump_heap & DUMP_HPB_EXCL) == 0)
+		dump_to_file(&a);
+	dump_hpb(a.fp, dump_heap);
 out:
 	--locating;
 }
lib/mwrap_rack.rb CHANGED
@@ -92,6 +92,53 @@ class MwrapRack
     end
   end
 
+  class HeapPages # :nodoc:
+    include HtmlResponse
+    HEADER = '<tr><th>address</th><th>generation</th></tr>'
+
+    def hpb_rows
+      Mwrap::HeapPageBody.stat(stat = Thread.current[:mwrap_hpb_stat] ||= {})
+      %i(lifespan_max lifespan_min lifespan_mean lifespan_stddev
+         deathspan_max deathspan_min deathspan_mean deathspan_stddev
+         resurrects
+        ).map! do |k|
+        "<tr><td>#{k}</td><td>#{stat[k]}</td></tr>\n"
+      end.join
+    end
+
+    def gc_stat_rows
+      GC.stat(stat = Thread.current[:mwrap_gc_stat] ||= {})
+      %i(count heap_allocated_pages heap_eden_pages heap_tomb_pages
+         total_allocated_pages total_freed_pages).map do |k|
+        "<tr><td>GC.stat(:#{k})</td><td>#{stat[k]}</td></tr>\n"
+      end.join
+    end
+
+    GC_STAT_URL = 'https://docs.ruby-lang.org/en/trunk/GC.html#method-c-stat'
+    GC_STAT_HELP = <<~""
+      <p>Non-Infinity lifespans can indicate fragmentation.
+      <p>See <a
+      href="#{GC_STAT_URL}">#{GC_STAT_URL}</a> for info on GC.stat values.
+
+    def each
+      Mwrap.quiet do
+        yield("<html><head><title>heap pages</title></head>" \
+              "<body><h1>heap pages</h1>" \
+              "<table><tr><th>stat</th><th>value</th></tr>\n" \
+              "#{hpb_rows}" \
+              "#{gc_stat_rows}" \
+              "</table>\n" \
+              "#{GC_STAT_HELP}" \
+              "<table>#{HEADER}")
+        Mwrap::HeapPageBody.each do |addr, generation|
+          addr = -sprintf('0x%x', addr)
+          yield(-"<tr><td>#{addr}</td><td>#{generation}</td></tr>\n")
+        end
+        yield "</table></body></html>\n"
+      end
+    end
+  end
+
   def r404 # :nodoc:
     [404,{'Content-Type'=>'text/plain'},["Not found\n"]]
   end
@@ -107,12 +154,16 @@ class MwrapRack
       loc = -CGI.unescape($1)
       loc = Mwrap[loc] or return r404
       EachAt.new(loc).response
+    when '/heap_pages'
+      HeapPages.new.response
     when '/'
       n = 2000
       u = 'https://80x24.org/mwrap/README.html'
       b = -('<html><head><title>Mwrap demo</title></head>' \
           "<body><p><a href=\"each/#{n}\">allocations &gt;#{n} bytes</a>" \
-          "<p><a href=\"#{u}\">#{u}</a></body></html>\n")
+          "<p><a href=\"#{u}\">#{u}</a>" \
+          "<p><a href=\"heap_pages\">heap pages</a>" \
+          "</body></html>\n")
       [ 200, {'Content-Type'=>'text/html','Content-Length'=>-b.size.to_s},[b]]
     else
       r404
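The new endpoint is reachable wherever MwrapRack is mounted; a minimal config.ru sketch (the '/MWRAP' mount point is illustrative):

    # config.ru -- run under mwrap so the malloc hooks are active
    require 'mwrap_rack'

    map('/MWRAP') { run(MwrapRack.new) }
    run(lambda { |env| [200, { 'Content-Type' => 'text/plain' }, ["hi\n"]] })
    # GET /MWRAP/heap_pages renders HeapPageBody stats, GC.stat values,
    # and a table of live heap page addresses with their GC generation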
mwrap.gemspec CHANGED
@@ -12,7 +12,7 @@ desc = `git describe --abbrev=4 HEAD`.strip.tr('-', '.').delete_prefix('v')
 
 Gem::Specification.new do |s|
   s.name = 'mwrap'
-  s.version = desc.empty? ? '2.0.0' : desc
+  s.version = desc.empty? ? '2.1.0' : desc
   s.homepage = 'https://80x24.org/mwrap/'
   s.authors = ["Ruby hackers"]
   s.summary = 'LD_PRELOAD malloc wrapper for Ruby'
test/test_mwrap.rb CHANGED
@@ -58,6 +58,13 @@ class TestMwrap < Test::Unit::TestCase
       res = system(env, *cmd)
       assert res, $?.inspect
       assert_match(/\b10001\s+1\s+-e:1$/, tmp.read)
+
+      tmp.rewind
+      tmp.truncate(0)
+      env['MWRAP'] = "dump_path:#{tmp.path},dump_heap:5"
+      res = system(env, *cmd)
+      assert res, $?.inspect
+      assert_match %r{lifespan_stddev}, tmp.read
     end
   end
 
@@ -283,4 +290,33 @@ class TestMwrap < Test::Unit::TestCase
       abort 'freed more than allocated'
     end;
   end
+
+  def test_heap_page_body
+    assert_separately(+"#{<<~"begin;"}\n#{<<~'end;'}")
+    begin;
+      require 'mwrap'
+      require 'rubygems' # use up some memory
+      ap = GC.stat(:heap_allocated_pages)
+      h = {}
+      nr = 0
+      Mwrap::HeapPageBody.each do |addr, gen|
+        nr += 1
+        gen <= GC.count && gen >= 0 or abort "bad generation: #{gen}"
+        (0 == (addr & 16383)) or abort "addr not aligned: #{'%x' % addr}"
+      end
+      nr == ap or abort 'HeapPageBody.each missed page'
+      10.times { (1..20000).to_a.map(&:to_s) }
+      3.times { GC.start }
+      Mwrap::HeapPageBody.stat(h)
+      Integer === h[:lifespan_max] or abort 'lifespan_max not recorded'
+      Integer === h[:lifespan_min] or abort 'lifespan_min not recorded'
+      Float === h[:lifespan_mean] or abort 'lifespan_mean not recorded'
+      3.times { GC.start }
+      10.times { (1..20000).to_a.map(&:to_s) }
+      Mwrap::HeapPageBody.stat(h)
+      h[:deathspan_min] <= h[:deathspan_max] or
+        abort 'wrong min/max deathtime'
+      Float === h[:deathspan_mean] or abort 'deathspan_mean not recorded'
+    end;
+  end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: mwrap
 version: !ruby/object:Gem::Version
-  version: 2.0.0.4.gd1ea
+  version: 2.1.0
 platform: ruby
 authors:
 - Ruby hackers
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-07-26 00:00:00.000000000 Z
+date: 2018-08-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: test-unit
@@ -77,9 +77,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ">"
+  - - ">="
     - !ruby/object:Gem::Version
-      version: 1.3.1
+      version: '0'
 requirements: []
 rubyforge_project:
 rubygems_version: 2.7.7