mwrap 2.0.0.4.gd1ea → 2.2.0.1.g867b

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: a0c1d38278467de929383b9a6415bd0364aa176c18e480c68553978536d37f2b
- data.tar.gz: 96ae575c69d8dfa9204434acd5fd3cc2aebe64d6ef2ef51cf37ad6e924aa84a5
+ metadata.gz: b3766bf906f58529a4ddf88092f71cd4dc976e5c1dd1f147ffa30d7ebb0ddcf7
+ data.tar.gz: 594b60075684fbedb224dfbb2632df74971cd36b52d4e991e07146af9a7106cc
  SHA512:
- metadata.gz: facf03ed2e533fc8fa3faa14069101ed218e922b94cab228f5e651f89ebbd590972d44ff479374ccff9c0ed666426975fb3386df6a2eb4d2e174fb9268b8a18b
- data.tar.gz: '0393004af3b3f22d6cc8581f239b838067764d48cfab0eb5d759b614edfb97a9f30c7e89bae658486f970d692951b4adb15ba08d12d326ccfd7191c60fff38e1'
+ metadata.gz: a60e54ad4b1f024cd69ed7a968721c893310c5201516cc98d2023e9b61e54a20a1d8d1775fc6ca1f3638ebebc6ee32a4bb7c0ab6afc51b719203fbae4e955ba9
+ data.tar.gz: b9e1a20b64ddf602baf3f5a43e439b3a2742a31c45faad9725fa32fdfc4895f40ccf3949980dace0e51e3ef1cecc9b6e3674c3e0a78d2725f11711e23c4bcee5
data/.document CHANGED
@@ -1,2 +1,3 @@
  ext/mwrap/mwrap.c
  lib/mwrap_rack.rb
+ README
data/.gitignore CHANGED
@@ -4,3 +4,6 @@
  /pkg
  /*.gem
  /doc
+ /NEWS
+ /NEWS.atom.xml
+ /LATEST
data/.olddoc.yml CHANGED
@@ -5,4 +5,6 @@ rdoc_url: https://80x24.org/mwrap/
  ml_url: https://80x24.org/mwrap-public/
  public_email: mwrap-public@80x24.org
  nntp_url:
- - nntp://news.public-inbox.org/inbox.comp.lang.ruby.mwrap
+ - nntps://news.public-inbox.org/inbox.comp.lang.ruby.mwrap
+ imap_url:
+ - imaps://;AUTH=ANONYMOUS@80x24.org/inbox.comp.lang.ruby.mwrap.0
data/README CHANGED
@@ -67,16 +67,18 @@ first two columns to find the hottest malloc locations.
  mwrap 2.0.0+ also supports a Rack application endpoint;
  it is documented at:

- https://80x24.org/mwrap/MwrapRack.html
+ https://80x24.org/mwrap/MwrapRack.html

  == Known problems

  * 32-bit machines are prone to overflow (WONTFIX)

- == Mail archives and list:
+ == Public mail archives and contact info:

- https://80x24.org/mwrap-public/
- nntp://80x24.org/inbox.comp.lang.ruby.mwrap
+ * https://80x24.org/mwrap-public/
+ * nntps://80x24.org/inbox.comp.lang.ruby.mwrap
+ * imaps://;AUTH=ANONYMOUS@80x24.org/inbox.comp.lang.ruby.mwrap.0
+ * https://80x24.org/mwrap-public/_/text/help/#pop3

  No subscription will ever be required to post, but HTML mail
  will be rejected:
@@ -88,7 +90,7 @@ will be rejected:
  git clone https://80x24.org/mwrap.git

  Send all patches and pull requests (use "git request-pull" to format) to
- the mailing list. We do not use centralized or proprietary messaging
+ mwrap-public@80x24.org. We do not use centralized or proprietary messaging
  systems.

  == License
data/Rakefile CHANGED
@@ -1,4 +1,4 @@
- # Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ # Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  require 'rake/testtask'
  begin
@@ -14,3 +14,31 @@ task :default => :compile

  c_files = File.readlines('MANIFEST').grep(%r{ext/.*\.[ch]$}).map!(&:chomp!)
  task 'compile:mwrap' => c_files
+
+ olddoc = ENV['OLDDOC'] || 'olddoc'
+ rdoc = ENV['RDOC'] || 'rdoc'
+ task :rsync_docs do
+   require 'fileutils'
+   top = %w(README COPYING LATEST NEWS NEWS.atom.xml)
+   system("git", "set-file-times")
+   dest = ENV["RSYNC_DEST"] || "80x24.org:/srv/80x24/mwrap/"
+   FileUtils.rm_rf('doc')
+   sh "#{olddoc} prepare"
+   sh "#{rdoc} -f dark216" # dark216 requires olddoc 1.7+
+   File.unlink('doc/created.rid') rescue nil
+   File.unlink('doc/index.html') rescue nil
+   FileUtils.cp(top, 'doc')
+   sh "#{olddoc} merge"
+
+   Dir['doc/**/*'].each do |txt|
+     st = File.stat(txt)
+     if st.file?
+       gz = "#{txt}.gz"
+       tmp = "#{gz}.#$$"
+       sh("gzip --rsyncable -9 <#{txt} >#{tmp}")
+       File.utime(st.atime, st.mtime, tmp) # make nginx gzip_static happy
+       File.rename(tmp, gz)
+     end
+   end
+   sh("rsync --chmod=Fugo=r #{ENV['RSYNC_OPT']} -av doc/ #{dest}/")
+ end
data/bin/mwrap CHANGED
@@ -1,6 +1,6 @@
  #!/usr/bin/ruby
  # frozen_string_literal: true
- # Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ # Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  require 'mwrap'
  mwrap_so = $".grep(%r{/mwrap\.so\z})[0] or abort "mwrap.so not loaded"
data/ext/mwrap/extconf.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
- # Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ # Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  require 'mkmf'

@@ -25,4 +25,11 @@ else
  abort 'missing __builtin_add_overflow'
  end

+ begin
+   if n = GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE]
+     $defs << "-DHEAP_PAGE_SIZE=#{n}"
+   end
+ rescue NameError
+ end
+
  create_makefile 'mwrap'
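
The new begin/rescue block asks the running VM for its real heap page size so the C
extension can recognize heap_page_body allocations exactly; the rescue keeps older
or non-MRI Rubies building with the compile-time fallback in mwrap.c. A minimal
sketch of what the probe reads (GC::INTERNAL_CONSTANTS is an undocumented MRI
internal, so treat the value as version-specific):

    begin
      if n = GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE]
        puts "heap page size: #{n}" # e.g. 16344 on 64-bit Ruby 2.6-2.7 builds
      end
    rescue NameError
      puts 'GC::INTERNAL_CONSTANTS absent; mwrap.c uses its compile-time fallback'
    end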
data/ext/mwrap/mwrap.c CHANGED
@@ -1,9 +1,9 @@
  /*
- * Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ * Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  * License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  */
  #define _LGPL_SOURCE /* allows URCU to inline some stuff */
- #include <ruby/ruby.h>
+ #include <ruby.h> /* defines HAVE_RUBY_RACTOR_H on 3.0+ */
  #include <ruby/thread.h>
  #include <ruby/io.h>
  #include <execinfo.h>
@@ -22,25 +22,47 @@
  #include <urcu/rculist.h>
  #include "jhash.h"

+ #if __STDC_VERSION__ >= 201112
+ # define MWRAP_TSD _Thread_local
+ #elif defined(__GNUC__)
+ # define MWRAP_TSD __thread
+ #else
+ # error neither _Thread_local nor __thread is supported
+ #endif
+
  static ID id_uminus;
- static unsigned int track_memalign;
  const char *rb_source_location_cstr(int *line); /* requires 2.6.0dev */
- extern int __attribute__((weak)) ruby_thread_has_gvl_p(void);
+
+ #ifdef HAVE_RUBY_RACTOR_H /* Ruby 3.0+ */
+ extern MWRAP_TSD void * __attribute__((weak)) ruby_current_ec;
+ #else /* Ruby 2.6-2.7 */
  extern void * __attribute__((weak)) ruby_current_execution_context_ptr;
+ # define ruby_current_ec ruby_current_execution_context_ptr
+ #endif
  extern void * __attribute__((weak)) ruby_current_vm_ptr; /* for rb_gc_count */
  extern size_t __attribute__((weak)) rb_gc_count(void);
  extern VALUE __attribute__((weak)) rb_cObject;
+ extern VALUE __attribute__((weak)) rb_eTypeError;
  extern VALUE __attribute__((weak)) rb_yield(VALUE);
+ int __attribute__((weak)) ruby_thread_has_gvl_p(void);

  static size_t total_bytes_inc, total_bytes_dec;

  /* true for glibc/dlmalloc/ptmalloc, not sure about jemalloc */
  #define ASSUMED_MALLOC_ALIGNMENT (sizeof(void *) * 2)

- int __attribute__((weak)) ruby_thread_has_gvl_p(void)
- {
-   return 0;
- }
+ /* match values in Ruby gc.c */
+ #define HEAP_PAGE_ALIGN_LOG 14
+ enum {
+   HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG)
+ #ifndef HEAP_PAGE_SIZE /* Ruby 2.6-2.7 only */
+   ,
+   REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
+   HEAP_PAGE_SIZE = (HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC)
+ #endif
+ };
+
+ #define IS_HEAP_PAGE_BODY ((struct src_loc *)-1)

  #ifdef __FreeBSD__
  void *__malloc(size_t);
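
For reference, the fallback enum reproduces the Ruby 2.6-2.7 gc.c arithmetic;
assuming a 64-bit build (sizeof(size_t) == 8), the values work out as below.
On 3.0+ the -DHEAP_PAGE_SIZE define from the new extconf.rb probe takes
precedence and the fallback branch is skipped.

    heap_page_align = 1 << 14            # HEAP_PAGE_ALIGN         => 16384
    required_by_malloc = 8 * 5           # REQUIRED_SIZE_BY_MALLOC => 40
    heap_page_align - required_by_malloc # HEAP_PAGE_SIZE          => 16344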
@@ -65,7 +87,7 @@ static int resolving_malloc;
  } \
  } while (0)

- static __thread size_t locating;
+ static MWRAP_TSD size_t locating;
  static size_t generation;
  static size_t page_size;
  static struct cds_lfht *totals;
@@ -96,7 +118,6 @@ lfht_new(void)
  __attribute__((constructor)) static void resolve_malloc(void)
  {
    int err;
-   const char *opt;
    ++locating;

  #ifdef __FreeBSD__
@@ -130,8 +151,8 @@ __attribute__((constructor)) static void resolve_malloc(void)
      _exit(1);
    }
  #endif /* !FreeBSD */
-   totals = lfht_new();
-   if (!totals)
+   CMM_STORE_SHARED(totals, lfht_new());
+   if (!CMM_LOAD_SHARED(totals))
      fprintf(stderr, "failed to allocate totals table\n");

    err = pthread_atfork(call_rcu_before_fork,
@@ -140,19 +161,21 @@ __attribute__((constructor)) static void resolve_malloc(void)
    if (err)
      fprintf(stderr, "pthread_atfork failed: %s\n", strerror(err));
    page_size = sysconf(_SC_PAGESIZE);
-   opt = getenv("MWRAP");
-   if (opt && (opt = strstr(opt, "memalign:"))) {
-     if (!sscanf(opt, "memalign:%u", &track_memalign))
-       fprintf(stderr, "not an unsigned int: %s\n", opt);
-   }
    --locating;
  }

+ #ifdef NDEBUG
+ #define QUIET_CC_WARNING(var) (void)var;
+ #else
+ #define QUIET_CC_WARNING(var)
+ #endif
+
  static void
  mutex_lock(pthread_mutex_t *m)
  {
    int err = pthread_mutex_lock(m);
    assert(err == 0);
+   QUIET_CC_WARNING(err)
  }

  static void
@@ -160,6 +183,7 @@ mutex_unlock(pthread_mutex_t *m)
  {
    int err = pthread_mutex_unlock(m);
    assert(err == 0);
+   QUIET_CC_WARNING(err)
  }

  #ifndef HAVE_MEMPCPY
@@ -209,10 +233,36 @@ static char *int2str(int num, char *dst, size_t * size)
  */
  static int has_ec_p(void)
  {
-   return (ruby_thread_has_gvl_p() && ruby_current_vm_ptr &&
-     ruby_current_execution_context_ptr);
+   return ruby_thread_has_gvl_p && ruby_thread_has_gvl_p() &&
+     ruby_current_vm_ptr && ruby_current_ec;
  }

+ struct acc {
+   uint64_t nr;
+   int64_t min;
+   int64_t max;
+   double m2;
+   double mean;
+ };
+
+ #define ACC_INIT(name) { .nr=0, .min=INT64_MAX, .max=-1, .m2=0, .mean=0 }
+
+ /* for tracking 16K-aligned heap page bodies (protected by GVL) */
+ struct {
+   pthread_mutex_t lock;
+   struct cds_list_head bodies;
+   struct cds_list_head freed;
+
+   struct acc alive;
+   struct acc reborn;
+ } hpb_stats = {
+   .lock = PTHREAD_MUTEX_INITIALIZER,
+   .bodies = CDS_LIST_HEAD_INIT(hpb_stats.bodies),
+   .freed = CDS_LIST_HEAD_INIT(hpb_stats.freed),
+   .alive = ACC_INIT(hpb_stats.alive),
+   .reborn = ACC_INIT(hpb_stats.reborn)
+ };
+
  /* allocated via real_malloc/real_free */
  struct src_loc {
    pthread_mutex_t *mtx;
@@ -237,6 +287,9 @@ struct alloc_hdr {
      struct src_loc *loc;
    } live;
    struct rcu_head dead;
+   struct {
+     size_t at; /* rb_gc_count() */
+   } hpb_freed;
  } as;
  void *real; /* what to call real_free on */
  size_t size;
@@ -276,7 +329,65 @@ static int loc_eq(struct cds_lfht_node *node, const void *key)
    memcmp(k->k, existing->k, loc_size(k)) == 0);
  }

- static struct src_loc *totals_add_rcu(struct src_loc *k)
+ /* note: not atomic */
+ static void
+ acc_add(struct acc *acc, size_t val)
+ {
+   double delta = val - acc->mean;
+   uint64_t nr = ++acc->nr;
+
+   /* just don't divide-by-zero if we ever hit this (unlikely :P) */
+   if (nr)
+     acc->mean += delta / nr;
+
+   acc->m2 += delta * (val - acc->mean);
+   if ((int64_t)val < acc->min)
+     acc->min = (int64_t)val;
+   if ((int64_t)val > acc->max)
+     acc->max = (int64_t)val;
+ }
+
+ #if SIZEOF_LONG == 8
+ # define INT64toNUM(x) LONG2NUM((long)x)
+ #elif defined(HAVE_LONG_LONG) && SIZEOF_LONG_LONG == 8
+ # define INT64toNUM(x) LL2NUM((LONG_LONG)x)
+ #endif
+
+ static VALUE
+ acc_max(const struct acc *acc)
+ {
+   return INT64toNUM(acc->max);
+ }
+
+ static VALUE
+ acc_min(const struct acc *acc)
+ {
+   return acc->min == INT64_MAX ? INT2FIX(-1) : INT64toNUM(acc->min);
+ }
+
+ static VALUE
+ acc_mean(const struct acc *acc)
+ {
+   return DBL2NUM(acc->nr ? acc->mean : HUGE_VAL);
+ }
+
+ static double
+ acc_stddev_dbl(const struct acc *acc)
+ {
+   if (acc->nr > 1) {
+     double variance = acc->m2 / (acc->nr - 1);
+     return sqrt(variance);
+   }
+   return 0.0;
+ }
+
+ static VALUE
+ acc_stddev(const struct acc *acc)
+ {
+   return DBL2NUM(acc_stddev_dbl(acc));
+ }
+
+ static struct src_loc *totals_add_rcu(const struct src_loc *k)
  {
    struct cds_lfht_iter iter;
    struct cds_lfht_node *cur;
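
acc_add is Welford's online algorithm: mean and m2 are updated one sample at a
time, so standard deviation can be derived later without storing every lifespan.
A minimal Ruby sketch of the same update rule (illustration only, not part of
the gem):

    # Welford's online mean/variance, mirroring acc_add/acc_stddev_dbl above
    class Acc
      attr_reader :nr, :min, :max, :mean
      def initialize
        @nr = 0
        @min = Float::INFINITY # INT64_MAX in the C version
        @max = -1
        @mean = 0.0
        @m2 = 0.0
      end

      def add(val)
        @nr += 1
        delta = val - @mean
        @mean += delta / @nr
        @m2 += delta * (val - @mean) # uses the updated mean
        @min = val if val < @min
        @max = val if val > @max
      end

      def stddev # sample stddev, as in acc_stddev_dbl
        @nr > 1 ? Math.sqrt(@m2 / (@nr - 1)) : 0.0
      end
    end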
@@ -284,7 +395,7 @@ static struct src_loc *totals_add_rcu(struct src_loc *k)
    struct cds_lfht *t;

  again:
-   t = rcu_dereference(totals);
+   t = CMM_LOAD_SHARED(totals);
    if (!t) goto out_unlock;
    cds_lfht_lookup(t, k->hval, loc_eq, k, &iter);
    cur = cds_lfht_iter_get_node(&iter);
@@ -326,7 +437,7 @@ static struct src_loc *update_stats_rcu_lock(size_t size, uintptr_t caller)
    static const size_t xlen = sizeof(caller);
    char *dst;

-   if (caa_unlikely(!totals)) return 0;
+   if (caa_unlikely(!CMM_LOAD_SHARED(totals))) return 0;
    if (locating++) goto out; /* do not recurse into another *alloc */

    uatomic_add(&total_bytes_inc, size);
@@ -391,7 +502,7 @@ void free(void *p)
    struct src_loc *l = h->as.live.loc;

    if (!real_free) return; /* oh well, leak a little */
-   if (l) {
+   if (l && l != IS_HEAP_PAGE_BODY) {
      size_t age = generation - h->as.live.gen;

      uatomic_add(&total_bytes_dec, h->size);
@@ -406,8 +517,20 @@ void free(void *p)
      mutex_unlock(l->mtx);

      call_rcu(&h->as.dead, free_hdr_rcu);
-   }
-   else {
+   } else if (l == IS_HEAP_PAGE_BODY) {
+     size_t gen = generation;
+     size_t age = gen - h->as.live.gen;
+
+     h->as.hpb_freed.at = gen;
+
+     mutex_lock(&hpb_stats.lock);
+     acc_add(&hpb_stats.alive, age);
+
+     /* hpb_stats.bodies => hpb_stats.freed */
+     cds_list_move(&h->anode, &hpb_stats.freed);
+
+     mutex_unlock(&hpb_stats.lock);
+   } else {
      real_free(h->real);
    }
  }
@@ -434,7 +557,7 @@ static size_t size_align(size_t size, size_t alignment)
    return ((size + (alignment - 1)) & ~(alignment - 1));
  }

- static bool ptr_is_aligned(void *ptr, size_t alignment)
+ static bool ptr_is_aligned(const void *ptr, size_t alignment)
  {
    return ((uintptr_t)ptr & (alignment - 1)) == 0;
  }
@@ -473,18 +596,66 @@ internal_memalign(void **pp, size_t alignment, size_t size, uintptr_t caller)
      __builtin_add_overflow(asize, sizeof(struct alloc_hdr), &asize))
    return ENOMEM;

- /* assert(asize == (alignment + size + sizeof(struct alloc_hdr))); */
- l = track_memalign ? update_stats_rcu_lock(size, caller) : 0;
- real = real_malloc(asize);
- if (real) {
-   void *p = hdr2ptr(real);
-   if (!ptr_is_aligned(p, alignment))
-     p = ptr_align(p, alignment);
-   h = ptr2hdr(p);
-   alloc_insert_rcu(l, h, size, real);
+
+ if (alignment == HEAP_PAGE_ALIGN && size == HEAP_PAGE_SIZE) {
+   if (has_ec_p()) generation = rb_gc_count();
+   l = IS_HEAP_PAGE_BODY;
+ } else {
+   l = update_stats_rcu_lock(size, caller);
+ }
+
+ if (l == IS_HEAP_PAGE_BODY) {
+   void *p;
+   size_t gen = generation;
+
+   mutex_lock(&hpb_stats.lock);
+
+   /* reuse existing entry */
+   if (!cds_list_empty(&hpb_stats.freed)) {
+     size_t deathspan;
+
+     h = cds_list_first_entry(&hpb_stats.freed,
+                              struct alloc_hdr, anode);
+     /* hpb_stats.freed => hpb_stats.bodies */
+     cds_list_move(&h->anode, &hpb_stats.bodies);
+     assert(h->size == size);
+     assert(h->real);
+     real = h->real;
+     p = hdr2ptr(h);
+     assert(ptr_is_aligned(p, alignment));
+
+     deathspan = gen - h->as.hpb_freed.at;
+     acc_add(&hpb_stats.reborn, deathspan);
+   }
+   else {
+     real = real_malloc(asize);
+     if (!real) return ENOMEM;
+
+     p = hdr2ptr(real);
+     if (!ptr_is_aligned(p, alignment))
+       p = ptr_align(p, alignment);
+     h = ptr2hdr(p);
+     h->size = size;
+     h->real = real;
+     cds_list_add(&h->anode, &hpb_stats.bodies);
+   }
+   mutex_unlock(&hpb_stats.lock);
+   h->as.live.loc = l;
+   h->as.live.gen = gen;
    *pp = p;
  }
- update_stats_rcu_unlock(l);
+ else {
+   real = real_malloc(asize);
+   if (real) {
+     void *p = hdr2ptr(real);
+     if (!ptr_is_aligned(p, alignment))
+       p = ptr_align(p, alignment);
+     h = ptr2hdr(p);
+     alloc_insert_rcu(l, h, size, real);
+     *pp = p;
+   }
+   update_stats_rcu_unlock(l);
+ }

  return real ? 0 : ENOMEM;
  }
@@ -492,16 +663,14 @@ internal_memalign(void **pp, size_t alignment, size_t size, uintptr_t caller)
  static void *
  memalign_result(int err, void *p)
  {
-   if (caa_unlikely(err)) {
+   if (caa_unlikely(err))
      errno = err;
-     return 0;
-   }
    return p;
  }

  void *memalign(size_t alignment, size_t size)
  {
-   void *p;
+   void *p = NULL;
    int err = internal_memalign(&p, alignment, size, RETURN_ADDRESS(0));
    return memalign_result(err, p);
  }
@@ -516,7 +685,7 @@ void cfree(void *) __attribute__((alias("free")));

  void *valloc(size_t size)
  {
-   void *p;
+   void *p = NULL;
    int err = internal_memalign(&p, page_size, size, RETURN_ADDRESS(0));
    return memalign_result(err, p);
  }
@@ -534,7 +703,7 @@ void *valloc(size_t size)
  void *pvalloc(size_t size)
  {
    size_t alignment = page_size;
-   void *p;
+   void *p = NULL;
    int err;

    if (add_overflow_p(size, alignment)) {
@@ -657,7 +826,7 @@ static void *dump_to_file(void *x)

    ++locating;
    rcu_read_lock();
-   t = rcu_dereference(totals);
+   t = CMM_LOAD_SHARED(totals);
    if (!t)
      goto out_unlock;
    cds_lfht_for_each_entry(t, &iter, l, hnode) {
@@ -726,7 +895,7 @@ static void *totals_reset(void *ign)
    uatomic_set(&total_bytes_dec, 0);

    rcu_read_lock();
-   t = rcu_dereference(totals);
+   t = CMM_LOAD_SHARED(totals);
    cds_lfht_for_each_entry(t, &iter, l, hnode) {
      uatomic_set(&l->total, 0);
      uatomic_set(&l->allocations, 0);
@@ -794,7 +963,7 @@ static VALUE dump_each_rcu(VALUE x)
    struct cds_lfht_iter iter;
    struct src_loc *l;

-   t = rcu_dereference(totals);
+   t = CMM_LOAD_SHARED(totals);
    cds_lfht_for_each_entry(t, &iter, l, hnode) {
      VALUE v[6];
      if (l->total <= a->min) continue;
@@ -898,9 +1067,9 @@ static VALUE mwrap_aref(VALUE mod, VALUE loc)

    if (!k) return val;

+   t = CMM_LOAD_SHARED(totals);
+   if (!t) return val;
    rcu_read_lock();
-   t = rcu_dereference(totals);
-   if (!t) goto out_unlock;

    cds_lfht_lookup(t, k->hval, loc_eq, k, &iter);
    cur = cds_lfht_iter_get_node(&iter);
@@ -908,7 +1077,6 @@ static VALUE mwrap_aref(VALUE mod, VALUE loc)
      l = caa_container_of(cur, struct src_loc, hnode);
      val = TypedData_Wrap_Struct(cSrcLoc, &src_loc_type, l);
    }
- out_unlock:
    rcu_read_unlock();
    return val;
  }
@@ -1042,16 +1210,89 @@ static VALUE mwrap_quiet(VALUE mod)
    return rb_ensure(rb_yield, SIZET2NUM(cur), reset_locating, 0);
  }

+ /*
+  * total bytes allocated as tracked by mwrap
+  */
  static VALUE total_inc(VALUE mod)
  {
    return SIZET2NUM(total_bytes_inc);
  }

+ /*
+  * total bytes freed as tracked by mwrap
+  */
  static VALUE total_dec(VALUE mod)
  {
    return SIZET2NUM(total_bytes_dec);
  }

+ static VALUE hpb_each_yield(VALUE ignore)
+ {
+   struct alloc_hdr *h, *next;
+
+   cds_list_for_each_entry_safe(h, next, &hpb_stats.bodies, anode) {
+     VALUE v[2]; /* [ address, generation ] */
+     void *addr = hdr2ptr(h);
+     assert(ptr_is_aligned(addr, HEAP_PAGE_ALIGN));
+     v[0] = LONG2NUM((long)addr);
+     v[1] = SIZET2NUM(h->as.live.gen);
+     rb_yield_values2(2, v);
+   }
+   return Qnil;
+ }
+
+ /*
+  * call-seq:
+  *
+  *   Mwrap::HeapPageBody.each { |addr, gen| } -> nil
+  *
+  * Yields the address of each heap page body as an Integer and the
+  * generation (GC.count) it was allocated in. The number of pages
+  * yielded should match the result of GC.stat(:heap_allocated_pages)
+  */
+ static VALUE hpb_each(VALUE mod)
+ {
+   ++locating;
+   return rb_ensure(hpb_each_yield, Qfalse, reset_locating, 0);
+ }
+
+ /*
+  * call-seq:
+  *
+  *   Mwrap::HeapPageBody.stat -> Hash
+  *   Mwrap::HeapPageBody.stat(hash) -> hash
+  *
+  * Returns lifespan and deathspan statistics (in units of GC
+  * generations) for heap page bodies. lifespan_mean may be Infinity
+  * if no heap page body was ever freed, and deathspan_mean may be
+  * Infinity if no freed page body was ever reused.
+  */
+ static VALUE hpb_stat(int argc, VALUE *argv, VALUE hpb)
+ {
+   VALUE h;
+
+   rb_scan_args(argc, argv, "01", &h);
+   if (NIL_P(h))
+     h = rb_hash_new();
+   else if (!RB_TYPE_P(h, T_HASH))
+     rb_raise(rb_eTypeError, "not a hash %+"PRIsVALUE, h);
+
+   ++locating;
+ #define S(x) ID2SYM(rb_intern(#x))
+   rb_hash_aset(h, S(lifespan_max), acc_max(&hpb_stats.alive));
+   rb_hash_aset(h, S(lifespan_min), acc_min(&hpb_stats.alive));
+   rb_hash_aset(h, S(lifespan_mean), acc_mean(&hpb_stats.alive));
+   rb_hash_aset(h, S(lifespan_stddev), acc_stddev(&hpb_stats.alive));
+   rb_hash_aset(h, S(deathspan_max), acc_max(&hpb_stats.reborn));
+   rb_hash_aset(h, S(deathspan_min), acc_min(&hpb_stats.reborn));
+   rb_hash_aset(h, S(deathspan_mean), acc_mean(&hpb_stats.reborn));
+   rb_hash_aset(h, S(deathspan_stddev), acc_stddev(&hpb_stats.reborn));
+   rb_hash_aset(h, S(resurrects), SIZET2NUM(hpb_stats.reborn.nr));
+ #undef S
+   --locating;
+
+   return h;
+ }
+
  /*
   * Document-module: Mwrap
   *
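
A short usage sketch of the two new singleton methods, mirroring what
test_heap_page_body below exercises (assumes the process runs under the
mwrap(1) LD_PRELOAD wrapper so the extension is active):

    require 'mwrap'

    Mwrap::HeapPageBody.each do |addr, gen|
      # addr is 16KiB-aligned; gen is GC.count when the page body appeared
      printf("0x%x\t%d\n", addr, gen)
    end

    stat = Mwrap::HeapPageBody.stat
    stat[:lifespan_max] # generations survived by the longest-lived page body
    stat[:resurrects]   # times a freed page body was handed back out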
@@ -1070,20 +1311,19 @@ static VALUE total_dec(VALUE mod)
   * * dump_fd: a writable FD to dump to
   * * dump_path: a path to dump to, the file is opened in O_APPEND mode
   * * dump_min: the minimum allocation size (total) to dump
-  * * memalign: use `1' to enable tracking the memalign family
+  * * dump_heap: mask of heap_page_body statistics to dump
   *
   * If both `dump_fd' and `dump_path' are specified, dump_path takes
   * precedence.
   *
-  * Tracking the memalign family of functions is misleading for Ruby
-  * applications, as heap page allocations can happen anywhere a
-  * Ruby object is allocated, even in the coldest code paths.
-  * Furthermore, it is rarely-used outside of the Ruby object allocator.
-  * Thus tracking memalign functions is disabled by default.
+  * dump_heap bitmask
+  * * 0x01 - summary stats (same info as HeapPageBody.stat)
+  * * 0x02 - all live heaps (similar to HeapPageBody.each)
+  * * 0x04 - skip non-heap_page_body-related output
   */
  void Init_mwrap(void)
  {
-   VALUE mod;
+   VALUE mod, hpb;

    ++locating;
    mod = rb_define_module("Mwrap");
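
A sketch of driving the new dump_heap option from a parent process, following
the pattern used in test_mwrap.rb (the log path and target script here are
placeholders):

    # dump_heap:5 = 0x01 (summary stats) | 0x04 (skip the per-callsite dump)
    env = { 'MWRAP' => 'dump_path:/tmp/mwrap.log,dump_heap:5' }
    system(env, 'mwrap', 'ruby', 'app.rb') or abort 'child failed'
    puts File.read('/tmp/mwrap.log') # lifespan_*/deathspan_*/gc_count lines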
@@ -1105,6 +1345,8 @@ void Init_mwrap(void)
    rb_define_singleton_method(mod, "quiet", mwrap_quiet, 0);
    rb_define_singleton_method(mod, "total_bytes_allocated", total_inc, 0);
    rb_define_singleton_method(mod, "total_bytes_freed", total_dec, 0);
+
+
    rb_define_method(cSrcLoc, "each", src_loc_each, 0);
    rb_define_method(cSrcLoc, "frees", src_loc_frees, 0);
    rb_define_method(cSrcLoc, "allocations", src_loc_allocations, 0);
@@ -1112,9 +1354,68 @@ void Init_mwrap(void)
    rb_define_method(cSrcLoc, "mean_lifespan", src_loc_mean_lifespan, 0);
    rb_define_method(cSrcLoc, "max_lifespan", src_loc_max_lifespan, 0);
    rb_define_method(cSrcLoc, "name", src_loc_name, 0);
+
+   /*
+    * Information about "struct heap_page_body" allocations from
+    * Ruby gc.c. This can be useful for tracking fragmentation
+    * from posix_memalign(3) use in mainline Ruby:
+    *
+    * https://sourceware.org/bugzilla/show_bug.cgi?id=14581
+    *
+    * These statistics are never reset by Mwrap.reset or
+    * any other method. They only make sense in the context
+    * of an entire program lifetime.
+    */
+   hpb = rb_define_class_under(mod, "HeapPageBody", rb_cObject);
+   rb_define_singleton_method(hpb, "stat", hpb_stat, -1);
+   rb_define_singleton_method(hpb, "each", hpb_each, 0);
+
    --locating;
  }

+ enum {
+   DUMP_HPB_STATS = 0x1,
+   DUMP_HPB_EACH = 0x2,
+   DUMP_HPB_EXCL = 0x4,
+ };
+
+ static void dump_hpb(FILE *fp, unsigned flags)
+ {
+   if (flags & DUMP_HPB_STATS) {
+     fprintf(fp,
+       "lifespan_max: %"PRId64"\n"
+       "lifespan_min:%s%"PRId64"\n"
+       "lifespan_mean: %0.3f\n"
+       "lifespan_stddev: %0.3f\n"
+       "deathspan_max: %"PRId64"\n"
+       "deathspan_min:%s%"PRId64"\n"
+       "deathspan_mean: %0.3f\n"
+       "deathspan_stddev: %0.3f\n"
+       "gc_count: %zu\n",
+       hpb_stats.alive.max,
+       hpb_stats.alive.min == INT64_MAX ? " -" : " ",
+       hpb_stats.alive.min,
+       hpb_stats.alive.mean,
+       acc_stddev_dbl(&hpb_stats.alive),
+       hpb_stats.reborn.max,
+       hpb_stats.reborn.min == INT64_MAX ? " -" : " ",
+       hpb_stats.reborn.min,
+       hpb_stats.reborn.mean,
+       acc_stddev_dbl(&hpb_stats.reborn),
+       /* n.b.: unsafe to call rb_gc_count() in destructor */
+       generation);
+   }
+   if (flags & DUMP_HPB_EACH) {
+     struct alloc_hdr *h;
+
+     cds_list_for_each_entry(h, &hpb_stats.bodies, anode) {
+       void *addr = hdr2ptr(h);
+
+       fprintf(fp, "%p\t%zu\n", addr, h->as.live.gen);
+     }
+   }
+ }
+
  /* rb_cloexec_open isn't usable by non-Ruby processes */
  #ifndef O_CLOEXEC
  # define O_CLOEXEC 0
@@ -1125,10 +1426,12 @@ static void mwrap_dump_destructor(void)
  {
    const char *opt = getenv("MWRAP");
    const char *modes[] = { "a", "a+", "w", "w+", "r+" };
-   struct dump_arg a;
+   struct dump_arg a = { .min = 0 };
    size_t i;
    int dump_fd;
+   unsigned dump_heap = 0;
    char *dump_path;
+   char *s;

    if (!opt)
      return;
@@ -1155,8 +1458,11 @@ static void mwrap_dump_destructor(void)
    else if (!sscanf(opt, "dump_fd:%d", &dump_fd))
      goto out;

-   if (!sscanf(opt, "dump_min:%zu", &a.min))
-     a.min = 0;
+   if ((s = strstr(opt, "dump_min:")))
+     sscanf(s, "dump_min:%zu", &a.min);
+
+   if ((s = strstr(opt, "dump_heap:")))
+     sscanf(s, "dump_heap:%u", &dump_heap);

    switch (dump_fd) {
    case 0: goto out;
@@ -1177,7 +1483,9 @@ static void mwrap_dump_destructor(void)
      }
      /* we'll leak some memory here, but this is a destructor */
    }
-   dump_to_file(&a);
+   if ((dump_heap & DUMP_HPB_EXCL) == 0)
+     dump_to_file(&a);
+   dump_hpb(a.fp, dump_heap);
  out:
    --locating;
  }
data/lib/mwrap_rack.rb CHANGED
@@ -1,4 +1,4 @@
- # Copyright (C) 2018 all contributors <mwrap@80x24.org>
+ # Copyright (C) all contributors <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  # frozen_string_literal: true
  require 'mwrap'
@@ -17,9 +17,6 @@ require 'cgi'
  #   map('/MWRAP') { run(MwrapRack.new) }
  #   map('/') { run(your_normal_app) }
  #
- # A live demo is available at https://80x24.org/MWRAP/
- # (warning the demo machine is 32-bit, so counters will overflow)
- #
  # This module is only available in mwrap 2.0.0+
  class MwrapRack
    module HtmlResponse # :nodoc:
@@ -92,6 +89,54 @@ class MwrapRack
      end
    end

+   class HeapPages # :nodoc:
+     include HtmlResponse
+     HEADER = '<tr><th>address</th><th>generation</th></tr>'
+
+     def hpb_rows
+       Mwrap::HeapPageBody.stat(stat = Thread.current[:mwrap_hpb_stat] ||= {})
+       %i(lifespan_max lifespan_min lifespan_mean lifespan_stddev
+          deathspan_max deathspan_min deathspan_mean deathspan_stddev
+          resurrects
+       ).map! do |k|
+         "<tr><td>#{k}</td><td>#{stat[k]}</td></tr>\n"
+       end.join
+     end
+
+     def gc_stat_rows
+       GC.stat(stat = Thread.current[:mwrap_gc_stat] ||= {})
+       %i(count heap_allocated_pages heap_eden_pages heap_tomb_pages
+          total_allocated_pages total_freed_pages).map do |k|
+         "<tr><td>GC.stat(:#{k})</td><td>#{stat[k]}</td></tr>\n"
+       end.join
+     end
+
+     GC_STAT_URL = 'https://docs.ruby-lang.org/en/trunk/GC.html#method-c-stat'
+     GC_STAT_HELP = <<~EOM
+       <p>Non-Infinity lifespans can indicate fragmentation.
+       <p>See <a
+       href="#{GC_STAT_URL}">#{GC_STAT_URL}</a> for info on GC.stat values.
+     EOM
+
+     def each
+       Mwrap.quiet do
+         yield("<html><head><title>heap pages</title></head>" \
+               "<body><h1>heap pages</h1>" \
+               "<table><tr><th>stat</th><th>value</th></tr>\n" \
+               "#{hpb_rows}" \
+               "#{gc_stat_rows}" \
+               "</table>\n" \
+               "#{GC_STAT_HELP}" \
+               "<table>#{HEADER}")
+         Mwrap::HeapPageBody.each do |addr, generation|
+           addr = -sprintf('0x%x', addr)
+           yield(-"<tr><td>#{addr}</td><td>#{generation}</td></tr>\n")
+         end
+         yield "</table></body></html>\n"
+       end
+     end
+   end
+
    def r404 # :nodoc:
      [404,{'Content-Type'=>'text/plain'},["Not found\n"]]
    end
@@ -107,12 +152,16 @@ class MwrapRack
      loc = -CGI.unescape($1)
      loc = Mwrap[loc] or return r404
      EachAt.new(loc).response
+   when '/heap_pages'
+     HeapPages.new.response
    when '/'
      n = 2000
      u = 'https://80x24.org/mwrap/README.html'
      b = -('<html><head><title>Mwrap demo</title></head>' \
          "<body><p><a href=\"each/#{n}\">allocations &gt;#{n} bytes</a>" \
-         "<p><a href=\"#{u}\">#{u}</a></body></html>\n")
+         "<p><a href=\"#{u}\">#{u}</a>" \
+         "<p><a href=\"heap_pages\">heap pages</a>" \
+         "</body></html>\n")
      [ 200, {'Content-Type'=>'text/html','Content-Length'=>-b.size.to_s},[b]]
    else
      r404
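
With the new HeapPages endpoint, the config.ru from the comment atop
mwrap_rack.rb gains a /MWRAP/heap_pages view (your_normal_app is the
placeholder from that comment):

    # config.ru; run under the mwrap(1) wrapper so the Mwrap module is live
    require 'mwrap_rack'
    map('/MWRAP') { run(MwrapRack.new) } # /MWRAP/heap_pages now works too
    map('/') { run(your_normal_app) }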
data/mwrap.gemspec CHANGED
@@ -12,9 +12,9 @@ desc = `git describe --abbrev=4 HEAD`.strip.tr('-', '.').delete_prefix('v')

  Gem::Specification.new do |s|
    s.name = 'mwrap'
-   s.version = desc.empty? ? '2.0.0' : desc
+   s.version = desc.empty? ? '2.2.0' : desc
    s.homepage = 'https://80x24.org/mwrap/'
-   s.authors = ["Ruby hackers"]
+   s.authors = ["mwrap hackers"]
    s.summary = 'LD_PRELOAD malloc wrapper for Ruby'
    s.executables = %w(mwrap)
    s.files = manifest
data/test/test_mwrap.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
- # Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ # Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  require 'test/unit'
  require 'mwrap'
@@ -29,7 +29,8 @@ class TestMwrap < Test::Unit::TestCase
      tmp.rewind
      lines = tmp.readlines
      line_1 = lines.grep(/\s-e:1\b/)[0].strip
-     assert_equal '10001', line_1.split(/\s+/)[0]
+     bytes = line_1.split(/\s+/)[0].to_i
+     assert_operator bytes, :>=, 10001
    end
  end

@@ -42,7 +43,7 @@ class TestMwrap < Test::Unit::TestCase
      res = system(env, *cmd, { 5 => tmp })
      assert res, $?.inspect
      tmp.rewind
-     assert_match(/\b10001\s+1\s+-e:1$/, tmp.read)
+     assert_match(/\b1\d{4}\s+[1-9]\d*\s+-e:1$/, tmp.read)

      env['MWRAP'] = 'dump_fd:1,dump_min:10000'
      tmp.rewind
@@ -50,14 +51,21 @@ class TestMwrap < Test::Unit::TestCase
      res = system(env, *cmd, { 1 => tmp })
      assert res, $?.inspect
      tmp.rewind
-     assert_match(/\b10001\s+1\s+-e:1$/, tmp.read)
+     assert_match(/\b1\d{4}\s+[1-9]\d*\s+-e:1$/, tmp.read)

      tmp.rewind
      tmp.truncate(0)
      env['MWRAP'] = "dump_path:#{tmp.path},dump_min:10000"
      res = system(env, *cmd)
      assert res, $?.inspect
-     assert_match(/\b10001\s+1\s+-e:1$/, tmp.read)
+     assert_match(/\b1\d{4}\s+[1-9]\d*\s+-e:1$/, tmp.read)
+
+     tmp.rewind
+     tmp.truncate(0)
+     env['MWRAP'] = "dump_path:#{tmp.path},dump_heap:5"
+     res = system(env, *cmd)
+     assert res, $?.inspect
+     assert_match %r{lifespan_stddev}, tmp.read
    end
  end

@@ -91,7 +99,7 @@ class TestMwrap < Test::Unit::TestCase
      tmp.rewind
      buf = tmp.read
      assert_not_match(/\s+-e:1$/, buf)
-     assert_match(/\b20001\s+1\s+-e:3$/, buf)
+     assert_match(/\b2\d{4}\s+[0-9]\d*\s+-e:3$/, buf)
    end
  end

@@ -169,8 +177,8 @@ class TestMwrap < Test::Unit::TestCase
      -e GC.disable
      -e keep=("0"*10000)
      -e loc=Mwrap["-e:3"]
-     -e loc.each{|size,gen|p([size,gen,count])}
-   )
+     -e
+   ) + [ 'loc.each{|size,gen|p([size,gen,count]) if size > 10000}' ]
    buf = IO.popen(@@env, cmd, &:read)
    assert_predicate $?, :success?
    assert_match(/\A\[\s*\d+,\s*\d+,\s*\d+\]\s*\z/s, buf)
@@ -223,7 +231,8 @@ class TestMwrap < Test::Unit::TestCase
      loc.name == k or abort 'SourceLocation#name broken'
      loc.total >= 10000 or abort 'SourceLocation#total broken'
      loc.frees == 0 or abort 'SourceLocation#frees broken'
-     loc.allocations == 1 or abort 'SourceLocation#allocations broken'
+     loc.allocations >= 1 or
+       abort "SourceLocation#allocations broken: #{loc.allocations}"
      seen = false
      loc.each do |*x| seen = x end
      seen[1] == loc.total or 'SourceLocation#each broken'
@@ -233,7 +242,9 @@ class TestMwrap < Test::Unit::TestCase
      freed = false
      until freed
        freed = true
-       loc.each do freed = false end
+       loc.each do |size, gen|
+         freed = false if size >= 10000
+       end
      end
      loc.frees == 1 or abort 'SourceLocation#frees broken (after free)'
      Float === loc.mean_lifespan or abort 'mean_lifespan broken'
@@ -257,8 +268,9 @@ class TestMwrap < Test::Unit::TestCase
    assert_separately(+"#{<<~"begin;"}\n#{<<~'end;'}")
    begin;
      require 'mwrap'
-     before = __LINE__
+     before = nil
      res = Mwrap.quiet do |depth|
+       before = __LINE__
        depth == 1 or abort 'depth is not 1'
        ('a' * 10000).clear
        Mwrap.quiet { |d| d == 2 or abort 'depth is not 2' }
@@ -283,4 +295,35 @@ class TestMwrap < Test::Unit::TestCase
      abort 'freed more than allocated'
    end;
  end
+
+ def test_heap_page_body
+   assert_separately(+"#{<<~"begin;"}\n#{<<~'end;'}")
+   begin;
+     require 'mwrap'
+     require 'rubygems' # use up some memory
+     ap = GC.stat(:heap_allocated_pages)
+     h = {}
+     nr = 0
+     Mwrap::HeapPageBody.each do |addr, gen|
+       nr += 1
+       gen <= GC.count && gen >= 0 or abort "bad generation: #{gen}"
+       (0 == (addr & 16383)) or abort "addr not aligned: #{'%x' % addr}"
+     end
+     if RUBY_VERSION.to_f < 3.1 # 3.1+ uses mmap on platforms we care about
+       nr == ap or abort "HeapPageBody.each missed page #{nr} != #{ap}"
+     end
+     10.times { (1..20000).to_a.map(&:to_s) }
+     3.times { GC.start }
+     Mwrap::HeapPageBody.stat(h)
+     Integer === h[:lifespan_max] or abort 'lifespan_max not recorded'
+     Integer === h[:lifespan_min] or abort 'lifespan_min not recorded'
+     Float === h[:lifespan_mean] or abort 'lifespan_mean not recorded'
+     3.times { GC.start }
+     10.times { (1..20000).to_a.map(&:to_s) }
+     Mwrap::HeapPageBody.stat(h)
+     h[:deathspan_min] <= h[:deathspan_max] or
+       abort 'wrong min/max deathtime'
+     Float === h[:deathspan_mean] or abort 'deathspan_mean not recorded'
+   end;
+ end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: mwrap
  version: !ruby/object:Gem::Version
- version: 2.0.0.4.gd1ea
+ version: 2.2.0.1.g867b
  platform: ruby
  authors:
- - Ruby hackers
+ - mwrap hackers
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-07-26 00:00:00.000000000 Z
+ date: 2022-08-23 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: test-unit
@@ -81,8 +81,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
    version: 1.3.1
  requirements: []
- rubyforge_project:
- rubygems_version: 2.7.7
+ rubygems_version: 3.0.2
  signing_key:
  specification_version: 4
  summary: LD_PRELOAD malloc wrapper for Ruby