mwrap 2.0.0 → 2.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 8559afec1946b7f545c944085aac5e205601deb82fa9f1529785dd3ef7526e5a
- data.tar.gz: 34ea90410103ec59f367baa00e1e4df6424fe67af8608b5c7a3a0cb66fec2440
+ metadata.gz: 406933abeb7d63d1304c63f0f5aa7a2f4deb9ecb1820816886df8bfcf2d01601
+ data.tar.gz: 214975358778869db55691d8ddcac9e078cc16be6870e143094bb837d88551eb
  SHA512:
- metadata.gz: ff91f6f250e7a18cb465a6dc04ddee374411a10a92cd1b3227e09a4ccd0be1b8597f475f71d5061bd5bc36e8c0418ce3bf6faa3a78b37aec5d21b0340f736031
- data.tar.gz: 06311f2949ae4dae6bd2d0e95e6b0d147b44b6bae6cd4464e4c1d160fc1f5f3f99b2a6a3a3e711a10b8894da3bdb6220ccfeb72dfb18545170edbbc65dbad225
+ metadata.gz: a0a353c1ed720192b6dc63699d7d53661fc80c204429df3c93d4795b39cdec1d0e1b83c90e4250ddee11abd1911d578dd4f83516368a787a21fb1327de0ac32b
+ data.tar.gz: 28fd47f870ec74cd4c15570e6f6df0b9ee80ff30eb460185f841d2bc796836dd8aa6d82a7e3304bf811d22acc2dfb3e00fa4f92143a0cbb04dbd78e99ae7231e
data/.document CHANGED
@@ -1,2 +1,3 @@
  ext/mwrap/mwrap.c
  lib/mwrap_rack.rb
+ README
data/.gitignore CHANGED
@@ -4,3 +4,6 @@
  /pkg
  /*.gem
  /doc
+ /NEWS
+ /NEWS.atom.xml
+ /LATEST
data/.olddoc.yml ADDED
@@ -0,0 +1,10 @@
+ ---
+ cgit_url: https://80x24.org/mwrap.git
+ git_url: https://80x24.org/mwrap.git
+ rdoc_url: https://80x24.org/mwrap/
+ ml_url: https://80x24.org/mwrap-public/
+ public_email: mwrap-public@80x24.org
+ nntp_url:
+ - nntps://news.public-inbox.org/inbox.comp.lang.ruby.mwrap
+ imap_url:
+ - imaps://;AUTH=ANONYMOUS@80x24.org/inbox.comp.lang.ruby.mwrap.0
data/MANIFEST CHANGED
@@ -1,5 +1,6 @@
  .document
  .gitignore
+ .olddoc.yml
  COPYING
  MANIFEST
  README
data/README CHANGED
@@ -67,16 +67,18 @@ first two columns to find the hottest malloc locations.
  mwrap 2.0.0+ also supports a Rack application endpoint,
  it is documented at:
 
- https://80x24.org/mwrap/MwrapRack.html
+ https://80x24.org/mwrap/MwrapRack.html
 
  == Known problems
 
  * 32-bit machines are prone to overflow (WONTFIX)
 
- == Mail archives and list:
+ == Public mail archives and contact info:
 
- https://80x24.org/mwrap-public/
- nntp://80x24.org/inbox.comp.lang.ruby.mwrap
+ * https://80x24.org/mwrap-public/
+ * nntps://80x24.org/inbox.comp.lang.ruby.mwrap
+ * imaps://;AUTH=ANONYMOUS@80x24.org/inbox.comp.lang.ruby.mwrap.0
+ * https://80x24.org/mwrap-public/_/text/help/#pop3
 
  No subscription will ever be required to post, but HTML mail
  will be rejected:
@@ -88,7 +90,7 @@ will be rejected:
  git clone https://80x24.org/mwrap.git
 
  Send all patches and pull requests (use "git request-pull" to format) to
- the mailing list. We do not use centralized or proprietary messaging
+ mwrap-public@80x24.org. We do not use centralized or proprietary messaging
  systems.
 
  == License
data/Rakefile CHANGED
@@ -1,4 +1,4 @@
- # Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ # Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  require 'rake/testtask'
  begin
@@ -14,3 +14,31 @@ task :default => :compile
 
  c_files = File.readlines('MANIFEST').grep(%r{ext/.*\.[ch]$}).map!(&:chomp!)
  task 'compile:mwrap' => c_files
+
+ olddoc = ENV['OLDDOC'] || 'olddoc'
+ rdoc = ENV['RDOC'] || 'rdoc'
+ task :rsync_docs do
+ require 'fileutils'
+ top = %w(README COPYING LATEST NEWS NEWS.atom.xml)
+ system("git", "set-file-times")
+ dest = ENV["RSYNC_DEST"] || "80x24.org:/srv/80x24/mwrap/"
+ FileUtils.rm_rf('doc')
+ sh "#{olddoc} prepare"
+ sh "#{rdoc} -f dark216" # dark216 requires olddoc 1.7+
+ File.unlink('doc/created.rid') rescue nil
+ File.unlink('doc/index.html') rescue nil
+ FileUtils.cp(top, 'doc')
+ sh "#{olddoc} merge"
+
+ Dir['doc/**/*'].each do |txt|
+ st = File.stat(txt)
+ if st.file?
+ gz = "#{txt}.gz"
+ tmp = "#{gz}.#$$"
+ sh("gzip --rsyncable -9 <#{txt} >#{tmp}")
+ File.utime(st.atime, st.mtime, tmp) # make nginx gzip_static happy
+ File.rename(tmp, gz)
+ end
+ end
+ sh("rsync --chmod=Fugo=r #{ENV['RSYNC_OPT']} -av doc/ #{dest}/")
+ end
data/bin/mwrap CHANGED
@@ -1,6 +1,6 @@
  #!/usr/bin/ruby
  # frozen_string_literal: true
- # Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ # Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  require 'mwrap'
  mwrap_so = $".grep(%r{/mwrap\.so\z})[0] or abort "mwrap.so not loaded"
data/ext/mwrap/extconf.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
- # Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ # Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  require 'mkmf'
 
@@ -25,4 +25,11 @@ else
  abort 'missing __builtin_add_overflow'
  end
 
+ begin
+ if n = GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE]
+ $defs << "-DHEAP_PAGE_SIZE=#{n}"
+ end
+ rescue NameError
+ end
+
  create_makefile 'mwrap'
data/ext/mwrap/mwrap.c CHANGED
@@ -1,9 +1,9 @@
  /*
- * Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ * Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  * License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  */
  #define _LGPL_SOURCE /* allows URCU to inline some stuff */
- #include <ruby/ruby.h>
+ #include <ruby.h> /* defines HAVE_RUBY_RACTOR_H on 3.0+ */
  #include <ruby/thread.h>
  #include <ruby/io.h>
  #include <execinfo.h>
@@ -22,19 +22,48 @@
  #include <urcu/rculist.h>
  #include "jhash.h"
 
+ #if __STDC_VERSION__ >= 201112
+ # define MWRAP_TSD _Thread_local
+ #elif defined(__GNUC__)
+ # define MWRAP_TSD __thread
+ #else
+ # error _Thread_local nor __thread supported
+ #endif
+
  static ID id_uminus;
- static unsigned int track_memalign;
  const char *rb_source_location_cstr(int *line); /* requires 2.6.0dev */
  extern int __attribute__((weak)) ruby_thread_has_gvl_p(void);
+
+ #ifdef HAVE_RUBY_RACTOR_H /* Ruby 3.0+ */
+ extern MWRAP_TSD void * __attribute__((weak)) ruby_current_ec;
+ #else /* Ruby 2.6-2.7 */
  extern void * __attribute__((weak)) ruby_current_execution_context_ptr;
+ # define ruby_current_ec ruby_current_execution_context_ptr
+ #endif
  extern void * __attribute__((weak)) ruby_current_vm_ptr; /* for rb_gc_count */
  extern size_t __attribute__((weak)) rb_gc_count(void);
  extern VALUE __attribute__((weak)) rb_cObject;
+ extern VALUE __attribute__((weak)) rb_eTypeError;
  extern VALUE __attribute__((weak)) rb_yield(VALUE);
 
+ static size_t total_bytes_inc, total_bytes_dec;
+
  /* true for glibc/dlmalloc/ptmalloc, not sure about jemalloc */
  #define ASSUMED_MALLOC_ALIGNMENT (sizeof(void *) * 2)
 
+ /* match values in Ruby gc.c */
+ #define HEAP_PAGE_ALIGN_LOG 14
+ enum {
+ HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG)
+ #ifndef HEAP_PAGE_SIZE /* Ruby 2.6-2.7 only */
+ ,
+ REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
+ HEAP_PAGE_SIZE = (HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC)
+ #endif
+ };
+
+ #define IS_HEAP_PAGE_BODY ((struct src_loc *)-1)
+
  int __attribute__((weak)) ruby_thread_has_gvl_p(void)
  {
  return 0;
@@ -63,7 +92,7 @@ static int resolving_malloc;
  } \
  } while (0)
 
- static __thread size_t locating;
+ static MWRAP_TSD size_t locating;
  static size_t generation;
  static size_t page_size;
  static struct cds_lfht *totals;
@@ -94,7 +123,6 @@ lfht_new(void)
  __attribute__((constructor)) static void resolve_malloc(void)
  {
  int err;
- const char *opt;
  ++locating;
 
  #ifdef __FreeBSD__
@@ -128,8 +156,8 @@ __attribute__((constructor)) static void resolve_malloc(void)
  _exit(1);
  }
  #endif /* !FreeBSD */
- totals = lfht_new();
- if (!totals)
+ CMM_STORE_SHARED(totals, lfht_new());
+ if (!CMM_LOAD_SHARED(totals))
  fprintf(stderr, "failed to allocate totals table\n");
 
  err = pthread_atfork(call_rcu_before_fork,
@@ -138,19 +166,21 @@ __attribute__((constructor)) static void resolve_malloc(void)
  if (err)
  fprintf(stderr, "pthread_atfork failed: %s\n", strerror(err));
  page_size = sysconf(_SC_PAGESIZE);
- opt = getenv("MWRAP");
- if (opt && (opt = strstr(opt, "memalign:"))) {
- if (!sscanf(opt, "memalign:%u", &track_memalign))
- fprintf(stderr, "not an unsigned int: %s\n", opt);
- }
  --locating;
  }
 
+ #ifdef NDEBUG
+ #define QUIET_CC_WARNING(var) (void)var;
+ #else
+ #define QUIET_CC_WARNING(var)
+ #endif
+
  static void
  mutex_lock(pthread_mutex_t *m)
  {
  int err = pthread_mutex_lock(m);
  assert(err == 0);
+ QUIET_CC_WARNING(err)
  }
 
  static void
@@ -158,6 +188,7 @@ mutex_unlock(pthread_mutex_t *m)
  {
  int err = pthread_mutex_unlock(m);
  assert(err == 0);
+ QUIET_CC_WARNING(err)
  }
 
  #ifndef HAVE_MEMPCPY
@@ -208,9 +239,35 @@ static char *int2str(int num, char *dst, size_t * size)
  static int has_ec_p(void)
  {
  return (ruby_thread_has_gvl_p() && ruby_current_vm_ptr &&
- ruby_current_execution_context_ptr);
+ ruby_current_ec);
  }
 
+ struct acc {
+ uint64_t nr;
+ int64_t min;
+ int64_t max;
+ double m2;
+ double mean;
+ };
+
+ #define ACC_INIT(name) { .nr=0, .min=INT64_MAX, .max=-1, .m2=0, .mean=0 }
+
+ /* for tracking 16K-aligned heap page bodies (protected by GVL) */
+ struct {
+ pthread_mutex_t lock;
+ struct cds_list_head bodies;
+ struct cds_list_head freed;
+
+ struct acc alive;
+ struct acc reborn;
+ } hpb_stats = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+ .bodies = CDS_LIST_HEAD_INIT(hpb_stats.bodies),
+ .freed = CDS_LIST_HEAD_INIT(hpb_stats.freed),
+ .alive = ACC_INIT(hpb_stats.alive),
+ .reborn = ACC_INIT(hpb_stats.reborn)
+ };
+
  /* allocated via real_malloc/real_free */
  struct src_loc {
  pthread_mutex_t *mtx;
@@ -235,6 +292,9 @@ struct alloc_hdr {
  struct src_loc *loc;
  } live;
  struct rcu_head dead;
+ struct {
+ size_t at; /* rb_gc_count() */
+ } hpb_freed;
  } as;
  void *real; /* what to call real_free on */
  size_t size;
@@ -274,7 +334,65 @@ static int loc_eq(struct cds_lfht_node *node, const void *key)
  memcmp(k->k, existing->k, loc_size(k)) == 0);
  }
 
- static struct src_loc *totals_add_rcu(struct src_loc *k)
+ /* note: not atomic */
+ static void
+ acc_add(struct acc *acc, size_t val)
+ {
+ double delta = val - acc->mean;
+ uint64_t nr = ++acc->nr;
+
+ /* just don't divide-by-zero if we ever hit this (unlikely :P) */
+ if (nr)
+ acc->mean += delta / nr;
+
+ acc->m2 += delta * (val - acc->mean);
+ if ((int64_t)val < acc->min)
+ acc->min = (int64_t)val;
+ if ((int64_t)val > acc->max)
+ acc->max = (int64_t)val;
+ }
+
+ #if SIZEOF_LONG == 8
+ # define INT64toNUM(x) LONG2NUM((long)x)
+ #elif defined(HAVE_LONG_LONG) && SIZEOF_LONG_LONG == 8
+ # define INT64toNUM(x) LL2NUM((LONG_LONG)x)
+ #endif
+
+ static VALUE
+ acc_max(const struct acc *acc)
+ {
+ return INT64toNUM(acc->max);
+ }
+
+ static VALUE
+ acc_min(const struct acc *acc)
+ {
+ return acc->min == INT64_MAX ? INT2FIX(-1) : INT64toNUM(acc->min);
+ }
+
+ static VALUE
+ acc_mean(const struct acc *acc)
+ {
+ return DBL2NUM(acc->nr ? acc->mean : HUGE_VAL);
+ }
+
+ static double
+ acc_stddev_dbl(const struct acc *acc)
+ {
+ if (acc->nr > 1) {
+ double variance = acc->m2 / (acc->nr - 1);
+ return sqrt(variance);
+ }
+ return 0.0;
+ }
+
+ static VALUE
+ acc_stddev(const struct acc *acc)
+ {
+ return DBL2NUM(acc_stddev_dbl(acc));
+ }
+
+ static struct src_loc *totals_add_rcu(const struct src_loc *k)
  {
  struct cds_lfht_iter iter;
  struct cds_lfht_node *cur;
@@ -282,7 +400,7 @@ static struct src_loc *totals_add_rcu(struct src_loc *k)
  struct cds_lfht *t;
 
  again:
- t = rcu_dereference(totals);
+ t = CMM_LOAD_SHARED(totals);
  if (!t) goto out_unlock;
  cds_lfht_lookup(t, k->hval, loc_eq, k, &iter);
  cur = cds_lfht_iter_get_node(&iter);
@@ -324,9 +442,11 @@ static struct src_loc *update_stats_rcu_lock(size_t size, uintptr_t caller)
  static const size_t xlen = sizeof(caller);
  char *dst;
 
- if (caa_unlikely(!totals)) return 0;
+ if (caa_unlikely(!CMM_LOAD_SHARED(totals))) return 0;
  if (locating++) goto out; /* do not recurse into another *alloc */
 
+ uatomic_add(&total_bytes_inc, size);
+
  rcu_read_lock();
  if (has_ec_p()) {
  int line;
@@ -387,9 +507,10 @@ void free(void *p)
  struct src_loc *l = h->as.live.loc;
 
  if (!real_free) return; /* oh well, leak a little */
- if (l) {
+ if (l && l != IS_HEAP_PAGE_BODY) {
  size_t age = generation - h->as.live.gen;
 
+ uatomic_add(&total_bytes_dec, h->size);
  uatomic_set(&h->size, 0);
  uatomic_add(&l->frees, 1);
  uatomic_add(&l->age_total, age);
@@ -401,8 +522,20 @@ void free(void *p)
  mutex_unlock(l->mtx);
 
  call_rcu(&h->as.dead, free_hdr_rcu);
- }
- else {
+ } else if (l == IS_HEAP_PAGE_BODY) {
+ size_t gen = generation;
+ size_t age = gen - h->as.live.gen;
+
+ h->as.hpb_freed.at = gen;
+
+ mutex_lock(&hpb_stats.lock);
+ acc_add(&hpb_stats.alive, age);
+
+ /* hpb_stats.bodies => hpb_stats.freed */
+ cds_list_move(&h->anode, &hpb_stats.freed);
+
+ mutex_unlock(&hpb_stats.lock);
+ } else {
  real_free(h->real);
  }
  }
@@ -429,7 +562,7 @@ static size_t size_align(size_t size, size_t alignment)
  return ((size + (alignment - 1)) & ~(alignment - 1));
  }
 
- static bool ptr_is_aligned(void *ptr, size_t alignment)
+ static bool ptr_is_aligned(const void *ptr, size_t alignment)
  {
  return ((uintptr_t)ptr & (alignment - 1)) == 0;
  }
@@ -468,18 +601,66 @@ internal_memalign(void **pp, size_t alignment, size_t size, uintptr_t caller)
  __builtin_add_overflow(asize, sizeof(struct alloc_hdr), &asize))
  return ENOMEM;
 
- /* assert(asize == (alignment + size + sizeof(struct alloc_hdr))); */
- l = track_memalign ? update_stats_rcu_lock(size, caller) : 0;
- real = real_malloc(asize);
- if (real) {
- void *p = hdr2ptr(real);
- if (!ptr_is_aligned(p, alignment))
- p = ptr_align(p, alignment);
- h = ptr2hdr(p);
- alloc_insert_rcu(l, h, size, real);
+
+ if (alignment == HEAP_PAGE_ALIGN && size == HEAP_PAGE_SIZE) {
+ if (has_ec_p()) generation = rb_gc_count();
+ l = IS_HEAP_PAGE_BODY;
+ } else {
+ l = update_stats_rcu_lock(size, caller);
+ }
+
+ if (l == IS_HEAP_PAGE_BODY) {
+ void *p;
+ size_t gen = generation;
+
+ mutex_lock(&hpb_stats.lock);
+
+ /* reuse existing entry */
+ if (!cds_list_empty(&hpb_stats.freed)) {
+ size_t deathspan;
+
+ h = cds_list_first_entry(&hpb_stats.freed,
+ struct alloc_hdr, anode);
+ /* hpb_stats.freed => hpb_stats.bodies */
+ cds_list_move(&h->anode, &hpb_stats.bodies);
+ assert(h->size == size);
+ assert(h->real);
+ real = h->real;
+ p = hdr2ptr(h);
+ assert(ptr_is_aligned(p, alignment));
+
+ deathspan = gen - h->as.hpb_freed.at;
+ acc_add(&hpb_stats.reborn, deathspan);
+ }
+ else {
+ real = real_malloc(asize);
+ if (!real) return ENOMEM;
+
+ p = hdr2ptr(real);
+ if (!ptr_is_aligned(p, alignment))
+ p = ptr_align(p, alignment);
+ h = ptr2hdr(p);
+ h->size = size;
+ h->real = real;
+ cds_list_add(&h->anode, &hpb_stats.bodies);
+ }
+ mutex_unlock(&hpb_stats.lock);
+ h->as.live.loc = l;
+ h->as.live.gen = gen;
  *pp = p;
  }
- update_stats_rcu_unlock(l);
+ else {
+ real = real_malloc(asize);
+ if (real) {
+ void *p = hdr2ptr(real);
+ if (!ptr_is_aligned(p, alignment))
+ p = ptr_align(p, alignment);
+ h = ptr2hdr(p);
+ alloc_insert_rcu(l, h, size, real);
+ *pp = p;
+ }
+ update_stats_rcu_unlock(l);
+ }
 
  return real ? 0 : ENOMEM;
  }
@@ -487,16 +668,14 @@ internal_memalign(void **pp, size_t alignment, size_t size, uintptr_t caller)
  static void *
  memalign_result(int err, void *p)
  {
- if (caa_unlikely(err)) {
+ if (caa_unlikely(err))
  errno = err;
- return 0;
- }
  return p;
  }
 
  void *memalign(size_t alignment, size_t size)
  {
- void *p;
+ void *p = NULL;
  int err = internal_memalign(&p, alignment, size, RETURN_ADDRESS(0));
  return memalign_result(err, p);
  }
@@ -511,7 +690,7 @@ void cfree(void *) __attribute__((alias("free")));
 
  void *valloc(size_t size)
  {
- void *p;
+ void *p = NULL;
  int err = internal_memalign(&p, page_size, size, RETURN_ADDRESS(0));
  return memalign_result(err, p);
  }
@@ -529,7 +708,7 @@ void *valloc(size_t size)
  void *pvalloc(size_t size)
  {
  size_t alignment = page_size;
- void *p;
+ void *p = NULL;
  int err;
 
  if (add_overflow_p(size, alignment)) {
@@ -652,7 +831,7 @@ static void *dump_to_file(void *x)
 
  ++locating;
  rcu_read_lock();
- t = rcu_dereference(totals);
+ t = CMM_LOAD_SHARED(totals);
  if (!t)
  goto out_unlock;
  cds_lfht_for_each_entry(t, &iter, l, hnode) {
@@ -710,14 +889,18 @@ static VALUE mwrap_dump(int argc, VALUE * argv, VALUE mod)
  return Qnil;
  }
 
+ /* The whole operation is not remotely atomic... */
  static void *totals_reset(void *ign)
  {
  struct cds_lfht *t;
  struct cds_lfht_iter iter;
  struct src_loc *l;
 
+ uatomic_set(&total_bytes_inc, 0);
+ uatomic_set(&total_bytes_dec, 0);
+
  rcu_read_lock();
- t = rcu_dereference(totals);
+ t = CMM_LOAD_SHARED(totals);
  cds_lfht_for_each_entry(t, &iter, l, hnode) {
  uatomic_set(&l->total, 0);
  uatomic_set(&l->allocations, 0);
@@ -785,7 +968,7 @@ static VALUE dump_each_rcu(VALUE x)
  struct cds_lfht_iter iter;
  struct src_loc *l;
 
- t = rcu_dereference(totals);
+ t = CMM_LOAD_SHARED(totals);
  cds_lfht_for_each_entry(t, &iter, l, hnode) {
  VALUE v[6];
  if (l->total <= a->min) continue;
@@ -889,9 +1072,9 @@ static VALUE mwrap_aref(VALUE mod, VALUE loc)
 
  if (!k) return val;
 
+ t = CMM_LOAD_SHARED(totals);
+ if (!t) return val;
  rcu_read_lock();
- t = rcu_dereference(totals);
- if (!t) goto out_unlock;
 
  cds_lfht_lookup(t, k->hval, loc_eq, k, &iter);
  cur = cds_lfht_iter_get_node(&iter);
@@ -899,7 +1082,6 @@ static VALUE mwrap_aref(VALUE mod, VALUE loc)
  l = caa_container_of(cur, struct src_loc, hnode);
  val = TypedData_Wrap_Struct(cSrcLoc, &src_loc_type, l);
  }
- out_unlock:
  rcu_read_unlock();
  return val;
  }
@@ -1033,6 +1215,89 @@ static VALUE mwrap_quiet(VALUE mod)
  return rb_ensure(rb_yield, SIZET2NUM(cur), reset_locating, 0);
  }
 
+ /*
+ * total bytes allocated as tracked by mwrap
+ */
+ static VALUE total_inc(VALUE mod)
+ {
+ return SIZET2NUM(total_bytes_inc);
+ }
+
+ /*
+ * total bytes freed as tracked by mwrap
+ */
+ static VALUE total_dec(VALUE mod)
+ {
+ return SIZET2NUM(total_bytes_dec);
+ }
+
+ static VALUE hpb_each_yield(VALUE ignore)
+ {
+ struct alloc_hdr *h, *next;
+
+ cds_list_for_each_entry_safe(h, next, &hpb_stats.bodies, anode) {
+ VALUE v[2]; /* [ generation, address ] */
+ void *addr = hdr2ptr(h);
+ assert(ptr_is_aligned(addr, HEAP_PAGE_ALIGN));
+ v[0] = LONG2NUM((long)addr);
+ v[1] = SIZET2NUM(h->as.live.gen);
+ rb_yield_values2(2, v);
+ }
+ return Qnil;
+ }
+
+ /*
+ * call-seq:
+ *
+ * Mwrap::HeapPageBody.each { |gen, addr| } -> Integer
+ *
+ * Yields the generation (GC.count) the heap page body was created
+ * and address of the heap page body as an Integer. Returns the
+ * number of allocated pages as an Integer. This return value should
+ * match the result of GC.stat(:heap_allocated_pages)
+ */
+ static VALUE hpb_each(VALUE mod)
+ {
+ ++locating;
+ return rb_ensure(hpb_each_yield, Qfalse, reset_locating, 0);
+ }
+
+ /*
+ * call-seq:
+ *
+ * Mwrap::HeapPageBody.stat -> Hash
+ * Mwrap::HeapPageBody.stat(hash) -> hash
+ *
+ * The maximum lifespan of a heap page body in the Ruby VM.
+ * This may be Infinity if no heap page bodies were ever freed.
+ */
+ static VALUE hpb_stat(int argc, VALUE *argv, VALUE hpb)
+ {
+ VALUE h;
+
+ rb_scan_args(argc, argv, "01", &h);
+ if (NIL_P(h))
+ h = rb_hash_new();
+ else if (!RB_TYPE_P(h, T_HASH))
+ rb_raise(rb_eTypeError, "not a hash %+"PRIsVALUE, h);
+
+ ++locating;
+ #define S(x) ID2SYM(rb_intern(#x))
+ rb_hash_aset(h, S(lifespan_max), acc_max(&hpb_stats.alive));
+ rb_hash_aset(h, S(lifespan_min), acc_min(&hpb_stats.alive));
+ rb_hash_aset(h, S(lifespan_mean), acc_mean(&hpb_stats.alive));
+ rb_hash_aset(h, S(lifespan_stddev), acc_stddev(&hpb_stats.alive));
+ rb_hash_aset(h, S(deathspan_max), acc_max(&hpb_stats.reborn));
+ rb_hash_aset(h, S(deathspan_min), acc_min(&hpb_stats.reborn));
+ rb_hash_aset(h, S(deathspan_mean), acc_mean(&hpb_stats.reborn));
+ rb_hash_aset(h, S(deathspan_stddev), acc_stddev(&hpb_stats.reborn));
+ rb_hash_aset(h, S(resurrects), SIZET2NUM(hpb_stats.reborn.nr));
+ #undef S
+ --locating;
+
+ return h;
+ }
+
  /*
  * Document-module: Mwrap
  *
@@ -1051,20 +1316,19 @@ static VALUE mwrap_quiet(VALUE mod)
  * * dump_fd: a writable FD to dump to
  * * dump_path: a path to dump to, the file is opened in O_APPEND mode
  * * dump_min: the minimum allocation size (total) to dump
- * * memalign: use `1' to enable tracking the memalign family
+ * * dump_heap: mask of heap_page_body statistics to dump
  *
  * If both `dump_fd' and `dump_path' are specified, dump_path takes
  * precedence.
  *
- * Tracking the memalign family of functions is misleading for Ruby
- * applications, as heap page allocations can happen anywhere a
- * Ruby object is allocated, even in the coldest code paths.
- * Furthermore, it is rarely-used outside of the Ruby object allocator.
- * Thus tracking memalign functions is disabled by default.
+ * dump_heap bitmask
+ * * 0x01 - summary stats (same info as HeapPageBody.stat)
+ * * 0x02 - all live heaps (similar to HeapPageBody.each)
+ * * 0x04 - skip non-heap_page_body-related output
  */
  void Init_mwrap(void)
  {
- VALUE mod;
+ VALUE mod, hpb;
 
  ++locating;
  mod = rb_define_module("Mwrap");
@@ -1084,6 +1348,10 @@ void Init_mwrap(void)
  rb_define_singleton_method(mod, "each", mwrap_each, -1);
  rb_define_singleton_method(mod, "[]", mwrap_aref, 1);
  rb_define_singleton_method(mod, "quiet", mwrap_quiet, 0);
+ rb_define_singleton_method(mod, "total_bytes_allocated", total_inc, 0);
+ rb_define_singleton_method(mod, "total_bytes_freed", total_dec, 0);
+
+
  rb_define_method(cSrcLoc, "each", src_loc_each, 0);
  rb_define_method(cSrcLoc, "frees", src_loc_frees, 0);
  rb_define_method(cSrcLoc, "allocations", src_loc_allocations, 0);
@@ -1091,9 +1359,68 @@ void Init_mwrap(void)
  rb_define_method(cSrcLoc, "mean_lifespan", src_loc_mean_lifespan, 0);
  rb_define_method(cSrcLoc, "max_lifespan", src_loc_max_lifespan, 0);
  rb_define_method(cSrcLoc, "name", src_loc_name, 0);
+
+ /*
+ * Information about "struct heap_page_body" allocations from
+ * Ruby gc.c. This can be useful for tracking fragmentation
+ * from posix_memalign(3) use in mainline Ruby:
+ *
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=14581
+ *
+ * These statistics are never reset by Mwrap.reset or
+ * any other method. They only make sense in the context
+ * of an entire program lifetime.
+ */
+ hpb = rb_define_class_under(mod, "HeapPageBody", rb_cObject);
+ rb_define_singleton_method(hpb, "stat", hpb_stat, -1);
+ rb_define_singleton_method(hpb, "each", hpb_each, 0);
+
  --locating;
  }
 
+ enum {
+ DUMP_HPB_STATS = 0x1,
+ DUMP_HPB_EACH = 0x2,
+ DUMP_HPB_EXCL = 0x4,
+ };
+
+ static void dump_hpb(FILE *fp, unsigned flags)
+ {
+ if (flags & DUMP_HPB_STATS) {
+ fprintf(fp,
+ "lifespan_max: %"PRId64"\n"
+ "lifespan_min:%s%"PRId64"\n"
+ "lifespan_mean: %0.3f\n"
+ "lifespan_stddev: %0.3f\n"
+ "deathspan_max: %"PRId64"\n"
+ "deathspan_min:%s%"PRId64"\n"
+ "deathspan_mean: %0.3f\n"
+ "deathspan_stddev: %0.3f\n"
+ "gc_count: %zu\n",
+ hpb_stats.alive.max,
+ hpb_stats.alive.min == INT64_MAX ? " -" : " ",
+ hpb_stats.alive.min,
+ hpb_stats.alive.mean,
+ acc_stddev_dbl(&hpb_stats.alive),
+ hpb_stats.reborn.max,
+ hpb_stats.reborn.min == INT64_MAX ? " -" : " ",
+ hpb_stats.reborn.min,
+ hpb_stats.reborn.mean,
+ acc_stddev_dbl(&hpb_stats.reborn),
+ /* n.b.: unsafe to call rb_gc_count() in destructor */
+ generation);
+ }
+ if (flags & DUMP_HPB_EACH) {
+ struct alloc_hdr *h;
+
+ cds_list_for_each_entry(h, &hpb_stats.bodies, anode) {
+ void *addr = hdr2ptr(h);
+
+ fprintf(fp, "%p\t%zu\n", addr, h->as.live.gen);
+ }
+ }
+ }
+
  /* rb_cloexec_open isn't usable by non-Ruby processes */
  #ifndef O_CLOEXEC
  # define O_CLOEXEC 0
@@ -1104,10 +1431,12 @@ static void mwrap_dump_destructor(void)
  {
  const char *opt = getenv("MWRAP");
  const char *modes[] = { "a", "a+", "w", "w+", "r+" };
- struct dump_arg a;
+ struct dump_arg a = { .min = 0 };
  size_t i;
  int dump_fd;
+ unsigned dump_heap = 0;
  char *dump_path;
+ char *s;
 
  if (!opt)
  return;
@@ -1134,8 +1463,11 @@ static void mwrap_dump_destructor(void)
  else if (!sscanf(opt, "dump_fd:%d", &dump_fd))
  goto out;
 
- if (!sscanf(opt, "dump_min:%zu", &a.min))
- a.min = 0;
+ if ((s = strstr(opt, "dump_min:")))
+ sscanf(s, "dump_min:%zu", &a.min);
+
+ if ((s = strstr(opt, "dump_heap:")))
+ sscanf(s, "dump_heap:%u", &dump_heap);
 
  switch (dump_fd) {
  case 0: goto out;
@@ -1156,7 +1488,9 @@ static void mwrap_dump_destructor(void)
  }
  /* we'll leak some memory here, but this is a destructor */
  }
- dump_to_file(&a);
+ if ((dump_heap & DUMP_HPB_EXCL) == 0)
+ dump_to_file(&a);
+ dump_hpb(a.fp, dump_heap);
  out:
  --locating;
  }
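
Editor's note: the mwrap.c changes above add the total_bytes_inc/total_bytes_dec counters, heap_page_body tracking (hpb_stats), and the dump_heap MWRAP option. A minimal sketch of how the new Ruby-visible pieces fit together -- not part of the diff, and it assumes the process was started through the mwrap(1) wrapper so the LD_PRELOAD is active, e.g. MWRAP=dump_path:/tmp/mwrap.log,dump_heap:1 mwrap ruby app.rb:

    require 'mwrap'

    # counters backed by total_bytes_inc / total_bytes_dec above
    p Mwrap.total_bytes_allocated
    p Mwrap.total_bytes_freed

    # heap_page_body statistics kept in hpb_stats above
    p Mwrap::HeapPageBody.stat   # { lifespan_max: ..., resurrects: ..., ... }
    Mwrap::HeapPageBody.each do |addr, gen|
      printf("0x%x allocated during GC generation %d\n", addr, gen)
    end
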
data/lib/mwrap_rack.rb ADDED
@@ -0,0 +1,170 @@
+ # Copyright (C) all contributors <mwrap-public@80x24.org>
+ # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
+ # frozen_string_literal: true
+ require 'mwrap'
+ require 'rack'
+ require 'cgi'
+
+ # MwrapRack is a standalone Rack application which can be
+ # mounted to run within your application process.
+ #
+ # Using the Rack::Builder API in config.ru, you can map it to
+ # the "/MWRAP/" endpoint. As with the rest of the Mwrap API,
+ # your Rack server needs to be spawned with the mwrap(1)
+ # wrapper to enable the LD_PRELOAD.
+ #
+ # require 'mwrap_rack'
+ # map('/MWRAP') { run(MwrapRack.new) }
+ # map('/') { run(your_normal_app) }
+ #
+ # This module is only available in mwrap 2.0.0+
+ class MwrapRack
+ module HtmlResponse # :nodoc:
+ def response
+ [ 200, {
+ 'Expires' => 'Fri, 01 Jan 1980 00:00:00 GMT',
+ 'Pragma' => 'no-cache',
+ 'Cache-Control' => 'no-cache, max-age=0, must-revalidate',
+ 'Content-Type' => 'text/html; charset=UTF-8',
+ }, self ]
+ end
+ end
+
+ class Each < Struct.new(:script_name, :min, :sort) # :nodoc:
+ include HtmlResponse
+ HEADER = '<tr><th>' + %w(total allocations frees mean_life max_life
+ location).join('</th><th>') + '</th></tr>'
+ FIELDS = %w(total allocations frees mean_life max_life location)
+ def each
+ Mwrap.quiet do
+ t = -"Mwrap.each(#{min})"
+ sn = script_name
+ all = []
+ f = FIELDS.dup
+ sc = FIELDS.index(sort || 'total') || 0
+ f[sc] = -"<b>#{f[sc]}</b>"
+ f.map! do |hdr|
+ if hdr.start_with?('<b>')
+ hdr
+ else
+ -%Q(<a\nhref="#{sn}/each/#{min}?sort=#{hdr}">#{hdr}</a>)
+ end
+ end
+ Mwrap.each(min) do |loc, total, allocations, frees, age_sum, max_life|
+ mean_life = frees == 0 ? Float::INFINITY : age_sum/frees.to_f
+ all << [total,allocations,frees,mean_life,max_life,loc]
+ end
+ all.sort_by! { |cols| -cols[sc] }
+
+ yield(-"<html><head><title>#{t}</title></head>" \
+ "<body><h1>#{t}</h1>\n" \
+ "<h2>Current generation: #{GC.count}</h2>\n<table>\n" \
+ "<tr><th>#{f.join('</th><th>')}</th></tr>\n")
+ all.each do |cols|
+ loc = cols.pop
+ cols[3] = sprintf('%0.3f', cols[3]) # mean_life
+ href = -(+"#{sn}/at/#{CGI.escape(loc)}").encode!(xml: :attr)
+ yield(%Q(<tr><td>#{cols.join('</td><td>')}<td><a\nhref=#{
+ href}>#{-loc.encode(xml: :text)}</a></td></tr>\n))
+ cols.clear
+ end.clear
+ yield "</table></body></html>\n"
+ end
+ end
+ end
+
+ class EachAt < Struct.new(:loc) # :nodoc:
+ include HtmlResponse
+ HEADER = '<tr><th>size</th><th>generation</th></tr>'
+
+ def each
+ t = loc.name.encode(xml: :text)
+ yield(-"<html><head><title>#{t}</title></head>" \
+ "<body><h1>live allocations at #{t}</h1>" \
+ "<h2>Current generation: #{GC.count}</h2>\n<table>#{HEADER}")
+ loc.each do |size, generation|
+ yield("<tr><td>#{size}</td><td>#{generation}</td></tr>\n")
+ end
+ yield "</table></body></html>\n"
+ end
+ end
+
+ class HeapPages # :nodoc:
+ include HtmlResponse
+ HEADER = '<tr><th>address</th><th>generation</th></tr>'
+
+ def hpb_rows
+ Mwrap::HeapPageBody.stat(stat = Thread.current[:mwrap_hpb_stat] ||= {})
+ %i(lifespan_max lifespan_min lifespan_mean lifespan_stddev
+ deathspan_max deathspan_min deathspan_mean deathspan_stddev
+ resurrects
+ ).map! do |k|
+ "<tr><td>#{k}</td><td>#{stat[k]}</td></tr>\n"
+ end.join
+ end
+
+ def gc_stat_rows
+ GC.stat(stat = Thread.current[:mwrap_gc_stat] ||= {})
+ %i(count heap_allocated_pages heap_eden_pages heap_tomb_pages
+ total_allocated_pages total_freed_pages).map do |k|
+ "<tr><td>GC.stat(:#{k})</td><td>#{stat[k]}</td></tr>\n"
+ end.join
+ end
+
+ GC_STAT_URL = 'https://docs.ruby-lang.org/en/trunk/GC.html#method-c-stat'
+ GC_STAT_HELP = <<~EOM
+ <p>Non-Infinity lifespans can indicate fragmentation.
+ <p>See <a
+ href="#{GC_STAT_URL}">#{GC_STAT_URL}</a> for info on GC.stat values.
+ EOM
+
+ def each
+ Mwrap.quiet do
+ yield("<html><head><title>heap pages</title></head>" \
+ "<body><h1>heap pages</h1>" \
+ "<table><tr><th>stat</th><th>value</th></tr>\n" \
+ "#{hpb_rows}" \
+ "#{gc_stat_rows}" \
+ "</table>\n" \
+ "#{GC_STAT_HELP}" \
+ "<table>#{HEADER}")
+ Mwrap::HeapPageBody.each do |addr, generation|
+ addr = -sprintf('0x%x', addr)
+ yield(-"<tr><td>#{addr}</td><td>#{generation}</td></tr>\n")
+ end
+ yield "</table></body></html>\n"
+ end
+ end
+ end
+
+ def r404 # :nodoc:
+ [404,{'Content-Type'=>'text/plain'},["Not found\n"]]
+ end
+
+ # The standard Rack application endpoint for MwrapRack
+ def call(env)
+ case env['PATH_INFO']
+ when %r{\A/each/(\d+)\z}
+ min = $1.to_i
+ m = env['QUERY_STRING'].match(/\bsort=(\w+)/)
+ Each.new(env['SCRIPT_NAME'], min, m ? m[1] : nil).response
+ when %r{\A/at/(.*)\z}
+ loc = -CGI.unescape($1)
+ loc = Mwrap[loc] or return r404
+ EachAt.new(loc).response
+ when '/heap_pages'
+ HeapPages.new.response
+ when '/'
+ n = 2000
+ u = 'https://80x24.org/mwrap/README.html'
+ b = -('<html><head><title>Mwrap demo</title></head>' \
+ "<body><p><a href=\"each/#{n}\">allocations &gt;#{n} bytes</a>" \
+ "<p><a href=\"#{u}\">#{u}</a>" \
+ "<p><a href=\"heap_pages\">heap pages</a>" \
+ "</body></html>\n")
+ [ 200, {'Content-Type'=>'text/html','Content-Length'=>-b.size.to_s},[b]]
+ else
+ r404
+ end
+ end
+ end
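
Editor's note: the comments in mwrap_rack.rb above translate to a config.ru along these lines. This is a sketch, not shipped with the gem; your_normal_app stands in for whatever the site already runs, and the server must be started through the mwrap(1) wrapper, e.g. "mwrap rackup config.ru":

    # config.ru
    require 'mwrap_rack'
    map('/MWRAP') { run(MwrapRack.new) }
    map('/') { run(your_normal_app) }
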
data/mwrap.gemspec CHANGED
@@ -8,11 +8,13 @@ if git_manifest[0] && manifest != git_manifest
  system('git add MANIFEST')
  end
 
+ desc = `git describe --abbrev=4 HEAD`.strip.tr('-', '.').delete_prefix('v')
+
  Gem::Specification.new do |s|
  s.name = 'mwrap'
- s.version = '2.0.0'
+ s.version = desc.empty? ? '2.2.0' : desc
  s.homepage = 'https://80x24.org/mwrap/'
- s.authors = ["Ruby hackers"]
+ s.authors = ["mwrap hackers"]
  s.summary = 'LD_PRELOAD malloc wrapper for Ruby'
  s.executables = %w(mwrap)
  s.files = manifest
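
Editor's note: a worked example of the version expression added above (illustrative only; the exact "git describe" output depends on the checkout):

    'v2.2.0'.strip.tr('-', '.').delete_prefix('v')          # => "2.2.0"
    'v2.1.0-7-g1e4b'.strip.tr('-', '.').delete_prefix('v')  # => "2.1.0.7.g1e4b"

So a tagged release builds as 2.2.0, while a snapshot built after a tag gets the commit count and abbreviated hash appended.
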
data/test/test_mwrap.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
- # Copyright (C) 2018 mwrap hackers <mwrap-public@80x24.org>
+ # Copyright (C) mwrap hackers <mwrap-public@80x24.org>
  # License: GPL-2.0+ <https://www.gnu.org/licenses/gpl-2.0.txt>
  require 'test/unit'
  require 'mwrap'
@@ -29,7 +29,8 @@ class TestMwrap < Test::Unit::TestCase
  tmp.rewind
  lines = tmp.readlines
  line_1 = lines.grep(/\s-e:1\b/)[0].strip
- assert_equal '10001', line_1.split(/\s+/)[0]
+ bytes = line_1.split(/\s+/)[0].to_i
+ assert_operator bytes, :>=, 10001
  end
  end
 
@@ -42,7 +43,7 @@ class TestMwrap < Test::Unit::TestCase
  res = system(env, *cmd, { 5 => tmp })
  assert res, $?.inspect
  tmp.rewind
- assert_match(/\b10001\s+1\s+-e:1$/, tmp.read)
+ assert_match(/\b1\d{4}\s+[1-9]\d*\s+-e:1$/, tmp.read)
 
  env['MWRAP'] = 'dump_fd:1,dump_min:10000'
  tmp.rewind
@@ -50,14 +51,21 @@ class TestMwrap < Test::Unit::TestCase
  res = system(env, *cmd, { 1 => tmp })
  assert res, $?.inspect
  tmp.rewind
- assert_match(/\b10001\s+1\s+-e:1$/, tmp.read)
+ assert_match(/\b1\d{4}\s+[1-9]\d*\s+-e:1$/, tmp.read)
 
  tmp.rewind
  tmp.truncate(0)
  env['MWRAP'] = "dump_path:#{tmp.path},dump_min:10000"
  res = system(env, *cmd)
  assert res, $?.inspect
- assert_match(/\b10001\s+1\s+-e:1$/, tmp.read)
+ assert_match(/\b1\d{4}\s+[1-9]\d*\s+-e:1$/, tmp.read)
+
+ tmp.rewind
+ tmp.truncate(0)
+ env['MWRAP'] = "dump_path:#{tmp.path},dump_heap:5"
+ res = system(env, *cmd)
+ assert res, $?.inspect
+ assert_match %r{lifespan_stddev}, tmp.read
  end
  end
 
@@ -91,7 +99,7 @@ class TestMwrap < Test::Unit::TestCase
  tmp.rewind
  buf = tmp.read
  assert_not_match(/\s+-e:1$/, buf)
- assert_match(/\b20001\s+1\s+-e:3$/, buf)
+ assert_match(/\b2\d{4}\s+[0-9]\d*\s+-e:3$/, buf)
  end
  end
 
@@ -169,8 +177,8 @@ class TestMwrap < Test::Unit::TestCase
  -e GC.disable
  -e keep=("0"*10000)
  -e loc=Mwrap["-e:3"]
- -e loc.each{|size,gen|p([size,gen,count])}
- )
+ -e
+ ) + [ 'loc.each{|size,gen|p([size,gen,count]) if size > 10000}' ]
  buf = IO.popen(@@env, cmd, &:read)
  assert_predicate $?, :success?
  assert_match(/\A\[\s*\d+,\s*\d+,\s*\d+\]\s*\z/s, buf)
@@ -223,7 +231,8 @@ class TestMwrap < Test::Unit::TestCase
  loc.name == k or abort 'SourceLocation#name broken'
  loc.total >= 10000 or abort 'SourceLocation#total broken'
  loc.frees == 0 or abort 'SourceLocation#frees broken'
- loc.allocations == 1 or abort 'SourceLocation#allocations broken'
+ loc.allocations >= 1 or
+ abort "SourceLocation#allocations broken: #{loc.allocations}"
  seen = false
  loc.each do |*x| seen = x end
  seen[1] == loc.total or 'SourceLocation#each broken'
@@ -233,7 +242,9 @@ class TestMwrap < Test::Unit::TestCase
  freed = false
  until freed
  freed = true
- loc.each do freed = false end
+ loc.each do |size, gen|
+ freed = false if size >= 10000
+ end
  end
  loc.frees == 1 or abort 'SourceLocation#frees broken (after free)'
  Float === loc.mean_lifespan or abort 'mean_lifespan broken'
@@ -257,8 +268,9 @@ class TestMwrap < Test::Unit::TestCase
  assert_separately(+"#{<<~"begin;"}\n#{<<~'end;'}")
  begin;
  require 'mwrap'
- before = __LINE__
+ before = nil
  res = Mwrap.quiet do |depth|
+ before = __LINE__
  depth == 1 or abort 'depth is not 1'
  ('a' * 10000).clear
  Mwrap.quiet { |d| d == 2 or abort 'depth is not 2' }
@@ -272,4 +284,46 @@ class TestMwrap < Test::Unit::TestCase
  res == :foo or abort 'Mwrap.quiet did not return block result'
  end;
  end
+
+ def test_total_bytes
+ assert_separately(+"#{<<~"begin;"}\n#{<<~'end;'}")
+ begin;
+ require 'mwrap'
+ Mwrap.total_bytes_allocated > 0 or abort 'nothing allocated'
+ Mwrap.total_bytes_freed > 0 or abort 'nothing freed'
+ Mwrap.total_bytes_allocated > Mwrap.total_bytes_freed or
+ abort 'freed more than allocated'
+ end;
+ end
+
+ def test_heap_page_body
+ assert_separately(+"#{<<~"begin;"}\n#{<<~'end;'}")
+ begin;
+ require 'mwrap'
+ require 'rubygems' # use up some memory
+ ap = GC.stat(:heap_allocated_pages)
+ h = {}
+ nr = 0
+ Mwrap::HeapPageBody.each do |addr, gen|
+ nr += 1
+ gen <= GC.count && gen >= 0 or abort "bad generation: #{gen}"
+ (0 == (addr & 16383)) or abort "addr not aligned: #{'%x' % addr}"
+ end
+ if RUBY_VERSION.to_f < 3.1 # 3.1+ uses mmap on platforms we care about
+ nr == ap or abort "HeapPageBody.each missed page #{nr} != #{ap}"
+ end
+ 10.times { (1..20000).to_a.map(&:to_s) }
+ 3.times { GC.start }
+ Mwrap::HeapPageBody.stat(h)
+ Integer === h[:lifespan_max] or abort 'lifespan_max not recorded'
+ Integer === h[:lifespan_min] or abort 'lifespan_min not recorded'
+ Float === h[:lifespan_mean] or abort 'lifespan_mean not recorded'
+ 3.times { GC.start }
+ 10.times { (1..20000).to_a.map(&:to_s) }
+ Mwrap::HeapPageBody.stat(h)
+ h[:deathspan_min] <= h[:deathspan_max] or
+ abort 'wrong min/max deathtime'
+ Float === h[:deathspan_mean] or abort 'deathspan_mean not recorded'
+ end;
+ end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: mwrap
  version: !ruby/object:Gem::Version
- version: 2.0.0
+ version: 2.2.0
  platform: ruby
  authors:
- - Ruby hackers
+ - mwrap hackers
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-07-20 00:00:00.000000000 Z
+ date: 2022-08-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: test-unit
@@ -50,6 +50,7 @@ extra_rdoc_files: []
  files:
  - ".document"
  - ".gitignore"
+ - ".olddoc.yml"
  - COPYING
  - MANIFEST
  - README
@@ -58,6 +59,7 @@ files:
  - ext/mwrap/extconf.rb
  - ext/mwrap/jhash.h
  - ext/mwrap/mwrap.c
+ - lib/mwrap_rack.rb
  - mwrap.gemspec
  - test/test_mwrap.rb
  homepage: https://80x24.org/mwrap/
@@ -79,8 +81,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubyforge_project:
- rubygems_version: 2.7.7
+ rubygems_version: 3.0.2
  signing_key:
  specification_version: 4
  summary: LD_PRELOAD malloc wrapper for Ruby