mesh-rb 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +1 -1
  3. data/ext/mesh/extconf.rb +22 -4
  4. data/ext/mesh/mesh.tar.gz +0 -0
  5. data/lib/mesh/version.rb +1 -1
  6. data/mesh.gemspec +3 -2
  7. metadata +4 -120
  8. data/ext/mesh/mesh/.bazelrc +0 -20
  9. data/ext/mesh/mesh/.bazelversion +0 -1
  10. data/ext/mesh/mesh/.clang-format +0 -15
  11. data/ext/mesh/mesh/.dockerignore +0 -5
  12. data/ext/mesh/mesh/.editorconfig +0 -16
  13. data/ext/mesh/mesh/.gitattributes +0 -4
  14. data/ext/mesh/mesh/.github/workflows/main.yml +0 -144
  15. data/ext/mesh/mesh/.gitignore +0 -51
  16. data/ext/mesh/mesh/AUTHORS +0 -5
  17. data/ext/mesh/mesh/CMakeLists.txt +0 -270
  18. data/ext/mesh/mesh/CODE_OF_CONDUCT.md +0 -77
  19. data/ext/mesh/mesh/Dockerfile +0 -30
  20. data/ext/mesh/mesh/LICENSE +0 -201
  21. data/ext/mesh/mesh/Makefile +0 -81
  22. data/ext/mesh/mesh/README.md +0 -97
  23. data/ext/mesh/mesh/WORKSPACE +0 -50
  24. data/ext/mesh/mesh/bazel +0 -350
  25. data/ext/mesh/mesh/mesh-pldi19-powers.pdf +0 -0
  26. data/ext/mesh/mesh/src/BUILD +0 -222
  27. data/ext/mesh/mesh/src/CMakeLists.txt +0 -85
  28. data/ext/mesh/mesh/src/bitmap.h +0 -590
  29. data/ext/mesh/mesh/src/cheap_heap.h +0 -170
  30. data/ext/mesh/mesh/src/common.h +0 -377
  31. data/ext/mesh/mesh/src/copts.bzl +0 -31
  32. data/ext/mesh/mesh/src/d_assert.cc +0 -75
  33. data/ext/mesh/mesh/src/fixed_array.h +0 -124
  34. data/ext/mesh/mesh/src/global_heap.cc +0 -547
  35. data/ext/mesh/mesh/src/global_heap.h +0 -569
  36. data/ext/mesh/mesh/src/gnu_wrapper.cc +0 -75
  37. data/ext/mesh/mesh/src/internal.h +0 -356
  38. data/ext/mesh/mesh/src/libmesh.cc +0 -239
  39. data/ext/mesh/mesh/src/mac_wrapper.cc +0 -528
  40. data/ext/mesh/mesh/src/measure_rss.cc +0 -44
  41. data/ext/mesh/mesh/src/measure_rss.h +0 -20
  42. data/ext/mesh/mesh/src/meshable_arena.cc +0 -776
  43. data/ext/mesh/mesh/src/meshable_arena.h +0 -309
  44. data/ext/mesh/mesh/src/meshing.h +0 -60
  45. data/ext/mesh/mesh/src/mini_heap.h +0 -532
  46. data/ext/mesh/mesh/src/mmap_heap.h +0 -104
  47. data/ext/mesh/mesh/src/one_way_mmap_heap.h +0 -77
  48. data/ext/mesh/mesh/src/partitioned_heap.h +0 -111
  49. data/ext/mesh/mesh/src/plasma/mesh.h +0 -33
  50. data/ext/mesh/mesh/src/real.cc +0 -52
  51. data/ext/mesh/mesh/src/real.h +0 -36
  52. data/ext/mesh/mesh/src/rng/mwc.h +0 -296
  53. data/ext/mesh/mesh/src/rng/mwc64.h +0 -58
  54. data/ext/mesh/mesh/src/rpl_printf.c +0 -1991
  55. data/ext/mesh/mesh/src/runtime.cc +0 -393
  56. data/ext/mesh/mesh/src/runtime.h +0 -114
  57. data/ext/mesh/mesh/src/shuffle_vector.h +0 -287
  58. data/ext/mesh/mesh/src/size_classes.def +0 -251
  59. data/ext/mesh/mesh/src/static/if.h +0 -36
  60. data/ext/mesh/mesh/src/static/log.h +0 -43
  61. data/ext/mesh/mesh/src/testing/benchmark/local_refill.cc +0 -103
  62. data/ext/mesh/mesh/src/testing/big-alloc.c +0 -28
  63. data/ext/mesh/mesh/src/testing/fragmenter.cc +0 -128
  64. data/ext/mesh/mesh/src/testing/global-large-stress.cc +0 -25
  65. data/ext/mesh/mesh/src/testing/local-alloc.c +0 -16
  66. data/ext/mesh/mesh/src/testing/meshing_benchmark.cc +0 -189
  67. data/ext/mesh/mesh/src/testing/thread.cc +0 -35
  68. data/ext/mesh/mesh/src/testing/unit/alignment.cc +0 -56
  69. data/ext/mesh/mesh/src/testing/unit/bitmap_test.cc +0 -274
  70. data/ext/mesh/mesh/src/testing/unit/concurrent_mesh_test.cc +0 -185
  71. data/ext/mesh/mesh/src/testing/unit/mesh_test.cc +0 -143
  72. data/ext/mesh/mesh/src/testing/unit/rng_test.cc +0 -22
  73. data/ext/mesh/mesh/src/testing/unit/size_class_test.cc +0 -66
  74. data/ext/mesh/mesh/src/testing/unit/triple_mesh_test.cc +0 -285
  75. data/ext/mesh/mesh/src/testing/userfaultfd-kernel-copy.cc +0 -164
  76. data/ext/mesh/mesh/src/thread_local_heap.cc +0 -163
  77. data/ext/mesh/mesh/src/thread_local_heap.h +0 -268
  78. data/ext/mesh/mesh/src/wrapper.cc +0 -433
  79. data/ext/mesh/mesh/support/export_mesh.cmake +0 -28
  80. data/ext/mesh/mesh/support/gen-size-classes +0 -57
  81. data/ext/mesh/mesh/support/install_all_configs +0 -33
  82. data/ext/mesh/mesh/support/remove_export_mesh.cmake +0 -48
  83. data/ext/mesh/mesh/support/update-bazelisk +0 -8
  84. data/ext/mesh/mesh/theory/32m80.png +0 -0
  85. data/ext/mesh/mesh/theory/64m80ind.png +0 -0
  86. data/ext/mesh/mesh/theory/bound_comparison.py +0 -67
  87. data/ext/mesh/mesh/theory/bounds/impdeg+1 +0 -135
  88. data/ext/mesh/mesh/theory/choose.py +0 -43
  89. data/ext/mesh/mesh/theory/common.py +0 -42
  90. data/ext/mesh/mesh/theory/compute_exp_Y.py +0 -134
  91. data/ext/mesh/mesh/theory/createRandomString.py +0 -69
  92. data/ext/mesh/mesh/theory/deg_bound_check.py +0 -100
  93. data/ext/mesh/mesh/theory/degcheck.py +0 -47
  94. data/ext/mesh/mesh/theory/dumps/32,1,80,dumb.txt +0 -81
  95. data/ext/mesh/mesh/theory/dumps/32,2,80,dumb.txt +0 -81
  96. data/ext/mesh/mesh/theory/dumps/32,3,80,dumb.txt +0 -81
  97. data/ext/mesh/mesh/theory/dumps/32,4,80,dumb.txt +0 -81
  98. data/ext/mesh/mesh/theory/dumps/32,5,80,dumb.txt +0 -81
  99. data/ext/mesh/mesh/theory/dumps/32,6,80,dumb.txt +0 -81
  100. data/ext/mesh/mesh/theory/dumps/32,7,80,dumb.txt +0 -81
  101. data/ext/mesh/mesh/theory/dumps/32,8,80,dumb.txt +0 -81
  102. data/ext/mesh/mesh/theory/dumps/32,9,80,dumb.txt +0 -81
  103. data/ext/mesh/mesh/theory/experiment.py +0 -303
  104. data/ext/mesh/mesh/theory/experiment_raw_results/.gitignore +0 -0
  105. data/ext/mesh/mesh/theory/greedy_experiment.py +0 -66
  106. data/ext/mesh/mesh/theory/greedy_experiment_copy.py +0 -46
  107. data/ext/mesh/mesh/theory/greedy_experiment_q.py +0 -75
  108. data/ext/mesh/mesh/theory/makeGraph.py +0 -64
  109. data/ext/mesh/mesh/theory/manyreps.png +0 -0
  110. data/ext/mesh/mesh/theory/manystrings.png +0 -0
  111. data/ext/mesh/mesh/theory/match_vs_color_experiment.py +0 -94
  112. data/ext/mesh/mesh/theory/maxmatch_vs_E[Y].py +0 -162
  113. data/ext/mesh/mesh/theory/maxmatch_vs_greedymatch.py +0 -96
  114. data/ext/mesh/mesh/theory/maxvdeg+1imp++32,80.png +0 -0
  115. data/ext/mesh/mesh/theory/mesh_util.py +0 -322
  116. data/ext/mesh/mesh/theory/meshers.py +0 -452
  117. data/ext/mesh/mesh/theory/meshingBenchmark.py +0 -96
  118. data/ext/mesh/mesh/theory/occupancyComparison.py +0 -133
  119. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch.py +0 -97
  120. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch_q.py +0 -103
  121. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch_time.py +0 -117
  122. data/ext/mesh/mesh/theory/read_mesh_dump.py +0 -82
  123. data/ext/mesh/mesh/theory/test.py +0 -70
  124. data/ext/mesh/mesh/tools/bazel +0 -1
@@ -1,31 +0,0 @@
1
- # Copyright 2020 The Mesh Authors. All rights reserved.
2
- # Use of this source code is governed by the Apache License,
3
- # Version 2.0, that can be found in the LICENSE file.
4
-
5
- COMMON_FLAGS = [
6
- # warn on lots of stuff; this is cargo-culted from the old Make build system
7
- "-Wall",
8
- "-Wextra",
9
- "-Werror=pointer-arith",
10
- "-pedantic",
11
- "-Wno-unused-parameter",
12
- "-Wno-unused-variable",
13
- "-Woverloaded-virtual",
14
- "-Werror=return-type",
15
- "-Wtype-limits",
16
- "-Wempty-body",
17
- "-Winvalid-offsetof",
18
- "-Wvariadic-macros",
19
- "-Wcast-align",
20
- ]
21
-
22
- MESH_LLVM_FLAGS = []
23
-
24
- MESH_GCC_FLAGS = [
25
- "-Wa,--noexecstack",
26
- ]
27
-
28
- MESH_DEFAULT_COPTS = select({
29
- "//src:llvm": COMMON_FLAGS + MESH_LLVM_FLAGS,
30
- "//conditions:default": COMMON_FLAGS + MESH_GCC_FLAGS,
31
- })
@@ -1,75 +0,0 @@
1
- // -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
2
- // Copyright 2019 The Mesh Authors. All rights reserved.
3
- // Use of this source code is governed by the Apache License,
4
- // Version 2.0, that can be found in the LICENSE file.
5
-
6
- #include <cstdarg>
7
- #include <cstdlib> // for abort
8
-
9
- #include <unistd.h>
10
-
11
- #include "common.h"
12
-
13
- #include "rpl_printf.c"
14
-
15
- // mutex protecting debug and __mesh_assert_fail to avoid concurrent
16
- // use of static buffers by multiple threads
17
- inline static mutex *getAssertMutex(void) {
18
- static char assertBuf[sizeof(std::mutex)];
19
- static mutex *assertMutex = new (assertBuf) mutex();
20
-
21
- return assertMutex;
22
- }
23
-
24
- // threadsafe printf-like debug statements safe for use in an
25
- // allocator (it will never call into malloc or free to allocate
26
- // memory)
27
- void mesh::debug(const char *fmt, ...) {
28
- constexpr size_t buf_len = 4096;
29
- static char buf[buf_len];
30
- std::lock_guard<std::mutex> lock(*getAssertMutex());
31
-
32
- va_list args;
33
-
34
- va_start(args, fmt);
35
- int len = rpl_vsnprintf(buf, buf_len - 1, fmt, args);
36
- va_end(args);
37
-
38
- buf[buf_len - 1] = 0;
39
- if (len > 0) {
40
- auto _ __attribute__((unused)) = write(STDERR_FILENO, buf, len);
41
- // ensure a trailing newline is written out
42
- if (buf[len - 1] != '\n')
43
- _ = write(STDERR_FILENO, "\n", 1);
44
- }
45
- }
46
-
47
- // out-of-line function called to report an error and exit the program
48
- // when an assertion failed.
49
- void mesh::internal::__mesh_assert_fail(const char *assertion, const char *file, const char *func, int line,
50
- const char *fmt, ...) {
51
- constexpr size_t buf_len = 4096;
52
- constexpr size_t usr_len = 512;
53
- static char buf[buf_len];
54
- static char usr[usr_len];
55
- std::lock_guard<std::mutex> lock(*getAssertMutex());
56
-
57
- va_list args;
58
-
59
- va_start(args, fmt);
60
- (void)rpl_vsnprintf(usr, usr_len - 1, fmt, args);
61
- va_end(args);
62
-
63
- usr[usr_len - 1] = 0;
64
-
65
- int len = rpl_snprintf(buf, buf_len - 1, "%s:%d:%s: ASSERTION '%s' FAILED: %s\n", file, line, func, assertion, usr);
66
- if (len > 0) {
67
- auto _ __attribute__((unused)) = write(STDERR_FILENO, buf, len);
68
- }
69
-
70
- // void *array[32];
71
- // size_t size = backtrace(array, 10);
72
- // backtrace_symbols_fd(array, size, STDERR_FILENO);
73
-
74
- abort();
75
- }
@@ -1,124 +0,0 @@
1
- // -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
2
- // Copyright 2019 The Mesh Authors. All rights reserved.
3
- // Use of this source code is governed by the Apache License,
4
- // Version 2.0, that can be found in the LICENSE file.
5
-
6
- #pragma once
7
- #ifndef MESH_FIXED_ARRAY_H
8
- #define MESH_FIXED_ARRAY_H
9
-
10
- #include <iterator>
11
-
12
- namespace mesh {
13
-
14
- // enables iteration through the miniheaps in an array
15
- template <typename FixedArray>
16
- class FixedArrayIter : public std::iterator<std::forward_iterator_tag, typename FixedArray::value_type> {
17
- public:
18
- FixedArrayIter(const FixedArray &a, const uint32_t i) : _i(i), _array(a) {
19
- }
20
- FixedArrayIter &operator++() {
21
- if (unlikely(_i + 1 >= _array.size())) {
22
- _i = _array.size();
23
- return *this;
24
- }
25
-
26
- _i++;
27
- return *this;
28
- }
29
- bool operator==(const FixedArrayIter &rhs) const {
30
- return &_array == &rhs._array && _i == rhs._i;
31
- }
32
- bool operator!=(const FixedArrayIter &rhs) const {
33
- return &_array != &rhs._array || _i != rhs._i;
34
- }
35
- typename FixedArray::value_type operator*() {
36
- return _array[_i];
37
- }
38
-
39
- private:
40
- uint32_t _i;
41
- const FixedArray &_array;
42
- };
43
-
44
- template <typename T, uint32_t Capacity>
45
- class FixedArray {
46
- private:
47
- DISALLOW_COPY_AND_ASSIGN(FixedArray);
48
-
49
- public:
50
- typedef T *value_type;
51
- typedef FixedArrayIter<FixedArray<T, Capacity>> iterator;
52
- typedef FixedArrayIter<FixedArray<T, Capacity>> const const_iterator;
53
-
54
- FixedArray() {
55
- d_assert(_size == 0);
56
- #ifndef NDEBUG
57
- for (uint32_t i = 0; i < Capacity; i++) {
58
- d_assert(_objects[i] == nullptr);
59
- }
60
- #endif
61
- }
62
-
63
- ~FixedArray() {
64
- clear();
65
- }
66
-
67
- uint32_t size() const {
68
- return _size;
69
- }
70
-
71
- bool full() const {
72
- return _size == Capacity;
73
- }
74
-
75
- void clear() {
76
- memset(_objects, 0, Capacity * sizeof(T *));
77
- _size = 0;
78
- }
79
-
80
- void append(T *obj) {
81
- hard_assert(_size < Capacity);
82
- _objects[_size] = obj;
83
- _size++;
84
- }
85
-
86
- T *operator[](uint32_t i) const {
87
- // d_assert(i < _size);
88
- return _objects[i];
89
- }
90
-
91
- T **array_begin() {
92
- return &_objects[0];
93
- }
94
- T **array_end() {
95
- return &_objects[size()];
96
- }
97
-
98
- iterator begin() {
99
- return iterator(*this, 0);
100
- }
101
- iterator end() {
102
- return iterator(*this, size());
103
- }
104
- const_iterator begin() const {
105
- return iterator(*this, 0);
106
- }
107
- const_iterator end() const {
108
- return iterator(*this, size());
109
- }
110
- const_iterator cbegin() const {
111
- return iterator(*this, 0);
112
- }
113
- const_iterator cend() const {
114
- return iterator(*this, size());
115
- }
116
-
117
- private:
118
- T *_objects[Capacity]{};
119
- uint32_t _size{0};
120
- };
121
-
122
- } // namespace mesh
123
-
124
- #endif // MESH_FIXED_ARRAY_H
@@ -1,547 +0,0 @@
1
- // -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
2
- // Copyright 2019 The Mesh Authors. All rights reserved.
3
- // Use of this source code is governed by the Apache License,
4
- // Version 2.0, that can be found in the LICENSE file.
5
-
6
- #include <utility>
7
-
8
- #include "global_heap.h"
9
-
10
- #include "meshing.h"
11
- #include "runtime.h"
12
-
13
- namespace mesh {
14
-
15
- MiniHeap *GetMiniHeap(const MiniHeapID id) {
16
- hard_assert(id.hasValue() && id != list::Head);
17
-
18
- return runtime().heap().miniheapForID(id);
19
- }
20
-
21
- MiniHeapID GetMiniHeapID(const MiniHeap *mh) {
22
- if (unlikely(mh == nullptr)) {
23
- d_assert(false);
24
- return MiniHeapID{0};
25
- }
26
-
27
- return runtime().heap().miniheapIDFor(mh);
28
- }
29
-
30
- void *GlobalHeap::malloc(size_t sz) {
31
- #ifndef NDEBUG
32
- if (unlikely(sz <= kMaxSize)) {
33
- abort();
34
- }
35
- #endif
36
-
37
- const auto pageCount = PageCount(sz);
38
-
39
- return pageAlignedAlloc(1, pageCount);
40
- }
41
-
42
- void GlobalHeap::free(void *ptr) {
43
- size_t startEpoch{0};
44
- auto mh = miniheapForWithEpoch(ptr, startEpoch);
45
- if (unlikely(!mh)) {
46
- #ifndef NDEBUG
47
- if (ptr != nullptr) {
48
- debug("FIXME: free of untracked ptr %p", ptr);
49
- }
50
- #endif
51
- return;
52
- }
53
- this->freeFor(mh, ptr, startEpoch);
54
- }
55
-
56
- void GlobalHeap::freeFor(MiniHeap *mh, void *ptr, size_t startEpoch) {
57
- if (unlikely(ptr == nullptr)) {
58
- return;
59
- }
60
-
61
- if (unlikely(!mh)) {
62
- return;
63
- }
64
-
65
- // large objects are tracked with a miniheap per object and don't
66
- // trigger meshing, because they are multiples of the page size.
67
- // This can also include, for example, single page allocations w/
68
- // 16KB alignment.
69
- if (mh->isLargeAlloc()) {
70
- lock_guard<mutex> lock(_miniheapLock);
71
- freeMiniheapLocked(mh, false);
72
- return;
73
- }
74
-
75
- d_assert(mh->maxCount() > 1);
76
-
77
- auto freelistId = mh->freelistId();
78
- auto isAttached = mh->isAttached();
79
- auto sizeClass = mh->sizeClass();
80
-
81
- // try to avoid storing to this cacheline; the branch is worth it to avoid
82
- // multi-threaded contention
83
- if (_lastMeshEffective.load(std::memory_order::memory_order_acquire) == 0) {
84
- _lastMeshEffective.store(1, std::memory_order::memory_order_release);
85
- }
86
- // read inUseCount before calling free to avoid stalling after the
87
- // LOCK CMPXCHG in mh->free
88
- auto remaining = mh->inUseCount() - 1;
89
-
90
- // here can't call mh->free(arenaBegin(), ptr), because in consume takeBitmap always clear the bitmap,
91
- // if clearIfNotFree after takeBitmap
92
- // it alwasy return false, but in this case, you need to free again.
93
- auto wasSet = mh->clearIfNotFree(arenaBegin(), ptr);
94
-
95
- bool shouldMesh = false;
96
-
97
- // the epoch will be odd if a mesh was in progress when we looked up
98
- // the miniheap; if that is true, or a meshing started between then
99
- // and now we can't be sure the above free was successful
100
- if (startEpoch % 2 == 1 || !_meshEpoch.isSame(startEpoch)) {
101
- // a mesh was started in between when we looked up our miniheap
102
- // and now. synchronize to avoid races
103
- lock_guard<mutex> lock(_miniheapLock);
104
-
105
- const auto origMh = mh;
106
- mh = miniheapForWithEpoch(ptr, startEpoch);
107
- if (unlikely(mh == nullptr)) {
108
- return;
109
- }
110
-
111
- if (unlikely(mh != origMh)) {
112
- hard_assert(!mh->isMeshed());
113
- if (mh->isRelated(origMh) && !wasSet) {
114
- // we have confirmation that we raced with meshing, so free the pointer
115
- // on the new miniheap
116
- d_assert(sizeClass == mh->sizeClass());
117
- mh->free(arenaBegin(), ptr);
118
- } else {
119
- // our MiniHeap is unrelated to whatever is here in memory now - get out of here.
120
- return;
121
- }
122
- }
123
-
124
- if (unlikely(mh->sizeClass() != sizeClass || mh->isLargeAlloc())) {
125
- // TODO: This papers over a bug where the miniheap was freed
126
- // + reused out from under us while we were waiting for the mh lock.
127
- // It doesn't eliminate the problem (which should be solved
128
- // by storing the 'created epoch' on the MiniHeap), but it should
129
- // further reduce its probability
130
- return;
131
- }
132
-
133
- remaining = mh->inUseCount();
134
- freelistId = mh->freelistId();
135
- isAttached = mh->isAttached();
136
-
137
- if (!isAttached && (remaining == 0 || freelistId == list::Full)) {
138
- // this may free the miniheap -- we can't safely access it after
139
- // this point.
140
- const bool shouldFlush = postFreeLocked(mh, sizeClass, remaining);
141
- mh = nullptr;
142
- if (unlikely(shouldFlush)) {
143
- flushBinLocked(sizeClass);
144
- }
145
- } else {
146
- shouldMesh = true;
147
- }
148
- } else {
149
- // the free went through ok; if we _were_ full, or now _are_ empty,
150
- // make sure to update the littleheaps
151
- if (!isAttached && (remaining == 0 || freelistId == list::Full)) {
152
- lock_guard<mutex> lock(_miniheapLock);
153
-
154
- // there are 2 ways we could have raced with meshing:
155
- //
156
- // 1. when writing to the MiniHeap's bitmap (which we check for
157
- // above with the !_meshEpoch.isSame(current)). this is what
158
- // the outer if statement here takes care of.
159
- //
160
- // 2. this thread losing the race with acquiring _miniheapLock
161
- // (what we care about here). for thi case, we know a) our
162
- // write to the MiniHeap's bitmap succeeded (or we would be
163
- // in the other side of the outer if statement), and b) our
164
- // MiniHeap could have been freed from under us while we were
165
- // waiting for this lock (if e.g. remaining == 0, a mesh
166
- // happened on another thread, and the other thread notices
167
- // this MiniHeap is empty (b.c. an empty MiniHeap meshes with
168
- // every other MiniHeap). We need to be careful here.
169
-
170
- const auto origMh = mh;
171
- // we have to reload the miniheap here because of the
172
- // just-described possible race
173
- mh = miniheapForWithEpoch(ptr, startEpoch);
174
-
175
- // if the MiniHeap associated with the ptr we freed has changed,
176
- // there are a few possibilities.
177
- if (unlikely(mh != origMh)) {
178
- // another thread took care of freeing this MiniHeap for us,
179
- // super! nothing else to do.
180
- if (mh == nullptr) {
181
- return;
182
- }
183
-
184
- // check to make sure the new MiniHeap is related (via a
185
- // meshing relationship) to the one we had before grabbing the
186
- // lock.
187
- if (!mh->isRelated(origMh)) {
188
- // the original miniheap was freed and a new (unrelated)
189
- // Miniheap allocated for the address space. nothing else
190
- // for us to do.
191
- return;
192
- } else {
193
- // TODO: we should really store 'created epoch' on mh and
194
- // check those are the same here, too.
195
- }
196
- }
197
-
198
- if (unlikely(mh->sizeClass() != sizeClass || mh->isLargeAlloc())) {
199
- // TODO: This papers over a bug where the miniheap was freed
200
- // + reused out from under us while we were waiting for the mh lock.
201
- // It doesn't eliminate the problem (which should be solved
202
- // by storing the 'created epoch' on the MiniHeap), but it should
203
- // further reduce its probability
204
- return;
205
- }
206
-
207
- // a lot could have happened between when we read this without
208
- // the lock held and now; just recalculate it.
209
- remaining = mh->inUseCount();
210
- const bool shouldFlush = postFreeLocked(mh, sizeClass, remaining);
211
- if (unlikely(shouldFlush)) {
212
- flushBinLocked(sizeClass);
213
- }
214
- } else {
215
- shouldMesh = !isAttached;
216
- }
217
- }
218
-
219
- if (shouldMesh) {
220
- maybeMesh();
221
- }
222
- }
223
-
224
- int GlobalHeap::mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
225
- unique_lock<mutex> lock(_miniheapLock);
226
-
227
- if (!oldp || !oldlenp || *oldlenp < sizeof(size_t))
228
- return -1;
229
-
230
- auto statp = reinterpret_cast<size_t *>(oldp);
231
-
232
- if (strcmp(name, "mesh.check_period") == 0) {
233
- *statp = _meshPeriod;
234
- if (!newp || newlen < sizeof(size_t))
235
- return -1;
236
- auto newVal = reinterpret_cast<size_t *>(newp);
237
- _meshPeriod = *newVal;
238
- // resetNextMeshCheck();
239
- } else if (strcmp(name, "mesh.scavenge") == 0) {
240
- lock.unlock();
241
- scavenge(true);
242
- lock.lock();
243
- } else if (strcmp(name, "mesh.compact") == 0) {
244
- meshAllSizeClassesLocked();
245
- lock.unlock();
246
- scavenge(true);
247
- lock.lock();
248
- } else if (strcmp(name, "arena") == 0) {
249
- // not sure what this should do
250
- } else if (strcmp(name, "stats.resident") == 0) {
251
- auto pss = internal::measurePssKiB();
252
- // mesh::debug("measurePssKiB: %zu KiB", pss);
253
-
254
- *statp = pss * 1024; // originally in KB
255
- } else if (strcmp(name, "stats.active") == 0) {
256
- // all miniheaps at least partially full
257
- size_t sz = 0;
258
- // for (size_t i = 0; i < kNumBins; i++) {
259
- // const auto count = _littleheaps[i].nonEmptyCount();
260
- // if (count == 0)
261
- // continue;
262
- // sz += count * _littleheaps[i].objectSize() * _littleheaps[i].objectCount();
263
- // }
264
- *statp = sz;
265
- } else if (strcmp(name, "stats.allocated") == 0) {
266
- // TODO: revisit this
267
- // same as active for us, for now -- memory not returned to the OS
268
- size_t sz = 0;
269
- for (size_t i = 0; i < kNumBins; i++) {
270
- // const auto &bin = _littleheaps[i];
271
- // const auto count = bin.nonEmptyCount();
272
- // if (count == 0)
273
- // continue;
274
- // sz += bin.objectSize() * bin.allocatedObjectCount();
275
- }
276
- *statp = sz;
277
- }
278
- return 0;
279
- }
280
-
281
- void GlobalHeap::meshLocked(MiniHeap *dst, MiniHeap *&src) {
282
- // mesh::debug("mesh dst:%p <- src:%p\n", dst, src);
283
- // dst->dumpDebug();
284
- // src->dumpDebug();
285
- const size_t dstSpanSize = dst->spanSize();
286
- const auto dstSpanStart = reinterpret_cast<void *>(dst->getSpanStart(arenaBegin()));
287
-
288
- src->forEachMeshed([&](const MiniHeap *mh) {
289
- // marks srcSpans read-only
290
- const auto srcSpan = reinterpret_cast<void *>(mh->getSpanStart(arenaBegin()));
291
- Super::beginMesh(dstSpanStart, srcSpan, dstSpanSize);
292
- return false;
293
- });
294
-
295
- // does the copying of objects and updating of span metadata
296
- dst->consume(arenaBegin(), src);
297
- d_assert(src->isMeshed());
298
-
299
- src->forEachMeshed([&](const MiniHeap *mh) {
300
- d_assert(mh->isMeshed());
301
- const auto srcSpan = reinterpret_cast<void *>(mh->getSpanStart(arenaBegin()));
302
- // frees physical memory + re-marks srcSpans as read/write
303
- Super::finalizeMesh(dstSpanStart, srcSpan, dstSpanSize);
304
- return false;
305
- });
306
- Super::freePhys(reinterpret_cast<void *>(src->getSpanStart(arenaBegin())), dstSpanSize);
307
-
308
- // make sure we adjust what bin the destination is in -- it might
309
- // now be full and not a candidate for meshing
310
- postFreeLocked(dst, dst->sizeClass(), dst->inUseCount());
311
- untrackMiniheapLocked(src);
312
- }
313
-
314
- size_t GlobalHeap::meshSizeClassLocked(size_t sizeClass, MergeSetArray &mergeSets, SplitArray &left,
315
- SplitArray &right) {
316
- size_t mergeSetCount = 0;
317
- // memset(reinterpret_cast<void *>(&mergeSets), 0, sizeof(mergeSets));
318
- // memset(&left, 0, sizeof(left));
319
- // memset(&right, 0, sizeof(right));
320
-
321
- auto meshFound =
322
- function<bool(std::pair<MiniHeap *, MiniHeap *> &&)>([&](std::pair<MiniHeap *, MiniHeap *> &&miniheaps) {
323
- if (miniheaps.first->isMeshingCandidate() && miniheaps.second->isMeshingCandidate()) {
324
- mergeSets[mergeSetCount] = std::move(miniheaps);
325
- mergeSetCount++;
326
- }
327
- return mergeSetCount < kMaxMergeSets;
328
- });
329
-
330
- method::shiftedSplitting(_fastPrng, &_partialFreelist[sizeClass].first, left, right, meshFound);
331
-
332
- if (mergeSetCount == 0) {
333
- // debug("nothing to mesh.");
334
- return 0;
335
- }
336
-
337
- size_t meshCount = 0;
338
-
339
- for (size_t i = 0; i < mergeSetCount; i++) {
340
- std::pair<MiniHeap *, MiniHeap *> &mergeSet = mergeSets[i];
341
- MiniHeap *dst = mergeSet.first;
342
- MiniHeap *src = mergeSet.second;
343
- d_assert(dst != nullptr);
344
- d_assert(src != nullptr);
345
-
346
- // merge _into_ the one with a larger mesh count, potentially
347
- // swapping the order of the pair
348
- const auto dstCount = dst->meshCount();
349
- const auto srcCount = src->meshCount();
350
- if (dstCount + srcCount > kMaxMeshes) {
351
- continue;
352
- }
353
- if (dstCount < srcCount) {
354
- std::swap(dst, src);
355
- }
356
-
357
- // final check: if one of these miniheaps is now empty
358
- // (e.g. because a parallel thread is freeing a bunch of objects
359
- // in a row) save ourselves some work by just tracking this as a
360
- // regular postFree
361
- auto oneEmpty = false;
362
- if (dst->inUseCount() == 0) {
363
- postFreeLocked(dst, sizeClass, 0);
364
- oneEmpty = true;
365
- }
366
- if (src->inUseCount() == 0) {
367
- postFreeLocked(src, sizeClass, 0);
368
- oneEmpty = true;
369
- }
370
-
371
- if (!oneEmpty && !aboveMeshThreshold()) {
372
- meshLocked(dst, src);
373
- meshCount++;
374
- }
375
- }
376
-
377
- // flush things once more (since we may have called postFree instead
378
- // of mesh above)
379
- flushBinLocked(sizeClass);
380
-
381
- return meshCount;
382
- }
383
-
384
- void GlobalHeap::meshAllSizeClassesLocked() {
385
- static MergeSetArray PAGE_ALIGNED MergeSets;
386
- static_assert(sizeof(MergeSets) == sizeof(void *) * 2 * 4096, "array too big");
387
- d_assert((reinterpret_cast<uintptr_t>(&MergeSets) & (kPageSize - 1)) == 0);
388
-
389
- static SplitArray PAGE_ALIGNED Left;
390
- static SplitArray PAGE_ALIGNED Right;
391
- static_assert(sizeof(Left) == sizeof(void *) * 16384, "array too big");
392
- static_assert(sizeof(Right) == sizeof(void *) * 16384, "array too big");
393
- d_assert((reinterpret_cast<uintptr_t>(&Left) & (kPageSize - 1)) == 0);
394
- d_assert((reinterpret_cast<uintptr_t>(&Right) & (kPageSize - 1)) == 0);
395
-
396
- // if we have freed but not reset meshed mappings, this will reset
397
- // them to the identity mapping, ensuring we don't blow past our VMA
398
- // limit (which is why we set the force flag to true)
399
- Super::scavenge(true);
400
-
401
- if (!_lastMeshEffective.load(std::memory_order::memory_order_acquire)) {
402
- return;
403
- }
404
-
405
- if (Super::aboveMeshThreshold()) {
406
- return;
407
- }
408
-
409
- lock_guard<EpochLock> epochLock(_meshEpoch);
410
-
411
- // const auto start = time::now();
412
-
413
- // first, clear out any free memory we might have
414
- for (size_t sizeClass = 0; sizeClass < kNumBins; sizeClass++) {
415
- flushBinLocked(sizeClass);
416
- }
417
-
418
- size_t totalMeshCount = 0;
419
-
420
- for (size_t sizeClass = 0; sizeClass < kNumBins; sizeClass++) {
421
- totalMeshCount += meshSizeClassLocked(sizeClass, MergeSets, Left, Right);
422
- }
423
-
424
- madvise(&Left, sizeof(Left), MADV_DONTNEED);
425
- madvise(&Right, sizeof(Right), MADV_DONTNEED);
426
- madvise(&MergeSets, sizeof(MergeSets), MADV_DONTNEED);
427
-
428
- _lastMeshEffective = totalMeshCount > 256;
429
- _stats.meshCount += totalMeshCount;
430
-
431
- Super::scavenge(true);
432
-
433
- _lastMesh = time::now();
434
-
435
- // const std::chrono::duration<double> duration = _lastMesh - start;
436
- // debug("mesh took %f, found %zu", duration.count(), totalMeshCount);
437
- }
438
-
439
- void GlobalHeap::dumpStats(int level, bool beDetailed) const {
440
- if (level < 1)
441
- return;
442
-
443
- lock_guard<mutex> lock(_miniheapLock);
444
-
445
- const auto meshedPageHWM = meshedPageHighWaterMark();
446
-
447
- debug("MESH COUNT: %zu\n", (size_t)_stats.meshCount);
448
- debug("Meshed MB (total): %.1f\n", (size_t)_stats.meshCount * 4096.0 / 1024.0 / 1024.0);
449
- debug("Meshed pages HWM: %zu\n", meshedPageHWM);
450
- debug("Meshed MB HWM: %.1f\n", meshedPageHWM * 4096.0 / 1024.0 / 1024.0);
451
- // debug("Peak RSS reduction: %.2f\n", rssSavings);
452
- debug("MH Alloc Count: %zu\n", (size_t)_stats.mhAllocCount);
453
- debug("MH Free Count: %zu\n", (size_t)_stats.mhFreeCount);
454
- debug("MH High Water Mark: %zu\n", (size_t)_stats.mhHighWaterMark);
455
- if (level > 1) {
456
- // for (size_t i = 0; i < kNumBins; i++) {
457
- // _littleheaps[i].dumpStats(beDetailed);
458
- // }
459
- }
460
- }
461
-
462
- namespace method {
463
-
464
- void ATTRIBUTE_NEVER_INLINE halfSplit(MWC &prng, MiniHeapListEntry *miniheaps, SplitArray &left, size_t &leftSize,
465
- SplitArray &right, size_t &rightSize) noexcept {
466
- d_assert(leftSize == 0);
467
- d_assert(rightSize == 0);
468
- MiniHeapID mhId = miniheaps->next();
469
- while (mhId != list::Head && leftSize < kMaxSplitListSize && rightSize < kMaxSplitListSize) {
470
- auto mh = GetMiniHeap(mhId);
471
- mhId = mh->getFreelist()->next();
472
-
473
- if (!mh->isMeshingCandidate() || (mh->fullness() >= kOccupancyCutoff)) {
474
- continue;
475
- }
476
-
477
- if (leftSize <= rightSize) {
478
- left[leftSize] = mh;
479
- leftSize++;
480
- } else {
481
- right[rightSize] = mh;
482
- rightSize++;
483
- }
484
- }
485
-
486
- internal::mwcShuffle(&left[0], &left[leftSize], prng);
487
- internal::mwcShuffle(&right[0], &right[rightSize], prng);
488
- }
489
-
490
- void ATTRIBUTE_NEVER_INLINE
491
- shiftedSplitting(MWC &prng, MiniHeapListEntry *miniheaps, SplitArray &left, SplitArray &right,
492
- const function<bool(std::pair<MiniHeap *, MiniHeap *> &&)> &meshFound) noexcept {
493
- constexpr size_t t = 64;
494
-
495
- if (miniheaps->empty()) {
496
- return;
497
- }
498
-
499
- size_t leftSize = 0;
500
- size_t rightSize = 0;
501
-
502
- halfSplit(prng, miniheaps, left, leftSize, right, rightSize);
503
-
504
- if (leftSize == 0 || rightSize == 0) {
505
- return;
506
- }
507
-
508
- constexpr size_t nBytes = 32;
509
- const size_t limit = rightSize < t ? rightSize : t;
510
- d_assert(nBytes == left[0]->bitmap().byteCount());
511
-
512
- size_t foundCount = 0;
513
- for (size_t j = 0; j < leftSize; j++) {
514
- const size_t idxLeft = j;
515
- size_t idxRight = j;
516
-
517
- for (size_t i = 0; i < limit; i++, idxRight++) {
518
- if (unlikely(idxRight >= rightSize)) {
519
- idxRight %= rightSize;
520
- }
521
- auto h1 = left[idxLeft];
522
- auto h2 = right[idxRight];
523
-
524
- if (h1 == nullptr || h2 == nullptr)
525
- continue;
526
-
527
- const auto bitmap1 = h1->bitmap().bits();
528
- const auto bitmap2 = h2->bitmap().bits();
529
-
530
- const bool areMeshable = mesh::bitmapsMeshable(bitmap1, bitmap2, nBytes);
531
-
532
- if (unlikely(areMeshable)) {
533
- std::pair<MiniHeap *, MiniHeap *> heaps{h1, h2};
534
- bool shouldContinue = meshFound(std::move(heaps));
535
- left[idxLeft] = nullptr;
536
- right[idxRight] = nullptr;
537
- foundCount++;
538
- if (unlikely(foundCount > kMaxMeshesPerIteration || !shouldContinue)) {
539
- return;
540
- }
541
- }
542
- }
543
- }
544
- }
545
-
546
- } // namespace method
547
- } // namespace mesh