mesh-rb 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +1 -1
  3. data/ext/mesh/extconf.rb +22 -4
  4. data/ext/mesh/mesh.tar.gz +0 -0
  5. data/lib/mesh/version.rb +1 -1
  6. data/mesh.gemspec +3 -2
  7. metadata +4 -120
  8. data/ext/mesh/mesh/.bazelrc +0 -20
  9. data/ext/mesh/mesh/.bazelversion +0 -1
  10. data/ext/mesh/mesh/.clang-format +0 -15
  11. data/ext/mesh/mesh/.dockerignore +0 -5
  12. data/ext/mesh/mesh/.editorconfig +0 -16
  13. data/ext/mesh/mesh/.gitattributes +0 -4
  14. data/ext/mesh/mesh/.github/workflows/main.yml +0 -144
  15. data/ext/mesh/mesh/.gitignore +0 -51
  16. data/ext/mesh/mesh/AUTHORS +0 -5
  17. data/ext/mesh/mesh/CMakeLists.txt +0 -270
  18. data/ext/mesh/mesh/CODE_OF_CONDUCT.md +0 -77
  19. data/ext/mesh/mesh/Dockerfile +0 -30
  20. data/ext/mesh/mesh/LICENSE +0 -201
  21. data/ext/mesh/mesh/Makefile +0 -81
  22. data/ext/mesh/mesh/README.md +0 -97
  23. data/ext/mesh/mesh/WORKSPACE +0 -50
  24. data/ext/mesh/mesh/bazel +0 -350
  25. data/ext/mesh/mesh/mesh-pldi19-powers.pdf +0 -0
  26. data/ext/mesh/mesh/src/BUILD +0 -222
  27. data/ext/mesh/mesh/src/CMakeLists.txt +0 -85
  28. data/ext/mesh/mesh/src/bitmap.h +0 -590
  29. data/ext/mesh/mesh/src/cheap_heap.h +0 -170
  30. data/ext/mesh/mesh/src/common.h +0 -377
  31. data/ext/mesh/mesh/src/copts.bzl +0 -31
  32. data/ext/mesh/mesh/src/d_assert.cc +0 -75
  33. data/ext/mesh/mesh/src/fixed_array.h +0 -124
  34. data/ext/mesh/mesh/src/global_heap.cc +0 -547
  35. data/ext/mesh/mesh/src/global_heap.h +0 -569
  36. data/ext/mesh/mesh/src/gnu_wrapper.cc +0 -75
  37. data/ext/mesh/mesh/src/internal.h +0 -356
  38. data/ext/mesh/mesh/src/libmesh.cc +0 -239
  39. data/ext/mesh/mesh/src/mac_wrapper.cc +0 -528
  40. data/ext/mesh/mesh/src/measure_rss.cc +0 -44
  41. data/ext/mesh/mesh/src/measure_rss.h +0 -20
  42. data/ext/mesh/mesh/src/meshable_arena.cc +0 -776
  43. data/ext/mesh/mesh/src/meshable_arena.h +0 -309
  44. data/ext/mesh/mesh/src/meshing.h +0 -60
  45. data/ext/mesh/mesh/src/mini_heap.h +0 -532
  46. data/ext/mesh/mesh/src/mmap_heap.h +0 -104
  47. data/ext/mesh/mesh/src/one_way_mmap_heap.h +0 -77
  48. data/ext/mesh/mesh/src/partitioned_heap.h +0 -111
  49. data/ext/mesh/mesh/src/plasma/mesh.h +0 -33
  50. data/ext/mesh/mesh/src/real.cc +0 -52
  51. data/ext/mesh/mesh/src/real.h +0 -36
  52. data/ext/mesh/mesh/src/rng/mwc.h +0 -296
  53. data/ext/mesh/mesh/src/rng/mwc64.h +0 -58
  54. data/ext/mesh/mesh/src/rpl_printf.c +0 -1991
  55. data/ext/mesh/mesh/src/runtime.cc +0 -393
  56. data/ext/mesh/mesh/src/runtime.h +0 -114
  57. data/ext/mesh/mesh/src/shuffle_vector.h +0 -287
  58. data/ext/mesh/mesh/src/size_classes.def +0 -251
  59. data/ext/mesh/mesh/src/static/if.h +0 -36
  60. data/ext/mesh/mesh/src/static/log.h +0 -43
  61. data/ext/mesh/mesh/src/testing/benchmark/local_refill.cc +0 -103
  62. data/ext/mesh/mesh/src/testing/big-alloc.c +0 -28
  63. data/ext/mesh/mesh/src/testing/fragmenter.cc +0 -128
  64. data/ext/mesh/mesh/src/testing/global-large-stress.cc +0 -25
  65. data/ext/mesh/mesh/src/testing/local-alloc.c +0 -16
  66. data/ext/mesh/mesh/src/testing/meshing_benchmark.cc +0 -189
  67. data/ext/mesh/mesh/src/testing/thread.cc +0 -35
  68. data/ext/mesh/mesh/src/testing/unit/alignment.cc +0 -56
  69. data/ext/mesh/mesh/src/testing/unit/bitmap_test.cc +0 -274
  70. data/ext/mesh/mesh/src/testing/unit/concurrent_mesh_test.cc +0 -185
  71. data/ext/mesh/mesh/src/testing/unit/mesh_test.cc +0 -143
  72. data/ext/mesh/mesh/src/testing/unit/rng_test.cc +0 -22
  73. data/ext/mesh/mesh/src/testing/unit/size_class_test.cc +0 -66
  74. data/ext/mesh/mesh/src/testing/unit/triple_mesh_test.cc +0 -285
  75. data/ext/mesh/mesh/src/testing/userfaultfd-kernel-copy.cc +0 -164
  76. data/ext/mesh/mesh/src/thread_local_heap.cc +0 -163
  77. data/ext/mesh/mesh/src/thread_local_heap.h +0 -268
  78. data/ext/mesh/mesh/src/wrapper.cc +0 -433
  79. data/ext/mesh/mesh/support/export_mesh.cmake +0 -28
  80. data/ext/mesh/mesh/support/gen-size-classes +0 -57
  81. data/ext/mesh/mesh/support/install_all_configs +0 -33
  82. data/ext/mesh/mesh/support/remove_export_mesh.cmake +0 -48
  83. data/ext/mesh/mesh/support/update-bazelisk +0 -8
  84. data/ext/mesh/mesh/theory/32m80.png +0 -0
  85. data/ext/mesh/mesh/theory/64m80ind.png +0 -0
  86. data/ext/mesh/mesh/theory/bound_comparison.py +0 -67
  87. data/ext/mesh/mesh/theory/bounds/impdeg+1 +0 -135
  88. data/ext/mesh/mesh/theory/choose.py +0 -43
  89. data/ext/mesh/mesh/theory/common.py +0 -42
  90. data/ext/mesh/mesh/theory/compute_exp_Y.py +0 -134
  91. data/ext/mesh/mesh/theory/createRandomString.py +0 -69
  92. data/ext/mesh/mesh/theory/deg_bound_check.py +0 -100
  93. data/ext/mesh/mesh/theory/degcheck.py +0 -47
  94. data/ext/mesh/mesh/theory/dumps/32,1,80,dumb.txt +0 -81
  95. data/ext/mesh/mesh/theory/dumps/32,2,80,dumb.txt +0 -81
  96. data/ext/mesh/mesh/theory/dumps/32,3,80,dumb.txt +0 -81
  97. data/ext/mesh/mesh/theory/dumps/32,4,80,dumb.txt +0 -81
  98. data/ext/mesh/mesh/theory/dumps/32,5,80,dumb.txt +0 -81
  99. data/ext/mesh/mesh/theory/dumps/32,6,80,dumb.txt +0 -81
  100. data/ext/mesh/mesh/theory/dumps/32,7,80,dumb.txt +0 -81
  101. data/ext/mesh/mesh/theory/dumps/32,8,80,dumb.txt +0 -81
  102. data/ext/mesh/mesh/theory/dumps/32,9,80,dumb.txt +0 -81
  103. data/ext/mesh/mesh/theory/experiment.py +0 -303
  104. data/ext/mesh/mesh/theory/experiment_raw_results/.gitignore +0 -0
  105. data/ext/mesh/mesh/theory/greedy_experiment.py +0 -66
  106. data/ext/mesh/mesh/theory/greedy_experiment_copy.py +0 -46
  107. data/ext/mesh/mesh/theory/greedy_experiment_q.py +0 -75
  108. data/ext/mesh/mesh/theory/makeGraph.py +0 -64
  109. data/ext/mesh/mesh/theory/manyreps.png +0 -0
  110. data/ext/mesh/mesh/theory/manystrings.png +0 -0
  111. data/ext/mesh/mesh/theory/match_vs_color_experiment.py +0 -94
  112. data/ext/mesh/mesh/theory/maxmatch_vs_E[Y].py +0 -162
  113. data/ext/mesh/mesh/theory/maxmatch_vs_greedymatch.py +0 -96
  114. data/ext/mesh/mesh/theory/maxvdeg+1imp++32,80.png +0 -0
  115. data/ext/mesh/mesh/theory/mesh_util.py +0 -322
  116. data/ext/mesh/mesh/theory/meshers.py +0 -452
  117. data/ext/mesh/mesh/theory/meshingBenchmark.py +0 -96
  118. data/ext/mesh/mesh/theory/occupancyComparison.py +0 -133
  119. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch.py +0 -97
  120. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch_q.py +0 -103
  121. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch_time.py +0 -117
  122. data/ext/mesh/mesh/theory/read_mesh_dump.py +0 -82
  123. data/ext/mesh/mesh/theory/test.py +0 -70
  124. data/ext/mesh/mesh/tools/bazel +0 -1
data/ext/mesh/mesh/src/meshable_arena.h
@@ -1,309 +0,0 @@
- // -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
- // Copyright 2019 The Mesh Authors. All rights reserved.
- // Use of this source code is governed by the Apache License,
- // Version 2.0, that can be found in the LICENSE file.
-
- #pragma once
- #ifndef MESH_MESHABLE_ARENA_H
- #define MESH_MESHABLE_ARENA_H
-
- #if defined(_WIN32)
- #error "TODO"
- #include <windows.h>
- #else
- // UNIX
- #include <fcntl.h>
- #include <stdlib.h>
- #include <sys/mman.h>
- #include <sys/stat.h>
- #include <sys/types.h>
- #include <unistd.h>
- #endif
-
- #if defined(__APPLE__) || defined(__FreeBSD__)
- #include <copyfile.h>
- #else
- #include <sys/sendfile.h>
- #endif
-
- #include <new>
-
- #include "internal.h"
-
- #include "cheap_heap.h"
-
- #include "bitmap.h"
-
- #include "mmap_heap.h"
-
- #ifndef MADV_DONTDUMP
- #define MADV_DONTDUMP 0
- #endif
-
- #ifndef MADV_DODUMP
- #define MADV_DODUMP 0
- #endif
-
- namespace mesh {
-
- class MeshableArena : public mesh::OneWayMmapHeap {
- private:
- DISALLOW_COPY_AND_ASSIGN(MeshableArena);
- typedef OneWayMmapHeap SuperHeap;
-
- public:
- enum { Alignment = kPageSize };
-
- explicit MeshableArena();
-
- inline bool contains(const void *ptr) const {
- auto arena = reinterpret_cast<uintptr_t>(_arenaBegin);
- auto ptrval = reinterpret_cast<uintptr_t>(ptr);
- return arena <= ptrval && ptrval < arena + kArenaSize;
- }
-
- char *pageAlloc(Span &result, size_t pageCount, size_t pageAlignment = 1);
-
- void free(void *ptr, size_t sz, internal::PageType type);
-
- inline void trackMiniHeap(const Span span, MiniHeapID id) {
- // now that we know they are available, set the empty pages to
- // in-use. This is safe because this whole function is called
- // under the GlobalHeap lock, so there is no chance of concurrent
- // modification between the loop above and the one below.
- for (size_t i = 0; i < span.length; i++) {
- #ifndef NDEBUG
- d_assert(!_mhIndex[span.offset + i].load(std::memory_order_acquire).hasValue());
- // auto mh = reinterpret_cast<MiniHeap *>(miniheapForArenaOffset(span.offset + i));
- // mh->dumpDebug();
- #endif
- setIndex(span.offset + i, id);
- }
- }
-
- inline void *ATTRIBUTE_ALWAYS_INLINE miniheapForArenaOffset(Offset arenaOff) const {
- const MiniHeapID mhOff = _mhIndex[arenaOff].load(std::memory_order_acquire);
- if (likely(mhOff.hasValue())) {
- return _mhAllocator.ptrFromOffset(mhOff.value());
- }
-
- return nullptr;
- }
-
- inline void *ATTRIBUTE_ALWAYS_INLINE lookupMiniheap(const void *ptr) const {
- if (unlikely(!contains(ptr))) {
- return nullptr;
- }
-
- // we've already checked contains, so we know this offset is
- // within bounds
- const auto arenaOff = offsetFor(ptr);
- return miniheapForArenaOffset(arenaOff);
- }
-
- void beginMesh(void *keep, void *remove, size_t sz);
- void finalizeMesh(void *keep, void *remove, size_t sz);
-
- inline bool aboveMeshThreshold() const {
- return _meshedPageCount > _maxMeshCount;
- }
-
- inline void setMaxMeshCount(size_t maxMeshCount) {
- // debug("setting max map count: %zu", maxMeshCount);
- _maxMeshCount = maxMeshCount;
- }
-
- inline size_t maxMeshCount() const {
- return _maxMeshCount;
- }
-
- // protected:
- // public for testing
- void scavenge(bool force);
- // like a scavenge, but we only MADV_FREE
- void partialScavenge();
-
- // return the maximum number of pages we've had meshed (and thus our
- // savings) at any point in time.
- inline size_t meshedPageHighWaterMark() const {
- return _meshedPageCountHWM;
- }
-
- inline size_t RSSAtHighWaterMark() const {
- return _rssKbAtHWM;
- }
-
- char *arenaBegin() const {
- return reinterpret_cast<char *>(_arenaBegin);
- }
- void *arenaEnd() const {
- return reinterpret_cast<char *>(_arenaBegin) + kArenaSize;
- }
-
- void doAfterForkChild();
-
- void freePhys(void *ptr, size_t sz);
-
- private:
- void expandArena(size_t minPagesAdded);
- bool findPages(size_t pageCount, Span &result, internal::PageType &type);
- bool ATTRIBUTE_NEVER_INLINE findPagesInner(internal::vector<Span> freeSpans[kSpanClassCount], size_t i,
- size_t pageCount, Span &result);
- Span reservePages(size_t pageCount, size_t pageAlignment);
- internal::RelaxedBitmap allocatedBitmap(bool includeDirty = true) const;
-
- void *malloc(size_t sz) = delete;
-
- inline bool isAligned(const Span &span, const size_t pageAlignment) const {
- return ptrvalFromOffset(span.offset) % (pageAlignment * kPageSize) == 0;
- }
-
- static constexpr size_t indexSize() {
- // one pointer per page in our arena
- return sizeof(Offset) * (kArenaSize / kPageSize);
- }
-
- inline void clearIndex(const Span &span) {
- for (size_t i = 0; i < span.length; i++) {
- // clear the miniheap pointers we were tracking
- setIndex(span.offset + i, MiniHeapID{0});
- }
- }
-
- inline void freeSpan(const Span &span, const internal::PageType flags) {
- if (span.length == 0) {
- return;
- }
-
- // this happens when we are trying to get an aligned allocation
- // and returning excess back to the arena
- if (flags == internal::PageType::Clean) {
- _clean[span.spanClass()].push_back(span);
- return;
- }
-
- clearIndex(span);
-
- if (flags == internal::PageType::Dirty) {
- if (kAdviseDump) {
- madvise(ptrFromOffset(span.offset), span.length * kPageSize, MADV_DONTDUMP);
- }
- d_assert(span.length > 0);
- _dirty[span.spanClass()].push_back(span);
- _dirtyPageCount += span.length;
-
- if (_dirtyPageCount > kMaxDirtyPageThreshold) {
- // do a full scavenge with a probability 1/10
- if (_fastPrng.inRange(0, 9) == 9) {
- scavenge(true);
- } else {
- partialScavenge();
- }
- }
- } else if (flags == internal::PageType::Meshed) {
- // delay restoring the identity mapping
- _toReset.push_back(span);
- }
- }
-
- int openShmSpanFile(size_t sz);
- int openSpanFile(size_t sz);
- char *openSpanDir(int pid);
-
- // pointer must already have been checked by `contains()` for bounds
- inline Offset offsetFor(const void *ptr) const {
- const uintptr_t ptrval = reinterpret_cast<uintptr_t>(ptr);
- const uintptr_t arena = reinterpret_cast<uintptr_t>(_arenaBegin);
-
- d_assert(ptrval >= arena);
-
- return (ptrval - arena) / kPageSize;
- }
-
- inline uintptr_t ptrvalFromOffset(size_t off) const {
- return reinterpret_cast<uintptr_t>(_arenaBegin) + off * kPageSize;
- }
-
- inline void *ptrFromOffset(size_t off) const {
- return reinterpret_cast<void *>(ptrvalFromOffset(off));
- }
-
- inline void setIndex(size_t off, MiniHeapID val) {
- d_assert(off < indexSize());
- _mhIndex[off].store(val, std::memory_order_release);
- }
-
- static void staticAtExit();
- static void staticPrepareForFork();
- static void staticAfterForkParent();
- static void staticAfterForkChild();
-
- void exit() {
- // FIXME: do this from the destructor, and test that destructor is
- // called. Also don't leak _spanDir
- if (_spanDir != nullptr) {
- rmdir(_spanDir);
- _spanDir = nullptr;
- }
- }
-
- inline void trackMeshed(const Span &span) {
- for (size_t i = 0; i < span.length; i++) {
- // this may already be 1 if it was a meshed virtual span that is
- // now being re-meshed to a new owning miniheap
- _meshedBitmap.tryToSet(span.offset + i);
- }
- }
-
- inline void untrackMeshed(const Span &span) {
- for (size_t i = 0; i < span.length; i++) {
- d_assert(_meshedBitmap.isSet(span.offset + i));
- _meshedBitmap.unset(span.offset + i);
- }
- }
-
- inline void resetSpanMapping(const Span &span) {
- auto ptr = ptrFromOffset(span.offset);
- auto sz = span.byteLength();
- mmap(ptr, sz, HL_MMAP_PROTECTION_MASK, kMapShared | MAP_FIXED, _fd, span.offset * kPageSize);
- }
-
- void prepareForFork();
- void afterForkParent();
- void afterForkChild();
-
- void *_arenaBegin{nullptr};
- // indexed by page offset.
- atomic<MiniHeapID> *_mhIndex{nullptr};
-
- protected:
- CheapHeap<64, kArenaSize / kPageSize> _mhAllocator{};
- MWC _fastPrng;
-
- private:
- Offset _end{}; // in pages
-
- // spans that had been meshed, have been freed, and need to be reset
- // to identity mappings in the page tables.
- internal::vector<Span> _toReset;
-
- internal::vector<Span> _clean[kSpanClassCount];
- internal::vector<Span> _dirty[kSpanClassCount];
-
- size_t _dirtyPageCount{0};
-
- internal::RelaxedBitmap _meshedBitmap{
- kArenaSize / kPageSize,
- reinterpret_cast<char *>(OneWayMmapHeap().malloc(bitmap::representationSize(kArenaSize / kPageSize))), false};
- size_t _meshedPageCount{0};
- size_t _meshedPageCountHWM{0};
- size_t _rssKbAtHWM{0};
- size_t _maxMeshCount{kDefaultMaxMeshCount};
-
- int _fd;
- int _forkPipe[2]{-1, -1}; // used for signaling during fork
- char *_spanDir{nullptr};
- };
- } // namespace mesh
-
- #endif // MESH_MESHABLE_ARENA_H
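The header above maps every page of one large fixed-size arena to the MiniHeap that owns it, using plain offset arithmetic over a per-page index. As orientation, here is a minimal, self-contained sketch (not code from the gem) of the offsetFor()/ptrvalFromOffset() round trip; the arena size and backing storage are hypothetical stand-ins.

#include <cassert>
#include <cstddef>
#include <cstdint>

namespace sketch {

constexpr size_t kPageSize = 4096;
constexpr size_t kArenaSize = 1 << 20;  // hypothetical 1 MiB arena

struct Arena {
  uintptr_t begin;

  // page offset for a pointer already known to be inside the arena
  size_t offsetFor(const void *ptr) const {
    const auto ptrval = reinterpret_cast<uintptr_t>(ptr);
    assert(ptrval >= begin && ptrval < begin + kArenaSize);
    return (ptrval - begin) / kPageSize;
  }

  // inverse mapping: page offset back to an address
  void *ptrFromOffset(size_t off) const {
    return reinterpret_cast<void *>(begin + off * kPageSize);
  }
};

}  // namespace sketch

int main() {
  alignas(4096) static char backing[sketch::kArenaSize];
  sketch::Arena arena{reinterpret_cast<uintptr_t>(backing)};

  void *p = arena.ptrFromOffset(3);
  assert(arena.offsetFor(p) == 3);  // round-trips exactly on page boundaries
  return 0;
}

Because _mhIndex holds one entry per arena page, lookupMiniheap() above is just this bounds check plus a single atomic load, which keeps the pointer-to-owner lookup on the free path cheap.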
data/ext/mesh/mesh/src/meshing.h
@@ -1,60 +0,0 @@
- // -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
- // Copyright 2019 The Mesh Authors. All rights reserved.
- // Use of this source code is governed by the Apache License,
- // Version 2.0, that can be found in the LICENSE file.
-
- #pragma once
- #ifndef MESH_MESHING_H
- #define MESH_MESHING_H
-
- #include <algorithm>
- #include <atomic>
- #include <limits>
-
- #include "bitmap.h"
- #include "common.h"
- #include "internal.h"
- #include "mini_heap.h"
-
- namespace mesh {
-
- using internal::Bitmap;
-
- inline bool bitmapsMeshable(const Bitmap::word_t *__restrict__ bitmap1, const Bitmap::word_t *__restrict__ bitmap2,
- size_t byteLen) noexcept {
- d_assert(reinterpret_cast<uintptr_t>(bitmap1) % 16 == 0);
- d_assert(reinterpret_cast<uintptr_t>(bitmap2) % 16 == 0);
- d_assert(byteLen >= 8);
- d_assert(byteLen % 8 == 0);
-
- // because we hold the global lock (so no miniheap can transition
- // from 'free' to 'attached'), the only possible data race we have
- // here is our read with a write changing a bit from 1 -> 0. That
- // makes this race benign - we may have a false positive if an
- // allocation is freed (it may cause 2 bitmaps to look like they
- // overlap when _now_ they don't actually), but it won't cause
- // correctness issues.
- const auto bitmapL = (const uint64_t *)__builtin_assume_aligned(bitmap1, 16);
- const auto bitmapR = (const uint64_t *)__builtin_assume_aligned(bitmap2, 16);
-
- uint64_t result = 0;
-
- for (size_t i = 0; i < byteLen / sizeof(size_t); i++) {
- result |= bitmapL[i] & bitmapR[i];
- }
-
- return result == 0;
- }
-
- namespace method {
-
- // split miniheaps into two lists in a random order
- void halfSplit(MWC &prng, MiniHeapListEntry *miniheaps, SplitArray &left, size_t &leftSize, SplitArray &right,
- size_t &rightSize) noexcept;
-
- void shiftedSplitting(MWC &prng, MiniHeapListEntry *miniheaps, SplitArray &left, SplitArray &right,
- const function<bool(std::pair<MiniHeap *, MiniHeap *> &&)> &meshFound) noexcept;
- } // namespace method
- } // namespace mesh
-
- #endif // MESH_MESHING_H
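bitmapsMeshable() above is the core test of the Mesh algorithm: two spans can share one physical frame only if no object slot is live in both, i.e. the bitwise AND of their occupancy bitmaps is zero everywhere. A toy, self-contained version (not the gem's code; hypothetical 128-bit bitmaps, alignment and locking concerns elided):

#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool meshable(const uint64_t *a, const uint64_t *b, size_t words) {
  uint64_t overlap = 0;
  for (size_t i = 0; i < words; i++) {
    overlap |= a[i] & b[i];  // branch-free accumulation, as in the real loop
  }
  return overlap == 0;
}

int main() {
  const uint64_t left[2] = {0b0101, 0};   // slots 0 and 2 live
  const uint64_t right[2] = {0b1010, 0};  // slots 1 and 3 live
  const uint64_t clash[2] = {0b0110, 0};  // slot 2 live here too

  std::printf("left/right: %d\n", meshable(left, right, 2));  // 1: disjoint
  std::printf("left/clash: %d\n", meshable(left, clash, 2));  // 0: slot 2 collides
  return 0;
}

When the test succeeds, consume() (in mini_heap.h below) copies the live objects of one span into the free slots of the other, and the freed span's pages can be remapped onto the survivor's physical frame.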
data/ext/mesh/mesh/src/mini_heap.h
@@ -1,532 +0,0 @@
- // -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
- // Copyright 2019 The Mesh Authors. All rights reserved.
- // Use of this source code is governed by the Apache License,
- // Version 2.0, that can be found in the LICENSE file.
-
- #pragma once
- #ifndef MESH_MINI_HEAP_H
- #define MESH_MINI_HEAP_H
-
- #include <pthread.h>
-
- #include <atomic>
- #include <random>
-
- #include "bitmap.h"
- #include "fixed_array.h"
- #include "internal.h"
-
- #include "rng/mwc.h"
-
- #include "heaplayers.h"
-
- namespace mesh {
-
- class MiniHeap;
-
- class Flags {
- private:
- DISALLOW_COPY_AND_ASSIGN(Flags);
-
- static inline constexpr uint32_t ATTRIBUTE_ALWAYS_INLINE getSingleBitMask(uint32_t pos) {
- return 1UL << pos;
- }
- static constexpr uint32_t SizeClassShift = 0;
- static constexpr uint32_t FreelistIdShift = 6;
- static constexpr uint32_t ShuffleVectorOffsetShift = 8;
- static constexpr uint32_t MaxCountShift = 16;
- static constexpr uint32_t MeshedOffset = 30;
-
- inline void ATTRIBUTE_ALWAYS_INLINE setMasked(uint32_t mask, uint32_t newVal) {
- uint32_t oldFlags = _flags.load(std::memory_order_relaxed);
- while (!atomic_compare_exchange_weak_explicit(&_flags,
- &oldFlags, // old val
- (oldFlags & mask) | newVal, // new val
- std::memory_order_release, // success mem model
- std::memory_order_relaxed)) {
- }
- }
-
- public:
- explicit Flags(uint32_t maxCount, uint32_t sizeClass, uint32_t svOffset, uint32_t freelistId) noexcept
- : _flags{(maxCount << MaxCountShift) + (sizeClass << SizeClassShift) + (svOffset << ShuffleVectorOffsetShift) +
- (freelistId << FreelistIdShift)} {
- d_assert((freelistId & 0x3) == freelistId);
- d_assert((sizeClass & ((1 << FreelistIdShift) - 1)) == sizeClass);
- d_assert(svOffset < 255);
- d_assert_msg(sizeClass < 255, "sizeClass: %u", sizeClass);
- d_assert(maxCount <= 256);
- d_assert(this->maxCount() == maxCount);
- }
-
- inline uint32_t freelistId() const {
- return (_flags.load(std::memory_order_seq_cst) >> FreelistIdShift) & 0x3;
- }
-
- inline void setFreelistId(uint32_t freelistId) {
- static_assert(list::Max <= (1 << FreelistIdShift), "expected max < 4");
- d_assert(freelistId < list::Max);
- uint32_t mask = ~(static_cast<uint32_t>(0x3) << FreelistIdShift);
- uint32_t newVal = (static_cast<uint32_t>(freelistId) << FreelistIdShift);
- setMasked(mask, newVal);
- }
-
- inline uint32_t maxCount() const {
- // XXX: does this assume little endian?
- return (_flags.load(std::memory_order_seq_cst) >> MaxCountShift) & 0x1ff;
- }
-
- inline uint32_t sizeClass() const {
- return (_flags.load(std::memory_order_seq_cst) >> SizeClassShift) & 0x3f;
- }
-
- inline uint8_t svOffset() const {
- return (_flags.load(std::memory_order_seq_cst) >> ShuffleVectorOffsetShift) & 0xff;
- }
-
- inline void setSvOffset(uint8_t off) {
- d_assert(off < 255);
- uint32_t mask = ~(static_cast<uint32_t>(0xff) << ShuffleVectorOffsetShift);
- uint32_t newVal = (static_cast<uint32_t>(off) << ShuffleVectorOffsetShift);
- setMasked(mask, newVal);
- }
-
- inline void setMeshed() {
- set(MeshedOffset);
- }
-
- inline void unsetMeshed() {
- unset(MeshedOffset);
- }
-
- inline bool ATTRIBUTE_ALWAYS_INLINE isMeshed() const {
- return is(MeshedOffset);
- }
-
- private:
- inline bool ATTRIBUTE_ALWAYS_INLINE is(size_t offset) const {
- const auto mask = getSingleBitMask(offset);
- return (_flags.load(std::memory_order_acquire) & mask) == mask;
- }
-
- inline void set(size_t offset) {
- const uint32_t mask = getSingleBitMask(offset);
-
- uint32_t oldFlags = _flags.load(std::memory_order_relaxed);
- while (!atomic_compare_exchange_weak_explicit(&_flags,
- &oldFlags, // old val
- oldFlags | mask, // new val
- std::memory_order_release, // success mem model
- std::memory_order_relaxed)) {
- }
- }
-
- inline void unset(size_t offset) {
- const uint32_t mask = getSingleBitMask(offset);
-
- uint32_t oldFlags = _flags.load(std::memory_order_relaxed);
- while (!atomic_compare_exchange_weak_explicit(&_flags,
- &oldFlags, // old val
- oldFlags & ~mask, // new val
- std::memory_order_release, // success mem model
- std::memory_order_relaxed)) {
- }
- }
-
- std::atomic<uint32_t> _flags;
- };
-
- class MiniHeap {
- private:
- DISALLOW_COPY_AND_ASSIGN(MiniHeap);
-
- public:
- MiniHeap(void *arenaBegin, Span span, size_t objectCount, size_t objectSize)
- : _bitmap(objectCount),
- _span(span),
- _flags(objectCount, objectCount > 1 ? SizeMap::SizeClass(objectSize) : 1, 0, list::Attached),
- _objectSizeReciprocal(1.0 / (float)objectSize) {
- // debug("sizeof(MiniHeap): %zu", sizeof(MiniHeap));
-
- d_assert(_bitmap.inUseCount() == 0);
-
- const auto expectedSpanSize = _span.byteLength();
- d_assert_msg(expectedSpanSize == spanSize(), "span size %zu == %zu (%u, %u)", expectedSpanSize, spanSize(),
- maxCount(), this->objectSize());
-
- // d_assert_msg(spanSize == static_cast<size_t>(_spanSize), "%zu != %hu", spanSize, _spanSize);
- // d_assert_msg(objectSize == static_cast<size_t>(objectSize()), "%zu != %hu", objectSize, _objectSize);
-
- d_assert(!_nextMeshed.hasValue());
-
- // debug("new:\n");
- // dumpDebug();
- }
-
- ~MiniHeap() {
- // debug("destruct:\n");
- // dumpDebug();
- }
-
- inline Span span() const {
- return _span;
- }
-
- void printOccupancy() const {
- mesh::debug("{\"name\": \"%p\", \"object-size\": %d, \"length\": %d, \"mesh-count\": %d, \"bitmap\": \"%s\"}\n",
- this, objectSize(), maxCount(), meshCount(), _bitmap.to_string(maxCount()).c_str());
- }
-
- inline void ATTRIBUTE_ALWAYS_INLINE free(void *arenaBegin, void *ptr) {
- // the logic in globalFree is
- // updated to allow the 'race' between lock-free freeing and
- // meshing
- // d_assert(!isMeshed());
- const ssize_t off = getOff(arenaBegin, ptr);
- if (unlikely(off < 0)) {
- d_assert(false);
- return;
- }
-
- freeOff(off);
- }
-
- inline bool clearIfNotFree(void *arenaBegin, void *ptr) {
- const ssize_t off = getOff(arenaBegin, ptr);
- const auto notWasSet = _bitmap.unset(off);
- const auto wasSet = !notWasSet;
- return wasSet;
- }
-
- inline void ATTRIBUTE_ALWAYS_INLINE freeOff(size_t off) {
- d_assert_msg(_bitmap.isSet(off), "MiniHeap(%p) expected bit %zu to be set (svOff:%zu)", this, off, svOffset());
- _bitmap.unset(off);
- }
-
- /// Copies (for meshing) the contents of src into our span.
- inline void consume(const void *arenaBegin, MiniHeap *src) {
- // this would be bad
- d_assert(src != this);
- d_assert(objectSize() == src->objectSize());
-
- src->setMeshed();
- const auto srcSpan = src->getSpanStart(arenaBegin);
- const auto objectSize = this->objectSize();
-
- // this both avoids the need to call `freeOff` in the loop
- // below, but it ensures we will be able to check for bitmap
- // setting races in GlobalHeap::freeFor
- const auto srcBitmap = src->takeBitmap();
-
- // for each object in src, copy it to our backing span + update
- // our bitmap and in-use count
- for (auto const &off : srcBitmap) {
- d_assert(off < maxCount());
- d_assert(!_bitmap.isSet(off));
-
- void *srcObject = reinterpret_cast<void *>(srcSpan + off * objectSize);
- // need to ensure we update the bitmap and in-use count
- void *dstObject = mallocAt(arenaBegin, off);
- // debug("meshing: %zu (%p <- %p)\n", off, dstObject, srcObject);
- d_assert(dstObject != nullptr);
- memcpy(dstObject, srcObject, objectSize);
- // debug("\t'%s'\n", dstObject);
- // debug("\t'%s'\n", srcObject);
- }
-
- trackMeshedSpan(GetMiniHeapID(src));
- }
-
- inline size_t spanSize() const {
- return _span.byteLength();
- }
-
- inline uint32_t ATTRIBUTE_ALWAYS_INLINE maxCount() const {
- return _flags.maxCount();
- }
-
- inline bool ATTRIBUTE_ALWAYS_INLINE isLargeAlloc() const {
- return maxCount() == 1;
- }
-
- inline size_t objectSize() const {
- if (likely(!isLargeAlloc())) {
- // this doesn't handle all the corner cases of roundf(3),
- // but it does work for all of our small object size classes
- return static_cast<size_t>(1 / _objectSizeReciprocal + 0.5);
- } else {
- return _span.length * kPageSize;
- }
- }
-
- inline int sizeClass() const {
- return _flags.sizeClass();
- }
-
- inline uintptr_t getSpanStart(const void *arenaBegin) const {
- const auto beginval = reinterpret_cast<uintptr_t>(arenaBegin);
- return beginval + _span.offset * kPageSize;
- }
-
- inline bool ATTRIBUTE_ALWAYS_INLINE isEmpty() const {
- return _bitmap.inUseCount() == 0;
- }
-
- inline bool ATTRIBUTE_ALWAYS_INLINE isFull() const {
- return _bitmap.inUseCount() == maxCount();
- }
-
- inline uint32_t ATTRIBUTE_ALWAYS_INLINE inUseCount() const {
- return _bitmap.inUseCount();
- }
-
- inline size_t bytesFree() const {
- return inUseCount() * objectSize();
- }
-
- inline void setMeshed() {
- _flags.setMeshed();
- }
-
- inline void setAttached(pid_t current, MiniHeapListEntry *listHead) {
- // mesh::debug("MiniHeap(%p:%5zu): current <- %u\n", this, objectSize(), current);
- _current.store(current, std::memory_order::memory_order_release);
- if (listHead != nullptr) {
- _freelist.remove(listHead);
- }
- this->setFreelistId(list::Attached);
- }
-
- inline uint8_t svOffset() const {
- return _flags.svOffset();
- }
-
- inline void setSvOffset(uint8_t off) {
- // debug("MiniHeap(%p) SET svOff:%zu)", this, off);
- _flags.setSvOffset(off);
- }
-
- inline uint8_t freelistId() const {
- return _flags.freelistId();
- }
-
- inline void setFreelistId(uint8_t id) {
- _flags.setFreelistId(id);
- }
-
- inline pid_t current() const {
- return _current.load(std::memory_order::memory_order_acquire);
- }
-
- inline void unsetAttached() {
- // mesh::debug("MiniHeap(%p:%5zu): current <- UNSET\n", this, objectSize());
- _current.store(0, std::memory_order::memory_order_release);
- }
-
- inline bool isAttached() const {
- return current() != 0;
- }
-
- inline bool ATTRIBUTE_ALWAYS_INLINE isMeshed() const {
- return _flags.isMeshed();
- }
-
- inline bool ATTRIBUTE_ALWAYS_INLINE hasMeshed() const {
- return _nextMeshed.hasValue();
- }
-
- inline bool isMeshingCandidate() const {
- return !isAttached() && objectSize() < kPageSize;
- }
-
- /// Returns the fraction full (in the range [0, 1]) that this miniheap is.
- inline double fullness() const {
- return static_cast<double>(inUseCount()) / static_cast<double>(maxCount());
- }
-
- internal::RelaxedFixedBitmap takeBitmap() {
- const auto capacity = this->maxCount();
- internal::RelaxedFixedBitmap zero{capacity};
- internal::RelaxedFixedBitmap result{capacity};
- _bitmap.setAndExchangeAll(result.mut_bits(), zero.bits());
- return result;
- }
-
- const internal::Bitmap &bitmap() const {
- return _bitmap;
- }
-
- internal::Bitmap &writableBitmap() {
- return _bitmap;
- }
-
- void trackMeshedSpan(MiniHeapID id) {
- hard_assert(id.hasValue());
-
- if (!_nextMeshed.hasValue()) {
- _nextMeshed = id;
- } else {
- GetMiniHeap(_nextMeshed)->trackMeshedSpan(id);
- }
- }
-
- public:
- template <class Callback>
- inline void forEachMeshed(Callback cb) const {
- if (cb(this))
- return;
-
- if (_nextMeshed.hasValue()) {
- const auto mh = GetMiniHeap(_nextMeshed);
- mh->forEachMeshed(cb);
- }
- }
-
- template <class Callback>
- inline void forEachMeshed(Callback cb) {
- if (cb(this))
- return;
-
- if (_nextMeshed.hasValue()) {
- auto mh = GetMiniHeap(_nextMeshed);
- mh->forEachMeshed(cb);
- }
- }
-
- bool isRelated(MiniHeap *other) const {
- auto otherFound = false;
- this->forEachMeshed([&](const MiniHeap *eachMh) {
- const auto found = eachMh == other;
- otherFound = found;
- return found;
- });
- return otherFound;
- }
-
- size_t meshCount() const {
- size_t count = 0;
-
- const MiniHeap *mh = this;
- while (mh != nullptr) {
- count++;
-
- auto next = mh->_nextMeshed;
- mh = next.hasValue() ? GetMiniHeap(next) : nullptr;
- }
-
- return count;
- }
-
- MiniHeapListEntry *getFreelist() {
- return &_freelist;
- }
-
- /// public for meshTest only
- inline void *mallocAt(const void *arenaBegin, size_t off) {
- if (!_bitmap.tryToSet(off)) {
- mesh::debug("%p: MA %u", this, off);
- dumpDebug();
- return nullptr;
- }
-
- return ptrFromOffset(arenaBegin, off);
- }
-
- inline void *ptrFromOffset(const void *arenaBegin, size_t off) {
- return reinterpret_cast<void *>(getSpanStart(arenaBegin) + off * objectSize());
- }
-
- inline bool operator<(MiniHeap *&rhs) noexcept {
- return this->inUseCount() < rhs->inUseCount();
- }
-
- void dumpDebug() const {
- const auto heapPages = spanSize() / HL::CPUInfo::PageSize;
- const size_t inUseCount = this->inUseCount();
- const size_t meshCount = this->meshCount();
- mesh::debug(
- "MiniHeap(%p:%5zu): %3zu objects on %2zu pages (inUse: %zu, spans: %zu)\t%p-%p\tFreelist{prev:%u, next:%u}\n",
- this, objectSize(), maxCount(), heapPages, inUseCount, meshCount, _span.offset * kPageSize,
- _span.offset * kPageSize + spanSize(), _freelist.prev(), _freelist.next());
- mesh::debug("\t%s\n", _bitmap.to_string(maxCount()).c_str());
- }
-
- // this only works for unmeshed miniheaps
- inline uint8_t ATTRIBUTE_ALWAYS_INLINE getUnmeshedOff(const void *arenaBegin, void *ptr) const {
- const auto ptrval = reinterpret_cast<uintptr_t>(ptr);
-
- uintptr_t span = reinterpret_cast<uintptr_t>(arenaBegin) + _span.offset * kPageSize;
- d_assert(span != 0);
-
- const size_t off = (ptrval - span) * _objectSizeReciprocal;
- d_assert(off < maxCount());
-
- return off;
- }
-
- inline uint8_t ATTRIBUTE_ALWAYS_INLINE getOff(const void *arenaBegin, void *ptr) const {
- const auto span = spanStart(reinterpret_cast<uintptr_t>(arenaBegin), ptr);
- d_assert(span != 0);
- const auto ptrval = reinterpret_cast<uintptr_t>(ptr);
-
- const size_t off = (ptrval - span) * _objectSizeReciprocal;
- d_assert(off < maxCount());
-
- return off;
- }
-
- protected:
- inline uintptr_t ATTRIBUTE_ALWAYS_INLINE spanStart(uintptr_t arenaBegin, void *ptr) const {
- const auto ptrval = reinterpret_cast<uintptr_t>(ptr);
- const auto len = _span.byteLength();
-
- // manually unroll loop once to capture the common case of
- // un-meshed miniheaps
- uintptr_t spanptr = arenaBegin + _span.offset * kPageSize;
- if (likely(spanptr <= ptrval && ptrval < spanptr + len)) {
- return spanptr;
- }
-
- return spanStartSlowpath(arenaBegin, ptrval);
- }
-
- uintptr_t ATTRIBUTE_NEVER_INLINE spanStartSlowpath(uintptr_t arenaBegin, uintptr_t ptrval) const {
- const auto len = _span.byteLength();
- uintptr_t spanptr = 0;
-
- const MiniHeap *mh = this;
- while (true) {
- if (unlikely(!mh->_nextMeshed.hasValue())) {
- abort();
- }
-
- mh = GetMiniHeap(mh->_nextMeshed);
-
- const uintptr_t meshedSpanptr = arenaBegin + mh->span().offset * kPageSize;
- if (meshedSpanptr <= ptrval && ptrval < meshedSpanptr + len) {
- spanptr = meshedSpanptr;
- break;
- }
- };
-
- return spanptr;
- }
-
- internal::Bitmap _bitmap; // 32 bytes 32
- const Span _span; // 8 40
- MiniHeapListEntry _freelist{}; // 8 48
- atomic<pid_t> _current{0}; // 4 52
- Flags _flags; // 4 56
- const float _objectSizeReciprocal; // 4 60
- MiniHeapID _nextMeshed{}; // 4 64
- };
-
- typedef FixedArray<MiniHeap, 63> MiniHeapArray;
-
- static_assert(sizeof(pid_t) == 4, "pid_t not 32-bits!");
- static_assert(sizeof(mesh::internal::Bitmap) == 32, "Bitmap too big!");
- static_assert(sizeof(MiniHeap) == 64, "MiniHeap too big!");
- static_assert(sizeof(MiniHeapArray) == 64 * sizeof(void *), "MiniHeapArray too big!");
- } // namespace mesh
-
- #endif // MESH_MINI_HEAP_H
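Flags above packs four fields (size class, freelist id, shuffle-vector offset, max count) plus a meshed bit into a single std::atomic<uint32_t>, and every setter funnels through a compare-and-swap loop so concurrent updates to different fields cannot clobber one another. A minimal sketch (not the gem's code) of that setMasked() idiom, using an illustrative two-bit field rather than MiniHeap's exact layout:

#include <atomic>
#include <cassert>
#include <cstdint>

class PackedFlags {
  std::atomic<uint32_t> _flags{0};

  void setMasked(uint32_t mask, uint32_t newVal) {
    uint32_t oldFlags = _flags.load(std::memory_order_relaxed);
    // on CAS failure, oldFlags is refreshed with the current value; retrying
    // preserves concurrent writes to the *other* packed fields
    while (!_flags.compare_exchange_weak(oldFlags, (oldFlags & mask) | newVal,
                                         std::memory_order_release,
                                         std::memory_order_relaxed)) {
    }
  }

 public:
  static constexpr uint32_t kIdShift = 6;  // hypothetical 2-bit field at bit 6

  void setFreelistId(uint32_t id) {
    assert(id < 4);
    setMasked(~(0x3u << kIdShift), id << kIdShift);
  }

  uint32_t freelistId() const {
    return (_flags.load(std::memory_order_acquire) >> kIdShift) & 0x3;
  }
};

int main() {
  PackedFlags flags;
  flags.setFreelistId(2);
  assert(flags.freelistId() == 2);
  return 0;
}

Note also the _objectSizeReciprocal trick in getOff() and getUnmeshedOff() above: for the small size classes, multiplying by a precomputed float reciprocal of the object size replaces an integer division when converting a pointer into a bitmap slot.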