mesh-rb 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +1 -1
  3. data/ext/mesh/extconf.rb +22 -4
  4. data/ext/mesh/mesh.tar.gz +0 -0
  5. data/lib/mesh/version.rb +1 -1
  6. data/mesh.gemspec +3 -2
  7. metadata +4 -120
  8. data/ext/mesh/mesh/.bazelrc +0 -20
  9. data/ext/mesh/mesh/.bazelversion +0 -1
  10. data/ext/mesh/mesh/.clang-format +0 -15
  11. data/ext/mesh/mesh/.dockerignore +0 -5
  12. data/ext/mesh/mesh/.editorconfig +0 -16
  13. data/ext/mesh/mesh/.gitattributes +0 -4
  14. data/ext/mesh/mesh/.github/workflows/main.yml +0 -144
  15. data/ext/mesh/mesh/.gitignore +0 -51
  16. data/ext/mesh/mesh/AUTHORS +0 -5
  17. data/ext/mesh/mesh/CMakeLists.txt +0 -270
  18. data/ext/mesh/mesh/CODE_OF_CONDUCT.md +0 -77
  19. data/ext/mesh/mesh/Dockerfile +0 -30
  20. data/ext/mesh/mesh/LICENSE +0 -201
  21. data/ext/mesh/mesh/Makefile +0 -81
  22. data/ext/mesh/mesh/README.md +0 -97
  23. data/ext/mesh/mesh/WORKSPACE +0 -50
  24. data/ext/mesh/mesh/bazel +0 -350
  25. data/ext/mesh/mesh/mesh-pldi19-powers.pdf +0 -0
  26. data/ext/mesh/mesh/src/BUILD +0 -222
  27. data/ext/mesh/mesh/src/CMakeLists.txt +0 -85
  28. data/ext/mesh/mesh/src/bitmap.h +0 -590
  29. data/ext/mesh/mesh/src/cheap_heap.h +0 -170
  30. data/ext/mesh/mesh/src/common.h +0 -377
  31. data/ext/mesh/mesh/src/copts.bzl +0 -31
  32. data/ext/mesh/mesh/src/d_assert.cc +0 -75
  33. data/ext/mesh/mesh/src/fixed_array.h +0 -124
  34. data/ext/mesh/mesh/src/global_heap.cc +0 -547
  35. data/ext/mesh/mesh/src/global_heap.h +0 -569
  36. data/ext/mesh/mesh/src/gnu_wrapper.cc +0 -75
  37. data/ext/mesh/mesh/src/internal.h +0 -356
  38. data/ext/mesh/mesh/src/libmesh.cc +0 -239
  39. data/ext/mesh/mesh/src/mac_wrapper.cc +0 -528
  40. data/ext/mesh/mesh/src/measure_rss.cc +0 -44
  41. data/ext/mesh/mesh/src/measure_rss.h +0 -20
  42. data/ext/mesh/mesh/src/meshable_arena.cc +0 -776
  43. data/ext/mesh/mesh/src/meshable_arena.h +0 -309
  44. data/ext/mesh/mesh/src/meshing.h +0 -60
  45. data/ext/mesh/mesh/src/mini_heap.h +0 -532
  46. data/ext/mesh/mesh/src/mmap_heap.h +0 -104
  47. data/ext/mesh/mesh/src/one_way_mmap_heap.h +0 -77
  48. data/ext/mesh/mesh/src/partitioned_heap.h +0 -111
  49. data/ext/mesh/mesh/src/plasma/mesh.h +0 -33
  50. data/ext/mesh/mesh/src/real.cc +0 -52
  51. data/ext/mesh/mesh/src/real.h +0 -36
  52. data/ext/mesh/mesh/src/rng/mwc.h +0 -296
  53. data/ext/mesh/mesh/src/rng/mwc64.h +0 -58
  54. data/ext/mesh/mesh/src/rpl_printf.c +0 -1991
  55. data/ext/mesh/mesh/src/runtime.cc +0 -393
  56. data/ext/mesh/mesh/src/runtime.h +0 -114
  57. data/ext/mesh/mesh/src/shuffle_vector.h +0 -287
  58. data/ext/mesh/mesh/src/size_classes.def +0 -251
  59. data/ext/mesh/mesh/src/static/if.h +0 -36
  60. data/ext/mesh/mesh/src/static/log.h +0 -43
  61. data/ext/mesh/mesh/src/testing/benchmark/local_refill.cc +0 -103
  62. data/ext/mesh/mesh/src/testing/big-alloc.c +0 -28
  63. data/ext/mesh/mesh/src/testing/fragmenter.cc +0 -128
  64. data/ext/mesh/mesh/src/testing/global-large-stress.cc +0 -25
  65. data/ext/mesh/mesh/src/testing/local-alloc.c +0 -16
  66. data/ext/mesh/mesh/src/testing/meshing_benchmark.cc +0 -189
  67. data/ext/mesh/mesh/src/testing/thread.cc +0 -35
  68. data/ext/mesh/mesh/src/testing/unit/alignment.cc +0 -56
  69. data/ext/mesh/mesh/src/testing/unit/bitmap_test.cc +0 -274
  70. data/ext/mesh/mesh/src/testing/unit/concurrent_mesh_test.cc +0 -185
  71. data/ext/mesh/mesh/src/testing/unit/mesh_test.cc +0 -143
  72. data/ext/mesh/mesh/src/testing/unit/rng_test.cc +0 -22
  73. data/ext/mesh/mesh/src/testing/unit/size_class_test.cc +0 -66
  74. data/ext/mesh/mesh/src/testing/unit/triple_mesh_test.cc +0 -285
  75. data/ext/mesh/mesh/src/testing/userfaultfd-kernel-copy.cc +0 -164
  76. data/ext/mesh/mesh/src/thread_local_heap.cc +0 -163
  77. data/ext/mesh/mesh/src/thread_local_heap.h +0 -268
  78. data/ext/mesh/mesh/src/wrapper.cc +0 -433
  79. data/ext/mesh/mesh/support/export_mesh.cmake +0 -28
  80. data/ext/mesh/mesh/support/gen-size-classes +0 -57
  81. data/ext/mesh/mesh/support/install_all_configs +0 -33
  82. data/ext/mesh/mesh/support/remove_export_mesh.cmake +0 -48
  83. data/ext/mesh/mesh/support/update-bazelisk +0 -8
  84. data/ext/mesh/mesh/theory/32m80.png +0 -0
  85. data/ext/mesh/mesh/theory/64m80ind.png +0 -0
  86. data/ext/mesh/mesh/theory/bound_comparison.py +0 -67
  87. data/ext/mesh/mesh/theory/bounds/impdeg+1 +0 -135
  88. data/ext/mesh/mesh/theory/choose.py +0 -43
  89. data/ext/mesh/mesh/theory/common.py +0 -42
  90. data/ext/mesh/mesh/theory/compute_exp_Y.py +0 -134
  91. data/ext/mesh/mesh/theory/createRandomString.py +0 -69
  92. data/ext/mesh/mesh/theory/deg_bound_check.py +0 -100
  93. data/ext/mesh/mesh/theory/degcheck.py +0 -47
  94. data/ext/mesh/mesh/theory/dumps/32,1,80,dumb.txt +0 -81
  95. data/ext/mesh/mesh/theory/dumps/32,2,80,dumb.txt +0 -81
  96. data/ext/mesh/mesh/theory/dumps/32,3,80,dumb.txt +0 -81
  97. data/ext/mesh/mesh/theory/dumps/32,4,80,dumb.txt +0 -81
  98. data/ext/mesh/mesh/theory/dumps/32,5,80,dumb.txt +0 -81
  99. data/ext/mesh/mesh/theory/dumps/32,6,80,dumb.txt +0 -81
  100. data/ext/mesh/mesh/theory/dumps/32,7,80,dumb.txt +0 -81
  101. data/ext/mesh/mesh/theory/dumps/32,8,80,dumb.txt +0 -81
  102. data/ext/mesh/mesh/theory/dumps/32,9,80,dumb.txt +0 -81
  103. data/ext/mesh/mesh/theory/experiment.py +0 -303
  104. data/ext/mesh/mesh/theory/experiment_raw_results/.gitignore +0 -0
  105. data/ext/mesh/mesh/theory/greedy_experiment.py +0 -66
  106. data/ext/mesh/mesh/theory/greedy_experiment_copy.py +0 -46
  107. data/ext/mesh/mesh/theory/greedy_experiment_q.py +0 -75
  108. data/ext/mesh/mesh/theory/makeGraph.py +0 -64
  109. data/ext/mesh/mesh/theory/manyreps.png +0 -0
  110. data/ext/mesh/mesh/theory/manystrings.png +0 -0
  111. data/ext/mesh/mesh/theory/match_vs_color_experiment.py +0 -94
  112. data/ext/mesh/mesh/theory/maxmatch_vs_E[Y].py +0 -162
  113. data/ext/mesh/mesh/theory/maxmatch_vs_greedymatch.py +0 -96
  114. data/ext/mesh/mesh/theory/maxvdeg+1imp++32,80.png +0 -0
  115. data/ext/mesh/mesh/theory/mesh_util.py +0 -322
  116. data/ext/mesh/mesh/theory/meshers.py +0 -452
  117. data/ext/mesh/mesh/theory/meshingBenchmark.py +0 -96
  118. data/ext/mesh/mesh/theory/occupancyComparison.py +0 -133
  119. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch.py +0 -97
  120. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch_q.py +0 -103
  121. data/ext/mesh/mesh/theory/randmatch_vs_greedymatch_time.py +0 -117
  122. data/ext/mesh/mesh/theory/read_mesh_dump.py +0 -82
  123. data/ext/mesh/mesh/theory/test.py +0 -70
  124. data/ext/mesh/mesh/tools/bazel +0 -1
data/ext/mesh/mesh/src/global_heap.h
@@ -1,569 +0,0 @@
- // -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
- // Copyright 2019 The Mesh Authors. All rights reserved.
- // Use of this source code is governed by the Apache License,
- // Version 2.0, that can be found in the LICENSE file.
-
- #pragma once
- #ifndef MESH_GLOBAL_HEAP_H
- #define MESH_GLOBAL_HEAP_H
-
- #include <algorithm>
- #include <array>
- #include <mutex>
-
- #include "internal.h"
- #include "meshable_arena.h"
- #include "mini_heap.h"
-
- #include "heaplayers.h"
-
- using namespace HL;
-
- namespace mesh {
-
- static constexpr std::pair<MiniHeapListEntry, size_t> Head{MiniHeapListEntry{list::Head, list::Head}, 0};
-
- class EpochLock {
- private:
- DISALLOW_COPY_AND_ASSIGN(EpochLock);
-
- public:
- EpochLock() {
- }
-
- inline size_t ATTRIBUTE_ALWAYS_INLINE current() const noexcept {
- return _epoch.load(std::memory_order::memory_order_seq_cst);
- }
-
- inline size_t ATTRIBUTE_ALWAYS_INLINE isSame(size_t startEpoch) const noexcept {
- return current() == startEpoch;
- }
-
- inline void ATTRIBUTE_ALWAYS_INLINE lock() noexcept {
- // make sure that the previous epoch was even
- const auto old = _epoch.fetch_add(1, std::memory_order::memory_order_seq_cst);
- hard_assert(old % 2 == 0);
- }
-
- inline void ATTRIBUTE_ALWAYS_INLINE unlock() noexcept {
- #ifndef NDEBUG
- // make sure that the previous epoch was odd
- const auto old = _epoch.fetch_add(1, std::memory_order::memory_order_seq_cst);
- d_assert(old % 2 == 1);
- #else
- _epoch.fetch_add(1, std::memory_order::memory_order_seq_cst);
- #endif
- }
-
- private:
- atomic_size_t _epoch{0};
- };
-
- class GlobalHeapStats {
- public:
- atomic_size_t meshCount;
- size_t mhFreeCount;
- size_t mhAllocCount;
- size_t mhHighWaterMark;
- };
-
- class GlobalHeap : public MeshableArena {
- private:
- DISALLOW_COPY_AND_ASSIGN(GlobalHeap);
- typedef MeshableArena Super;
-
- static_assert(HL::gcd<MmapHeap::Alignment, Alignment>::value == Alignment,
- "expected MmapHeap to have 16-byte alignment");
-
- public:
- enum { Alignment = 16 };
-
- GlobalHeap() : Super(), _maxObjectSize(SizeMap::ByteSizeForClass(kNumBins - 1)), _lastMesh{time::now()} {
- }
-
- inline void dumpStrings() const {
- lock_guard<mutex> lock(_miniheapLock);
-
- mesh::debug("TODO: reimplement printOccupancy\n");
- // for (size_t i = 0; i < kNumBins; i++) {
- // _littleheaps[i].printOccupancy();
- // }
- }
-
- inline void flushAllBins() {
- for (size_t sizeClass = 0; sizeClass < kNumBins; sizeClass++) {
- flushBinLocked(sizeClass);
- }
- }
-
- void scavenge(bool force = false) {
- lock_guard<mutex> lock(_miniheapLock);
-
- Super::scavenge(force);
- }
-
- void dumpStats(int level, bool beDetailed) const;
-
- // must be called with exclusive _mhRWLock held
- inline MiniHeap *ATTRIBUTE_ALWAYS_INLINE allocMiniheapLocked(int sizeClass, size_t pageCount, size_t objectCount,
- size_t objectSize, size_t pageAlignment = 1) {
- d_assert(0 < pageCount);
-
- void *buf = _mhAllocator.alloc();
- d_assert(buf != nullptr);
-
- // allocate out of the arena
- Span span{0, 0};
- char *spanBegin = Super::pageAlloc(span, pageCount, pageAlignment);
- d_assert(spanBegin != nullptr);
- d_assert((reinterpret_cast<uintptr_t>(spanBegin) / kPageSize) % pageAlignment == 0);
-
- MiniHeap *mh = new (buf) MiniHeap(arenaBegin(), span, objectCount, objectSize);
-
- const auto miniheapID = MiniHeapID{_mhAllocator.offsetFor(buf)};
- Super::trackMiniHeap(span, miniheapID);
-
- // mesh::debug("%p (%u) created!\n", mh, GetMiniHeapID(mh));
-
- _miniheapCount++;
- _stats.mhAllocCount++;
- _stats.mhHighWaterMark = max(_miniheapCount, _stats.mhHighWaterMark);
-
- return mh;
- }
-
- inline void *pageAlignedAlloc(size_t pageAlignment, size_t pageCount) {
- // if given a very large allocation size (e.g. (uint64_t)-8), it is possible
- // the pageCount calculation overflowed. An allocation that big is impossible
- // to satisfy anyway, so just fail early.
- if (unlikely(pageCount == 0)) {
- return nullptr;
- }
-
- lock_guard<mutex> lock(_miniheapLock);
-
- MiniHeap *mh = allocMiniheapLocked(-1, pageCount, 1, pageCount * kPageSize, pageAlignment);
-
- d_assert(mh->isLargeAlloc());
- d_assert(mh->spanSize() == pageCount * kPageSize);
- // d_assert(mh->objectSize() == pageCount * kPageSize);
-
- void *ptr = mh->mallocAt(arenaBegin(), 0);
-
- return ptr;
- }
-
- inline MiniHeapListEntry *freelistFor(uint8_t freelistId, int sizeClass) {
- switch (freelistId) {
- case list::Empty:
- return &_emptyFreelist[sizeClass].first;
- case list::Partial:
- return &_partialFreelist[sizeClass].first;
- case list::Full:
- return &_fullList[sizeClass].first;
- }
- // remaining case is 'attached', for which there is no freelist
- return nullptr;
- }
-
- inline bool postFreeLocked(MiniHeap *mh, int sizeClass, size_t inUse) {
- // its possible we raced between reading isAttached + grabbing a lock.
- // just check here to avoid having to play whack-a-mole at each call site.
- if (mh->isAttached()) {
- return false;
- }
- const auto currFreelistId = mh->freelistId();
- auto currFreelist = freelistFor(currFreelistId, sizeClass);
- const auto max = mh->maxCount();
-
- std::pair<MiniHeapListEntry, size_t> *list;
- uint8_t newListId;
-
- if (inUse == 0) {
- // if the miniheap is already in the right list there is nothing to do
- if (currFreelistId == list::Empty) {
- return false;
- }
- newListId = list::Empty;
- list = &_emptyFreelist[sizeClass];
- } else if (inUse == max) {
- if (currFreelistId == list::Full) {
- return false;
- }
- newListId = list::Full;
- list = &_fullList[sizeClass];
- } else {
- if (currFreelistId == list::Partial) {
- return false;
- }
- newListId = list::Partial;
- list = &_partialFreelist[sizeClass];
- }
-
- list->first.add(currFreelist, newListId, list::Head, mh);
- list->second++;
-
- return _emptyFreelist[sizeClass].second > kBinnedTrackerMaxEmpty;
- }
-
- inline void releaseMiniheapLocked(MiniHeap *mh, int sizeClass) {
- // ensure this flag is always set with the miniheap lock held
- mh->unsetAttached();
- const auto inUse = mh->inUseCount();
- postFreeLocked(mh, sizeClass, inUse);
- }
-
- template <uint32_t Size>
- inline void releaseMiniheaps(FixedArray<MiniHeap, Size> &miniheaps) {
- if (miniheaps.size() == 0) {
- return;
- }
-
- lock_guard<mutex> lock(_miniheapLock);
- for (auto mh : miniheaps) {
- releaseMiniheapLocked(mh, mh->sizeClass());
- }
- miniheaps.clear();
- }
-
- template <uint32_t Size>
- size_t fillFromList(FixedArray<MiniHeap, Size> &miniheaps, pid_t current,
- std::pair<MiniHeapListEntry, size_t> &freelist, size_t bytesFree) {
- if (freelist.first.empty()) {
- return bytesFree;
- }
-
- auto nextId = freelist.first.next();
- while (nextId != list::Head && bytesFree < kMiniheapRefillGoalSize && !miniheaps.full()) {
- auto mh = GetMiniHeap(nextId);
- d_assert(mh != nullptr);
- nextId = mh->getFreelist()->next();
-
- // TODO: we can eventually remove this
- d_assert(!(mh->isFull() || mh->isAttached() || mh->isMeshed()));
-
- // TODO: this is commented out to match a bug in the previous implementation;
- // it turns out if you don't track bytes free and give more memory to the
- // thread-local cache, things perform better!
- // bytesFree += mh->bytesFree();
- d_assert(!mh->isAttached());
- mh->setAttached(current, freelistFor(mh->freelistId(), mh->sizeClass()));
- d_assert(mh->isAttached() && mh->current() == current);
- hard_assert(!miniheaps.full());
- miniheaps.append(mh);
- d_assert(freelist.second > 0);
- freelist.second--;
- }
-
- return bytesFree;
- }
-
- template <uint32_t Size>
- size_t selectForReuse(int sizeClass, FixedArray<MiniHeap, Size> &miniheaps, pid_t current) {
- size_t bytesFree = fillFromList(miniheaps, current, _partialFreelist[sizeClass], 0);
-
- if (bytesFree >= kMiniheapRefillGoalSize || miniheaps.full()) {
- return bytesFree;
- }
-
- // we've exhausted all of our partially full MiniHeaps, but there
- // might still be empty ones we could reuse.
- return fillFromList(miniheaps, current, _emptyFreelist[sizeClass], bytesFree);
- }
-
- template <uint32_t Size>
- inline void allocSmallMiniheaps(int sizeClass, uint32_t objectSize, FixedArray<MiniHeap, Size> &miniheaps,
- pid_t current) {
- lock_guard<mutex> lock(_miniheapLock);
-
- d_assert(sizeClass >= 0);
-
- for (MiniHeap *oldMH : miniheaps) {
- releaseMiniheapLocked(oldMH, sizeClass);
- }
- miniheaps.clear();
-
- d_assert(objectSize <= _maxObjectSize);
-
- #ifndef NDEBUG
- const size_t classMaxSize = SizeMap::ByteSizeForClass(sizeClass);
-
- d_assert_msg(objectSize == classMaxSize, "sz(%zu) shouldn't be greater than %zu (class %d)", objectSize,
- classMaxSize, sizeClass);
- #endif
- d_assert(sizeClass >= 0);
- d_assert(sizeClass < kNumBins);
-
- d_assert(miniheaps.size() == 0);
-
- // check our bins for a miniheap to reuse
- auto bytesFree = selectForReuse(sizeClass, miniheaps, current);
- if (bytesFree >= kMiniheapRefillGoalSize || miniheaps.full()) {
- return;
- }
-
- // if we have objects bigger than the size of a page, allocate
- // multiple pages to amortize the cost of creating a
- // miniheap/globally locking the heap. For example, asking for
- // 2048 byte objects would allocate 4 4KB pages.
- const size_t objectCount = max(kPageSize / objectSize, kMinStringLen);
- const size_t pageCount = PageCount(objectSize * objectCount);
-
- while (bytesFree < kMiniheapRefillGoalSize && !miniheaps.full()) {
- auto mh = allocMiniheapLocked(sizeClass, pageCount, objectCount, objectSize);
- d_assert(!mh->isAttached());
- mh->setAttached(current, freelistFor(mh->freelistId(), sizeClass));
- d_assert(mh->isAttached() && mh->current() == current);
- miniheaps.append(mh);
- bytesFree += mh->bytesFree();
- }
-
- return;
- }
-
- // large, page-multiple allocations
- void *ATTRIBUTE_NEVER_INLINE malloc(size_t sz);
-
- inline MiniHeap *ATTRIBUTE_ALWAYS_INLINE miniheapForWithEpoch(const void *ptr, size_t &currentEpoch) const {
- currentEpoch = _meshEpoch.current();
- return miniheapFor(ptr);
- }
-
- inline MiniHeap *ATTRIBUTE_ALWAYS_INLINE miniheapFor(const void *ptr) const {
- auto mh = reinterpret_cast<MiniHeap *>(Super::lookupMiniheap(ptr));
- return mh;
- }
-
- inline MiniHeap *ATTRIBUTE_ALWAYS_INLINE miniheapForID(const MiniHeapID id) const {
- auto mh = reinterpret_cast<MiniHeap *>(_mhAllocator.ptrFromOffset(id.value()));
- __builtin_prefetch(mh, 1, 2);
- return mh;
- }
-
- inline MiniHeapID miniheapIDFor(const MiniHeap *mh) const {
- return MiniHeapID{_mhAllocator.offsetFor(mh)};
- }
-
- void untrackMiniheapLocked(MiniHeap *mh) {
- // mesh::debug("%p (%u) untracked!\n", mh, GetMiniHeapID(mh));
- _stats.mhAllocCount -= 1;
- mh->getFreelist()->remove(freelistFor(mh->freelistId(), mh->sizeClass()));
- }
-
- void freeFor(MiniHeap *mh, void *ptr, size_t startEpoch);
-
- // called with lock held
- void freeMiniheapAfterMeshLocked(MiniHeap *mh, bool untrack = true) {
- // don't untrack a meshed miniheap -- it has already been untracked
- if (untrack && !mh->isMeshed()) {
- untrackMiniheapLocked(mh);
- }
-
- d_assert(!mh->getFreelist()->prev().hasValue());
- d_assert(!mh->getFreelist()->next().hasValue());
- mh->MiniHeap::~MiniHeap();
- // memset(reinterpret_cast<char *>(mh), 0x77, sizeof(MiniHeap));
- _mhAllocator.free(mh);
- _miniheapCount--;
- }
-
- void freeMiniheap(MiniHeap *&mh, bool untrack = true) {
- lock_guard<mutex> lock(_miniheapLock);
- freeMiniheapLocked(mh, untrack);
- }
-
- void freeMiniheapLocked(MiniHeap *&mh, bool untrack) {
- const auto spanSize = mh->spanSize();
- MiniHeap *toFree[kMaxMeshes];
- size_t last = 0;
-
- memset(toFree, 0, sizeof(*toFree) * kMaxMeshes);
-
- // avoid use after frees while freeing
- mh->forEachMeshed([&](MiniHeap *mh) {
- toFree[last++] = mh;
- return false;
- });
-
- for (size_t i = 0; i < last; i++) {
- MiniHeap *mh = toFree[i];
- const bool isMeshed = mh->isMeshed();
- const auto type = isMeshed ? internal::PageType::Meshed : internal::PageType::Dirty;
- Super::free(reinterpret_cast<void *>(mh->getSpanStart(arenaBegin())), spanSize, type);
- _stats.mhFreeCount++;
- freeMiniheapAfterMeshLocked(mh, untrack);
- }
-
- mh = nullptr;
- }
-
- // flushBinLocked empties _emptyFreelist[sizeClass]
- inline void flushBinLocked(size_t sizeClass) {
- // mesh::debug("flush bin %zu\n", sizeClass);
- d_assert(!_emptyFreelist[sizeClass].first.empty());
- if (_emptyFreelist[sizeClass].first.next() == list::Head) {
- return;
- }
-
- std::pair<MiniHeapListEntry, size_t> &empty = _emptyFreelist[sizeClass];
- MiniHeapID nextId = empty.first.next();
- while (nextId != list::Head) {
- auto mh = GetMiniHeap(nextId);
- nextId = mh->getFreelist()->next();
- freeMiniheapLocked(mh, true);
- empty.second--;
- }
-
- d_assert(empty.first.next() == list::Head);
- d_assert(empty.first.prev() == list::Head);
- }
-
- void ATTRIBUTE_NEVER_INLINE free(void *ptr);
-
- inline size_t getSize(void *ptr) const {
- if (unlikely(ptr == nullptr))
- return 0;
-
- lock_guard<mutex> lock(_miniheapLock);
- auto mh = miniheapFor(ptr);
- if (likely(mh)) {
- return mh->objectSize();
- } else {
- return 0;
- }
- }
-
- int mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-
- size_t getAllocatedMiniheapCount() const {
- lock_guard<mutex> lock(_miniheapLock);
- return _miniheapCount;
- }
-
- void setMeshPeriodMs(std::chrono::milliseconds period) {
- _meshPeriodMs = period;
- }
-
- void lock() {
- _miniheapLock.lock();
- // internal::Heap().lock();
- }
-
- void unlock() {
- // internal::Heap().unlock();
- _miniheapLock.unlock();
- }
-
- // PUBLIC ONLY FOR TESTING
- // after call to meshLocked() completes src is a nullptr
- void ATTRIBUTE_NEVER_INLINE meshLocked(MiniHeap *dst, MiniHeap *&src);
-
- inline void ATTRIBUTE_ALWAYS_INLINE maybeMesh() {
- if (!kMeshingEnabled) {
- return;
- }
-
- if (_meshPeriod == 0) {
- return;
- }
-
- if (_meshPeriodMs == kZeroMs) {
- return;
- }
-
- const auto now = time::now();
- auto duration = chrono::duration_cast<chrono::milliseconds>(now - _lastMesh);
-
- if (likely(duration < _meshPeriodMs)) {
- return;
- }
-
- lock_guard<mutex> lock(_miniheapLock);
-
- {
- // ensure if two threads tried to grab the mesh lock at the same
- // time, the second one bows out gracefully without meshing
- // twice in a row.
- const auto lockedNow = time::now();
- auto duration = chrono::duration_cast<chrono::milliseconds>(lockedNow - _lastMesh);
-
- if (unlikely(duration < _meshPeriodMs)) {
- return;
- }
- }
-
- _lastMesh = now;
-
- meshAllSizeClassesLocked();
- }
-
- inline bool okToProceed(void *ptr) const {
- lock_guard<mutex> lock(_miniheapLock);
-
- if (ptr == nullptr) {
- return false;
- }
-
- return miniheapFor(ptr) != nullptr;
- }
-
- inline internal::vector<MiniHeap *> meshingCandidatesLocked(int sizeClass) const {
- // FIXME: duplicated with code in halfSplit
- internal::vector<MiniHeap *> bucket{};
-
- auto nextId = _partialFreelist[sizeClass].first.next();
- while (nextId != list::Head) {
- auto mh = GetMiniHeap(nextId);
- if (mh->isMeshingCandidate() && (mh->fullness() < kOccupancyCutoff)) {
- bucket.push_back(mh);
- }
- nextId = mh->getFreelist()->next();
- }
-
- return bucket;
- }
-
- private:
- // check for meshes in all size classes -- must be called LOCKED
- void meshAllSizeClassesLocked();
- // meshSizeClassLocked returns the number of merged sets found
- size_t meshSizeClassLocked(size_t sizeClass, MergeSetArray &mergeSets, SplitArray &left, SplitArray &right);
-
- const size_t _maxObjectSize;
- atomic_size_t _meshPeriod{kDefaultMeshPeriod};
- std::chrono::milliseconds _meshPeriodMs{kMeshPeriodMs};
-
- atomic_size_t ATTRIBUTE_ALIGNED(CACHELINE_SIZE) _lastMeshEffective{0};
-
- // we want this on its own cacheline
- EpochLock ATTRIBUTE_ALIGNED(CACHELINE_SIZE) _meshEpoch{};
-
- // always accessed with the mhRWLock exclusively locked. cachline
- // aligned to avoid sharing cacheline with _meshEpoch
- size_t ATTRIBUTE_ALIGNED(CACHELINE_SIZE) _miniheapCount{0};
-
- // these must only be accessed or modified with the _miniheapLock held
- std::array<std::pair<MiniHeapListEntry, size_t>, kNumBins> _emptyFreelist{
- Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head,
- Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head};
- std::array<std::pair<MiniHeapListEntry, size_t>, kNumBins> _partialFreelist{
- Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head,
- Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head};
- std::array<std::pair<MiniHeapListEntry, size_t>, kNumBins> _fullList{
- Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head,
- Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head, Head};
-
- mutable mutex _miniheapLock{};
-
- GlobalHeapStats _stats{};
-
- // XXX: should be atomic, but has exception spec?
- time::time_point _lastMesh;
- };
-
- static_assert(kNumBins == 25, "if this changes, add more 'Head's above");
- static_assert(sizeof(std::array<MiniHeapListEntry, kNumBins>) == kNumBins * 8, "list size is right");
- static_assert(sizeof(GlobalHeap) < (kNumBins * 8 * 3 + 64 * 7 + 100000), "gh small enough");
- } // namespace mesh
-
- #endif // MESH_GLOBAL_HEAP_H
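
Note on the removed global_heap.h above: EpochLock is a seqlock-style even/odd counter. Meshing (the writer) makes _meshEpoch odd while it relocates live objects and even again when it finishes; miniheapForWithEpoch() records _meshEpoch.current() before an optimistic lookup, and freeFor() takes that snapshot as its startEpoch so the free path can revalidate it with isSame() and detect that a mesh ran in between. The standalone sketch below shows the same validation pattern outside the allocator; all names here (gEpoch, gA, gB, consistentRead) are invented for illustration, and this is not code from the gem.

// epoch_sketch.cc -- illustrative only, not part of mesh-rb.
// A writer keeps an epoch counter odd for the duration of an update and even
// otherwise; readers snapshot the epoch, read optimistically, and retry if
// the epoch was odd or changed in the meantime.
#include <atomic>
#include <cstdio>
#include <thread>

class EpochLock {
 public:
  size_t current() const noexcept { return _epoch.load(); }
  bool isSame(size_t start) const noexcept { return current() == start; }
  void lock() noexcept { _epoch.fetch_add(1); }    // even -> odd: update in progress
  void unlock() noexcept { _epoch.fetch_add(1); }  // odd -> even: update finished
 private:
  std::atomic<size_t> _epoch{0};
};

EpochLock gEpoch;
std::atomic<int> gA{0}, gB{0};  // invariant: a consistent snapshot has gA == gB

// Optimistic read validated against the epoch, analogous to how the free path
// above can compare its startEpoch against _meshEpoch.
int consistentRead() {
  for (;;) {
    const size_t start = gEpoch.current();
    if (start % 2 != 0) {  // a writer is mid-update; try again
      continue;
    }
    const int a = gA.load();
    const int b = gB.load();
    if (gEpoch.isSame(start)) {
      // no lock()/unlock() happened between the two epoch reads, so a and b
      // come from the same completed update.
      return a == b ? a : -1;  // a == b always holds here
    }
  }
}

int main() {
  std::thread writer([] {
    for (int i = 1; i <= 100000; ++i) {
      gEpoch.lock();
      gA.store(i);
      gB.store(i);
      gEpoch.unlock();
    }
  });
  std::thread reader([] {
    for (int i = 0; i < 100000; ++i) {
      if (consistentRead() == -1) {
        std::puts("torn read observed");  // never expected to fire
      }
    }
  });
  writer.join();
  reader.join();
  std::printf("final value: %d\n", consistentRead());
  return 0;
}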