sparsam 0.1.4
- checksums.yaml +7 -0
- data/README.md +19 -0
- data/ext/extconf.rb +18 -0
- data/ext/ruby_hooks.c +96 -0
- data/ext/serializer.cpp +716 -0
- data/ext/serializer.h +101 -0
- data/ext/third-party/sparsepp/sparsepp/spp.h +4347 -0
- data/ext/third-party/sparsepp/sparsepp/spp_config.h +781 -0
- data/ext/third-party/sparsepp/sparsepp/spp_dlalloc.h +4023 -0
- data/ext/third-party/sparsepp/sparsepp/spp_memory.h +121 -0
- data/ext/third-party/sparsepp/sparsepp/spp_smartptr.h +76 -0
- data/ext/third-party/sparsepp/sparsepp/spp_stdint.h +16 -0
- data/ext/third-party/sparsepp/sparsepp/spp_timer.h +58 -0
- data/ext/third-party/sparsepp/sparsepp/spp_traits.h +122 -0
- data/ext/third-party/sparsepp/sparsepp/spp_utils.h +447 -0
- data/lib/sparsam.rb +10 -0
- data/lib/sparsam/base_class.rb +97 -0
- data/lib/sparsam/deserializer.rb +8 -0
- data/lib/sparsam/exceptions.rb +33 -0
- data/lib/sparsam/struct.rb +45 -0
- data/lib/sparsam/types.rb +108 -0
- data/lib/sparsam/union.rb +72 -0
- data/spec/gen-ruby/user_constants.rb +9 -0
- data/spec/gen-ruby/user_types.rb +106 -0
- data/spec/sparsam_spec.rb +304 -0
- data/spec/user.thrift +62 -0
- metadata +172 -0
data/ext/third-party/sparsepp/sparsepp/spp_dlalloc.h
@@ -0,0 +1,4023 @@
#ifndef spp_dlalloc__h_
#define spp_dlalloc__h_

/* This is a C++ allocator created from Doug Lea's dlmalloc
   (Version 2.8.6 Wed Aug 29 06:57:58 2012)
   see: http://g.oswego.edu/dl/html/malloc.html
*/

#include <sparsepp/spp_utils.h>
#include <sparsepp/spp_smartptr.h>


#ifndef SPP_FORCEINLINE
    #if defined(__GNUC__)
        #define SPP_FORCEINLINE __inline __attribute__ ((always_inline))
    #elif defined(_MSC_VER)
        #define SPP_FORCEINLINE __forceinline
    #else
        #define SPP_FORCEINLINE inline
    #endif
#endif


#ifndef SPP_IMPL
    #define SPP_IMPL SPP_FORCEINLINE
#endif

#ifndef SPP_API
    #define SPP_API static
#endif


namespace spp
{
    // ---------------------- allocator internal API -----------------------
    typedef void* mspace;

    /*
      create_mspace creates and returns a new independent space with the
      given initial capacity, or, if 0, the default granularity size. It
      returns null if there is no system memory available to create the
      space. If argument locked is non-zero, the space uses a separate
      lock to control access. The capacity of the space will grow
      dynamically as needed to service mspace_malloc requests. You can
      control the sizes of incremental increases of this space by
      compiling with a different SPP_DEFAULT_GRANULARITY or dynamically
      setting with mallopt(M_GRANULARITY, value).
    */
    SPP_API mspace create_mspace(size_t capacity, int locked);
    SPP_API size_t destroy_mspace(mspace msp);
    SPP_API void*  mspace_malloc(mspace msp, size_t bytes);
    SPP_API void   mspace_free(mspace msp, void* mem);
    SPP_API void*  mspace_realloc(mspace msp, void* mem, size_t newsize);

#if 0
    SPP_API mspace create_mspace_with_base(void* base, size_t capacity, int locked);
    SPP_API int    mspace_track_large_chunks(mspace msp, int enable);
    SPP_API void*  mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
    SPP_API void*  mspace_memalign(mspace msp, size_t alignment, size_t bytes);
    SPP_API void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                             size_t elem_size, void* chunks[]);
    SPP_API void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                               size_t sizes[], void* chunks[]);
    SPP_API size_t mspace_footprint(mspace msp);
    SPP_API size_t mspace_max_footprint(mspace msp);
    SPP_API size_t mspace_usable_size(const void* mem);
    SPP_API int    mspace_trim(mspace msp, size_t pad);
    SPP_API int    mspace_mallopt(int, int);
#endif
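
    /* --- Editor's sketch, not part of the original header: how the active
       mspace API above fits together. `mspace_demo` is a hypothetical helper,
       kept in an `#if 0` block (the file's own convention for inactive code);
       in sparsam/sparsepp these calls are normally reached through the
       spp_allocator wrapper defined below, not made directly. --- */
#if 0
    static void mspace_demo()
    {
        mspace ms = create_mspace(0, 0);     // capacity 0 => default granularity; unlocked
        void*  p  = mspace_malloc(ms, 128);  // carve 128 bytes out of this space
        p = mspace_realloc(ms, p, 256);      // may move, but stays within the space
        mspace_free(ms, p);                  // return the block to its space
        destroy_mspace(ms);                  // release everything the space acquired
    }
#endif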

    // -----------------------------------------------------------
    // -----------------------------------------------------------
    template<class T>
    class spp_allocator
    {
    public:
        typedef T         value_type;
        typedef T*        pointer;
        typedef ptrdiff_t difference_type;
        typedef const T*  const_pointer;
        typedef size_t    size_type;

        spp_allocator() : _space(new MSpace) {}

        void swap(spp_allocator &o)
        {
            std::swap(_space, o._space);
        }

        pointer allocate(size_t n, const_pointer /* unused */ = 0)
        {
            pointer res = static_cast<pointer>(mspace_malloc(_space->_sp, n * sizeof(T)));
            if (!res)
                throw std::bad_alloc();
            return res;
        }

        void deallocate(pointer p, size_t /* unused */)
        {
            mspace_free(_space->_sp, p);
        }

        pointer reallocate(pointer p, size_t new_size)
        {
            pointer res = static_cast<pointer>(mspace_realloc(_space->_sp, p, new_size * sizeof(T)));
            if (!res)
                throw std::bad_alloc();
            return res;
        }

        size_type max_size() const
        {
            return static_cast<size_type>(-1) / sizeof(value_type);
        }

        void construct(pointer p, const value_type& val)
        {
            new (p) value_type(val);
        }

        void destroy(pointer p) { p->~value_type(); }

        template<class U>
        struct rebind
        {
            // rebind to libc_allocator because we want to use malloc_inspect_all in destructive_iterator
            // to reduce peak memory usage (we don't want <group_items> mixed with value_type when
            // we traverse the allocated memory).
            typedef spp::spp_allocator<U> other;
        };

        mspace space() const { return _space->_sp; }

        // check if we can clear the whole allocator memory at once => works only if the allocator
        // is not shared. If can_clear() returns true, we expect that the next allocator call
        // will be clear() - not allocate() or deallocate()
        bool can_clear()
        {
            assert(!_space_to_clear);
            _space_to_clear.reset();
            _space_to_clear.swap(_space);
            if (_space_to_clear->count() == 1)
                return true;
            else
                _space_to_clear.swap(_space);
            return false;
        }

        void clear()
        {
            assert(!_space && _space_to_clear);
            _space_to_clear.reset();
            _space = new MSpace;
        }

    private:
        struct MSpace : public spp_rc
        {
            MSpace() :
                _sp(create_mspace(0, 0))
            {}

            ~MSpace()
            {
                destroy_mspace(_sp);
            }

            mspace _sp;
        };

        spp_sptr<MSpace> _space;
        spp_sptr<MSpace> _space_to_clear;
    };
}


// allocators are "equal" whenever memory allocated with one can be deallocated with the other
template<class T>
inline bool operator==(const spp_::spp_allocator<T> &a, const spp_::spp_allocator<T> &b)
{
    return a.space() == b.space();
}

template<class T>
inline bool operator!=(const spp_::spp_allocator<T> &a, const spp_::spp_allocator<T> &b)
{
    return !(a == b);
}

namespace std
{
    template <class T>
    inline void swap(spp_::spp_allocator<T> &a, spp_::spp_allocator<T> &b)
    {
        a.swap(b);
    }
}
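
/* --- Editor's sketch, not part of the original header: spp_allocator exposes
   enough of the C++ Allocator interface (value_type, allocate, deallocate,
   construct, destroy, rebind) for std::allocator_traits to fill in the rest,
   so in principle a standard container can be backed by a private mspace.
   Kept in an `#if 0` block since full container support can vary by standard
   library; the demo function and variable names are illustrative only. --- */
#if 0
#include <vector>

static void spp_allocator_demo()
{
    spp::spp_allocator<int> a;                      // owns a freshly created mspace
    std::vector<int, spp::spp_allocator<int>> v(a);
    v.assign(1000, 42);                             // element storage lives in a's mspace
    // copies share the underlying mspace, so a == v.get_allocator() holds
}
#endif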

#if !defined(SPP_EXCLUDE_IMPLEMENTATION)

#ifndef WIN32
    #ifdef _WIN32
        #define WIN32 1
    #endif
    #ifdef _WIN32_WCE
        #define SPP_LACKS_FCNTL_H
        #define WIN32 1
    #endif
#endif

#ifdef WIN32
    #define WIN32_LEAN_AND_MEAN
    #include <windows.h>
    #include <tchar.h>
    #define SPP_HAVE_MMAP 1
    #define SPP_LACKS_UNISTD_H
    #define SPP_LACKS_SYS_PARAM_H
    #define SPP_LACKS_SYS_MMAN_H
    #define SPP_LACKS_STRING_H
    #define SPP_LACKS_STRINGS_H
    #define SPP_LACKS_SYS_TYPES_H
    #define SPP_LACKS_ERRNO_H
    #define SPP_LACKS_SCHED_H
    #ifndef SPP_MALLOC_FAILURE_ACTION
        #define SPP_MALLOC_FAILURE_ACTION
    #endif
    #ifndef SPP_MMAP_CLEARS
        #ifdef _WIN32_WCE /* WINCE reportedly does not clear */
            #define SPP_MMAP_CLEARS 0
        #else
            #define SPP_MMAP_CLEARS 1
        #endif
    #endif
#endif

#if defined(DARWIN) || defined(_DARWIN)
    #define SPP_HAVE_MMAP 1
    /* OSX allocators provide 16 byte alignment */
    #ifndef SPP_MALLOC_ALIGNMENT
        #define SPP_MALLOC_ALIGNMENT ((size_t)16U)
    #endif
#endif

#ifndef SPP_LACKS_SYS_TYPES_H
    #include <sys/types.h>  /* For size_t */
#endif

#ifndef SPP_MALLOC_ALIGNMENT
    #define SPP_MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
#endif

/* ------------------- size_t and alignment properties -------------------- */
static const size_t spp_max_size_t = ~(size_t)0;
static const size_t spp_size_t_bitsize = sizeof(size_t) << 3;
static const size_t spp_half_max_size_t = spp_max_size_t / 2U;
static const size_t spp_chunk_align_mask = SPP_MALLOC_ALIGNMENT - 1;

#if defined(SPP_DEBUG) || !defined(NDEBUG)
    static bool spp_is_aligned(void *p) { return ((size_t)p & spp_chunk_align_mask) == 0; }
#endif

// the number of bytes to offset an address to align it
static size_t align_offset(void *p)
{
    return (((size_t)p & spp_chunk_align_mask) == 0) ? 0 :
        ((SPP_MALLOC_ALIGNMENT - ((size_t)p & spp_chunk_align_mask)) & spp_chunk_align_mask);
}
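
/* --- Editor's note, not part of the original header: a worked instance of the
   mask arithmetic above, assuming SPP_MALLOC_ALIGNMENT == 16 (the default of
   2 * sizeof(void *) on a 64-bit build), so spp_chunk_align_mask == 15:
       align_offset((void*)0x1003) == (16 - (0x1003 & 15)) & 15 == 13
       align_offset((void*)0x1010) == 0   // already 16-byte aligned
   Adding the returned offset always lands on the next aligned address. --- */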


#ifndef SPP_FOOTERS
    #define SPP_FOOTERS 0
#endif

#ifndef SPP_ABORT
    #define SPP_ABORT abort()
#endif

#ifndef SPP_ABORT_ON_ASSERT_FAILURE
    #define SPP_ABORT_ON_ASSERT_FAILURE 1
#endif

#ifndef SPP_PROCEED_ON_ERROR
    #define SPP_PROCEED_ON_ERROR 0
#endif

#ifndef SPP_INSECURE
    #define SPP_INSECURE 0
#endif

#ifndef SPP_MALLOC_INSPECT_ALL
    #define SPP_MALLOC_INSPECT_ALL 0
#endif

#ifndef SPP_HAVE_MMAP
    #define SPP_HAVE_MMAP 1
#endif

#ifndef SPP_MMAP_CLEARS
    #define SPP_MMAP_CLEARS 1
#endif

#ifndef SPP_HAVE_MREMAP
    #ifdef linux
        #define SPP_HAVE_MREMAP 1
        #ifndef _GNU_SOURCE
            #define _GNU_SOURCE /* Turns on mremap() definition */
        #endif
    #else
        #define SPP_HAVE_MREMAP 0
    #endif
#endif

#ifndef SPP_MALLOC_FAILURE_ACTION
    #define SPP_MALLOC_FAILURE_ACTION errno = ENOMEM
#endif


#ifndef SPP_DEFAULT_GRANULARITY
    #if defined(WIN32)
        #define SPP_DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
    #else
        #define SPP_DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
    #endif
#endif

#ifndef SPP_DEFAULT_TRIM_THRESHOLD
    #define SPP_DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#endif

#ifndef SPP_DEFAULT_MMAP_THRESHOLD
    #if SPP_HAVE_MMAP
        #define SPP_DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
    #else
        #define SPP_DEFAULT_MMAP_THRESHOLD spp_max_size_t
    #endif
#endif

#ifndef SPP_MAX_RELEASE_CHECK_RATE
    #if SPP_HAVE_MMAP
        #define SPP_MAX_RELEASE_CHECK_RATE 4095
    #else
        #define SPP_MAX_RELEASE_CHECK_RATE spp_max_size_t
    #endif
#endif

#ifndef SPP_USE_BUILTIN_FFS
    #define SPP_USE_BUILTIN_FFS 0
#endif

#ifndef SPP_USE_DEV_RANDOM
    #define SPP_USE_DEV_RANDOM 0
#endif

#ifndef SPP_NO_SEGMENT_TRAVERSAL
    #define SPP_NO_SEGMENT_TRAVERSAL 0
#endif



/*------------------------------ internal #includes ---------------------- */

#ifdef _MSC_VER
    #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
#endif
#ifndef SPP_LACKS_ERRNO_H
    #include <errno.h>   /* for SPP_MALLOC_FAILURE_ACTION */
#endif

#ifdef SPP_DEBUG
    #if SPP_ABORT_ON_ASSERT_FAILURE
        #undef assert
        #define assert(x) if(!(x)) SPP_ABORT
    #else
        #include <assert.h>
    #endif
#else
    #ifndef assert
        #define assert(x)
    #endif
    #define SPP_DEBUG 0
#endif

#if !defined(WIN32) && !defined(SPP_LACKS_TIME_H)
    #include <time.h>    /* for magic initialization */
#endif

#ifndef SPP_LACKS_STDLIB_H
    #include <stdlib.h>  /* for abort() */
#endif

#ifndef SPP_LACKS_STRING_H
    #include <string.h>  /* for memset etc */
#endif

#if SPP_USE_BUILTIN_FFS
    #ifndef SPP_LACKS_STRINGS_H
        #include <strings.h> /* for ffs */
    #endif
#endif

#if SPP_HAVE_MMAP
    #ifndef SPP_LACKS_SYS_MMAN_H
        /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
        #if (defined(linux) && !defined(__USE_GNU))
            #define __USE_GNU 1
            #include <sys/mman.h> /* for mmap */
            #undef __USE_GNU
        #else
            #include <sys/mman.h> /* for mmap */
        #endif
    #endif
    #ifndef SPP_LACKS_FCNTL_H
        #include <fcntl.h>
    #endif
#endif

#ifndef SPP_LACKS_UNISTD_H
    #include <unistd.h>  /* for sbrk, sysconf */
#else
    #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
        extern void* sbrk(ptrdiff_t);
    #endif
#endif

#include <new>

namespace spp
{

/* Declarations for bit scanning on win32 */
#if defined(_MSC_VER) && _MSC_VER>=1300
    #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
        extern "C" {
            unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
            unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
        }

        #define BitScanForward _BitScanForward
        #define BitScanReverse _BitScanReverse
        #pragma intrinsic(_BitScanForward)
        #pragma intrinsic(_BitScanReverse)
    #endif /* BitScanForward */
#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */

#ifndef WIN32
    #ifndef malloc_getpagesize
        #ifdef _SC_PAGESIZE  /* some SVR4 systems omit an underscore */
            #ifndef _SC_PAGE_SIZE
                #define _SC_PAGE_SIZE _SC_PAGESIZE
            #endif
        #endif
        #ifdef _SC_PAGE_SIZE
            #define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
        #else
            #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
                extern size_t getpagesize();
                #define malloc_getpagesize getpagesize()
            #else
                #ifdef WIN32 /* use supplied emulation of getpagesize */
                    #define malloc_getpagesize getpagesize()
                #else
                    #ifndef SPP_LACKS_SYS_PARAM_H
                        #include <sys/param.h>
                    #endif
                    #ifdef EXEC_PAGESIZE
                        #define malloc_getpagesize EXEC_PAGESIZE
                    #else
                        #ifdef NBPG
                            #ifndef CLSIZE
                                #define malloc_getpagesize NBPG
                            #else
                                #define malloc_getpagesize (NBPG * CLSIZE)
                            #endif
                        #else
                            #ifdef NBPC
                                #define malloc_getpagesize NBPC
                            #else
                                #ifdef PAGESIZE
                                    #define malloc_getpagesize PAGESIZE
                                #else /* just guess */
                                    #define malloc_getpagesize ((size_t)4096U)
                                #endif
                            #endif
                        #endif
                    #endif
                #endif
            #endif
        #endif
    #endif
#endif

/* -------------------------- MMAP preliminaries ------------------------- */

/*
   If SPP_HAVE_MORECORE or SPP_HAVE_MMAP are false, we just define calls and
   checks to fail so compiler optimizer can delete code rather than
   using so many "#if"s.
*/


/* MMAP must return mfail on failure */
static void *mfail  = (void*)spp_max_size_t;
static char *cmfail = (char*)mfail;

#if SPP_HAVE_MMAP

    #ifndef WIN32
        #define SPP_MUNMAP_DEFAULT(a, s)  munmap((a), (s))
        #define SPP_MMAP_PROT (PROT_READ | PROT_WRITE)
        #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
            #define MAP_ANONYMOUS MAP_ANON
        #endif

        #ifdef MAP_ANONYMOUS
            #define SPP_MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
            #define SPP_MMAP_DEFAULT(s)  mmap(0, (s), SPP_MMAP_PROT, SPP_MMAP_FLAGS, -1, 0)
        #else /* MAP_ANONYMOUS */
            /*
               Nearly all versions of mmap support MAP_ANONYMOUS, so the following
               is unlikely to be needed, but is supplied just in case.
            */
            #define SPP_MMAP_FLAGS (MAP_PRIVATE)
            static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
            void SPP_MMAP_DEFAULT(size_t s)
            {
                if (dev_zero_fd < 0)
                    dev_zero_fd = open("/dev/zero", O_RDWR);
                mmap(0, s, SPP_MMAP_PROT, SPP_MMAP_FLAGS, dev_zero_fd, 0);
            }
        #endif /* MAP_ANONYMOUS */

        #define SPP_DIRECT_MMAP_DEFAULT(s) SPP_MMAP_DEFAULT(s)

    #else /* WIN32 */

        /* Win32 MMAP via VirtualAlloc */
        static SPP_FORCEINLINE void* win32mmap(size_t size)
        {
            void* ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
            return (ptr != 0) ? ptr : mfail;
        }

        /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
        static SPP_FORCEINLINE void* win32direct_mmap(size_t size)
        {
            void* ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
                                     PAGE_READWRITE);
            return (ptr != 0) ? ptr : mfail;
        }

        /* This function supports releasing coalesced segments */
        static SPP_FORCEINLINE int win32munmap(void* ptr, size_t size)
        {
            MEMORY_BASIC_INFORMATION minfo;
            char* cptr = (char*)ptr;
            while (size)
            {
                if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
                    return -1;
                if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
                    minfo.State != MEM_COMMIT || minfo.RegionSize > size)
                    return -1;
                if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
                    return -1;
                cptr += minfo.RegionSize;
                size -= minfo.RegionSize;
            }
            return 0;
        }

        #define SPP_MMAP_DEFAULT(s)        win32mmap(s)
        #define SPP_MUNMAP_DEFAULT(a, s)   win32munmap((a), (s))
        #define SPP_DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
    #endif /* WIN32 */
#endif /* SPP_HAVE_MMAP */

#if SPP_HAVE_MREMAP
    #ifndef WIN32
        #define SPP_MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
    #endif
#endif

/**
 * Define SPP_CALL_MMAP/SPP_CALL_MUNMAP/SPP_CALL_DIRECT_MMAP
 */
#if SPP_HAVE_MMAP
    #define USE_MMAP_BIT 1

    #ifdef SPP_MMAP
        #define SPP_CALL_MMAP(s) SPP_MMAP(s)
    #else
        #define SPP_CALL_MMAP(s) SPP_MMAP_DEFAULT(s)
    #endif

    #ifdef SPP_MUNMAP
        #define SPP_CALL_MUNMAP(a, s) SPP_MUNMAP((a), (s))
    #else
        #define SPP_CALL_MUNMAP(a, s) SPP_MUNMAP_DEFAULT((a), (s))
    #endif

    #ifdef SPP_DIRECT_MMAP
        #define SPP_CALL_DIRECT_MMAP(s) SPP_DIRECT_MMAP(s)
    #else
        #define SPP_CALL_DIRECT_MMAP(s) SPP_DIRECT_MMAP_DEFAULT(s)
    #endif

#else  /* SPP_HAVE_MMAP */
    #define USE_MMAP_BIT 0

    #define SPP_MMAP(s)             mfail
    #define SPP_MUNMAP(a, s)        (-1)
    #define SPP_DIRECT_MMAP(s)      mfail
    #define SPP_CALL_DIRECT_MMAP(s) SPP_DIRECT_MMAP(s)
    #define SPP_CALL_MMAP(s)        SPP_MMAP(s)
    #define SPP_CALL_MUNMAP(a, s)   SPP_MUNMAP((a), (s))
#endif

/**
 * Define SPP_CALL_MREMAP
 */
#if SPP_HAVE_MMAP && SPP_HAVE_MREMAP
    #ifdef MREMAP
        #define SPP_CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
    #else
        #define SPP_CALL_MREMAP(addr, osz, nsz, mv) SPP_MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
    #endif
#else
    #define SPP_CALL_MREMAP(addr, osz, nsz, mv) mfail
#endif

/* mstate bit set if contiguous morecore disabled or failed */
static const unsigned USE_NONCONTIGUOUS_BIT = 4U;

/* segment bit set in create_mspace_with_base */
static const unsigned EXTERN_BIT = 8U;


/* --------------------------- flags ------------------------ */

static const unsigned PINUSE_BIT = 1;
static const unsigned CINUSE_BIT = 2;
static const unsigned FLAG4_BIT  = 4;
static const unsigned INUSE_BITS = (PINUSE_BIT | CINUSE_BIT);
static const unsigned FLAG_BITS  = (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT);

/* ------------------- Chunks sizes and alignments ----------------------- */

#if SPP_FOOTERS
    static const unsigned CHUNK_OVERHEAD = 2 * sizeof(size_t);
#else
    static const unsigned CHUNK_OVERHEAD = sizeof(size_t);
#endif

/* MMapped chunks need a second word of overhead ... */
static const unsigned SPP_MMAP_CHUNK_OVERHEAD = 2 * sizeof(size_t);

/* ... and additional padding for fake next-chunk at foot */
static const unsigned SPP_MMAP_FOOT_PAD = 4 * sizeof(size_t);

// ===============================================================================
struct malloc_chunk_header
{
    void set_size_and_pinuse_of_free_chunk(size_t s)
    {
        _head = s | PINUSE_BIT;
        set_foot(s);
    }

    void set_foot(size_t s)
    {
        ((malloc_chunk_header *)((char*)this + s))->_prev_foot = s;
    }

    // extraction of fields from head words
    bool cinuse() const     { return !!(_head & CINUSE_BIT); }
    bool pinuse() const     { return !!(_head & PINUSE_BIT); }
    bool flag4inuse() const { return !!(_head & FLAG4_BIT); }
    bool is_inuse() const   { return (_head & INUSE_BITS) != PINUSE_BIT; }
    bool is_mmapped() const { return (_head & INUSE_BITS) == 0; }

    size_t chunksize() const { return _head & ~(FLAG_BITS); }

    void clear_pinuse() { _head &= ~PINUSE_BIT; }
    void set_flag4()    { _head |= FLAG4_BIT; }
    void clear_flag4()  { _head &= ~FLAG4_BIT; }

    // Treat space at ptr +/- offset as a chunk
    malloc_chunk_header * chunk_plus_offset(size_t s)
    {
        return (malloc_chunk_header *)((char*)this + s);
    }
    malloc_chunk_header * chunk_minus_offset(size_t s)
    {
        return (malloc_chunk_header *)((char*)this - s);
    }

    // Ptr to next or previous physical malloc_chunk.
    malloc_chunk_header * next_chunk()
    {
        return (malloc_chunk_header *)((char*)this + (_head & ~FLAG_BITS));
    }
    malloc_chunk_header * prev_chunk()
    {
        return (malloc_chunk_header *)((char*)this - (_prev_foot));
    }

    // extract next chunk's pinuse bit
    size_t next_pinuse() { return next_chunk()->_head & PINUSE_BIT; }

    size_t _prev_foot;  // Size of previous chunk (if free).
    size_t _head;       // Size and inuse bits.
};
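
/* --- Editor's note, not part of the original header: the _head encoding in
   action. An in-use 48-byte chunk whose predecessor is also in use stores
   _head == 48 | PINUSE_BIT | CINUSE_BIT == 51; chunksize() masks the flag bits
   back off (51 & ~FLAG_BITS == 48) and is_inuse() is true. An mmapped chunk
   keeps both inuse bits clear, which is exactly what is_mmapped() tests. --- */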

// ===============================================================================
struct malloc_chunk : public malloc_chunk_header
{
    // Set size, pinuse bit, foot, and clear next pinuse
    void set_free_with_pinuse(size_t s, malloc_chunk* n)
    {
        n->clear_pinuse();
        set_size_and_pinuse_of_free_chunk(s);
    }

    // Get the internal overhead associated with chunk p
    size_t overhead_for() { return is_mmapped() ? SPP_MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD; }

    // Return true if malloced space is not necessarily cleared
    bool calloc_must_clear()
    {
#if SPP_MMAP_CLEARS
        return !is_mmapped();
#else
        return true;
#endif
    }

    struct malloc_chunk* _fd; // double links -- used only if free.
    struct malloc_chunk* _bk;
};

static const unsigned MCHUNK_SIZE = sizeof(malloc_chunk);

/* The smallest size we can malloc is an aligned minimal chunk */
static const unsigned MIN_CHUNK_SIZE = (MCHUNK_SIZE + spp_chunk_align_mask) & ~spp_chunk_align_mask;

typedef malloc_chunk  mchunk;
typedef malloc_chunk* mchunkptr;
typedef malloc_chunk_header *hchunkptr;
typedef malloc_chunk* sbinptr;  // The type of bins of chunks
typedef unsigned int bindex_t;  // Described below
typedef unsigned int binmap_t;  // Described below
typedef unsigned int flag_t;    // The type of various bit flag sets

// conversion from malloc headers to user pointers, and back
static SPP_FORCEINLINE void *chunk2mem(const void *p)       { return (void *)((char *)p + 2 * sizeof(size_t)); }
static SPP_FORCEINLINE mchunkptr mem2chunk(const void *mem) { return (mchunkptr)((char *)mem - 2 * sizeof(size_t)); }

// chunk associated with aligned address A
static SPP_FORCEINLINE mchunkptr align_as_chunk(char *A)    { return (mchunkptr)(A + align_offset(chunk2mem(A))); }

// Bounds on request (not chunk) sizes.
static const unsigned MAX_REQUEST = (-MIN_CHUNK_SIZE) << 2;
static const unsigned MIN_REQUEST = MIN_CHUNK_SIZE - CHUNK_OVERHEAD - 1;

// pad request bytes into a usable size
static SPP_FORCEINLINE size_t pad_request(size_t req)
{
    return (req + CHUNK_OVERHEAD + spp_chunk_align_mask) & ~spp_chunk_align_mask;
}

// pad request, checking for minimum (but not maximum)
static SPP_FORCEINLINE size_t request2size(size_t req)
{
    return req < MIN_REQUEST ? MIN_CHUNK_SIZE : pad_request(req);
}
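
/* --- Editor's note, not part of the original header: the padding math above,
   worked on a typical 64-bit build (sizeof(size_t) == 8, SPP_FOOTERS == 0, so
   CHUNK_OVERHEAD == 8; MIN_CHUNK_SIZE == 32 and thus MIN_REQUEST == 23):
       request2size(1)  == 32                        // below MIN_REQUEST
       request2size(24) == (24 + 8 + 15) & ~15 == 32
       request2size(25) == (25 + 8 + 15) & ~15 == 48
   chunk2mem() then skips the two size_t header words, so the user pointer sits
   16 bytes past the chunk base. --- */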


/* ------------------ Operations on head and foot fields ----------------- */

/*
  The head field of a chunk is or'ed with PINUSE_BIT when previous
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
  use, unless mmapped, in which case both bits are cleared.

  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
*/

// Head value for fenceposts
static const unsigned FENCEPOST_HEAD = INUSE_BITS | sizeof(size_t);


/* ---------------------- Overlaid data structures ----------------------- */

/*
  When chunks are not in use, they are treated as nodes of either
  lists or trees.

  "Small" chunks are stored in circular doubly-linked lists, and look
  like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

  Larger chunks are kept in a form of bitwise digital trees (aka
  tries) keyed on chunksizes. Because malloc_tree_chunks are only for
  free chunks greater than 256 bytes, their size doesn't impose any
  constraints on user chunk sizes. Each node looks like:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk of same size        |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk of same size       |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Pointer to left child (child[0])                  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Pointer to right child (child[1])                 |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Pointer to parent                                 |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             bin index of this chunk                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space                                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

  Each tree holding treenodes is a tree of unique chunk sizes. Chunks
  of the same size are arranged in a circularly-linked list, with only
  the oldest chunk (the next to be used, in our FIFO ordering)
  actually in the tree. (Tree members are distinguished by a non-null
  parent pointer.) If a chunk with the same size as an existing node
  is inserted, it is linked off the existing node using pointers that
  work in the same way as fd/bk pointers of small chunks.

  Each tree contains a power of 2 sized range of chunk sizes (the
  smallest is 0x100 <= x < 0x180), which is divided in half at each
  tree level, with the chunks in the smaller half of the range (0x100
  <= x < 0x140 for the top node) in the left subtree and the larger
  half (0x140 <= x < 0x180) in the right subtree. This is, of course,
  done by inspecting individual bits.

  Using these rules, each node's left subtree contains all smaller
  sizes than its right subtree. However, the node at the root of each
  subtree has no particular ordering relationship to either. (The
  dividing line between the subtree sizes is based on trie relation.)
  If we remove the last chunk of a given size from the interior of the
  tree, we need to replace it with a leaf node. The tree ordering
  rules permit a node to be replaced by any leaf below it.

  The smallest chunk in a tree (a common operation in a best-fit
  allocator) can be found by walking a path to the leftmost leaf in
  the tree. Unlike a usual binary tree, where we follow left child
  pointers until we reach a null, here we follow the right child
  pointer any time the left one is null, until we reach a leaf with
  both child pointers null. The smallest chunk in the tree will be
  somewhere along that path.

  The worst case number of steps to add, find, or remove a node is
  bounded by the number of bits differentiating chunks within
  bins. Under current bin calculations, this ranges from 6 up to 21
  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  is of course much better.
*/

// ===============================================================================
struct malloc_tree_chunk : public malloc_chunk_header
{
    malloc_tree_chunk *leftmost_child()
    {
        return _child[0] ? _child[0] : _child[1];
    }


    malloc_tree_chunk* _fd;
    malloc_tree_chunk* _bk;

    malloc_tree_chunk* _child[2];
    malloc_tree_chunk* _parent;
    bindex_t           _index;
};

typedef malloc_tree_chunk  tchunk;
typedef malloc_tree_chunk* tchunkptr;
typedef malloc_tree_chunk* tbinptr; // The type of bins of trees
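
/* --- Editor's sketch, not part of the original header: the "leftmost leaf"
   walk described in the comment block above, as a hypothetical helper in an
   `#if 0` block (the allocator's real best-fit search is part of malloc_state,
   beyond this excerpt). The minimum is tracked along the path because the
   smallest chunk is only guaranteed to lie somewhere on it. --- */
#if 0
static tchunkptr smallest_in_tree(tchunkptr t)
{
    tchunkptr best = t;
    for (tchunkptr u = t->leftmost_child(); u != 0; u = u->leftmost_child())
        if (u->chunksize() < best->chunksize())
            best = u;   // follows child[0], or child[1] whenever the left is null
    return best;
}
#endif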

/* ----------------------------- Segments -------------------------------- */

/*
  Each malloc space may include non-contiguous segments, held in a
  list headed by an embedded malloc_segment record representing the
  top-most space. Segments also include flags holding properties of
  the space. Large chunks that are directly allocated by mmap are not
  included in this list. They are instead independently created and
  destroyed without otherwise keeping track of them.

  Segment management mainly comes into play for spaces allocated by
  MMAP. Any call to MMAP might or might not return memory that is
  adjacent to an existing segment. MORECORE normally contiguously
  extends the current space, so this space is almost always adjacent,
  which is simpler and faster to deal with. (This is why MORECORE is
  used preferentially to MMAP when both are available -- see
  sys_alloc.) When allocating using MMAP, we don't use any of the
  hinting mechanisms (inconsistently) supported in various
  implementations of unix mmap, or distinguish reserving from
  committing memory. Instead, we just ask for space, and exploit
  contiguity when we get it. It is probably possible to do
  better than this on some systems, but no general scheme seems
  to be significantly better.

  Management entails a simpler variant of the consolidation scheme
  used for chunks to reduce fragmentation -- new adjacent memory is
  normally prepended or appended to an existing segment. However,
  there are limitations compared to chunk consolidation that mostly
  reflect the fact that segment processing is relatively infrequent
  (occurring only when getting memory from system) and that we
  don't expect to have huge numbers of segments:

  * Segments are not indexed, so traversal requires linear scans. (It
    would be possible to index these, but is not worth the extra
    overhead and complexity for most programs on most platforms.)
  * New segments are only appended to old ones when holding top-most
    memory; if they cannot be prepended to others, they are held in
    different segments.

  Except for the top-most segment of an mstate, each segment record
  is kept at the tail of its segment. Segments are added by pushing
  segment records onto the list headed by &mstate.seg for the
  containing mstate.

  Segment flags control allocation/merge/deallocation policies:
  * If EXTERN_BIT set, then we did not allocate this segment,
    and so should not try to deallocate or merge with others.
    (This currently holds only for the initial segment passed
    into create_mspace_with_base.)
  * If USE_MMAP_BIT set, the segment may be merged with
    other surrounding mmapped segments and trimmed/de-allocated
    using munmap.
  * If neither bit is set, then the segment was obtained using
    MORECORE so can be merged with surrounding MORECORE'd segments
    and deallocated/trimmed using MORECORE with negative arguments.
*/

// ===============================================================================
struct malloc_segment
{
    bool is_mmapped_segment() { return !!(_sflags & USE_MMAP_BIT); }
    bool is_extern_segment()  { return !!(_sflags & EXTERN_BIT); }

    char*           _base;   // base address
    size_t          _size;   // allocated size
    malloc_segment* _next;   // ptr to next segment
    flag_t          _sflags; // mmap and extern flag
};

typedef malloc_segment  msegment;
typedef malloc_segment* msegmentptr;

/* ------------- Malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams. Note that the non-zeroness of "magic"
  also serves as an initialization flag.
*/

// ===============================================================================
struct malloc_params
{
    malloc_params() : _magic(0) {}

    void ensure_initialization()
    {
        if (!_magic)
            _init();
    }

    SPP_IMPL int change(int param_number, int value);

    size_t page_align(size_t sz)
    {
        return (sz + (_page_size - 1)) & ~(_page_size - 1);
    }

    size_t granularity_align(size_t sz)
    {
        return (sz + (_granularity - 1)) & ~(_granularity - 1);
    }

    bool is_page_aligned(char *S)
    {
        return ((size_t)S & (_page_size - 1)) == 0;
    }

    SPP_IMPL int _init();

    size_t _magic;
    size_t _page_size;
    size_t _granularity;
    size_t _mmap_threshold;
    size_t _trim_threshold;
    flag_t _default_mflags;
};

static malloc_params mparams;
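
/* --- Editor's note, not part of the original header: page_align() and
   granularity_align() use the usual round-up-by-mask trick, which is why
   _page_size and _granularity must be powers of two. With _page_size == 4096:
       page_align(1)    == (1 + 4095) & ~4095 == 4096
       page_align(4096) == 4096   // already aligned, unchanged
       page_align(4097) == 8192 --- */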

/* ---------------------------- malloc_state ----------------------------- */

/*
   A malloc_state holds all of the bookkeeping for a space.
   The main fields are:

   Top
       The topmost chunk of the currently active segment. Its size is
       cached in topsize. The actual size of topmost space is
       topsize+TOP_FOOT_SIZE, which includes space reserved for adding
       fenceposts and segment records if necessary when getting more
       space from the system. The size at which to autotrim top is
       cached from mparams in trim_check, except that it is disabled if
       an autotrim fails.

   Designated victim (dv)
       This is the preferred chunk for servicing small requests that
       don't have exact fits. It is normally the chunk split off most
       recently to service another small request. Its size is cached in
       dvsize. The link fields of this chunk are not maintained since it
       is not kept in a bin.

   SmallBins
       An array of bin headers for free chunks. These bins hold chunks
       with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
       chunks of all the same size, spaced 8 bytes apart. To simplify
       use in double-linked lists, each bin header acts as a malloc_chunk
       pointing to the real first node, if it exists (else pointing to
       itself). This avoids special-casing for headers. But to avoid
       waste, we allocate only the fd/bk pointers of bins, and then use
       repositioning tricks to treat these as the fields of a chunk.

   TreeBins
       Treebins are pointers to the roots of trees holding a range of
       sizes. There are 2 equally spaced treebins for each power of two
       from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
       larger.

   Bin maps
       There is one bit map for small bins ("smallmap") and one for
       treebins ("treemap"). Each bin sets its bit when non-empty, and
       clears the bit when empty. Bit operations are then used to avoid
       bin-by-bin searching -- nearly all "search" is done without ever
       looking at bins that won't be selected. The bit maps
       conservatively use 32 bits per map word, even on a 64-bit system.
       For a good description of some of the bit-based techniques used
       here, see Henry S. Warren Jr's book "Hacker's Delight" (and
       supplement at http://hackersdelight.org/). Many of these are
       intended to reduce the branchiness of paths through malloc etc, as
       well as to reduce the number of memory locations read or written.

   Segments
       A list of segments headed by an embedded malloc_segment record
       representing the initial space.

   Address check support
       The least_addr field is the least address ever obtained from
       MORECORE or MMAP. Attempted frees and reallocs of any address less
       than this are trapped (unless SPP_INSECURE is defined).

   Magic tag
       A cross-check field that should always hold same value as mparams._magic.

   Max allowed footprint
       The maximum allowed bytes to allocate from system (zero means no limit)

   Flags
       Bits recording whether to use MMAP, locks, or contiguous MORECORE

   Statistics
       Each space keeps track of current and maximum system memory
       obtained via MORECORE or MMAP.

   Trim support
       Fields holding the amount of unused topmost memory that should trigger
       trimming, and a counter to force periodic scanning to release unused
       non-topmost segments.

   Extension support
       A void* pointer and a size_t field that can be used to help implement
       extensions to this malloc.
*/


// ================================================================================
class malloc_state
{
public:
    /* ----------------------- _malloc, _free, etc... --- */
    SPP_FORCEINLINE void* _malloc(size_t bytes);
    SPP_FORCEINLINE void  _free(mchunkptr p);


    /* ------------------------ Relays to internal calls to malloc/free from realloc, memalign etc */
    void *internal_malloc(size_t b) { return mspace_malloc(this, b); }
    void internal_free(void *mem)   { mspace_free(this, mem); }

    /* ------------------------ ----------------------- */

    SPP_IMPL void init_top(mchunkptr p, size_t psize);
    SPP_IMPL void init_bins();
    SPP_IMPL void init(char* tbase, size_t tsize);

    /* ------------------------ System alloc/dealloc -------------------------- */
    SPP_IMPL void*  sys_alloc(size_t nb);
    SPP_IMPL size_t release_unused_segments();
    SPP_IMPL int    sys_trim(size_t pad);
    SPP_IMPL void   dispose_chunk(mchunkptr p, size_t psize);

    /* ----------------------- Internal support for realloc, memalign, etc --- */
    SPP_IMPL mchunkptr try_realloc_chunk(mchunkptr p, size_t nb, int can_move);
    SPP_IMPL void*     internal_memalign(size_t alignment, size_t bytes);
    SPP_IMPL void**    ialloc(size_t n_elements, size_t* sizes, int opts, void* chunks[]);
    SPP_IMPL size_t    internal_bulk_free(void* array[], size_t nelem);
    SPP_IMPL void      internal_inspect_all(void(*handler)(void *start, void *end,
                                                           size_t used_bytes, void* callback_arg),
                                            void* arg);

    /* -------------------------- system alloc setup (Operations on mflags) ----- */
    bool use_lock() const { return false; }
    void enable_lock()    {}
    void set_lock(int)    {}
    void disable_lock()   {}

    bool use_mmap() const { return !!(_mflags & USE_MMAP_BIT); }
    void enable_mmap()    { _mflags |= USE_MMAP_BIT; }

#if SPP_HAVE_MMAP
    void disable_mmap()   { _mflags &= ~USE_MMAP_BIT; }
#else
    void disable_mmap()   {}
#endif

    /* ----------------------- Runtime Check Support ------------------------- */

    /*
      For security, the main invariant is that malloc/free/etc never
      writes to a static address other than malloc_state, unless static
      malloc_state itself has been corrupted, which cannot occur via
      malloc (because of these checks). In essence this means that we
      believe all pointers, sizes, maps etc held in malloc_state, but
      check all of those linked or offsetted from other embedded data
      structures. These checks are interspersed with main code in a way
      that tends to minimize their run-time cost.

      When SPP_FOOTERS is defined, in addition to range checking, we also
      verify footer fields of inuse chunks, which can be used to guarantee
      that the mstate controlling malloc/free is intact. This is a
      streamlined version of the approach described by William Robertson
      et al in "Run-time Detection of Heap-based Overflows" LISA'03
      http://www.usenix.org/events/lisa03/tech/robertson.html The footer
      of an inuse chunk holds the xor of its mstate and a random seed,
      that is checked upon calls to free() and realloc(). This is
      (probabilistically) unguessable from outside the program, but can be
      computed by any code successfully malloc'ing any chunk, so does not
      itself provide protection against code that has already broken
      security through some other means. Unlike Robertson et al, we
      always dynamically check addresses of all offset chunks (previous,
      next, etc). This turns out to be cheaper than relying on hashes.
    */


#if !SPP_INSECURE
    // Check if address a is at least as high as any from MORECORE or MMAP
    bool        ok_address(void *a) const { return (char *)a >= _least_addr; }

    // Check if address of next chunk n is higher than base chunk p
    static bool ok_next(void *p, void *n) { return p < n; }

    // Check if p has inuse status
    static bool ok_inuse(mchunkptr p)     { return p->is_inuse(); }

    // Check if p has its pinuse bit on
    static bool ok_pinuse(mchunkptr p)    { return p->pinuse(); }

    // Check if (alleged) mstate m has expected magic field
    bool        ok_magic() const          { return _magic == mparams._magic; }

    // In gcc, use __builtin_expect to minimize impact of checks
    #if defined(__GNUC__) && __GNUC__ >= 3
        static bool rtcheck(bool e) { return __builtin_expect(e, 1); }
    #else
        static bool rtcheck(bool e) { return e; }
    #endif
#else
    static bool ok_address(void *)      { return true; }
    static bool ok_next(void *, void *) { return true; }
    static bool ok_inuse(mchunkptr)     { return true; }
    static bool ok_pinuse(mchunkptr)    { return true; }
    static bool ok_magic()              { return true; }
    static bool rtcheck(bool)           { return true; }
#endif

    bool is_initialized() const { return _top != 0; }

    bool use_noncontiguous() const { return !!(_mflags & USE_NONCONTIGUOUS_BIT); }
    void disable_contiguous()      { _mflags |= USE_NONCONTIGUOUS_BIT; }

    // Return segment holding given address
    msegmentptr segment_holding(char* addr) const
    {
        msegmentptr sp = (msegmentptr)&_seg;
        for (;;)
        {
            if (addr >= sp->_base && addr < sp->_base + sp->_size)
                return sp;
            if ((sp = sp->_next) == 0)
                return 0;
        }
    }

    // Return true if segment contains a segment link
    int has_segment_link(msegmentptr ss) const
    {
        msegmentptr sp = (msegmentptr)&_seg;
        for (;;)
        {
            if ((char*)sp >= ss->_base && (char*)sp < ss->_base + ss->_size)
                return 1;
            if ((sp = sp->_next) == 0)
                return 0;
        }
    }

    bool should_trim(size_t s) const { return s > _trim_check; }

    /* -------------------------- Debugging setup ---------------------------- */

#if ! SPP_DEBUG
    void check_free_chunk(mchunkptr) {}
    void check_inuse_chunk(mchunkptr) {}
    void check_malloced_chunk(void*, size_t) {}
    void check_mmapped_chunk(mchunkptr) {}
    void check_malloc_state() {}
    void check_top_chunk(mchunkptr) {}
#else /* SPP_DEBUG */
    void check_free_chunk(mchunkptr p)           { do_check_free_chunk(p); }
    void check_inuse_chunk(mchunkptr p)          { do_check_inuse_chunk(p); }
    void check_malloced_chunk(void* p, size_t s) { do_check_malloced_chunk(p, s); }
    void check_mmapped_chunk(mchunkptr p)        { do_check_mmapped_chunk(p); }
    void check_malloc_state()                    { do_check_malloc_state(); }
    void check_top_chunk(mchunkptr p)            { do_check_top_chunk(p); }

    void do_check_any_chunk(mchunkptr p) const;
    void do_check_top_chunk(mchunkptr p) const;
    void do_check_mmapped_chunk(mchunkptr p) const;
    void do_check_inuse_chunk(mchunkptr p) const;
    void do_check_free_chunk(mchunkptr p) const;
    void do_check_malloced_chunk(void* mem, size_t s) const;
    void do_check_tree(tchunkptr t);
    void do_check_treebin(bindex_t i);
    void do_check_smallbin(bindex_t i);
    void do_check_malloc_state();
    int  bin_find(mchunkptr x);
    size_t traverse_and_check();
#endif

private:

    /* ---------------------------- Indexing Bins ---------------------------- */

    static bool     is_small(size_t s)         { return (s >> SMALLBIN_SHIFT) < NSMALLBINS; }
    static bindex_t small_index(size_t s)      { return (bindex_t)(s >> SMALLBIN_SHIFT); }
    static size_t   small_index2size(size_t i) { return i << SMALLBIN_SHIFT; }
    static bindex_t MIN_SMALL_INDEX()          { return small_index(MIN_CHUNK_SIZE); }

    // assign tree index for size S to variable I. Use x86 asm if possible
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S)
    {
        unsigned int X = S >> TREEBIN_SHIFT;
        if (X == 0)
            return 0;
        else if (X > 0xFFFF)
            return NTREEBINS - 1;

        unsigned int K = (unsigned) sizeof(X) * __CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X);
        return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)));
    }

#elif defined (__INTEL_COMPILER)
    SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S)
    {
        size_t X = S >> TREEBIN_SHIFT;
        if (X == 0)
            return 0;
        else if (X > 0xFFFF)
            return NTREEBINS - 1;

        unsigned int K = _bit_scan_reverse(X);
        return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)));
    }

#elif defined(_MSC_VER) && _MSC_VER>=1300
    SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S)
    {
        size_t X = S >> TREEBIN_SHIFT;
        if (X == 0)
            return 0;
        else if (X > 0xFFFF)
            return NTREEBINS - 1;

        unsigned int K;
        _BitScanReverse((DWORD *) &K, (DWORD) X);
        return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)));
    }

#else // GNUC
    SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S)
    {
        size_t X = S >> TREEBIN_SHIFT;
        if (X == 0)
            return 0;
        else if (X > 0xFFFF)
            return NTREEBINS - 1;

        unsigned int Y = (unsigned int)X;
        unsigned int N = ((Y - 0x100) >> 16) & 8;
        unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
        N += K;
        N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
        K = 14 - N + ((Y <<= K) >> 15);
        return (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1));
    }
#endif
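
    /* --- Editor's note, not part of the original header: all four variants
       above compute the same index. With X = S >> TREEBIN_SHIFT, K is
       floor(log2(X)); each power of two gets two bins, and the bit of S just
       below the leading bit picks between them. Assuming the dlmalloc value
       TREEBIN_SHIFT == 8, S == 0x300 gives X == 3, K == 1, and the index is
       (1 << 1) + 1 == 3. --- */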
|
1350
|
+
|
1351
|
+
// Shift placing maximum resolved bit in a treebin at i as sign bit
|
1352
|
+
static bindex_t leftshift_for_tree_index(bindex_t i)
|
1353
|
+
{
|
1354
|
+
return (i == NTREEBINS - 1) ? 0 :
|
1355
|
+
((spp_size_t_bitsize - 1) - ((i >> 1) + TREEBIN_SHIFT - 2));
|
1356
|
+
}
|
1357
|
+
|
1358
|
+
// The size of the smallest chunk held in bin with index i
|
1359
|
+
static bindex_t minsize_for_tree_index(bindex_t i)
|
1360
|
+
{
|
1361
|
+
return ((size_t)1 << ((i >> 1) + TREEBIN_SHIFT)) |
|
1362
|
+
(((size_t)(i & 1)) << ((i >> 1) + TREEBIN_SHIFT - 1));
|
1363
|
+
}
|
1364
|
+
|
1365
|
+
|
1366
|
+
// ----------- isolate the least set bit of a bitmap
|
1367
|
+
static binmap_t least_bit(binmap_t x) { return x & -x; }
|
1368
|
+
|
1369
|
+
// ----------- mask with all bits to left of least bit of x on
|
1370
|
+
static binmap_t left_bits(binmap_t x) { return (x << 1) | -(x << 1); }

    // index corresponding to given bit. Use x86 asm if possible
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    static bindex_t compute_bit2idx(binmap_t X)
    {
        unsigned int J;
        J = __builtin_ctz(X);
        return (bindex_t)J;
    }

#elif defined (__INTEL_COMPILER)
    static bindex_t compute_bit2idx(binmap_t X)
    {
        unsigned int J;
        J = _bit_scan_forward(X);
        return (bindex_t)J;
    }

#elif defined(_MSC_VER) && _MSC_VER>=1300
    static bindex_t compute_bit2idx(binmap_t X)
    {
        unsigned int J;
        _BitScanForward((DWORD *) &J, X);
        return (bindex_t)J;
    }

#elif SPP_USE_BUILTIN_FFS
    static bindex_t compute_bit2idx(binmap_t X) { return ffs(X) - 1; }

#else
    static bindex_t compute_bit2idx(binmap_t X)
    {
        unsigned int Y = X - 1;
        unsigned int K = Y >> (16 - 4) & 16;
        unsigned int N = K; Y >>= K;
        N += K = Y >> (8 - 3) & 8; Y >>= K;
        N += K = Y >> (4 - 2) & 4; Y >>= K;
        N += K = Y >> (2 - 1) & 2; Y >>= K;
        N += K = Y >> (1 - 0) & 1; Y >>= K;
        return (bindex_t)(N + Y);
    }
#endif
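
    /* Worked example for the portable fallback (editorial note): for
       X = 0x08, Y starts as 7; the five shift-and-test steps accumulate
       N = 2 and leave Y = 1, so the function returns N + Y = 3, the index
       of the single set bit. The other variants get the same answer
       directly from a count-trailing-zeros instruction. */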

    /* ------------------------ Set up inuse chunks with or without footers ---*/
#if !SPP_FOOTERS
    void mark_inuse_foot(malloc_chunk_header *, size_t) {}
#else
    // Set foot of inuse chunk to be xor of mstate and seed
    void mark_inuse_foot(malloc_chunk_header *p, size_t s)
    {
        (((mchunkptr)((char*)p + s))->prev_foot = (size_t)this ^ mparams._magic);
    }
#endif

    void set_inuse(malloc_chunk_header *p, size_t s)
    {
        p->_head = (p->_head & PINUSE_BIT) | s | CINUSE_BIT;
        ((mchunkptr)(((char*)p) + s))->_head |= PINUSE_BIT;
        mark_inuse_foot(p, s);
    }

    void set_inuse_and_pinuse(malloc_chunk_header *p, size_t s)
    {
        p->_head = s | PINUSE_BIT | CINUSE_BIT;
        ((mchunkptr)(((char*)p) + s))->_head |= PINUSE_BIT;
        mark_inuse_foot(p, s);
    }

    void set_size_and_pinuse_of_inuse_chunk(malloc_chunk_header *p, size_t s)
    {
        p->_head = s | PINUSE_BIT | CINUSE_BIT;
        mark_inuse_foot(p, s);
    }

    /* ------------------------ Addressing by index. See about smallbin repositioning --- */
    sbinptr smallbin_at(bindex_t i) const { return (sbinptr)((char*)&_smallbins[i << 1]); }
    tbinptr* treebin_at(bindex_t i) { return &_treebins[i]; }

    /* ----------------------- bit corresponding to given index ---------*/
    static binmap_t idx2bit(bindex_t i) { return ((binmap_t)1 << i); }

    // --------------- Mark/Clear bits with given index
    void mark_smallmap(bindex_t i) { _smallmap |= idx2bit(i); }
    void clear_smallmap(bindex_t i) { _smallmap &= ~idx2bit(i); }
    binmap_t smallmap_is_marked(bindex_t i) const { return _smallmap & idx2bit(i); }

    void mark_treemap(bindex_t i) { _treemap |= idx2bit(i); }
    void clear_treemap(bindex_t i) { _treemap &= ~idx2bit(i); }
    binmap_t treemap_is_marked(bindex_t i) const { return _treemap & idx2bit(i); }

    /* ------------------------ ----------------------- */
    SPP_FORCEINLINE void insert_small_chunk(mchunkptr P, size_t S);
    SPP_FORCEINLINE void unlink_small_chunk(mchunkptr P, size_t S);
    SPP_FORCEINLINE void unlink_first_small_chunk(mchunkptr B, mchunkptr P, bindex_t I);
    SPP_FORCEINLINE void replace_dv(mchunkptr P, size_t S);

    /* ------------------------- Operations on trees ------------------------- */
    SPP_FORCEINLINE void insert_large_chunk(tchunkptr X, size_t S);
    SPP_FORCEINLINE void unlink_large_chunk(tchunkptr X);

    /* ------------------------ Relays to large vs small bin operations */
    SPP_FORCEINLINE void insert_chunk(mchunkptr P, size_t S);
    SPP_FORCEINLINE void unlink_chunk(mchunkptr P, size_t S);

    /* ----------------------- Direct-mmapping chunks ----------------------- */
    SPP_IMPL void* mmap_alloc(size_t nb);
    SPP_IMPL mchunkptr mmap_resize(mchunkptr oldp, size_t nb, int flags);

    SPP_IMPL void reset_on_error();
    SPP_IMPL void* prepend_alloc(char* newbase, char* oldbase, size_t nb);
    SPP_IMPL void add_segment(char* tbase, size_t tsize, flag_t mmapped);

    /* ------------------------ malloc --------------------------- */
    SPP_IMPL void* tmalloc_large(size_t nb);
    SPP_IMPL void* tmalloc_small(size_t nb);

    /* ------------------------ Bin types, widths and sizes -------- */
    static const size_t NSMALLBINS = 32;
    static const size_t NTREEBINS = 32;
    static const size_t SMALLBIN_SHIFT = 3;
    static const size_t SMALLBIN_WIDTH = 1 << SMALLBIN_SHIFT;
    static const size_t TREEBIN_SHIFT = 8;
    static const size_t MIN_LARGE_SIZE = 1 << TREEBIN_SHIFT;
    static const size_t MAX_SMALL_SIZE = (MIN_LARGE_SIZE - 1);
    static const size_t MAX_SMALL_REQUEST = (MAX_SMALL_SIZE - spp_chunk_align_mask - CHUNK_OVERHEAD);
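
    /* Editorial note on the geometry these constants define: smallbins are
       spaced SMALLBIN_WIDTH = 8 bytes apart, so the 32 smallbins cover chunk
       sizes below MIN_LARGE_SIZE = 256; any chunk of 256 bytes or more is
       routed to one of the 32 treebins via compute_tree_index above. */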

    /* ------------------------ data members --------------------------- */
    binmap_t _smallmap;
    binmap_t _treemap;
    size_t _dvsize;
    size_t _topsize;
    char* _least_addr;
    mchunkptr _dv;
    mchunkptr _top;
    size_t _trim_check;
    size_t _release_checks;
    size_t _magic;
    mchunkptr _smallbins[(NSMALLBINS + 1) * 2];
    tbinptr _treebins[NTREEBINS];
public:
    size_t _footprint;
    size_t _max_footprint;
    size_t _footprint_limit; // zero means no limit
    flag_t _mflags;

    msegment _seg;

private:
    void* _extp; // Unused but available for extensions
    size_t _exts;
};

typedef malloc_state* mstate;

/* ------------- end malloc_state ------------------- */

#if SPP_FOOTERS
static malloc_state* get_mstate_for(malloc_chunk_header *p)
{
    return (malloc_state*)(((mchunkptr)((char*)(p) +
                                        (p->chunksize())))->prev_foot ^ mparams._magic);
}
#endif

/* -------------------------- system alloc setup ------------------------- */


// For mmap, use granularity alignment on windows, else page-align
#ifdef WIN32
    #define mmap_align(S) mparams.granularity_align(S)
#else
    #define mmap_align(S) mparams.page_align(S)
#endif

// True if segment S holds address A
static bool segment_holds(msegmentptr S, mchunkptr A)
{
    return (char*)A >= S->_base && (char*)A < S->_base + S->_size;
}

/*
   top_foot_size is padding at the end of a segment, including space
   that may be needed to place segment records and fenceposts when new
   noncontiguous segments are added.
*/
static SPP_FORCEINLINE size_t top_foot_size()
{
    return align_offset(chunk2mem((void *)0)) +
           pad_request(sizeof(struct malloc_segment)) +
           MIN_CHUNK_SIZE;
}


// For sys_alloc, enough padding to ensure can malloc request on success
static SPP_FORCEINLINE size_t sys_alloc_padding()
{
    return top_foot_size() + SPP_MALLOC_ALIGNMENT;
}


#define SPP_USAGE_ERROR_ACTION(m,p) SPP_ABORT

/* ---------------------------- setting mparams -------------------------- */

// Initialize mparams
int malloc_params::_init()
{
#ifdef NEED_GLOBAL_LOCK_INIT
    if (malloc_global_mutex_status <= 0)
        init_malloc_global_mutex();
#endif

    if (_magic == 0)
    {
        size_t magic;
        size_t psize;
        size_t gsize;

#ifndef WIN32
        psize = malloc_getpagesize;
        gsize = ((SPP_DEFAULT_GRANULARITY != 0) ? SPP_DEFAULT_GRANULARITY : psize);
#else
        {
            SYSTEM_INFO system_info;
            GetSystemInfo(&system_info);
            psize = system_info.dwPageSize;
            gsize = ((SPP_DEFAULT_GRANULARITY != 0) ?
                     SPP_DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
        }
#endif

        /* Sanity-check configuration:
           size_t must be unsigned and as wide as pointer type.
           ints must be at least 4 bytes.
           alignment must be at least 8.
           Alignment, min chunk size, and page size must all be powers of 2.
        */
        if ((sizeof(size_t) != sizeof(char*)) ||
            (spp_max_size_t < MIN_CHUNK_SIZE) ||
            (sizeof(int) < 4) ||
            (SPP_MALLOC_ALIGNMENT < (size_t)8U) ||
            ((SPP_MALLOC_ALIGNMENT & (SPP_MALLOC_ALIGNMENT - 1)) != 0) ||
            ((MCHUNK_SIZE & (MCHUNK_SIZE - 1)) != 0) ||
            ((gsize & (gsize - 1)) != 0) ||
            ((psize & (psize - 1)) != 0))
            SPP_ABORT;
        _granularity = gsize;
        _page_size = psize;
        _mmap_threshold = SPP_DEFAULT_MMAP_THRESHOLD;
        _trim_threshold = SPP_DEFAULT_TRIM_THRESHOLD;
        _default_mflags = USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;

        {
#if SPP_USE_DEV_RANDOM
            int fd;
            unsigned char buf[sizeof(size_t)];
            // Try to use /dev/urandom, else fall back on using time
            if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
                read(fd, buf, sizeof(buf)) == sizeof(buf))
            {
                magic = *((size_t *) buf);
                close(fd);
            }
            else
#endif
            {
#ifdef WIN32
                magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
#elif defined(SPP_LACKS_TIME_H)
                magic = (size_t)&magic ^ (size_t)0x55555555U;
#else
                magic = (size_t)(time(0) ^ (size_t)0x55555555U);
#endif
            }
            magic |= (size_t)8U;  // ensure nonzero
            magic &= ~(size_t)7U; // improve chances of fault for bad values
            // Until memory modes commonly available, use volatile-write
            (*(volatile size_t *)(&(_magic))) = magic;
        }
    }

    return 1;
}

/*
   mallopt tuning options. SVID/XPG defines four standard parameter
   numbers for mallopt, normally defined in malloc.h. None of these
   are used in this malloc, so setting them has no effect. But this
   malloc does support the following options.
*/
static const int m_trim_threshold = -1;
static const int m_granularity = -2;
static const int m_mmap_threshold = -3;

// support for mallopt
int malloc_params::change(int param_number, int value)
{
    size_t val;
    ensure_initialization();
    val = (value == -1) ? spp_max_size_t : (size_t)value;

    switch (param_number)
    {
    case m_trim_threshold:
        _trim_threshold = val;
        return 1;

    case m_granularity:
        if (val >= _page_size && ((val & (val - 1)) == 0))
        {
            _granularity = val;
            return 1;
        }
        else
            return 0;

    case m_mmap_threshold:
        _mmap_threshold = val;
        return 1;

    default:
        return 0;
    }
}
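
#if 0
// Illustrative usage (editorial sketch, not part of the original file):
// the granularity may only be replaced by a power of two no smaller than
// the page size, and a value of -1 always maps to spp_max_size_t.
mparams.change(m_granularity, 64 * 1024); // accepted, returns 1
mparams.change(m_granularity, 12345);     // rejected, returns 0
mparams.change(m_trim_threshold, -1);     // trim threshold := spp_max_size_t
#endif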

#if SPP_DEBUG
/* ------------------------- Debugging Support --------------------------- */

// Check properties of any chunk, whether free, inuse, mmapped etc
void malloc_state::do_check_any_chunk(mchunkptr p) const
{
    assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD));
    assert(ok_address(p));
}

// Check properties of top chunk
void malloc_state::do_check_top_chunk(mchunkptr p) const
{
    msegmentptr sp = segment_holding((char*)p);
    size_t sz = p->_head & ~INUSE_BITS; // third-lowest bit can be set!
    assert(sp != 0);
    assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD));
    assert(ok_address(p));
    assert(sz == _topsize);
    assert(sz > 0);
    assert(sz == ((sp->_base + sp->_size) - (char*)p) - top_foot_size());
    assert(p->pinuse());
    assert(!p->chunk_plus_offset(sz)->pinuse());
}

// Check properties of (inuse) mmapped chunks
void malloc_state::do_check_mmapped_chunk(mchunkptr p) const
{
    size_t sz = p->chunksize();
    size_t len = (sz + (p->_prev_foot) + SPP_MMAP_FOOT_PAD);
    assert(p->is_mmapped());
    assert(use_mmap());
    assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD));
    assert(ok_address(p));
    assert(!is_small(sz));
    assert((len & (mparams._page_size - 1)) == 0);
    assert(p->chunk_plus_offset(sz)->_head == FENCEPOST_HEAD);
    assert(p->chunk_plus_offset(sz + sizeof(size_t))->_head == 0);
}

// Check properties of inuse chunks
void malloc_state::do_check_inuse_chunk(mchunkptr p) const
{
    do_check_any_chunk(p);
    assert(p->is_inuse());
    assert(p->next_pinuse());
    // If not pinuse and not mmapped, previous chunk has OK offset
    assert(p->is_mmapped() || p->pinuse() || (mchunkptr)p->prev_chunk()->next_chunk() == p);
    if (p->is_mmapped())
        do_check_mmapped_chunk(p);
}

// Check properties of free chunks
void malloc_state::do_check_free_chunk(mchunkptr p) const
{
    size_t sz = p->chunksize();
    mchunkptr next = (mchunkptr)p->chunk_plus_offset(sz);
    do_check_any_chunk(p);
    assert(!p->is_inuse());
    assert(!p->next_pinuse());
    assert(!p->is_mmapped());
    if (p != _dv && p != _top)
    {
        if (sz >= MIN_CHUNK_SIZE)
        {
            assert((sz & spp_chunk_align_mask) == 0);
            assert(spp_is_aligned(chunk2mem(p)));
            assert(next->_prev_foot == sz);
            assert(p->pinuse());
            assert(next == _top || next->is_inuse());
            assert(p->_fd->_bk == p);
            assert(p->_bk->_fd == p);
        }
        else // markers are always of size sizeof(size_t)
            assert(sz == sizeof(size_t));
    }
}

// Check properties of malloced chunks at the point they are malloced
void malloc_state::do_check_malloced_chunk(void* mem, size_t s) const
{
    if (mem != 0)
    {
        mchunkptr p = mem2chunk(mem);
        size_t sz = p->_head & ~INUSE_BITS;
        do_check_inuse_chunk(p);
        assert((sz & spp_chunk_align_mask) == 0);
        assert(sz >= MIN_CHUNK_SIZE);
        assert(sz >= s);
        // unless mmapped, size is less than MIN_CHUNK_SIZE more than request
        assert(p->is_mmapped() || sz < (s + MIN_CHUNK_SIZE));
    }
}

// Check a tree and its subtrees.
void malloc_state::do_check_tree(tchunkptr t)
{
    tchunkptr head = 0;
    tchunkptr u = t;
    bindex_t tindex = t->_index;
    size_t tsize = t->chunksize();
    bindex_t idx = compute_tree_index(tsize);
    assert(tindex == idx);
    assert(tsize >= MIN_LARGE_SIZE);
    assert(tsize >= minsize_for_tree_index(idx));
    assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1))));

    do
    {
        // traverse through chain of same-sized nodes
        do_check_any_chunk((mchunkptr)u);
        assert(u->_index == tindex);
        assert(u->chunksize() == tsize);
        assert(!u->is_inuse());
        assert(!u->next_pinuse());
        assert(u->_fd->_bk == u);
        assert(u->_bk->_fd == u);
        if (u->_parent == 0)
        {
            assert(u->_child[0] == 0);
            assert(u->_child[1] == 0);
        }
        else
        {
            assert(head == 0); // only one node on chain has parent
            head = u;
            assert(u->_parent != u);
            assert(u->_parent->_child[0] == u ||
                   u->_parent->_child[1] == u ||
                   *((tbinptr*)(u->_parent)) == u);
            if (u->_child[0] != 0)
            {
                assert(u->_child[0]->_parent == u);
                assert(u->_child[0] != u);
                do_check_tree(u->_child[0]);
            }
            if (u->_child[1] != 0)
            {
                assert(u->_child[1]->_parent == u);
                assert(u->_child[1] != u);
                do_check_tree(u->_child[1]);
            }
            if (u->_child[0] != 0 && u->_child[1] != 0)
                assert(u->_child[0]->chunksize() < u->_child[1]->chunksize());
        }
        u = u->_fd;
    }
    while (u != t);
    assert(head != 0);
}

// Check all the chunks in a treebin.
void malloc_state::do_check_treebin(bindex_t i)
{
    tbinptr* tb = (tbinptr*)treebin_at(i);
    tchunkptr t = *tb;
    int empty = (_treemap & (1U << i)) == 0;
    if (t == 0)
        assert(empty);
    if (!empty)
        do_check_tree(t);
}

// Check all the chunks in a smallbin.
void malloc_state::do_check_smallbin(bindex_t i)
{
    sbinptr b = smallbin_at(i);
    mchunkptr p = b->_bk;
    unsigned int empty = (_smallmap & (1U << i)) == 0;
    if (p == b)
        assert(empty);
    if (!empty)
    {
        for (; p != b; p = p->_bk)
        {
            size_t size = p->chunksize();
            mchunkptr q;
            // each chunk claims to be free
            do_check_free_chunk(p);
            // chunk belongs in bin
            assert(small_index(size) == i);
            assert(p->_bk == b || p->_bk->chunksize() == p->chunksize());
            // chunk is followed by an inuse chunk
            q = (mchunkptr)p->next_chunk();
            if (q->_head != FENCEPOST_HEAD)
                do_check_inuse_chunk(q);
        }
    }
}

// Find x in a bin. Used in other check functions.
int malloc_state::bin_find(mchunkptr x)
{
    size_t size = x->chunksize();
    if (is_small(size))
    {
        bindex_t sidx = small_index(size);
        sbinptr b = smallbin_at(sidx);
        if (smallmap_is_marked(sidx))
        {
            mchunkptr p = b;
            do
            {
                if (p == x)
                    return 1;
            }
            while ((p = p->_fd) != b);
        }
    }
    else
    {
        bindex_t tidx = compute_tree_index(size);
        if (treemap_is_marked(tidx))
        {
            tchunkptr t = *treebin_at(tidx);
            size_t sizebits = size << leftshift_for_tree_index(tidx);
            while (t != 0 && t->chunksize() != size)
            {
                t = t->_child[(sizebits >> (spp_size_t_bitsize - 1)) & 1];
                sizebits <<= 1;
            }
            if (t != 0)
            {
                tchunkptr u = t;
                do
                {
                    if (u == (tchunkptr)x)
                        return 1;
                }
                while ((u = u->_fd) != t);
            }
        }
    }
    return 0;
}

// Traverse each chunk and check it; return total
size_t malloc_state::traverse_and_check()
{
    size_t sum = 0;
    if (is_initialized())
    {
        msegmentptr s = (msegmentptr)&_seg;
        sum += _topsize + top_foot_size();
        while (s != 0)
        {
            mchunkptr q = align_as_chunk(s->_base);
            mchunkptr lastq = 0;
            assert(q->pinuse());
            while (segment_holds(s, q) &&
                   q != _top && q->_head != FENCEPOST_HEAD)
            {
                sum += q->chunksize();
                if (q->is_inuse())
                {
                    assert(!bin_find(q));
                    do_check_inuse_chunk(q);
                }
                else
                {
                    assert(q == _dv || bin_find(q));
                    assert(lastq == 0 || lastq->is_inuse()); // Not 2 consecutive free
                    do_check_free_chunk(q);
                }
                lastq = q;
                q = (mchunkptr)q->next_chunk();
            }
            s = s->_next;
        }
    }
    return sum;
}


// Check all properties of malloc_state.
void malloc_state::do_check_malloc_state()
{
    bindex_t i;
    size_t total;
    // check bins
    for (i = 0; i < NSMALLBINS; ++i)
        do_check_smallbin(i);
    for (i = 0; i < NTREEBINS; ++i)
        do_check_treebin(i);

    if (_dvsize != 0)
    {
        // check dv chunk
        do_check_any_chunk(_dv);
        assert(_dvsize == _dv->chunksize());
        assert(_dvsize >= MIN_CHUNK_SIZE);
        assert(bin_find(_dv) == 0);
    }

    if (_top != 0)
    {
        // check top chunk
        do_check_top_chunk(_top);
        //assert(topsize == top->chunksize()); redundant
        assert(_topsize > 0);
        assert(bin_find(_top) == 0);
    }

    total = traverse_and_check();
    assert(total <= _footprint);
    assert(_footprint <= _max_footprint);
}
#endif // SPP_DEBUG

/* ----------------------- Operations on smallbins ----------------------- */

/*
   Various forms of linking and unlinking are defined as macros. Even
   the ones for trees, which are very long but have very short typical
   paths. This is ugly but reduces reliance on inlining support of
   compilers.
*/

// Link a free chunk into a smallbin
void malloc_state::insert_small_chunk(mchunkptr p, size_t s)
{
    bindex_t I = small_index(s);
    mchunkptr B = smallbin_at(I);
    mchunkptr F = B;
    assert(s >= MIN_CHUNK_SIZE);
    if (!smallmap_is_marked(I))
        mark_smallmap(I);
    else if (rtcheck(ok_address(B->_fd)))
        F = B->_fd;
    else
        SPP_ABORT;
    B->_fd = p;
    F->_bk = p;
    p->_fd = F;
    p->_bk = B;
}
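
/* Editorial note: the splice above inserts p at the front of bin I's
   circular doubly-linked list. When the bin was empty (B == F), the list
   becomes B <-> p <-> B; otherwise p lands between the bin header B and the
   previous first chunk F. */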

// Unlink a chunk from a smallbin
void malloc_state::unlink_small_chunk(mchunkptr p, size_t s)
{
    mchunkptr F = p->_fd;
    mchunkptr B = p->_bk;
    bindex_t I = small_index(s);
    assert(p != B);
    assert(p != F);
    assert(p->chunksize() == small_index2size(I));
    if (rtcheck(F == smallbin_at(I) || (ok_address(F) && F->_bk == p)))
    {
        if (B == F)
            clear_smallmap(I);
        else if (rtcheck(B == smallbin_at(I) ||
                         (ok_address(B) && B->_fd == p)))
        {
            F->_bk = B;
            B->_fd = F;
        }
        else
            SPP_ABORT;
    }
    else
        SPP_ABORT;
}

// Unlink the first chunk from a smallbin
void malloc_state::unlink_first_small_chunk(mchunkptr B, mchunkptr p, bindex_t I)
{
    mchunkptr F = p->_fd;
    assert(p != B);
    assert(p != F);
    assert(p->chunksize() == small_index2size(I));
    if (B == F)
        clear_smallmap(I);
    else if (rtcheck(ok_address(F) && F->_bk == p))
    {
        F->_bk = B;
        B->_fd = F;
    }
    else
        SPP_ABORT;
}

// Replace dv node, binning the old one
// Used only when dvsize known to be small
void malloc_state::replace_dv(mchunkptr p, size_t s)
{
    size_t DVS = _dvsize;
    assert(is_small(DVS));
    if (DVS != 0)
    {
        mchunkptr DV = _dv;
        insert_small_chunk(DV, DVS);
    }
    _dvsize = s;
    _dv = p;
}

/* ------------------------- Operations on trees ------------------------- */

// Insert chunk into tree
void malloc_state::insert_large_chunk(tchunkptr X, size_t s)
{
    tbinptr* H;
    bindex_t I = compute_tree_index(s);
    H = treebin_at(I);
    X->_index = I;
    X->_child[0] = X->_child[1] = 0;
    if (!treemap_is_marked(I))
    {
        mark_treemap(I);
        *H = X;
        X->_parent = (tchunkptr)H;
        X->_fd = X->_bk = X;
    }
    else
    {
        tchunkptr T = *H;
        size_t K = s << leftshift_for_tree_index(I);
        for (;;)
        {
            if (T->chunksize() != s)
            {
                tchunkptr* C = &(T->_child[(K >> (spp_size_t_bitsize - 1)) & 1]);
                K <<= 1;
                if (*C != 0)
                    T = *C;
                else if (rtcheck(ok_address(C)))
                {
                    *C = X;
                    X->_parent = T;
                    X->_fd = X->_bk = X;
                    break;
                }
                else
                {
                    SPP_ABORT;
                    break;
                }
            }
            else
            {
                tchunkptr F = T->_fd;
                if (rtcheck(ok_address(T) && ok_address(F)))
                {
                    T->_fd = F->_bk = X;
                    X->_fd = F;
                    X->_bk = T;
                    X->_parent = 0;
                    break;
                }
                else
                {
                    SPP_ABORT;
                    break;
                }
            }
        }
    }
}

/*
   Unlink steps:

   1. If x is a chained node, unlink it from its same-sized fd/bk links
      and choose its bk node as its replacement.
   2. If x was the last node of its size, but not a leaf node, it must
      be replaced with a leaf node (not merely one with an open left or
      right), to make sure that lefts and rights of descendants
      correspond properly to bit masks. We use the rightmost descendant
      of x. We could use any other leaf, but this is easy to locate and
      tends to counteract removal of leftmosts elsewhere, and so keeps
      paths shorter than minimally guaranteed. This doesn't loop much
      because on average a node in a tree is near the bottom.
   3. If x is the base of a chain (i.e., has parent links) relink
      x's parent and children to x's replacement (or null if none).
*/

void malloc_state::unlink_large_chunk(tchunkptr X)
{
    tchunkptr XP = X->_parent;
    tchunkptr R;
    if (X->_bk != X)
    {
        tchunkptr F = X->_fd;
        R = X->_bk;
        if (rtcheck(ok_address(F) && F->_bk == X && R->_fd == X))
        {
            F->_bk = R;
            R->_fd = F;
        }
        else
            SPP_ABORT;
    }
    else
    {
        tchunkptr* RP;
        if (((R = *(RP = &(X->_child[1]))) != 0) ||
            ((R = *(RP = &(X->_child[0]))) != 0))
        {
            tchunkptr* CP;
            while ((*(CP = &(R->_child[1])) != 0) ||
                   (*(CP = &(R->_child[0])) != 0))
                R = *(RP = CP);
            if (rtcheck(ok_address(RP)))
                *RP = 0;
            else
                SPP_ABORT;
        }
    }
    if (XP != 0)
    {
        tbinptr* H = treebin_at(X->_index);
        if (X == *H)
        {
            if ((*H = R) == 0)
                clear_treemap(X->_index);
        }
        else if (rtcheck(ok_address(XP)))
        {
            if (XP->_child[0] == X)
                XP->_child[0] = R;
            else
                XP->_child[1] = R;
        }
        else
            SPP_ABORT;
        if (R != 0)
        {
            if (rtcheck(ok_address(R)))
            {
                tchunkptr C0, C1;
                R->_parent = XP;
                if ((C0 = X->_child[0]) != 0)
                {
                    if (rtcheck(ok_address(C0)))
                    {
                        R->_child[0] = C0;
                        C0->_parent = R;
                    }
                    else
                        SPP_ABORT;
                }
                if ((C1 = X->_child[1]) != 0)
                {
                    if (rtcheck(ok_address(C1)))
                    {
                        R->_child[1] = C1;
                        C1->_parent = R;
                    }
                    else
                        SPP_ABORT;
                }
            }
            else
                SPP_ABORT;
        }
    }
}

// Relays to large vs small bin operations

void malloc_state::insert_chunk(mchunkptr p, size_t s)
{
    if (is_small(s))
        insert_small_chunk(p, s);
    else
    {
        tchunkptr tp = (tchunkptr)(p);
        insert_large_chunk(tp, s);
    }
}

void malloc_state::unlink_chunk(mchunkptr p, size_t s)
{
    if (is_small(s))
        unlink_small_chunk(p, s);
    else
    {
        tchunkptr tp = (tchunkptr)(p);
        unlink_large_chunk(tp);
    }
}


/* ----------------------- Direct-mmapping chunks ----------------------- */

/*
   Directly mmapped chunks are set up with an offset to the start of
   the mmapped region stored in the prev_foot field of the chunk. This
   allows reconstruction of the required argument to MUNMAP when freed,
   and also allows adjustment of the returned chunk to meet alignment
   requirements (especially in memalign).
*/

// Malloc using mmap
void* malloc_state::mmap_alloc(size_t nb)
{
    size_t mmsize = mmap_align(nb + 6 * sizeof(size_t) + spp_chunk_align_mask);
    if (_footprint_limit != 0)
    {
        size_t fp = _footprint + mmsize;
        if (fp <= _footprint || fp > _footprint_limit)
            return 0;
    }
    if (mmsize > nb)
    {
        // Check for wrap around 0
        char* mm = (char*)(SPP_CALL_DIRECT_MMAP(mmsize));
        if (mm != cmfail)
        {
            size_t offset = align_offset(chunk2mem(mm));
            size_t psize = mmsize - offset - SPP_MMAP_FOOT_PAD;
            mchunkptr p = (mchunkptr)(mm + offset);
            p->_prev_foot = offset;
            p->_head = psize;
            mark_inuse_foot(p, psize);
            p->chunk_plus_offset(psize)->_head = FENCEPOST_HEAD;
            p->chunk_plus_offset(psize + sizeof(size_t))->_head = 0;

            if (_least_addr == 0 || mm < _least_addr)
                _least_addr = mm;
            if ((_footprint += mmsize) > _max_footprint)
                _max_footprint = _footprint;
            assert(spp_is_aligned(chunk2mem(p)));
            check_mmapped_chunk(p);
            return chunk2mem(p);
        }
    }
    return 0;
}
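
/* Editorial note on the resulting layout: the distance from the start of the
   mapping to the chunk is kept in _prev_foot, so the exact munmap argument
   can be reconstructed on free, and the region is terminated by a
   FENCEPOST_HEAD followed by a zero head word rather than by a real
   successor chunk. */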

// Realloc using mmap
mchunkptr malloc_state::mmap_resize(mchunkptr oldp, size_t nb, int flags)
{
    size_t oldsize = oldp->chunksize();
    (void)flags;      // placate people compiling -Wunused
    if (is_small(nb)) // Can't shrink mmap regions below small size
        return 0;

    // Keep old chunk if big enough but not too big
    if (oldsize >= nb + sizeof(size_t) &&
        (oldsize - nb) <= (mparams._granularity << 1))
        return oldp;
    else
    {
        size_t offset = oldp->_prev_foot;
        size_t oldmmsize = oldsize + offset + SPP_MMAP_FOOT_PAD;
        size_t newmmsize = mmap_align(nb + 6 * sizeof(size_t) + spp_chunk_align_mask);
        char* cp = (char*)SPP_CALL_MREMAP((char*)oldp - offset,
                                          oldmmsize, newmmsize, flags);
        if (cp != cmfail)
        {
            mchunkptr newp = (mchunkptr)(cp + offset);
            size_t psize = newmmsize - offset - SPP_MMAP_FOOT_PAD;
            newp->_head = psize;
            mark_inuse_foot(newp, psize);
            newp->chunk_plus_offset(psize)->_head = FENCEPOST_HEAD;
            newp->chunk_plus_offset(psize + sizeof(size_t))->_head = 0;

            if (cp < _least_addr)
                _least_addr = cp;
            if ((_footprint += newmmsize - oldmmsize) > _max_footprint)
                _max_footprint = _footprint;
            check_mmapped_chunk(newp);
            return newp;
        }
    }
    return 0;
}


/* -------------------------- mspace management -------------------------- */

// Initialize top chunk and its size
void malloc_state::init_top(mchunkptr p, size_t psize)
{
    // Ensure alignment
    size_t offset = align_offset(chunk2mem(p));
    p = (mchunkptr)((char*)p + offset);
    psize -= offset;

    _top = p;
    _topsize = psize;
    p->_head = psize | PINUSE_BIT;
    // set size of fake trailing chunk holding overhead space only once
    p->chunk_plus_offset(psize)->_head = top_foot_size();
    _trim_check = mparams._trim_threshold; // reset on each update
}

// Initialize bins for a new mstate that is otherwise zeroed out
void malloc_state::init_bins()
{
    // Establish circular links for smallbins
    bindex_t i;
    for (i = 0; i < NSMALLBINS; ++i)
    {
        sbinptr bin = smallbin_at(i);
        bin->_fd = bin->_bk = bin;
    }
}

#if SPP_PROCEED_ON_ERROR

// default corruption action
void malloc_state::reset_on_error()
{
    int i;
    ++malloc_corruption_error_count;
    // Reinitialize fields to forget about all memory
    _smallmap = _treemap = 0;
    _dvsize = _topsize = 0;
    _seg._base = 0;
    _seg._size = 0;
    _seg._next = 0;
    _top = _dv = 0;
    for (i = 0; i < NTREEBINS; ++i)
        *treebin_at(i) = 0;
    init_bins();
}
#endif

/* Allocate chunk and prepend remainder with chunk in successor base. */
void* malloc_state::prepend_alloc(char* newbase, char* oldbase, size_t nb)
{
    mchunkptr p = align_as_chunk(newbase);
    mchunkptr oldfirst = align_as_chunk(oldbase);
    size_t psize = (char*)oldfirst - (char*)p;
    mchunkptr q = (mchunkptr)p->chunk_plus_offset(nb);
    size_t qsize = psize - nb;
    set_size_and_pinuse_of_inuse_chunk(p, nb);

    assert((char*)oldfirst > (char*)q);
    assert(oldfirst->pinuse());
    assert(qsize >= MIN_CHUNK_SIZE);

    // consolidate remainder with first chunk of old base
    if (oldfirst == _top)
    {
        size_t tsize = _topsize += qsize;
        _top = q;
        q->_head = tsize | PINUSE_BIT;
        check_top_chunk(q);
    }
    else if (oldfirst == _dv)
    {
        size_t dsize = _dvsize += qsize;
        _dv = q;
        q->set_size_and_pinuse_of_free_chunk(dsize);
    }
    else
    {
        if (!oldfirst->is_inuse())
        {
            size_t nsize = oldfirst->chunksize();
            unlink_chunk(oldfirst, nsize);
            oldfirst = (mchunkptr)oldfirst->chunk_plus_offset(nsize);
            qsize += nsize;
        }
        q->set_free_with_pinuse(qsize, oldfirst);
        insert_chunk(q, qsize);
        check_free_chunk(q);
    }

    check_malloced_chunk(chunk2mem(p), nb);
    return chunk2mem(p);
}

// Add a segment to hold a new noncontiguous region
void malloc_state::add_segment(char* tbase, size_t tsize, flag_t mmapped)
{
    // Determine locations and sizes of segment, fenceposts, old top
    char* old_top = (char*)_top;
    msegmentptr oldsp = segment_holding(old_top);
    char* old_end = oldsp->_base + oldsp->_size;
    size_t ssize = pad_request(sizeof(struct malloc_segment));
    char* rawsp = old_end - (ssize + 4 * sizeof(size_t) + spp_chunk_align_mask);
    size_t offset = align_offset(chunk2mem(rawsp));
    char* asp = rawsp + offset;
    char* csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
    mchunkptr sp = (mchunkptr)csp;
    msegmentptr ss = (msegmentptr)(chunk2mem(sp));
    mchunkptr tnext = (mchunkptr)sp->chunk_plus_offset(ssize);
    mchunkptr p = tnext;
    int nfences = 0;

    // reset top to new space
    init_top((mchunkptr)tbase, tsize - top_foot_size());

    // Set up segment record
    assert(spp_is_aligned(ss));
    set_size_and_pinuse_of_inuse_chunk(sp, ssize);
    *ss = _seg; // Push current record
    _seg._base = tbase;
    _seg._size = tsize;
    _seg._sflags = mmapped;
    _seg._next = ss;

    // Insert trailing fenceposts
    for (;;)
    {
        mchunkptr nextp = (mchunkptr)p->chunk_plus_offset(sizeof(size_t));
        p->_head = FENCEPOST_HEAD;
        ++nfences;
        if ((char*)(&(nextp->_head)) < old_end)
            p = nextp;
        else
            break;
    }
    assert(nfences >= 2);

    // Insert the rest of old top into a bin as an ordinary free chunk
    if (csp != old_top)
    {
        mchunkptr q = (mchunkptr)old_top;
        size_t psize = csp - old_top;
        mchunkptr tn = (mchunkptr)q->chunk_plus_offset(psize);
        q->set_free_with_pinuse(psize, tn);
        insert_chunk(q, psize);
    }

    check_top_chunk(_top);
}

/* -------------------------- System allocation -------------------------- */

// Get memory from system using MMAP
void* malloc_state::sys_alloc(size_t nb)
{
    char* tbase = cmfail;
    size_t tsize = 0;
    flag_t mmap_flag = 0;
    size_t asize; // allocation size

    mparams.ensure_initialization();

    // Directly map large chunks, but only if already initialized
    if (use_mmap() && nb >= mparams._mmap_threshold && _topsize != 0)
    {
        void* mem = mmap_alloc(nb);
        if (mem != 0)
            return mem;
    }

    asize = mparams.granularity_align(nb + sys_alloc_padding());
    if (asize <= nb)
        return 0; // wraparound
    if (_footprint_limit != 0)
    {
        size_t fp = _footprint + asize;
        if (fp <= _footprint || fp > _footprint_limit)
            return 0;
    }

    /*
       Try getting memory with a call to MMAP for new space (disabled if
       SPP_HAVE_MMAP is not set). We need to request enough bytes from the
       system to ensure we can malloc nb bytes upon success, so pad with
       enough space for top_foot, plus an alignment pad to make sure we don't
       lose bytes if not on a boundary, and round this up to a granularity
       unit.
    */

    if (SPP_HAVE_MMAP && tbase == cmfail)
    {
        // Try MMAP
        char* mp = (char*)(SPP_CALL_MMAP(asize));
        if (mp != cmfail)
        {
            tbase = mp;
            tsize = asize;
            mmap_flag = USE_MMAP_BIT;
        }
    }

    if (tbase != cmfail)
    {
        if ((_footprint += tsize) > _max_footprint)
            _max_footprint = _footprint;

        if (!is_initialized())
        {
            // first-time initialization
            if (_least_addr == 0 || tbase < _least_addr)
                _least_addr = tbase;
            _seg._base = tbase;
            _seg._size = tsize;
            _seg._sflags = mmap_flag;
            _magic = mparams._magic;
            _release_checks = SPP_MAX_RELEASE_CHECK_RATE;
            init_bins();

            // Offset top by embedded malloc_state
            mchunkptr mn = (mchunkptr)mem2chunk(this)->next_chunk();
            init_top(mn, (size_t)((tbase + tsize) - (char*)mn) - top_foot_size());
        }
        else
        {
            // Try to merge with an existing segment
            msegmentptr sp = &_seg;
            // Only consider most recent segment if traversal suppressed
            while (sp != 0 && tbase != sp->_base + sp->_size)
                sp = (SPP_NO_SEGMENT_TRAVERSAL) ? 0 : sp->_next;
            if (sp != 0 &&
                !sp->is_extern_segment() &&
                (sp->_sflags & USE_MMAP_BIT) == mmap_flag &&
                segment_holds(sp, _top))
            {
                // append
                sp->_size += tsize;
                init_top(_top, _topsize + tsize);
            }
            else
            {
                if (tbase < _least_addr)
                    _least_addr = tbase;
                sp = &_seg;
                while (sp != 0 && sp->_base != tbase + tsize)
                    sp = (SPP_NO_SEGMENT_TRAVERSAL) ? 0 : sp->_next;
                if (sp != 0 &&
                    !sp->is_extern_segment() &&
                    (sp->_sflags & USE_MMAP_BIT) == mmap_flag)
                {
                    char* oldbase = sp->_base;
                    sp->_base = tbase;
                    sp->_size += tsize;
                    return prepend_alloc(tbase, oldbase, nb);
                }
                else
                    add_segment(tbase, tsize, mmap_flag);
            }
        }

        if (nb < _topsize)
        {
            // Allocate from new or extended top space
            size_t rsize = _topsize -= nb;
            mchunkptr p = _top;
            mchunkptr r = _top = (mchunkptr)p->chunk_plus_offset(nb);
            r->_head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(p, nb);
            check_top_chunk(_top);
            check_malloced_chunk(chunk2mem(p), nb);
            return chunk2mem(p);
        }
    }

    SPP_MALLOC_FAILURE_ACTION;
    return 0;
}

/* ----------------------- system deallocation -------------------------- */

// Unmap and unlink any mmapped segments that don't contain used chunks
size_t malloc_state::release_unused_segments()
{
    size_t released = 0;
    int nsegs = 0;
    msegmentptr pred = &_seg;
    msegmentptr sp = pred->_next;
    while (sp != 0)
    {
        char* base = sp->_base;
        size_t size = sp->_size;
        msegmentptr next = sp->_next;
        ++nsegs;
        if (sp->is_mmapped_segment() && !sp->is_extern_segment())
        {
            mchunkptr p = align_as_chunk(base);
            size_t psize = p->chunksize();
            // Can unmap if first chunk holds entire segment and not pinned
            if (!p->is_inuse() && (char*)p + psize >= base + size - top_foot_size())
            {
                tchunkptr tp = (tchunkptr)p;
                assert(segment_holds(sp, p));
                if (p == _dv)
                {
                    _dv = 0;
                    _dvsize = 0;
                }
                else
                    unlink_large_chunk(tp);
                if (SPP_CALL_MUNMAP(base, size) == 0)
                {
                    released += size;
                    _footprint -= size;
                    // unlink obsoleted record
                    sp = pred;
                    sp->_next = next;
                }
                else
                {
                    // back out if cannot unmap
                    insert_large_chunk(tp, psize);
                }
            }
        }
        if (SPP_NO_SEGMENT_TRAVERSAL) // scan only first segment
            break;
        pred = sp;
        sp = next;
    }
    // Reset check counter
    _release_checks = (((size_t) nsegs > (size_t) SPP_MAX_RELEASE_CHECK_RATE) ?
                       (size_t) nsegs : (size_t) SPP_MAX_RELEASE_CHECK_RATE);
    return released;
}

int malloc_state::sys_trim(size_t pad)
{
    size_t released = 0;
    mparams.ensure_initialization();
    if (pad < MAX_REQUEST && is_initialized())
    {
        pad += top_foot_size(); // ensure enough room for segment overhead

        if (_topsize > pad)
        {
            // Shrink top space in _granularity - size units, keeping at least one
            size_t unit = mparams._granularity;
            size_t extra = ((_topsize - pad + (unit - 1)) / unit -
                            1) * unit;
            msegmentptr sp = segment_holding((char*)_top);

            if (!sp->is_extern_segment())
            {
                if (sp->is_mmapped_segment())
                {
                    if (SPP_HAVE_MMAP &&
                        sp->_size >= extra &&
                        !has_segment_link(sp))
                    {
                        // can't shrink if pinned
                        size_t newsize = sp->_size - extra;
                        (void)newsize; // placate people compiling -Wunused-variable
                        // Prefer mremap, fall back to munmap
                        if ((SPP_CALL_MREMAP(sp->_base, sp->_size, newsize, 0) != mfail) ||
                            (SPP_CALL_MUNMAP(sp->_base + newsize, extra) == 0))
                            released = extra;
                    }
                }
            }

            if (released != 0)
            {
                sp->_size -= released;
                _footprint -= released;
                init_top(_top, _topsize - released);
                check_top_chunk(_top);
            }
        }

        // Unmap any unused mmapped segments
        if (SPP_HAVE_MMAP)
            released += release_unused_segments();

        // On failure, disable autotrim to avoid repeated failed future calls
        if (released == 0 && _topsize > _trim_check)
            _trim_check = spp_max_size_t;
    }

    return (released != 0) ? 1 : 0;
}

/* Consolidate and bin a chunk. Differs from exported versions
   of free mainly in that the chunk need not be marked as inuse.
*/
void malloc_state::dispose_chunk(mchunkptr p, size_t psize)
{
    mchunkptr next = (mchunkptr)p->chunk_plus_offset(psize);
    if (!p->pinuse())
    {
        mchunkptr prev;
        size_t prevsize = p->_prev_foot;
        if (p->is_mmapped())
        {
            psize += prevsize + SPP_MMAP_FOOT_PAD;
            if (SPP_CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                _footprint -= psize;
            return;
        }
        prev = (mchunkptr)p->chunk_minus_offset(prevsize);
        psize += prevsize;
        p = prev;
        if (rtcheck(ok_address(prev)))
        {
            // consolidate backward
            if (p != _dv)
                unlink_chunk(p, prevsize);
            else if ((next->_head & INUSE_BITS) == INUSE_BITS)
            {
                _dvsize = psize;
                p->set_free_with_pinuse(psize, next);
                return;
            }
        }
        else
        {
            SPP_ABORT;
            return;
        }
    }
    if (rtcheck(ok_address(next)))
    {
        if (!next->cinuse())
        {
            // consolidate forward
            if (next == _top)
            {
                size_t tsize = _topsize += psize;
                _top = p;
                p->_head = tsize | PINUSE_BIT;
                if (p == _dv)
                {
                    _dv = 0;
                    _dvsize = 0;
                }
                return;
            }
            else if (next == _dv)
            {
                size_t dsize = _dvsize += psize;
                _dv = p;
                p->set_size_and_pinuse_of_free_chunk(dsize);
                return;
            }
            else
            {
                size_t nsize = next->chunksize();
                psize += nsize;
                unlink_chunk(next, nsize);
                p->set_size_and_pinuse_of_free_chunk(psize);
                if (p == _dv)
                {
                    _dvsize = psize;
                    return;
                }
            }
        }
        else
            p->set_free_with_pinuse(psize, next);
        insert_chunk(p, psize);
    }
    else
        SPP_ABORT;
}

/* ---------------------------- malloc --------------------------- */

// allocate a large request from the best fitting chunk in a treebin
void* malloc_state::tmalloc_large(size_t nb)
{
    tchunkptr v = 0;
    size_t rsize = -nb; // Unsigned negation
    tchunkptr t;
    bindex_t idx = compute_tree_index(nb);
    if ((t = *treebin_at(idx)) != 0)
    {
        // Traverse tree for this bin looking for node with size == nb
        size_t sizebits = nb << leftshift_for_tree_index(idx);
        tchunkptr rst = 0; // The deepest untaken right subtree
        for (;;)
        {
            tchunkptr rt;
            size_t trem = t->chunksize() - nb;
            if (trem < rsize)
            {
                v = t;
                if ((rsize = trem) == 0)
                    break;
            }
            rt = t->_child[1];
            t = t->_child[(sizebits >> (spp_size_t_bitsize - 1)) & 1];
            if (rt != 0 && rt != t)
                rst = rt;
            if (t == 0)
            {
                t = rst; // set t to least subtree holding sizes > nb
                break;
            }
            sizebits <<= 1;
        }
    }
    if (t == 0 && v == 0)
    {
        // set t to root of next non-empty treebin
        binmap_t leftbits = left_bits(idx2bit(idx)) & _treemap;
        if (leftbits != 0)
        {
            binmap_t leastbit = least_bit(leftbits);
            bindex_t i = compute_bit2idx(leastbit);
            t = *treebin_at(i);
        }
    }

    while (t != 0)
    {
        // find smallest of tree or subtree
        size_t trem = t->chunksize() - nb;
        if (trem < rsize)
        {
            rsize = trem;
            v = t;
        }
        t = t->leftmost_child();
    }

    // If dv is a better fit, return 0 so malloc will use it
    if (v != 0 && rsize < (size_t)(_dvsize - nb))
    {
        if (rtcheck(ok_address(v)))
        {
            // split
            mchunkptr r = (mchunkptr)v->chunk_plus_offset(nb);
            assert(v->chunksize() == rsize + nb);
            if (rtcheck(ok_next(v, r)))
            {
                unlink_large_chunk(v);
                if (rsize < MIN_CHUNK_SIZE)
                    set_inuse_and_pinuse(v, (rsize + nb));
                else
                {
                    set_size_and_pinuse_of_inuse_chunk(v, nb);
                    r->set_size_and_pinuse_of_free_chunk(rsize);
                    insert_chunk(r, rsize);
                }
                return chunk2mem(v);
            }
        }
        SPP_ABORT;
    }
    return 0;
}

// allocate a small request from the best fitting chunk in a treebin
void* malloc_state::tmalloc_small(size_t nb)
{
    tchunkptr t, v;
    size_t rsize;
    binmap_t leastbit = least_bit(_treemap);
    bindex_t i = compute_bit2idx(leastbit);
    v = t = *treebin_at(i);
    rsize = t->chunksize() - nb;

    while ((t = t->leftmost_child()) != 0)
    {
        size_t trem = t->chunksize() - nb;
        if (trem < rsize)
        {
            rsize = trem;
            v = t;
        }
    }

    if (rtcheck(ok_address(v)))
    {
        mchunkptr r = (mchunkptr)v->chunk_plus_offset(nb);
        assert(v->chunksize() == rsize + nb);
        if (rtcheck(ok_next(v, r)))
        {
            unlink_large_chunk(v);
            if (rsize < MIN_CHUNK_SIZE)
                set_inuse_and_pinuse(v, (rsize + nb));
            else
            {
                set_size_and_pinuse_of_inuse_chunk(v, nb);
                r->set_size_and_pinuse_of_free_chunk(rsize);
                replace_dv(r, rsize);
            }
            return chunk2mem(v);
        }
    }

    SPP_ABORT;
    return 0;
}

/* ---------------------------- malloc --------------------------- */

void* malloc_state::_malloc(size_t bytes)
{
    if (1)
    {
        void* mem;
        size_t nb;
        if (bytes <= MAX_SMALL_REQUEST)
        {
            bindex_t idx;
            binmap_t smallbits;
            nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
            idx = small_index(nb);
            smallbits = _smallmap >> idx;

            if ((smallbits & 0x3U) != 0)
            {
                // Remainderless fit to a smallbin.
                mchunkptr b, p;
                idx += ~smallbits & 1; // Uses next bin if idx empty
                b = smallbin_at(idx);
                p = b->_fd;
                assert(p->chunksize() == small_index2size(idx));
                unlink_first_small_chunk(b, p, idx);
                set_inuse_and_pinuse(p, small_index2size(idx));
                mem = chunk2mem(p);
                check_malloced_chunk(mem, nb);
                goto postaction;
            }

            else if (nb > _dvsize)
            {
                if (smallbits != 0)
                {
                    // Use chunk in next nonempty smallbin
                    mchunkptr b, p, r;
                    size_t rsize;
                    binmap_t leftbits = (smallbits << idx) & left_bits(malloc_state::idx2bit(idx));
                    binmap_t leastbit = least_bit(leftbits);
                    bindex_t i = compute_bit2idx(leastbit);
                    b = smallbin_at(i);
                    p = b->_fd;
                    assert(p->chunksize() == small_index2size(i));
                    unlink_first_small_chunk(b, p, i);
                    rsize = small_index2size(i) - nb;
                    // Fit here cannot be remainderless if 4byte sizes
                    if (sizeof(size_t) != 4 && rsize < MIN_CHUNK_SIZE)
                        set_inuse_and_pinuse(p, small_index2size(i));
                    else
                    {
                        set_size_and_pinuse_of_inuse_chunk(p, nb);
                        r = (mchunkptr)p->chunk_plus_offset(nb);
                        r->set_size_and_pinuse_of_free_chunk(rsize);
                        replace_dv(r, rsize);
                    }
                    mem = chunk2mem(p);
                    check_malloced_chunk(mem, nb);
                    goto postaction;
                }

                else if (_treemap != 0 && (mem = tmalloc_small(nb)) != 0)
                {
                    check_malloced_chunk(mem, nb);
                    goto postaction;
                }
            }
        }
        else if (bytes >= MAX_REQUEST)
            nb = spp_max_size_t; // Too big to allocate. Force failure (in sys alloc)
        else
        {
            nb = pad_request(bytes);
            if (_treemap != 0 && (mem = tmalloc_large(nb)) != 0)
            {
                check_malloced_chunk(mem, nb);
                goto postaction;
            }
        }

        if (nb <= _dvsize)
        {
            size_t rsize = _dvsize - nb;
            mchunkptr p = _dv;
            if (rsize >= MIN_CHUNK_SIZE)
            {
                // split dv
                mchunkptr r = _dv = (mchunkptr)p->chunk_plus_offset(nb);
                _dvsize = rsize;
                r->set_size_and_pinuse_of_free_chunk(rsize);
                set_size_and_pinuse_of_inuse_chunk(p, nb);
            }
            else // exhaust dv
            {
                size_t dvs = _dvsize;
                _dvsize = 0;
                _dv = 0;
                set_inuse_and_pinuse(p, dvs);
            }
            mem = chunk2mem(p);
            check_malloced_chunk(mem, nb);
            goto postaction;
        }

        else if (nb < _topsize)
        {
            // Split top
            size_t rsize = _topsize -= nb;
            mchunkptr p = _top;
            mchunkptr r = _top = (mchunkptr)p->chunk_plus_offset(nb);
            r->_head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(p, nb);
            mem = chunk2mem(p);
            check_top_chunk(_top);
            check_malloced_chunk(mem, nb);
            goto postaction;
        }

        mem = sys_alloc(nb);

postaction:
        return mem;
    }

    return 0;
}
|
3099
|
+
|
3100
|
+
/* ---------------------------- free --------------------------- */
|
3101
|
+
|
3102
|
+
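/*
   Freeing coalesces the chunk with free neighbors: backward into a
   preceding free chunk, forward into the top chunk, the designated
   victim, or an adjacent free chunk, before the result is placed in a
   small or tree bin.
*/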
void malloc_state::_free(mchunkptr p)
{
    if (1)
    {
        check_inuse_chunk(p);
        if (rtcheck(ok_address(p) && ok_inuse(p)))
        {
            size_t psize = p->chunksize();
            mchunkptr next = (mchunkptr)p->chunk_plus_offset(psize);
            if (!p->pinuse())
            {
                size_t prevsize = p->_prev_foot;
                if (p->is_mmapped())
                {
                    psize += prevsize + SPP_MMAP_FOOT_PAD;
                    if (SPP_CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                        _footprint -= psize;
                    goto postaction;
                }
                else
                {
                    mchunkptr prev = (mchunkptr)p->chunk_minus_offset(prevsize);
                    psize += prevsize;
                    p = prev;
                    if (rtcheck(ok_address(prev)))
                    {
                        // consolidate backward
                        if (p != _dv)
                            unlink_chunk(p, prevsize);
                        else if ((next->_head & INUSE_BITS) == INUSE_BITS)
                        {
                            _dvsize = psize;
                            p->set_free_with_pinuse(psize, next);
                            goto postaction;
                        }
                    }
                    else
                        goto erroraction;
                }
            }

            if (rtcheck(ok_next(p, next) && ok_pinuse(next)))
            {
                if (!next->cinuse())
                {
                    // consolidate forward
                    if (next == _top)
                    {
                        size_t tsize = _topsize += psize;
                        _top = p;
                        p->_head = tsize | PINUSE_BIT;
                        if (p == _dv)
                        {
                            _dv = 0;
                            _dvsize = 0;
                        }
                        if (should_trim(tsize))
                            sys_trim(0);
                        goto postaction;
                    }
                    else if (next == _dv)
                    {
                        size_t dsize = _dvsize += psize;
                        _dv = p;
                        p->set_size_and_pinuse_of_free_chunk(dsize);
                        goto postaction;
                    }
                    else
                    {
                        size_t nsize = next->chunksize();
                        psize += nsize;
                        unlink_chunk(next, nsize);
                        p->set_size_and_pinuse_of_free_chunk(psize);
                        if (p == _dv)
                        {
                            _dvsize = psize;
                            goto postaction;
                        }
                    }
                }
                else
                    p->set_free_with_pinuse(psize, next);

                if (is_small(psize))
                {
                    insert_small_chunk(p, psize);
                    check_free_chunk(p);
                }
                else
                {
                    tchunkptr tp = (tchunkptr)p;
                    insert_large_chunk(tp, psize);
                    check_free_chunk(p);
                    if (--_release_checks == 0)
                        release_unused_segments();
                }
                goto postaction;
            }
        }
    erroraction:
        SPP_USAGE_ERROR_ACTION(this, p);
    postaction:
        ;
    }
}

/* ------------ Internal support for realloc, memalign, etc -------------- */

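/*
   In-place realloc strategies, in order: shrink and split when the
   chunk is already big enough, grow into the adjacent top chunk, grow
   into the designated victim, or absorb an adjacent free chunk.
*/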
// Try to realloc; only in-place unless can_move true
mchunkptr malloc_state::try_realloc_chunk(mchunkptr p, size_t nb, int can_move)
{
    mchunkptr newp = 0;
    size_t oldsize = p->chunksize();
    mchunkptr next = (mchunkptr)p->chunk_plus_offset(oldsize);
    if (rtcheck(ok_address(p) && ok_inuse(p) &&
                ok_next(p, next) && ok_pinuse(next)))
    {
        if (p->is_mmapped())
            newp = mmap_resize(p, nb, can_move);
        else if (oldsize >= nb)
        {
            // already big enough
            size_t rsize = oldsize - nb;
            if (rsize >= MIN_CHUNK_SIZE)
            {
                // split off remainder
                mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb);
                set_inuse(p, nb);
                set_inuse(r, rsize);
                dispose_chunk(r, rsize);
            }
            newp = p;
        }
        else if (next == _top)
        {
            // extend into top
            if (oldsize + _topsize > nb)
            {
                size_t newsize = oldsize + _topsize;
                size_t newtopsize = newsize - nb;
                mchunkptr newtop = (mchunkptr)p->chunk_plus_offset(nb);
                set_inuse(p, nb);
                newtop->_head = newtopsize | PINUSE_BIT;
                _top = newtop;
                _topsize = newtopsize;
                newp = p;
            }
        }
        else if (next == _dv)
        {
            // extend into dv
            size_t dvs = _dvsize;
            if (oldsize + dvs >= nb)
            {
                size_t dsize = oldsize + dvs - nb;
                if (dsize >= MIN_CHUNK_SIZE)
                {
                    mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb);
                    mchunkptr n = (mchunkptr)r->chunk_plus_offset(dsize);
                    set_inuse(p, nb);
                    r->set_size_and_pinuse_of_free_chunk(dsize);
                    n->clear_pinuse();
                    _dvsize = dsize;
                    _dv = r;
                }
                else
                {
                    // exhaust dv
                    size_t newsize = oldsize + dvs;
                    set_inuse(p, newsize);
                    _dvsize = 0;
                    _dv = 0;
                }
                newp = p;
            }
        }
        else if (!next->cinuse())
        {
            // extend into next free chunk
            size_t nextsize = next->chunksize();
            if (oldsize + nextsize >= nb)
            {
                size_t rsize = oldsize + nextsize - nb;
                unlink_chunk(next, nextsize);
                if (rsize < MIN_CHUNK_SIZE)
                {
                    size_t newsize = oldsize + nextsize;
                    set_inuse(p, newsize);
                }
                else
                {
                    mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb);
                    set_inuse(p, nb);
                    set_inuse(r, rsize);
                    dispose_chunk(r, rsize);
                }
                newp = p;
            }
        }
    }
    else
        SPP_USAGE_ERROR_ACTION(this, chunk2mem(p));
    return newp;
}

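/*
   Requesting alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD extra bytes
   guarantees that some aligned position inside the returned block
   leaves a leading remainder of at least MIN_CHUNK_SIZE, so the
   leader can always be given back as a valid chunk.
*/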
void* malloc_state::internal_memalign(size_t alignment, size_t bytes)
{
    void* mem = 0;
    if (alignment < MIN_CHUNK_SIZE) // must be at least a minimum chunk size
        alignment = MIN_CHUNK_SIZE;
    if ((alignment & (alignment - 1)) != 0)
    {
        // Ensure a power of 2
        size_t a = SPP_MALLOC_ALIGNMENT << 1;
        while (a < alignment)
            a <<= 1;
        alignment = a;
    }
    if (bytes >= MAX_REQUEST - alignment)
        SPP_MALLOC_FAILURE_ACTION;
    else
    {
        size_t nb = request2size(bytes);
        size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
        mem = internal_malloc(req);
        if (mem != 0)
        {
            mchunkptr p = mem2chunk(mem);
            if ((((size_t)(mem)) & (alignment - 1)) != 0)
            {
                // misaligned
                /*
                  Find an aligned spot inside chunk. Since we need to give
                  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
                  the first calculation places us at a spot with less than
                  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
                  We've allocated enough total room so that this is always
                  possible.
                */
                char* br = (char*)mem2chunk((void *)(((size_t)((char*)mem + alignment - 1)) &
                                                     -alignment));
                char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE) ?
                            br : br + alignment;
                mchunkptr newp = (mchunkptr)pos;
                size_t leadsize = pos - (char*)(p);
                size_t newsize = p->chunksize() - leadsize;

                if (p->is_mmapped())
                {
                    // For mmapped chunks, just adjust offset
                    newp->_prev_foot = p->_prev_foot + leadsize;
                    newp->_head = newsize;
                }
                else
                {
                    // Otherwise, give back leader, use the rest
                    set_inuse(newp, newsize);
                    set_inuse(p, leadsize);
                    dispose_chunk(p, leadsize);
                }
                p = newp;
            }

            // Give back spare room at the end
            if (!p->is_mmapped())
            {
                size_t size = p->chunksize();
                if (size > nb + MIN_CHUNK_SIZE)
                {
                    size_t remainder_size = size - nb;
                    mchunkptr remainder = (mchunkptr)p->chunk_plus_offset(nb);
                    set_inuse(p, nb);
                    set_inuse(remainder, remainder_size);
                    dispose_chunk(remainder, remainder_size);
                }
            }

            mem = chunk2mem(p);
            assert(p->chunksize() >= nb);
            assert(((size_t)mem & (alignment - 1)) == 0);
            check_inuse_chunk(p);
        }
    }
    return mem;
}

/*
  Common support for independent_X routines, handling
  all of the combinations that can result.
  The opts arg has:
  bit 0 set if all elements are same size (using sizes[0])
  bit 1 set if elements should be zeroed
*/
void** malloc_state::ialloc(size_t n_elements, size_t* sizes, int opts,
                            void* chunks[])
{

    size_t element_size;   // chunksize of each element, if all same
    size_t contents_size;  // total size of elements
    size_t array_size;     // request size of pointer array
    void* mem;             // malloced aggregate space
    mchunkptr p;           // corresponding chunk
    size_t remainder_size; // remaining bytes while splitting
    void** marray;         // either "chunks" or malloced ptr array
    mchunkptr array_chunk; // chunk for malloced ptr array
    flag_t was_enabled;    // to disable mmap
    size_t size;
    size_t i;

    mparams.ensure_initialization();
    // compute array length, if needed
    if (chunks != 0)
    {
        if (n_elements == 0)
            return chunks; // nothing to do
        marray = chunks;
        array_size = 0;
    }
    else
    {
        // if empty req, must still return chunk representing empty array
        if (n_elements == 0)
            return (void**)internal_malloc(0);
        marray = 0;
        array_size = request2size(n_elements * (sizeof(void*)));
    }

    // compute total element size
    if (opts & 0x1)
    {
        // all-same-size
        element_size = request2size(*sizes);
        contents_size = n_elements * element_size;
    }
    else
    {
        // add up all the sizes
        element_size = 0;
        contents_size = 0;
        for (i = 0; i != n_elements; ++i)
            contents_size += request2size(sizes[i]);
    }

    size = contents_size + array_size;

    /*
      Allocate the aggregate chunk. First disable direct-mmapping so
      malloc won't use it, since we would not be able to later
      free/realloc space internal to a segregated mmap region.
    */
    was_enabled = use_mmap();
    disable_mmap();
    mem = internal_malloc(size - CHUNK_OVERHEAD);
    if (was_enabled)
        enable_mmap();
    if (mem == 0)
        return 0;

    p = mem2chunk(mem);
    remainder_size = p->chunksize();

    assert(!p->is_mmapped());

    if (opts & 0x2)
    {
        // optionally clear the elements
        memset((size_t*)mem, 0, remainder_size - sizeof(size_t) - array_size);
    }

    // If not provided, allocate the pointer array as final part of chunk
    if (marray == 0)
    {
        size_t array_chunk_size;
        array_chunk = (mchunkptr)p->chunk_plus_offset(contents_size);
        array_chunk_size = remainder_size - contents_size;
        marray = (void**)(chunk2mem(array_chunk));
        set_size_and_pinuse_of_inuse_chunk(array_chunk, array_chunk_size);
        remainder_size = contents_size;
    }

    // split out elements
    for (i = 0; ; ++i)
    {
        marray[i] = chunk2mem(p);
        if (i != n_elements - 1)
        {
            if (element_size != 0)
                size = element_size;
            else
                size = request2size(sizes[i]);
            remainder_size -= size;
            set_size_and_pinuse_of_inuse_chunk(p, size);
            p = (mchunkptr)p->chunk_plus_offset(size);
        }
        else
        {
            // the final element absorbs any overallocation slop
            set_size_and_pinuse_of_inuse_chunk(p, remainder_size);
            break;
        }
    }

#if SPP_DEBUG
    if (marray != chunks)
    {
        // final element must have exactly exhausted chunk
        if (element_size != 0)
            assert(remainder_size == element_size);
        else
            assert(remainder_size == request2size(sizes[i]));
        check_inuse_chunk(mem2chunk(marray));
    }
    for (i = 0; i != n_elements; ++i)
        check_inuse_chunk(mem2chunk(marray[i]));

#endif

    return marray;
}

/* Try to free all pointers in the given array.
   Note: this could be made faster, by delaying consolidation,
   at the price of disabling some user integrity checks. We
   still optimize some consolidations by combining adjacent
   chunks before freeing, which will occur often if allocated
   with ialloc or the array is sorted.
*/
size_t malloc_state::internal_bulk_free(void* array[], size_t nelem)
{
    size_t unfreed = 0;
    if (1)
    {
        void** a;
        void** fence = &(array[nelem]);
        for (a = array; a != fence; ++a)
        {
            void* mem = *a;
            if (mem != 0)
            {
                mchunkptr p = mem2chunk(mem);
                size_t psize = p->chunksize();
#if SPP_FOOTERS
                if (get_mstate_for(p) != this)
                {
                    ++unfreed;
                    continue;
                }
#endif
                check_inuse_chunk(p);
                *a = 0;
                if (rtcheck(ok_address(p) && ok_inuse(p)))
                {
                    void ** b = a + 1; // try to merge with next chunk
                    mchunkptr next = (mchunkptr)p->next_chunk();
                    if (b != fence && *b == chunk2mem(next))
                    {
                        size_t newsize = next->chunksize() + psize;
                        set_inuse(p, newsize);
                        *b = chunk2mem(p);
                    }
                    else
                        dispose_chunk(p, psize);
                }
                else
                {
                    SPP_ABORT;
                    break;
                }
            }
        }
        if (should_trim(_topsize))
            sys_trim(0);
    }
    return unfreed;
}

void malloc_state::init(char* tbase, size_t tsize)
{
    _seg._base = _least_addr = tbase;
    _seg._size = _footprint = _max_footprint = tsize;
    _magic = mparams._magic;
    _release_checks = SPP_MAX_RELEASE_CHECK_RATE;
    _mflags = mparams._default_mflags;
    _extp = 0;
    _exts = 0;
    disable_contiguous();
    init_bins();
    mchunkptr mn = (mchunkptr)mem2chunk(this)->next_chunk();
    init_top(mn, (size_t)((tbase + tsize) - (char*)mn) - top_foot_size());
    check_top_chunk(_top);
}

/* Traversal */
#if SPP_MALLOC_INSPECT_ALL
void malloc_state::internal_inspect_all(void(*handler)(void *start, void *end,
                                                       size_t used_bytes,
                                                       void* callback_arg),
                                        void* arg)
{
    if (is_initialized())
    {
        mchunkptr top = _top;
        msegmentptr s;
        for (s = &_seg; s != 0; s = s->_next)
        {
            mchunkptr q = align_as_chunk(s->_base);
            while (segment_holds(s, q) && q->_head != FENCEPOST_HEAD)
            {
                mchunkptr next = (mchunkptr)q->next_chunk();
                size_t sz = q->chunksize();
                size_t used;
                void* start;
                if (q->is_inuse())
                {
                    used = sz - CHUNK_OVERHEAD; // must not be mmapped
                    start = chunk2mem(q);
                }
                else
                {
                    used = 0;
                    if (is_small(sz))
                    {
                        // offset by possible bookkeeping
                        start = (void*)((char*)q + sizeof(struct malloc_chunk));
                    }
                    else
                        start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
                }
                if (start < (void*)next) // skip if all space is bookkeeping
                    handler(start, next, used, arg);
                if (q == top)
                    break;
                q = next;
            }
        }
    }
}
#endif // SPP_MALLOC_INSPECT_ALL



/* ----------------------------- user mspaces ---------------------------- */

static mstate init_user_mstate(char* tbase, size_t tsize)
{
    size_t msize = pad_request(sizeof(malloc_state));
    mchunkptr msp = align_as_chunk(tbase);
    mstate m = (mstate)(chunk2mem(msp));
    memset(m, 0, msize);
    msp->_head = (msize | INUSE_BITS);
    m->init(tbase, tsize);
    return m;
}

SPP_API mspace create_mspace(size_t capacity, int locked)
{
    mstate m = 0;
    size_t msize;
    mparams.ensure_initialization();
    msize = pad_request(sizeof(malloc_state));
    if (capacity < (size_t) - (msize + top_foot_size() + mparams._page_size))
    {
        size_t rs = ((capacity == 0) ? mparams._granularity :
                     (capacity + top_foot_size() + msize));
        size_t tsize = mparams.granularity_align(rs);
        char* tbase = (char*)(SPP_CALL_MMAP(tsize));
        if (tbase != cmfail)
        {
            m = init_user_mstate(tbase, tsize);
            m->_seg._sflags = USE_MMAP_BIT;
            m->set_lock(locked);
        }
    }
    return (mspace)m;
}

SPP_API size_t destroy_mspace(mspace msp)
{
    size_t freed = 0;
    mstate ms = (mstate)msp;
    if (ms->ok_magic())
    {
        msegmentptr sp = &ms->_seg;
        while (sp != 0)
        {
            char* base = sp->_base;
            size_t size = sp->_size;
            flag_t flag = sp->_sflags;
            (void)base; // placate people compiling -Wunused-variable
            sp = sp->_next;
            if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
                SPP_CALL_MUNMAP(base, size) == 0)
                freed += size;
        }
    }
    else
        SPP_USAGE_ERROR_ACTION(ms, ms);
    return freed;
}

/* ---------------------------- mspace versions of malloc/calloc/free routines -------------------- */
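/*
   Illustrative usage sketch of the mspace API (compiled out; the
   function name is hypothetical): a private space is created, used,
   and torn down in one step at the end.
*/
#if 0
static void example_mspace_usage()
{
    mspace msp = create_mspace(0, 0);      // default granularity, no lock
    if (msp != 0)
    {
        void* p = mspace_malloc(msp, 128); // allocate from this space only
        if (p != 0)
            mspace_free(msp, p);           // return it to the same space
        destroy_mspace(msp);               // unmap every segment at once
    }
}
#endif
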
SPP_API void* mspace_malloc(mspace msp, size_t bytes)
{
    mstate ms = (mstate)msp;
    if (!ms->ok_magic())
    {
        SPP_USAGE_ERROR_ACTION(ms, ms);
        return 0;
    }
    return ms->_malloc(bytes);
}

SPP_API void mspace_free(mspace msp, void* mem)
{
    if (mem != 0)
    {
        mchunkptr p = mem2chunk(mem);
#if SPP_FOOTERS
        mstate fm = get_mstate_for(p);
        (void)msp; // placate people compiling -Wunused
#else
        mstate fm = (mstate)msp;
#endif
        if (!fm->ok_magic())
        {
            SPP_USAGE_ERROR_ACTION(fm, p);
            return;
        }
        fm->_free(p);
    }
}

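/*
   Overflow guard: the division check below is skipped when both
   n_elements and elem_size are below 2^16, since the product of two
   such values cannot overflow a size_t of at least 32 bits.
*/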
SPP_API void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
{
    void* mem;
    size_t req = 0;
    mstate ms = (mstate)msp;
    if (!ms->ok_magic())
    {
        SPP_USAGE_ERROR_ACTION(ms, ms);
        return 0;
    }
    if (n_elements != 0)
    {
        req = n_elements * elem_size;
        if (((n_elements | elem_size) & ~(size_t)0xffff) &&
            (req / n_elements != elem_size))
            req = spp_max_size_t; // force downstream failure on overflow
    }
    mem = ms->internal_malloc(req);
    if (mem != 0 && mem2chunk(mem)->calloc_must_clear())
        memset(mem, 0, req);
    return mem;
}

SPP_API void* mspace_realloc(mspace msp, void* oldmem, size_t bytes)
{
    void* mem = 0;
    if (oldmem == 0)
        mem = mspace_malloc(msp, bytes);
    else if (bytes >= MAX_REQUEST)
        SPP_MALLOC_FAILURE_ACTION;
#ifdef REALLOC_ZERO_BYTES_FREES
    else if (bytes == 0)
        mspace_free(msp, oldmem);
#endif
    else
    {
        size_t nb = request2size(bytes);
        mchunkptr oldp = mem2chunk(oldmem);
#if ! SPP_FOOTERS
        mstate m = (mstate)msp;
#else
        mstate m = get_mstate_for(oldp);
        if (!m->ok_magic())
        {
            SPP_USAGE_ERROR_ACTION(m, oldmem);
            return 0;
        }
#endif
        if (1)
        {
            mchunkptr newp = m->try_realloc_chunk(oldp, nb, 1);
            if (newp != 0)
            {
                m->check_inuse_chunk(newp);
                mem = chunk2mem(newp);
            }
            else
            {
                mem = mspace_malloc(m, bytes);
                if (mem != 0)
                {
                    size_t oc = oldp->chunksize() - oldp->overhead_for();
                    memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
                    mspace_free(m, oldmem);
                }
            }
        }
    }
    return mem;
}

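// The following entry points are compiled out in this build and kept
// for reference.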
#if 0

SPP_API mspace create_mspace_with_base(void* base, size_t capacity, int locked)
{
    mstate m = 0;
    size_t msize;
    mparams.ensure_initialization();
    msize = pad_request(sizeof(malloc_state));
    if (capacity > msize + top_foot_size() &&
        capacity < (size_t) - (msize + top_foot_size() + mparams._page_size))
    {
        m = init_user_mstate((char*)base, capacity);
        m->_seg._sflags = EXTERN_BIT;
        m->set_lock(locked);
    }
    return (mspace)m;
}

SPP_API int mspace_track_large_chunks(mspace msp, int enable)
{
    int ret = 0;
    mstate ms = (mstate)msp;
    if (1)
    {
        if (!ms->use_mmap())
            ret = 1;
        if (!enable)
            ms->enable_mmap();
        else
            ms->disable_mmap();
    }
    return ret;
}

SPP_API void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes)
{
    void* mem = 0;
    if (oldmem != 0)
    {
        if (bytes >= MAX_REQUEST)
            SPP_MALLOC_FAILURE_ACTION;
        else
        {
            size_t nb = request2size(bytes);
            mchunkptr oldp = mem2chunk(oldmem);
#if ! SPP_FOOTERS
            mstate m = (mstate)msp;
#else
            mstate m = get_mstate_for(oldp);
            (void)msp; // placate people compiling -Wunused
            if (!m->ok_magic())
            {
                SPP_USAGE_ERROR_ACTION(m, oldmem);
                return 0;
            }
#endif
            if (1)
            {
                mchunkptr newp = m->try_realloc_chunk(oldp, nb, 0);
                if (newp == oldp)
                {
                    m->check_inuse_chunk(newp);
                    mem = oldmem;
                }
            }
        }
    }
    return mem;
}

SPP_API void* mspace_memalign(mspace msp, size_t alignment, size_t bytes)
{
    mstate ms = (mstate)msp;
    if (!ms->ok_magic())
    {
        SPP_USAGE_ERROR_ACTION(ms, ms);
        return 0;
    }
    if (alignment <= SPP_MALLOC_ALIGNMENT)
        return mspace_malloc(msp, bytes);
    return ms->internal_memalign(alignment, bytes);
}

SPP_API void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                         size_t elem_size, void* chunks[])
{
    size_t sz = elem_size; // serves as 1-element array
    mstate ms = (mstate)msp;
    if (!ms->ok_magic())
    {
        SPP_USAGE_ERROR_ACTION(ms, ms);
        return 0;
    }
    return ms->ialloc(n_elements, &sz, 3, chunks);
}

SPP_API void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                           size_t sizes[], void* chunks[])
{
    mstate ms = (mstate)msp;
    if (!ms->ok_magic())
    {
        SPP_USAGE_ERROR_ACTION(ms, ms);
        return 0;
    }
    return ms->ialloc(n_elements, sizes, 0, chunks);
}

#endif

SPP_API size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem)
{
    return ((mstate)msp)->internal_bulk_free(array, nelem);
}

#if SPP_MALLOC_INSPECT_ALL
SPP_API void mspace_inspect_all(mspace msp,
                                void(*handler)(void *start,
                                               void *end,
                                               size_t used_bytes,
                                               void* callback_arg),
                                void* arg)
{
    mstate ms = (mstate)msp;
    if (ms->ok_magic())
        ms->internal_inspect_all(handler, arg);
    else
        SPP_USAGE_ERROR_ACTION(ms, ms);
}
#endif

SPP_API int mspace_trim(mspace msp, size_t pad)
{
    int result = 0;
    mstate ms = (mstate)msp;
    if (ms->ok_magic())
        result = ms->sys_trim(pad);
    else
        SPP_USAGE_ERROR_ACTION(ms, ms);
    return result;
}

SPP_API size_t mspace_footprint(mspace msp)
{
    size_t result = 0;
    mstate ms = (mstate)msp;
    if (ms->ok_magic())
        result = ms->_footprint;
    else
        SPP_USAGE_ERROR_ACTION(ms, ms);
    return result;
}

SPP_API size_t mspace_max_footprint(mspace msp)
{
    size_t result = 0;
    mstate ms = (mstate)msp;
    if (ms->ok_magic())
        result = ms->_max_footprint;
    else
        SPP_USAGE_ERROR_ACTION(ms, ms);
    return result;
}

SPP_API size_t mspace_footprint_limit(mspace msp)
{
    size_t result = 0;
    mstate ms = (mstate)msp;
    if (ms->ok_magic())
    {
        size_t maf = ms->_footprint_limit;
        result = (maf == 0) ? spp_max_size_t : maf;
    }
    else
        SPP_USAGE_ERROR_ACTION(ms, ms);
    return result;
}

SPP_API size_t mspace_set_footprint_limit(mspace msp, size_t bytes)
{
    size_t result = 0;
    mstate ms = (mstate)msp;
    if (ms->ok_magic())
    {
        if (bytes == 0)
            result = mparams.granularity_align(1); // Use minimal size
        else if (bytes == spp_max_size_t)
            result = 0; // disable
        else
            result = mparams.granularity_align(bytes);
        ms->_footprint_limit = result;
    }
    else
        SPP_USAGE_ERROR_ACTION(ms, ms);
    return result;
}

SPP_API size_t mspace_usable_size(const void* mem)
{
    if (mem != 0)
    {
        mchunkptr p = mem2chunk(mem);
        if (p->is_inuse())
            return p->chunksize() - p->overhead_for();
    }
    return 0;
}

SPP_API int mspace_mallopt(int param_number, int value)
{
    return mparams.change(param_number, value);
}

} // spp_ namespace


#endif // SPP_EXCLUDE_IMPLEMENTATION

#endif // spp_dlalloc__h_