rj_schema 1.0.0 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ext/rj_schema/rapidjson/CMakeLists.txt +23 -1
- data/ext/rj_schema/rapidjson/appveyor.yml +49 -1
- data/ext/rj_schema/rapidjson/bin/types/alotofkeys.json +502 -0
- data/ext/rj_schema/rapidjson/bin/unittestschema/idandref.json +69 -0
- data/ext/rj_schema/rapidjson/doc/stream.md +7 -7
- data/ext/rj_schema/rapidjson/doc/stream.zh-cn.md +1 -1
- data/ext/rj_schema/rapidjson/doc/tutorial.md +15 -15
- data/ext/rj_schema/rapidjson/example/schemavalidator/schemavalidator.cpp +2 -0
- data/ext/rj_schema/rapidjson/example/traverseaspointer.cpp +39 -0
- data/ext/rj_schema/rapidjson/include/rapidjson/allocators.h +460 -52
- data/ext/rj_schema/rapidjson/include/rapidjson/document.h +350 -60
- data/ext/rj_schema/rapidjson/include/rapidjson/internal/strfunc.h +14 -0
- data/ext/rj_schema/rapidjson/include/rapidjson/pointer.h +68 -1
- data/ext/rj_schema/rapidjson/include/rapidjson/rapidjson.h +60 -11
- data/ext/rj_schema/rapidjson/include/rapidjson/schema.h +249 -102
- data/ext/rj_schema/rapidjson/include/rapidjson/uri.h +466 -0
- data/ext/rj_schema/rapidjson/test/perftest/perftest.h +5 -4
- data/ext/rj_schema/rapidjson/test/perftest/rapidjsontest.cpp +20 -2
- data/ext/rj_schema/rapidjson/test/unittest/CMakeLists.txt +2 -0
- data/ext/rj_schema/rapidjson/test/unittest/allocatorstest.cpp +193 -1
- data/ext/rj_schema/rapidjson/test/unittest/documenttest.cpp +2 -0
- data/ext/rj_schema/rapidjson/test/unittest/platformtest.cpp +40 -0
- data/ext/rj_schema/rapidjson/test/unittest/pointertest.cpp +62 -2
- data/ext/rj_schema/rapidjson/test/unittest/schematest.cpp +372 -7
- data/ext/rj_schema/rapidjson/test/unittest/uritest.cpp +718 -0
- data/ext/rj_schema/rapidjson/test/unittest/valuetest.cpp +12 -2
- data/ext/rj_schema/rj_schema.cpp +3 -10
- data/lib/rj_schema.rb +1 -1
- metadata +9 -3
@@ -16,6 +16,13 @@
|
|
16
16
|
#define RAPIDJSON_ALLOCATORS_H_
|
17
17
|
|
18
18
|
#include "rapidjson.h"
|
19
|
+
#include "internal/meta.h"
|
20
|
+
|
21
|
+
#include <memory>
|
22
|
+
|
23
|
+
#if RAPIDJSON_HAS_CXX11
|
24
|
+
#include <type_traits>
|
25
|
+
#endif
|
19
26
|
|
20
27
|
RAPIDJSON_NAMESPACE_BEGIN
|
21
28
|
|
@@ -89,7 +96,14 @@ public:
|
|
89
96
|
}
|
90
97
|
return RAPIDJSON_REALLOC(originalPtr, newSize);
|
91
98
|
}
|
92
|
-
static void Free(void *ptr) { RAPIDJSON_FREE(ptr); }
|
99
|
+
static void Free(void *ptr) RAPIDJSON_NOEXCEPT { RAPIDJSON_FREE(ptr); }
|
100
|
+
|
101
|
+
bool operator==(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
|
102
|
+
return true;
|
103
|
+
}
|
104
|
+
bool operator!=(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
|
105
|
+
return false;
|
106
|
+
}
|
93
107
|
};
|
94
108
|
|
95
109
|
///////////////////////////////////////////////////////////////////////////////
|
@@ -113,16 +127,64 @@ public:
|
|
113
127
|
*/
|
114
128
|
template <typename BaseAllocator = CrtAllocator>
|
115
129
|
class MemoryPoolAllocator {
|
130
|
+
//! Chunk header for prepending to each chunk.
|
131
|
+
/*! Chunks are stored as a singly linked list.
|
132
|
+
*/
|
133
|
+
struct ChunkHeader {
|
134
|
+
size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
|
135
|
+
size_t size; //!< Current size of allocated memory in bytes.
|
136
|
+
ChunkHeader *next; //!< Next chunk in the linked list.
|
137
|
+
};
|
138
|
+
|
139
|
+
struct SharedData {
|
140
|
+
ChunkHeader *chunkHead; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
|
141
|
+
BaseAllocator* ownBaseAllocator; //!< base allocator created by this object.
|
142
|
+
size_t refcount;
|
143
|
+
bool ownBuffer;
|
144
|
+
};
|
145
|
+
|
146
|
+
static const size_t SIZEOF_SHARED_DATA = RAPIDJSON_ALIGN(sizeof(SharedData));
|
147
|
+
static const size_t SIZEOF_CHUNK_HEADER = RAPIDJSON_ALIGN(sizeof(ChunkHeader));
|
148
|
+
|
149
|
+
static inline ChunkHeader *GetChunkHead(SharedData *shared)
|
150
|
+
{
|
151
|
+
return reinterpret_cast<ChunkHeader*>(reinterpret_cast<uint8_t*>(shared) + SIZEOF_SHARED_DATA);
|
152
|
+
}
|
153
|
+
static inline uint8_t *GetChunkBuffer(SharedData *shared)
|
154
|
+
{
|
155
|
+
return reinterpret_cast<uint8_t*>(shared->chunkHead) + SIZEOF_CHUNK_HEADER;
|
156
|
+
}
|
157
|
+
|
158
|
+
static const size_t kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
|
159
|
+
|
116
160
|
public:
|
117
161
|
static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator)
|
162
|
+
static const bool kRefCounted = true; //!< Tell users that this allocator is reference counted on copy
|
118
163
|
|
119
164
|
//! Constructor with chunkSize.
|
120
165
|
/*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
|
121
166
|
\param baseAllocator The allocator for allocating memory chunks.
|
122
167
|
*/
|
168
|
+
explicit
|
123
169
|
MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
|
124
|
-
|
170
|
+
chunk_capacity_(chunkSize),
|
171
|
+
baseAllocator_(baseAllocator ? baseAllocator : RAPIDJSON_NEW(BaseAllocator)()),
|
172
|
+
shared_(static_cast<SharedData*>(baseAllocator_ ? baseAllocator_->Malloc(SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER) : 0))
|
125
173
|
{
|
174
|
+
RAPIDJSON_ASSERT(baseAllocator_ != 0);
|
175
|
+
RAPIDJSON_ASSERT(shared_ != 0);
|
176
|
+
if (baseAllocator) {
|
177
|
+
shared_->ownBaseAllocator = 0;
|
178
|
+
}
|
179
|
+
else {
|
180
|
+
shared_->ownBaseAllocator = baseAllocator_;
|
181
|
+
}
|
182
|
+
shared_->chunkHead = GetChunkHead(shared_);
|
183
|
+
shared_->chunkHead->capacity = 0;
|
184
|
+
shared_->chunkHead->size = 0;
|
185
|
+
shared_->chunkHead->next = 0;
|
186
|
+
shared_->ownBuffer = true;
|
187
|
+
shared_->refcount = 1;
|
126
188
|
}
|
127
189
|
|
128
190
|
//! Constructor with user-supplied buffer.
|
@@ -136,41 +198,101 @@ public:
|
|
136
198
|
\param baseAllocator The allocator for allocating memory chunks.
|
137
199
|
*/
|
138
200
|
MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
|
139
|
-
|
201
|
+
chunk_capacity_(chunkSize),
|
202
|
+
baseAllocator_(baseAllocator),
|
203
|
+
shared_(static_cast<SharedData*>(AlignBuffer(buffer, size)))
|
204
|
+
{
|
205
|
+
RAPIDJSON_ASSERT(size >= SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER);
|
206
|
+
shared_->chunkHead = GetChunkHead(shared_);
|
207
|
+
shared_->chunkHead->capacity = size - SIZEOF_SHARED_DATA - SIZEOF_CHUNK_HEADER;
|
208
|
+
shared_->chunkHead->size = 0;
|
209
|
+
shared_->chunkHead->next = 0;
|
210
|
+
shared_->ownBaseAllocator = 0;
|
211
|
+
shared_->ownBuffer = false;
|
212
|
+
shared_->refcount = 1;
|
213
|
+
}
|
214
|
+
|
215
|
+
MemoryPoolAllocator(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT :
|
216
|
+
chunk_capacity_(rhs.chunk_capacity_),
|
217
|
+
baseAllocator_(rhs.baseAllocator_),
|
218
|
+
shared_(rhs.shared_)
|
219
|
+
{
|
220
|
+
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
|
221
|
+
++shared_->refcount;
|
222
|
+
}
|
223
|
+
MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT
|
140
224
|
{
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
225
|
+
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
|
226
|
+
++rhs.shared_->refcount;
|
227
|
+
this->~MemoryPoolAllocator();
|
228
|
+
baseAllocator_ = rhs.baseAllocator_;
|
229
|
+
chunk_capacity_ = rhs.chunk_capacity_;
|
230
|
+
shared_ = rhs.shared_;
|
231
|
+
return *this;
|
147
232
|
}
|
148
233
|
|
234
|
+
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
235
|
+
MemoryPoolAllocator(MemoryPoolAllocator&& rhs) RAPIDJSON_NOEXCEPT :
|
236
|
+
chunk_capacity_(rhs.chunk_capacity_),
|
237
|
+
baseAllocator_(rhs.baseAllocator_),
|
238
|
+
shared_(rhs.shared_)
|
239
|
+
{
|
240
|
+
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
|
241
|
+
rhs.shared_ = 0;
|
242
|
+
}
|
243
|
+
MemoryPoolAllocator& operator=(MemoryPoolAllocator&& rhs) RAPIDJSON_NOEXCEPT
|
244
|
+
{
|
245
|
+
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
|
246
|
+
this->~MemoryPoolAllocator();
|
247
|
+
baseAllocator_ = rhs.baseAllocator_;
|
248
|
+
chunk_capacity_ = rhs.chunk_capacity_;
|
249
|
+
shared_ = rhs.shared_;
|
250
|
+
rhs.shared_ = 0;
|
251
|
+
return *this;
|
252
|
+
}
|
253
|
+
#endif
|
254
|
+
|
149
255
|
//! Destructor.
|
150
256
|
/*! This deallocates all memory chunks, excluding the user-supplied buffer.
|
151
257
|
*/
|
152
|
-
~MemoryPoolAllocator() {
|
258
|
+
~MemoryPoolAllocator() RAPIDJSON_NOEXCEPT {
|
259
|
+
if (!shared_) {
|
260
|
+
// do nothing if moved
|
261
|
+
return;
|
262
|
+
}
|
263
|
+
if (shared_->refcount > 1) {
|
264
|
+
--shared_->refcount;
|
265
|
+
return;
|
266
|
+
}
|
153
267
|
Clear();
|
154
|
-
|
268
|
+
BaseAllocator *a = shared_->ownBaseAllocator;
|
269
|
+
if (shared_->ownBuffer) {
|
270
|
+
baseAllocator_->Free(shared_);
|
271
|
+
}
|
272
|
+
RAPIDJSON_DELETE(a);
|
155
273
|
}
|
156
274
|
|
157
|
-
//! Deallocates all memory chunks, excluding the user-supplied buffer.
|
158
|
-
void Clear() {
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
275
|
+
//! Deallocates all memory chunks, excluding the first/user one.
|
276
|
+
void Clear() RAPIDJSON_NOEXCEPT {
|
277
|
+
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
|
278
|
+
for (;;) {
|
279
|
+
ChunkHeader* c = shared_->chunkHead;
|
280
|
+
if (!c->next) {
|
281
|
+
break;
|
282
|
+
}
|
283
|
+
shared_->chunkHead = c->next;
|
284
|
+
baseAllocator_->Free(c);
|
163
285
|
}
|
164
|
-
|
165
|
-
chunkHead_->size = 0; // Clear user buffer
|
286
|
+
shared_->chunkHead->size = 0;
|
166
287
|
}
|
167
288
|
|
168
289
|
//! Computes the total capacity of allocated memory chunks.
|
169
290
|
/*! \return total capacity in bytes.
|
170
291
|
*/
|
171
|
-
size_t Capacity() const {
|
292
|
+
size_t Capacity() const RAPIDJSON_NOEXCEPT {
|
293
|
+
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
|
172
294
|
size_t capacity = 0;
|
173
|
-
for (ChunkHeader* c =
|
295
|
+
for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
|
174
296
|
capacity += c->capacity;
|
175
297
|
return capacity;
|
176
298
|
}
|
@@ -178,25 +300,35 @@ public:
|
|
178
300
|
//! Computes the memory blocks allocated.
|
179
301
|
/*! \return total used bytes.
|
180
302
|
*/
|
181
|
-
size_t Size() const {
|
303
|
+
size_t Size() const RAPIDJSON_NOEXCEPT {
|
304
|
+
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
|
182
305
|
size_t size = 0;
|
183
|
-
for (ChunkHeader* c =
|
306
|
+
for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
|
184
307
|
size += c->size;
|
185
308
|
return size;
|
186
309
|
}
|
187
310
|
|
311
|
+
//! Whether the allocator is shared.
|
312
|
+
/*! \return true or false.
|
313
|
+
*/
|
314
|
+
bool Shared() const RAPIDJSON_NOEXCEPT {
|
315
|
+
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
|
316
|
+
return shared_->refcount > 1;
|
317
|
+
}
|
318
|
+
|
188
319
|
//! Allocates a memory block. (concept Allocator)
|
189
320
|
void* Malloc(size_t size) {
|
321
|
+
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
|
190
322
|
if (!size)
|
191
323
|
return NULL;
|
192
324
|
|
193
325
|
size = RAPIDJSON_ALIGN(size);
|
194
|
-
if (
|
326
|
+
if (RAPIDJSON_UNLIKELY(shared_->chunkHead->size + size > shared_->chunkHead->capacity))
|
195
327
|
if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
|
196
328
|
return NULL;
|
197
329
|
|
198
|
-
void *buffer =
|
199
|
-
|
330
|
+
void *buffer = GetChunkBuffer(shared_) + shared_->chunkHead->size;
|
331
|
+
shared_->chunkHead->size += size;
|
200
332
|
return buffer;
|
201
333
|
}
|
202
334
|
|
@@ -205,6 +337,7 @@ public:
|
|
205
337
|
if (originalPtr == 0)
|
206
338
|
return Malloc(newSize);
|
207
339
|
|
340
|
+
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
|
208
341
|
if (newSize == 0)
|
209
342
|
return NULL;
|
210
343
|
|
@@ -216,10 +349,10 @@ public:
|
|
216
349
|
return originalPtr;
|
217
350
|
|
218
351
|
// Simply expand it if it is the last allocation and there is sufficient space
|
219
|
-
if (originalPtr ==
|
352
|
+
if (originalPtr == GetChunkBuffer(shared_) + shared_->chunkHead->size - originalSize) {
|
220
353
|
size_t increment = static_cast<size_t>(newSize - originalSize);
|
221
|
-
if (
|
222
|
-
|
354
|
+
if (shared_->chunkHead->size + increment <= shared_->chunkHead->capacity) {
|
355
|
+
shared_->chunkHead->size += increment;
|
223
356
|
return originalPtr;
|
224
357
|
}
|
225
358
|
}
|
@@ -235,50 +368,325 @@ public:
|
|
235
368
|
}
|
236
369
|
|
237
370
|
//! Frees a memory block (concept Allocator)
|
238
|
-
static void Free(void *ptr) { (void)ptr; } // Do nothing
|
371
|
+
static void Free(void *ptr) RAPIDJSON_NOEXCEPT { (void)ptr; } // Do nothing
|
239
372
|
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
373
|
+
//! Compare (equality) with another MemoryPoolAllocator
|
374
|
+
bool operator==(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
|
375
|
+
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
|
376
|
+
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
|
377
|
+
return shared_ == rhs.shared_;
|
378
|
+
}
|
379
|
+
//! Compare (inequality) with another MemoryPoolAllocator
|
380
|
+
bool operator!=(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
|
381
|
+
return !operator==(rhs);
|
382
|
+
}
|
245
383
|
|
384
|
+
private:
|
246
385
|
//! Creates a new chunk.
|
247
386
|
/*! \param capacity Capacity of the chunk in bytes.
|
248
387
|
\return true if success.
|
249
388
|
*/
|
250
389
|
bool AddChunk(size_t capacity) {
|
251
390
|
if (!baseAllocator_)
|
252
|
-
|
253
|
-
if (ChunkHeader* chunk =
|
391
|
+
shared_->ownBaseAllocator = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
|
392
|
+
if (ChunkHeader* chunk = static_cast<ChunkHeader*>(baseAllocator_->Malloc(SIZEOF_CHUNK_HEADER + capacity))) {
|
254
393
|
chunk->capacity = capacity;
|
255
394
|
chunk->size = 0;
|
256
|
-
chunk->next =
|
257
|
-
|
395
|
+
chunk->next = shared_->chunkHead;
|
396
|
+
shared_->chunkHead = chunk;
|
258
397
|
return true;
|
259
398
|
}
|
260
399
|
else
|
261
400
|
return false;
|
262
401
|
}
|
263
402
|
|
264
|
-
static
|
265
|
-
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
|
270
|
-
|
271
|
-
|
272
|
-
|
273
|
-
|
403
|
+
static inline void* AlignBuffer(void* buf, size_t &size)
|
404
|
+
{
|
405
|
+
RAPIDJSON_NOEXCEPT_ASSERT(buf != 0);
|
406
|
+
const uintptr_t mask = sizeof(void*) - 1;
|
407
|
+
const uintptr_t ubuf = reinterpret_cast<uintptr_t>(buf);
|
408
|
+
if (RAPIDJSON_UNLIKELY(ubuf & mask)) {
|
409
|
+
const uintptr_t abuf = (ubuf + mask) & ~mask;
|
410
|
+
RAPIDJSON_ASSERT(size >= abuf - ubuf);
|
411
|
+
buf = reinterpret_cast<void*>(abuf);
|
412
|
+
size -= abuf - ubuf;
|
413
|
+
}
|
414
|
+
return buf;
|
415
|
+
}
|
274
416
|
|
275
|
-
ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
|
276
417
|
size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated.
|
277
|
-
void *userBuffer_; //!< User supplied buffer.
|
278
418
|
BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks.
|
279
|
-
|
419
|
+
SharedData *shared_; //!< The shared data of the allocator
|
280
420
|
};
|
281
421
|
|
422
|
+
namespace internal {
|
423
|
+
template<typename, typename = void>
|
424
|
+
struct IsRefCounted :
|
425
|
+
public FalseType
|
426
|
+
{ };
|
427
|
+
template<typename T>
|
428
|
+
struct IsRefCounted<T, typename internal::EnableIfCond<T::kRefCounted>::Type> :
|
429
|
+
public TrueType
|
430
|
+
{ };
|
431
|
+
}
|
432
|
+
|
433
|
+
template<typename T, typename A>
|
434
|
+
inline T* Realloc(A& a, T* old_p, size_t old_n, size_t new_n)
|
435
|
+
{
|
436
|
+
RAPIDJSON_NOEXCEPT_ASSERT(old_n <= SIZE_MAX / sizeof(T) && new_n <= SIZE_MAX / sizeof(T));
|
437
|
+
return static_cast<T*>(a.Realloc(old_p, old_n * sizeof(T), new_n * sizeof(T)));
|
438
|
+
}
|
439
|
+
|
440
|
+
template<typename T, typename A>
|
441
|
+
inline T *Malloc(A& a, size_t n = 1)
|
442
|
+
{
|
443
|
+
return Realloc<T, A>(a, NULL, 0, n);
|
444
|
+
}
|
445
|
+
|
446
|
+
template<typename T, typename A>
|
447
|
+
inline void Free(A& a, T *p, size_t n = 1)
|
448
|
+
{
|
449
|
+
static_cast<void>(Realloc<T, A>(a, p, n, 0));
|
450
|
+
}
|
451
|
+
|
452
|
+
#ifdef __GNUC__
|
453
|
+
RAPIDJSON_DIAG_PUSH
|
454
|
+
RAPIDJSON_DIAG_OFF(effc++) // std::allocator can safely be inherited
|
455
|
+
#endif
|
456
|
+
|
457
|
+
template <typename T, typename BaseAllocator = CrtAllocator>
|
458
|
+
class StdAllocator :
|
459
|
+
public std::allocator<T>
|
460
|
+
{
|
461
|
+
typedef std::allocator<T> allocator_type;
|
462
|
+
#if RAPIDJSON_HAS_CXX11
|
463
|
+
typedef std::allocator_traits<allocator_type> traits_type;
|
464
|
+
#else
|
465
|
+
typedef allocator_type traits_type;
|
466
|
+
#endif
|
467
|
+
|
468
|
+
public:
|
469
|
+
typedef BaseAllocator BaseAllocatorType;
|
470
|
+
|
471
|
+
StdAllocator() RAPIDJSON_NOEXCEPT :
|
472
|
+
allocator_type(),
|
473
|
+
baseAllocator_()
|
474
|
+
{ }
|
475
|
+
|
476
|
+
StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
|
477
|
+
allocator_type(rhs),
|
478
|
+
baseAllocator_(rhs.baseAllocator_)
|
479
|
+
{ }
|
480
|
+
|
481
|
+
template<typename U>
|
482
|
+
StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
|
483
|
+
allocator_type(rhs),
|
484
|
+
baseAllocator_(rhs.baseAllocator_)
|
485
|
+
{ }
|
486
|
+
|
487
|
+
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
488
|
+
StdAllocator(StdAllocator&& rhs) RAPIDJSON_NOEXCEPT :
|
489
|
+
allocator_type(std::move(rhs)),
|
490
|
+
baseAllocator_(std::move(rhs.baseAllocator_))
|
491
|
+
{ }
|
492
|
+
#endif
|
493
|
+
#if RAPIDJSON_HAS_CXX11
|
494
|
+
using propagate_on_container_move_assignment = std::true_type;
|
495
|
+
using propagate_on_container_swap = std::true_type;
|
496
|
+
#endif
|
497
|
+
|
498
|
+
/* implicit */
|
499
|
+
StdAllocator(const BaseAllocator& allocator) RAPIDJSON_NOEXCEPT :
|
500
|
+
allocator_type(),
|
501
|
+
baseAllocator_(allocator)
|
502
|
+
{ }
|
503
|
+
|
504
|
+
~StdAllocator() RAPIDJSON_NOEXCEPT
|
505
|
+
{ }
|
506
|
+
|
507
|
+
template<typename U>
|
508
|
+
struct rebind {
|
509
|
+
typedef StdAllocator<U, BaseAllocator> other;
|
510
|
+
};
|
511
|
+
|
512
|
+
typedef typename traits_type::size_type size_type;
|
513
|
+
typedef typename traits_type::difference_type difference_type;
|
514
|
+
|
515
|
+
typedef typename traits_type::value_type value_type;
|
516
|
+
typedef typename traits_type::pointer pointer;
|
517
|
+
typedef typename traits_type::const_pointer const_pointer;
|
518
|
+
|
519
|
+
#if RAPIDJSON_HAS_CXX11
|
520
|
+
|
521
|
+
typedef typename std::add_lvalue_reference<value_type>::type &reference;
|
522
|
+
typedef typename std::add_lvalue_reference<typename std::add_const<value_type>::type>::type &const_reference;
|
523
|
+
|
524
|
+
pointer address(reference r) const RAPIDJSON_NOEXCEPT
|
525
|
+
{
|
526
|
+
return std::addressof(r);
|
527
|
+
}
|
528
|
+
const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
|
529
|
+
{
|
530
|
+
return std::addressof(r);
|
531
|
+
}
|
532
|
+
|
533
|
+
size_type max_size() const RAPIDJSON_NOEXCEPT
|
534
|
+
{
|
535
|
+
return traits_type::max_size(*this);
|
536
|
+
}
|
537
|
+
|
538
|
+
template <typename ...Args>
|
539
|
+
void construct(pointer p, Args&&... args)
|
540
|
+
{
|
541
|
+
traits_type::construct(*this, p, std::forward<Args>(args)...);
|
542
|
+
}
|
543
|
+
void destroy(pointer p)
|
544
|
+
{
|
545
|
+
traits_type::destroy(*this, p);
|
546
|
+
}
|
547
|
+
|
548
|
+
#else // !RAPIDJSON_HAS_CXX11
|
549
|
+
|
550
|
+
typedef typename allocator_type::reference reference;
|
551
|
+
typedef typename allocator_type::const_reference const_reference;
|
552
|
+
|
553
|
+
pointer address(reference r) const RAPIDJSON_NOEXCEPT
|
554
|
+
{
|
555
|
+
return allocator_type::address(r);
|
556
|
+
}
|
557
|
+
const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
|
558
|
+
{
|
559
|
+
return allocator_type::address(r);
|
560
|
+
}
|
561
|
+
|
562
|
+
size_type max_size() const RAPIDJSON_NOEXCEPT
|
563
|
+
{
|
564
|
+
return allocator_type::max_size();
|
565
|
+
}
|
566
|
+
|
567
|
+
void construct(pointer p, const_reference r)
|
568
|
+
{
|
569
|
+
allocator_type::construct(p, r);
|
570
|
+
}
|
571
|
+
void destroy(pointer p)
|
572
|
+
{
|
573
|
+
allocator_type::destroy(p);
|
574
|
+
}
|
575
|
+
|
576
|
+
#endif // !RAPIDJSON_HAS_CXX11
|
577
|
+
|
578
|
+
template <typename U>
|
579
|
+
U* allocate(size_type n = 1, const void* = 0)
|
580
|
+
{
|
581
|
+
return RAPIDJSON_NAMESPACE::Malloc<U>(baseAllocator_, n);
|
582
|
+
}
|
583
|
+
template <typename U>
|
584
|
+
void deallocate(U* p, size_type n = 1)
|
585
|
+
{
|
586
|
+
RAPIDJSON_NAMESPACE::Free<U>(baseAllocator_, p, n);
|
587
|
+
}
|
588
|
+
|
589
|
+
pointer allocate(size_type n = 1, const void* = 0)
|
590
|
+
{
|
591
|
+
return allocate<value_type>(n);
|
592
|
+
}
|
593
|
+
void deallocate(pointer p, size_type n = 1)
|
594
|
+
{
|
595
|
+
deallocate<value_type>(p, n);
|
596
|
+
}
|
597
|
+
|
598
|
+
#if RAPIDJSON_HAS_CXX11
|
599
|
+
using is_always_equal = std::is_empty<BaseAllocator>;
|
600
|
+
#endif
|
601
|
+
|
602
|
+
template<typename U>
|
603
|
+
bool operator==(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
|
604
|
+
{
|
605
|
+
return baseAllocator_ == rhs.baseAllocator_;
|
606
|
+
}
|
607
|
+
template<typename U>
|
608
|
+
bool operator!=(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
|
609
|
+
{
|
610
|
+
return !operator==(rhs);
|
611
|
+
}
|
612
|
+
|
613
|
+
//! rapidjson Allocator concept
|
614
|
+
static const bool kNeedFree = BaseAllocator::kNeedFree;
|
615
|
+
static const bool kRefCounted = internal::IsRefCounted<BaseAllocator>::Value;
|
616
|
+
void* Malloc(size_t size)
|
617
|
+
{
|
618
|
+
return baseAllocator_.Malloc(size);
|
619
|
+
}
|
620
|
+
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize)
|
621
|
+
{
|
622
|
+
return baseAllocator_.Realloc(originalPtr, originalSize, newSize);
|
623
|
+
}
|
624
|
+
static void Free(void *ptr) RAPIDJSON_NOEXCEPT
|
625
|
+
{
|
626
|
+
BaseAllocator::Free(ptr);
|
627
|
+
}
|
628
|
+
|
629
|
+
private:
|
630
|
+
template <typename, typename>
|
631
|
+
friend class StdAllocator; // access to StdAllocator<!T>.*
|
632
|
+
|
633
|
+
BaseAllocator baseAllocator_;
|
634
|
+
};
|
635
|
+
|
636
|
+
#if !RAPIDJSON_HAS_CXX17 // std::allocator<void> deprecated in C++17
|
637
|
+
template <typename BaseAllocator>
|
638
|
+
class StdAllocator<void, BaseAllocator> :
|
639
|
+
public std::allocator<void>
|
640
|
+
{
|
641
|
+
typedef std::allocator<void> allocator_type;
|
642
|
+
|
643
|
+
public:
|
644
|
+
typedef BaseAllocator BaseAllocatorType;
|
645
|
+
|
646
|
+
StdAllocator() RAPIDJSON_NOEXCEPT :
|
647
|
+
allocator_type(),
|
648
|
+
baseAllocator_()
|
649
|
+
{ }
|
650
|
+
|
651
|
+
StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
|
652
|
+
allocator_type(rhs),
|
653
|
+
baseAllocator_(rhs.baseAllocator_)
|
654
|
+
{ }
|
655
|
+
|
656
|
+
template<typename U>
|
657
|
+
StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
|
658
|
+
allocator_type(rhs),
|
659
|
+
baseAllocator_(rhs.baseAllocator_)
|
660
|
+
{ }
|
661
|
+
|
662
|
+
/* implicit */
|
663
|
+
StdAllocator(const BaseAllocator& baseAllocator) RAPIDJSON_NOEXCEPT :
|
664
|
+
allocator_type(),
|
665
|
+
baseAllocator_(baseAllocator)
|
666
|
+
{ }
|
667
|
+
|
668
|
+
~StdAllocator() RAPIDJSON_NOEXCEPT
|
669
|
+
{ }
|
670
|
+
|
671
|
+
template<typename U>
|
672
|
+
struct rebind {
|
673
|
+
typedef StdAllocator<U, BaseAllocator> other;
|
674
|
+
};
|
675
|
+
|
676
|
+
typedef typename allocator_type::value_type value_type;
|
677
|
+
|
678
|
+
private:
|
679
|
+
template <typename, typename>
|
680
|
+
friend class StdAllocator; // access to StdAllocator<!T>.*
|
681
|
+
|
682
|
+
BaseAllocator baseAllocator_;
|
683
|
+
};
|
684
|
+
#endif
|
685
|
+
|
686
|
+
#ifdef __GNUC__
|
687
|
+
RAPIDJSON_DIAG_POP
|
688
|
+
#endif
|
689
|
+
|
282
690
|
RAPIDJSON_NAMESPACE_END
|
283
691
|
|
284
692
|
#endif // RAPIDJSON_ALLOCATORS_H_
|