oinky 0.1.0

Files changed (41)
  1. data/LICENSE +22 -0
  2. data/README.md +141 -0
  3. data/ext/extconf.rb +79 -0
  4. data/ext/include/oinky.h +424 -0
  5. data/ext/include/oinky.hpp +63 -0
  6. data/ext/include/oinky/nky_base.hpp +1116 -0
  7. data/ext/include/oinky/nky_core.hpp +1603 -0
  8. data/ext/include/oinky/nky_cursor.hpp +665 -0
  9. data/ext/include/oinky/nky_dialect.hpp +107 -0
  10. data/ext/include/oinky/nky_error.hpp +164 -0
  11. data/ext/include/oinky/nky_fixed_table.hpp +710 -0
  12. data/ext/include/oinky/nky_handle.hpp +334 -0
  13. data/ext/include/oinky/nky_index.hpp +1038 -0
  14. data/ext/include/oinky/nky_log.hpp +15 -0
  15. data/ext/include/oinky/nky_merge_itr.hpp +403 -0
  16. data/ext/include/oinky/nky_model.hpp +110 -0
  17. data/ext/include/oinky/nky_pool.hpp +760 -0
  18. data/ext/include/oinky/nky_public.hpp +808 -0
  19. data/ext/include/oinky/nky_serializer.hpp +1625 -0
  20. data/ext/include/oinky/nky_strtable.hpp +504 -0
  21. data/ext/include/oinky/nky_table.hpp +1996 -0
  22. data/ext/nky_lib.cpp +390 -0
  23. data/ext/nky_lib_core.hpp +212 -0
  24. data/ext/nky_lib_index.cpp +158 -0
  25. data/ext/nky_lib_table.cpp +224 -0
  26. data/lib/oinky.rb +1284 -0
  27. data/lib/oinky/compiler.rb +106 -0
  28. data/lib/oinky/cpp_emitter.rb +311 -0
  29. data/lib/oinky/dsl.rb +167 -0
  30. data/lib/oinky/error.rb +19 -0
  31. data/lib/oinky/modelbase.rb +12 -0
  32. data/lib/oinky/nbuffer.rb +152 -0
  33. data/lib/oinky/normalize.rb +132 -0
  34. data/lib/oinky/oc_builder.rb +44 -0
  35. data/lib/oinky/query.rb +193 -0
  36. data/lib/oinky/rb_emitter.rb +147 -0
  37. data/lib/oinky/shard.rb +40 -0
  38. data/lib/oinky/testsup.rb +104 -0
  39. data/lib/oinky/version.rb +9 -0
  40. data/oinky.gemspec +36 -0
  41. metadata +120 -0
data/ext/include/oinky/nky_pool.hpp
@@ -0,0 +1,760 @@
+ // This source is distributed under the terms of the MIT License. Refer
+ // to the 'LICENSE' file for details.
+ //
+ // Copyright (c) Jacob Lacouture, 2012
+
+ namespace Oinky
+ {
+ namespace Utils
+ {
+ using namespace Oinky::Errors;
+
+ class nky_malloc_t
+ {
+     bool in_teardown;
+ public:
+     nky_malloc_t() : in_teardown(false) {}
+
+     void *malloc(uint32 size) {
+         OINKY_ASSERT(!in_teardown);
+         OINKY_ASSERT(size);
+         // easy for now...
+         void *ptr = ::malloc(size);
+         if (!ptr) {
+             throw_error(no_resources());
+         }
+         return ptr;
+     }
+
+     void free(void *ptr) {
+         // For the simple allocator, obviously, we are dealing with the
+         // global heap, so we can't just stop freeing memory, whether in
+         // teardown or not.
+         ::free(ptr);
+     }
+
+     // Advise the allocator of future allocations.
+     // If the allocator needs to allocate another block, it should
+     // allocate enough to handle this set of requests. The request
+     // group is specified with two parameters: first, the total number of
+     // anticipated requests; second, the aggregate size of those requests
+     // in bytes. This allows the allocator to compute its internal overhead
+     // requirements more accurately than if we received only the aggregate
+     // byte count.
+     void prepare(uint32 objcount, uint32 totalsize) {}
+
+     // Stop tracking memory allocation. Free can be a no-op, as the entire
+     // allocator will be torn down shortly. No more allocations should occur.
+     // Calling this method is idempotent.
+     void teardown() {
+         in_teardown = true;
+     }
+ };
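
For orientation, a minimal sketch of how a caller drives this allocator interface (mine, not from the gem):

    nky_malloc_t alloc;
    alloc.prepare(3, 24);        // hint: ~3 allocations, ~24 bytes total (a no-op here)
    void *a = alloc.malloc(8);
    void *b = alloc.malloc(16);
    alloc.free(a);
    alloc.free(b);
    alloc.teardown();            // idempotent; no allocations may follow

The same four methods form the contract that the pool_allocator below implements, where prepare() and teardown() actually pay off.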
+
+ //
+ // This is the dumbest pool allocator I could think of, but it works, and
+ // it's a lot faster than malloc.
+ //
+ // I can barely believe I had to write this myself, but every public
+ // implementation I could find was significantly over- or under-engineered.
+ //
+ class pool_allocator
+ {
+     typedef boost::intrusive::list_base_hook<> list_entry;
+     typedef boost::intrusive::list<list_entry> blocklist_t;
+
+     struct freerange
+     {
+         // Size of run in bytes.
+         uint32 bytes : 30;
+         // Is it free.
+         uint32 free : 1;
+         // Is it in the free list. (It might not be, even if it's free.)
+         uint32 in_list : 1;
+
+         static freerange zero() {
+             freerange x;
+             x.bytes = x.free = x.in_list = 0;
+             return x;
+         }
+
+         static freerange free_in_list(uint32 bytes) {
+             freerange x;
+             x.free = x.in_list = 1;
+             x.bytes = bytes;
+             return x;
+         }
+
+         static freerange free_no_list(uint32 bytes) {
+             freerange x;
+             x.free = 1;
+             x.in_list = 0;
+             x.bytes = bytes;
+             return x;
+         }
+
+         bool operator==(const freerange &other) const {
+             return (bytes == other.bytes) &&
+                    (free == other.free) &&
+                    (in_list == other.in_list);
+         }
+     };
+
+     struct inter_space_header {
+         freerange left;
+         freerange right;
+
+         inter_space_header *get_next() {
+             OINKY_ASSERT(right.bytes);
+             char *ptr = ((char *)(this + 1)) + right.bytes;
+             return (inter_space_header *) ptr;
+         }
+
+         inter_space_header *get_previous() {
+             OINKY_ASSERT(left.bytes);
+             char *ptr = ((char *)(this - 1)) - left.bytes;
+             return (inter_space_header *) ptr;
+         }
+
+         static inline const inter_space_header *block(const char *ptr) { return (const inter_space_header *)ptr; }
+
+         static void validate_block(const inter_space_header *any) {
+             const char *begin = (const char *)any;
+             while (block(begin)->left.bytes) {
+                 // Can't be in the list unless it's free.
+                 OINKY_ASSERT(!block(begin)->left.in_list || block(begin)->left.free);
+                 const char *p = begin - block(begin)->left.bytes - sizeof(inter_space_header);
+                 OINKY_ASSERT(block(p)->right == block(begin)->left);
+                 // No two consecutive blocks may be in the list.
+                 OINKY_ASSERT(!block(p)->left.in_list || !block(begin)->left.in_list);
+                 begin = p;
+             }
+             // Recover the usable-size stamp written just before the first
+             // header by add_allocation (a debugging aid).
+             uint32 length = *(const uint32 *)(begin - 4);
+             const char *end = (const char *)any;
+             while (block(end)->right.bytes) {
+                 // Can't be in the list unless it's free.
+                 OINKY_ASSERT(!block(end)->left.in_list || block(end)->left.free);
+                 const char *n = end + block(end)->right.bytes + sizeof(inter_space_header);
+                 OINKY_ASSERT(block(n)->left == block(end)->right);
+                 // No two consecutive blocks may be in the list.
+                 OINKY_ASSERT(!block(n)->left.in_list || !block(end)->left.in_list);
+                 end = n;
+             }
+             end += sizeof(inter_space_header);
+             OINKY_ASSERT(length == (end - begin));
+         }
+     };
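
The invariant validate_block walks is easier to see as a slab picture (my sketch of the layout produced by add_allocation below, not from the gem's docs):

    [next-slab ptr][align pad][size stamp: uint32]
    [hdr0: left=0      right=run0][ run0 bytes ... ]
    [hdr1: left=run0   right=run1][ run1 bytes ... ]
    ...
    [hdrN: left=runN-1 right=0]

Every run is described twice, once in each bracketing header, which is what the block(p)->right == block(begin)->left assertions verify; the walks also confirm that no two consecutive runs are both on the free list and that the end-to-end span matches the size stamp.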
+
+     enum constants_ec {
+         // A free block will not be returned to the free-space map unless
+         // it is at least this large.
+         MIN_FREE_BLOCK_SIZE = 1024
+     };
+
+     BOOST_STATIC_ASSERT(sizeof(list_entry) < MIN_FREE_BLOCK_SIZE);
+
+     void *allocate_from_block(inter_space_header *hdr, uint32 required)
+     {
+         // Headers should always be 8-byte aligned.
+         OINKY_ASSERT((((size_t)(char *)hdr) & 7) == 0);
+
+         // Preconditions. This block should no longer be linked.
+         inter_space_header *next = hdr->get_next();
+         OINKY_ASSERT(!hdr->right.in_list);
+         OINKY_ASSERT(hdr->right.free && next->left.free);
+         OINKY_ASSERT(hdr->right.bytes == next->left.bytes);
+         OINKY_ASSERT(hdr->right.bytes >= required);
+
+         // User allocations are always multiples of 8 bytes.
+         char *ptr = (char *)(hdr + 1);
+         required = (required + 7) & ~7;
+
+         // If the remaining section is too small, we won't even bother
+         // remembering it.
+         if (hdr->right.bytes - required < 64) {
+             // Forget this section. We'll just over-allocate.
+             BOOST_STATIC_ASSERT(sizeof(inter_space_header) * 2 + sizeof(list_entry) <= 64);
+         } else {
+             // Otherwise, we will turn the right portion into a new section and
+             // put it on the free list.
+             inter_space_header *ih = (inter_space_header *)(ptr + required);
+             ih->right.bytes = hdr->right.bytes - required - sizeof(inter_space_header);
+             ih->right.free = ih->right.in_list = 1;
+             freespace.push_front(* ::new(ih + 1) list_entry());
+             next->left = ih->right;
+             next = ih;
+
+             hdr->right.bytes = required;
+         }
+
+         hdr->right.free = 0;
+         next->left = hdr->right;
+
+         return ptr;
+     }
+
+     void free_internal(void *ptr) {
+         // All user ptrs are 8-byte aligned.
+         OINKY_ASSERT((((size_t)ptr) & 7) == 0);
+
+         inter_space_header *lh = ((inter_space_header *) ptr) - 1;
+         inter_space_header *rh = lh->get_next();
+
+         // We don't need this anymore.
+         ptr = NULL;
+
+         OINKY_ASSERT(lh->right == rh->left);
+         OINKY_ASSERT((lh->right.free | lh->right.in_list) == 0);
+
+         // Mark the block free.
+         lh->right.free = rh->left.free = 1;
+
+         // Merge center block with the left if it's free.
+         if (lh->left.free) {
+             OINKY_ASSERT(lh->left.bytes);
+             inter_space_header *llh = lh->get_previous();
+             llh->right.bytes += lh->right.bytes + sizeof(inter_space_header);
+             rh->left = llh->right;
+             lh = llh;
+         }
+
+         // If the right is free, merge it.
+         if (rh->right.free) {
+             OINKY_ASSERT(rh->right.bytes);
+             inter_space_header *rrh = rh->get_next();
+
+             // If the right is in the list, remove it.
+             if (rh->right.in_list) {
+                 // Remove the right from the list.
+                 OINKY_ASSERT(rh->right.free && rh->right.bytes);
+                 list_entry *le = (list_entry *)(rh + 1);
+                 blocklist_t::iterator j = blocklist_t::s_iterator_to(*le);
+                 freespace.erase(j);
+             }
+
+             lh->right.bytes += rh->right.bytes + sizeof(inter_space_header);
+             rrh->left = lh->right;
+             rh = rrh;
+         }
+
+         // Now we are ready to insert in the list. We should have fully
+         // merged by now.
+         OINKY_ASSERT((lh->left.free | rh->right.free) == 0);
+
+         // Should still be consistent.
+         OINKY_ASSERT(lh->right == rh->left);
+
+         // If we are in the list already, or below the size threshold for
+         // insertion, then exit.
+         if (lh->right.in_list || (lh->right.bytes < MIN_FREE_BLOCK_SIZE)) {
+             return;
+         }
+
+         // Now insert in the free list. We insert at the back to keep the
+         // front stable, so that we maximize locality of newly allocated objects.
+         freespace.push_back(* ::new(lh + 1) list_entry());
+         lh->right.in_list = rh->left.in_list = 1;
+     }
+
+     void *allocate_from_block_list(uint32 required)
+     {
+         blocklist_t::iterator i = freespace.begin();
+         blocklist_t::iterator e = freespace.end();
+         while (i != e) {
+             list_entry *hook = &(*i);
+             inter_space_header *hdr = ((inter_space_header *)hook) - 1;
+             // Why else would it be in this list?
+             OINKY_ASSERT(hdr->right.free && hdr->get_next()->left.free);
+             OINKY_ASSERT(hdr->right.in_list && hdr->get_next()->left.in_list);
+             OINKY_ASSERT(!hdr->left.in_list && !hdr->get_next()->right.in_list);
+             uint32 available = hdr->right.bytes;
+
+             // We should have merged during free.
+             OINKY_ASSERT(!hdr->left.free);
+             OINKY_ASSERT(!hdr->get_next()->right.free);
+
+             // If this block is too small and can't be merged.
+             if (available < required) {
+                 // Remove from the free list if it's not worth saving.
+                 if (available < MIN_FREE_BLOCK_SIZE) {
+                     hdr->right.in_list = 0;
+                     hdr->get_next()->left.in_list = 0;
+                     blocklist_t::iterator j = i;
+                     ++i;
+                     freespace.erase(j);
+                 } else {
+                     // Otherwise try the next block.
+                     ++i;
+                 }
+                 continue;
+             }
+
+             // This block will be large enough.
+             freespace.erase(i);
+             hdr->right.in_list = 0;
+             hdr->get_next()->left.in_list = 0;
+
+             // We found a large enough block.
+             return allocate_from_block(hdr, required);
+         }
+
+         // If we got here, then we have no block large enough. Allocate one.
+         inter_space_header *newblk = add_allocation(required);
+         return allocate_from_block(newblk, required);
+     }
+
+     inter_space_header *add_allocation(uint32 totalsize)
+     {
+         // Grow by at least half the total allocated so far.
+         if (totalsize < (total_allocation >> 1)) {
+             totalsize = total_allocation >> 1;
+         }
+         if (totalsize < (MIN_FREE_BLOCK_SIZE << 1)) {
+             totalsize = MIN_FREE_BLOCK_SIZE << 1;
+         }
+         if (totalsize < min_target_alloc) {
+             totalsize = min_target_alloc;
+         }
+
+         // User allocations are always multiples of 8 bytes.
+         totalsize = (totalsize + 7) & ~7;
+
+         // Allocate 8 extra bytes so that even if we get misaligned data from
+         // malloc, we can fix it.
+         totalsize += sizeof(void *) + 4 + (2 * sizeof(inter_space_header)) + 8;
+
+         char *ptr = (char *) ::malloc(totalsize);
+         if (!ptr) {
+             throw_error(no_resources());
+         }
+         total_allocation += totalsize;
+
+         // This list entry must be at the exact beginning of the allocation,
+         // however aligned.
+         *(void **)ptr = allblocks;
+         allblocks = ptr;
+
+         // Skip the block-list pointer.
+         uint32 prefix_size = sizeof(void *) + 4;
+         // Ensure all headers and allocations are 8-byte aligned.
+         prefix_size += 8 - (((size_t)ptr + prefix_size) & 7);
+
+         // Now move the pointer and adjust the byte count.
+         ptr += prefix_size;
+         totalsize -= prefix_size;
+         totalsize &= ~7;
+         // Now write the usable size. (This is purely for debugging.)
+         *(((uint32 *)ptr) - 1) = totalsize;
+
+         inter_space_header *hdr = (inter_space_header *) ptr;
+         hdr->left = freerange::zero();
+         hdr->right = freerange::free_no_list(totalsize - (2 * sizeof(inter_space_header)));
+         hdr->get_next()->left = hdr->right;
+         hdr->get_next()->right = freerange::zero();
+
+         return hdr;
+     }
+
+     void destroy()
+     {
+         while (allblocks) {
+             void *ptr = allblocks;
+             allblocks = *(void **)allblocks;
+             ::free(ptr);
+         }
+         // Don't bother unlinking. Just short-circuit the list destructor.
+         ::new(&freespace) blocklist_t();
+     }
+
+     // Linked list of allocated blocks. (These are not freed until the
+     // pool is destroyed.)
+     void *allblocks;
+     // Linked list of free-space regions.
+     blocklist_t freespace;
+
+     // Keep track of how much we've allocated in total. We'll grow by a percentage.
+     uint32 total_allocation;
+     uint32 min_target_alloc;
+     bool in_teardown;
+
+ public:
+     pool_allocator() : allblocks(NULL), total_allocation(0), min_target_alloc(0), in_teardown(false) {}
+
+     ~pool_allocator()
+     {
+         destroy();
+     }
+
+     void *malloc(uint32 size) {
+         OINKY_ASSERT(!in_teardown);
+         OINKY_ASSERT(size);
+         // easy for now...
+         return allocate_from_block_list(size);
+     }
+
+     void free(void *ptr) {
+         // If we're in teardown, then we don't bother freeing each object
+         // individually. We know the pool will soon be destroyed completely.
+         if (!in_teardown) {
+             free_internal(ptr);
+         }
+     }
+
+     // This can be called on any pointer returned by malloc but not yet freed.
+     // It will validate the block pointers to the left and right of the
+     // allocated object. It's obviously more manual and less definitive than
+     // valgrind, but it's a lot faster too.
+     inline void validate_pool_allocation(const void *ptr) const
+     {
+ #ifdef DEBUG
+         OINKY_ASSERT((((size_t)ptr) & 7) == 0);
+         inter_space_header::validate_block(((const inter_space_header *)ptr) - 1);
+ #endif
+     }
+
+     // Advise the allocator of future allocations.
+     // If the allocator needs to allocate another block, it should
+     // allocate enough to handle this set of requests. The request
+     // group is specified with two parameters: first, the total number of
+     // anticipated requests; second, the aggregate size of those requests
+     // in bytes. This allows the allocator to compute its internal overhead
+     // requirements more accurately than if we received only the aggregate
+     // byte count.
+     void prepare(uint32 objcount, uint32 totalsize) {
+         min_target_alloc = totalsize + ((objcount + 1) * sizeof(inter_space_header));
+     }
+
+     // Stop tracking memory allocation. Free can be a no-op, as the entire
+     // allocator will be torn down shortly. No more allocations should occur.
+     // Calling this method is idempotent.
+     void teardown() {
+         in_teardown = true;
+     }
+ };
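
A minimal usage sketch of the pool (mine, not from the gem), tying the public surface together:

    pool_allocator pool;
    pool.prepare(100, 4096);          // size the next slab for ~100 objects / 4 KiB
    void *p = pool.malloc(48);        // rounded up to a multiple of 8 internally
    pool.validate_pool_allocation(p); // DEBUG builds: walk and check the headers
    pool.free(p);                     // coalesces with free neighbors
    pool.teardown();                  // subsequent frees become no-ops
    // ~pool_allocator() then returns every slab to ::free in one pass.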
+
+ // This allows us to be more lazy/rigorous depending on whether our allocator
+ // can clean up after us. The default is to be rigorous, so it derives
+ // from false_type.
+ template<typename T>
+ struct is_pooled_allocator : boost::false_type {};
+
+ template<>
+ struct is_pooled_allocator<pool_allocator> : boost::true_type {};
+
+ //#################################################
+ //## Here is where we activate one allocator or the other at compile-time.
+
+ // The malloc allocator leaks cursor contexts, but it's useful if you suspect
+ // a bug and want to leverage valgrind or some other such tool.
+ //
+ //typedef nky_malloc_t nky_allocator_t;
+
+ // The custom pool allocator relies on the same synchronization requirements
+ // as the database instance that owns it. It's entirely unsynchronized,
+ // and it offers locality advantages even in many-instance concurrent
+ // scenarios, where all the dynamic objects related to a particular database
+ // are located near each other.
+ typedef pool_allocator nky_allocator_t;
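
The trait above lets generic cleanup code pick the lazy or rigorous path at compile time. A hedged sketch of that dispatch (the helper is hypothetical, not from the gem, and skipping destructors is only safe for element types that own nothing outside the pool):

    // Hypothetical helper: tear down an array of T owned by allocator A.
    template<typename A, typename T>
    void teardown_array(A *alloc, T *objs, uint32 n) {
        if (is_pooled_allocator<A>::value) {
            // Lazy: the pool reclaims its slabs wholesale at destruction.
            return;
        }
        // Rigorous (e.g. nky_malloc_t): destroy and free each object.
        for (uint32 i = 0; i < n; ++i) objs[i].~T();
        alloc->free(objs);
    }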
+
+
+ // This is basically a clone of std::vector, except it uses the DB memory
+ // allocator instead of the heap.
+ template<typename T>
+ class db_vector
+ {
+     nky_allocator_t *alloc;
+     uint32 count;
+     uint32 reserved;
+     T *values;
+
+     void destroy()
+     {
+         // This calls the element destructors.
+         resize(0);
+         // Now free the memory.
+         if (values) {
+             alloc->free(values);
+             values = NULL;
+             reserved = 0;
+         }
+     }
+
+ public:
+     db_vector(const db_vector<T> &src) {
+         // Deep copy.
+         ::new(this) db_vector<T>(src.alloc, src.begin(), src.end());
+     }
+     db_vector &operator=(const db_vector<T> &src) {
+         if (this == &src) return *this;
+         // Construct into a tmp, so that if it raises, we are still valid.
+         db_vector tmp(src);
+         // Nothing else can raise, so now we can safely destroy
+         // ourselves.
+         destroy();
+         // And transfer from tmp to self.
+         alloc = tmp.alloc;
+         count = tmp.count;
+         reserved = tmp.reserved;
+         values = tmp.values;
+         // Make sure tmp destruction is a no-op.
+         tmp.values = NULL;
+         tmp.reserved = tmp.count = 0;
+         return *this;
+     }
+
+     db_vector(nky_allocator_t *_alloc, uint32 _count = 0) :
+         alloc(_alloc),
+         count(0),
+         reserved(0),
+         values(NULL)
+     {
+         // This creates a vector of the given size, with all elements
+         // default constructed.
+         if (_count) {
+             resize(_count);
+         }
+     }
+
+     template<typename ITR>
+     void assign(ITR begin, ITR end)
+     {
+         resize(std::distance(begin, end));
+         T *v = values;
+         for (ITR i = begin; i != end; ++i, ++v) {
+             *v = *i;
+         }
+     }
+
+     template<typename ITR>
+     db_vector(nky_allocator_t *_alloc, ITR begin, ITR end) :
+         alloc(_alloc),
+         count(0),
+         reserved(0),
+         values(NULL)
+     {
+         uint32 tsize = std::distance(begin, end);
+         // This does not invoke any constructors.
+         reserve(tsize);
+
+         // copy construct
+         T *v = values;
+         try {
+             for (ITR i = begin; i != end; ++i, ++v) {
+                 ::new(v) T(*i);
+                 ++count;
+             }
+         } catch (...) {
+             while (count) {
+                 --count;
+                 values[count].~T();
+             }
+             throw;
+         }
+     }
+
+     ~db_vector()
+     {
+         destroy();
+     }
+
+     std::size_t size() const { return count; }
+     void resize(uint32 newsize) {
+         reserve(newsize);
+
+         // Init the new values.
+         if (newsize > count) {
+             // default construct
+             while (count < newsize) {
+                 ::new(values + count) T();
+                 ++count;
+             }
+         } else {
+             while (count > newsize) {
+                 values[count - 1].~T();
+                 --count;
+             }
+         }
+     }
+     void reserve(uint32 newsize) {
+         if (reserved < newsize) {
+             // alloc
+             T *nv = (T *) alloc->malloc(sizeof(T) * newsize);
+             // copy construct
+             uint32 i;
+             try {
+                 for (i = 0; i < count; ++i) {
+                     ::new(nv + i) T(values[i]);
+                 }
+             } catch (...) {
+                 while (i > 0) {
+                     --i;
+                     nv[i].~T();
+                 }
+                 alloc->free(nv);
+                 throw;
+             }
+             // Now use the new buffer.
+             // This only changes reservation, not size.
+             if (values) {
+                 for (i = 0; i < count; ++i) {
+                     values[i].~T();
+                 }
+                 alloc->free(values);
+             }
+             values = nv;
+             reserved = newsize;
+         }
+     }
+
+     nky_allocator_t *allocator() const { return alloc; }
+
+     typedef T *iterator;
+     typedef const T *const_iterator;
+     const T *begin() const { return values; }
+     const T *end() const { return values + count; }
+     T *begin() { return values; }
+     T *end() { return values + count; }
+
+     T &operator[](int x) { return begin()[x]; }
+     const T &operator[](int x) const { return begin()[x]; }
+ };
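
A brief usage sketch (mine; values hypothetical) showing how element storage follows the DB allocator:

    nky_allocator_t pool;
    db_vector<uint32> v(&pool);  // buffer comes from the pool, not the heap
    v.resize(4);                 // default-constructs 4 elements
    v[0] = 7;
    db_vector<uint32> w(v);      // deep copy, same allocator
    // Buffers are returned via pool.free() by the destructors, or reclaimed
    // wholesale if the pool is torn down first.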
+
+ // Like the above, but cannot ever grow its allocation. It does not
+ // instantiate T's copy-constructor. It can still be resized.
+ template<typename T>
+ class db_fixed_vector : boost::noncopyable
+ {
+     nky_allocator_t *alloc;
+     uint32 count;
+     uint32 reserved;
+     T *values;
+
+ public:
+     db_fixed_vector(nky_allocator_t *_alloc, uint32 _count = 0) :
+         alloc(_alloc),
+         count(0),
+         reserved(0),
+         values(NULL)
+     {
+         reserve(_count);
+         // This creates a vector of the given size, with all elements
+         // default constructed.
+         try {
+             while (count < _count) {
+                 ::new(values + count) T();
+                 ++count;
+             }
+         } catch (...) {
+             while (count) {
+                 --count;
+                 values[count].~T();
+             }
+             throw;
+         }
+     }
+
+     template<typename ITR>
+     db_fixed_vector(nky_allocator_t *_alloc, ITR begin, ITR end) :
+         alloc(_alloc),
+         count(0),
+         reserved(0),
+         values(NULL)
+     {
+         uint32 tsize = std::distance(begin, end);
+         // This does not invoke any constructors.
+         reserve(tsize);
+
+         // Construct. This can be copy-construct or whatever. Note that
+         // this method doesn't instantiate the copy constructor unless
+         // someone calls it with an ITR that evaluates to T.
+         T *v = values;
+         try {
+             for (ITR i = begin; i != end; ++i, ++v) {
+                 ::new(v) T(*i);
+                 ++count;
+             }
+         } catch (...) {
+             while (count) {
+                 --count;
+                 values[count].~T();
+             }
+             throw;
+         }
+     }
+
+     ~db_fixed_vector()
+     {
+         // This calls the element destructors.
+         resize(0);
+         // Now free the memory.
+         if (values) {
+             alloc->free(values);
+             values = NULL;
+             reserved = 0;
+         }
+     }
+
+     std::size_t size() const { return count; }
+     void resize(uint32 newsize) {
+         if (count && (newsize > reserved)) {
+             throw_error(invalid_argument());
+         }
+
+         // Init the new values.
+         if (newsize > count) {
+             // Only reserve when the buffer is too small; given the guard
+             // above, this is only reachable while count == 0, so it does
+             // not trip reserve's no-copy check.
+             if (newsize > reserved) {
+                 reserve(newsize);
+             }
+
+             // default construct
+             while (count < newsize) {
+                 ::new(values + count) T();
+                 ++count;
+             }
+         } else {
+             while (count > newsize) {
+                 --count;
+                 values[count].~T();
+             }
+         }
+     }
+     void reserve(uint32 newsize) {
+         if (count && newsize) {
+             OINKY_ASSERT(false);
+             throw_error(invalid_argument());
+         }
+         if (newsize <= reserved) {
+             return;
+         }
+         OINKY_ASSERT(count == 0);
+
+         // NOTE: We don't actually care if we've already reserved.
+         // The point is that we can't copy-construct, which only requires
+         // that count == 0 || newsize == 0.
+         //OINKY_ASSERT(reserved == 0);
+         //OINKY_ASSERT(!values);
+         if (values) {
+             alloc->free(values);
+             values = NULL;
+             reserved = 0;
+             count = 0;
+         }
+         if (newsize) {
+             // alloc. We don't construct until resize.
+             values = (T *) alloc->malloc(sizeof(T) * newsize);
+             reserved = newsize;
+         }
+     }
+
+     typedef T *iterator;
+     typedef const T *const_iterator;
+     const T *begin() const { return values; }
+     const T *end() const { return values + count; }
+     T *begin() { return values; }
+     T *end() { return values + count; }
+
+     T &operator[](int x) { return begin()[x]; }
+     const T &operator[](int x) const { return begin()[x]; }
+ };
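
A short sketch (mine, hypothetical) of the intended fixed-vector lifecycle: reserve once while empty, then resize freely within that reservation:

    nky_allocator_t pool;
    db_fixed_vector<uint32> fv(&pool);  // empty, no reservation yet
    fv.reserve(16);                     // one allocation; legal while empty
    fv.resize(10);                      // default-constructs 10 slots
    fv.resize(3);                       // destroys 7 slots; buffer unchanged
    fv.resize(16);                      // grows again within the reservation
    // fv.resize(32) would throw invalid_argument: the buffer can never move,
    // because T's copy constructor is deliberately never instantiated.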
+
+
+ } //namespace Utils
+ } //namespace Oinky
+