oinky 0.1.0

Files changed (41)
  1. data/LICENSE +22 -0
  2. data/README.md +141 -0
  3. data/ext/extconf.rb +79 -0
  4. data/ext/include/oinky.h +424 -0
  5. data/ext/include/oinky.hpp +63 -0
  6. data/ext/include/oinky/nky_base.hpp +1116 -0
  7. data/ext/include/oinky/nky_core.hpp +1603 -0
  8. data/ext/include/oinky/nky_cursor.hpp +665 -0
  9. data/ext/include/oinky/nky_dialect.hpp +107 -0
  10. data/ext/include/oinky/nky_error.hpp +164 -0
  11. data/ext/include/oinky/nky_fixed_table.hpp +710 -0
  12. data/ext/include/oinky/nky_handle.hpp +334 -0
  13. data/ext/include/oinky/nky_index.hpp +1038 -0
  14. data/ext/include/oinky/nky_log.hpp +15 -0
  15. data/ext/include/oinky/nky_merge_itr.hpp +403 -0
  16. data/ext/include/oinky/nky_model.hpp +110 -0
  17. data/ext/include/oinky/nky_pool.hpp +760 -0
  18. data/ext/include/oinky/nky_public.hpp +808 -0
  19. data/ext/include/oinky/nky_serializer.hpp +1625 -0
  20. data/ext/include/oinky/nky_strtable.hpp +504 -0
  21. data/ext/include/oinky/nky_table.hpp +1996 -0
  22. data/ext/nky_lib.cpp +390 -0
  23. data/ext/nky_lib_core.hpp +212 -0
  24. data/ext/nky_lib_index.cpp +158 -0
  25. data/ext/nky_lib_table.cpp +224 -0
  26. data/lib/oinky.rb +1284 -0
  27. data/lib/oinky/compiler.rb +106 -0
  28. data/lib/oinky/cpp_emitter.rb +311 -0
  29. data/lib/oinky/dsl.rb +167 -0
  30. data/lib/oinky/error.rb +19 -0
  31. data/lib/oinky/modelbase.rb +12 -0
  32. data/lib/oinky/nbuffer.rb +152 -0
  33. data/lib/oinky/normalize.rb +132 -0
  34. data/lib/oinky/oc_builder.rb +44 -0
  35. data/lib/oinky/query.rb +193 -0
  36. data/lib/oinky/rb_emitter.rb +147 -0
  37. data/lib/oinky/shard.rb +40 -0
  38. data/lib/oinky/testsup.rb +104 -0
  39. data/lib/oinky/version.rb +9 -0
  40. data/oinky.gemspec +36 -0
  41. metadata +120 -0
data/ext/include/oinky/nky_core.hpp
@@ -0,0 +1,1603 @@
1
+ // This source is distributed under the terms of the MIT License. Refer
2
+ // to the 'LICENSE' file for details.
3
+ //
4
+ // Copyright (c) Jacob Lacouture, 2012
5
+
6
+ namespace Oinky
7
+ {
8
+ namespace Internal
9
+ {
10
+ using namespace Oinky::Errors;
11
+ using namespace Oinky::Utils;
12
+
13
+
14
+ // This stores the rows in an arbitrary (but consistent) order. They are
15
+ // ordered by sequence number (rowkey for fixed rows).
16
+ typedef boost::intrusive::set_base_hook<> row_tree_hook;
17
+
18
+ typedef boost::intrusive::rbtree_node_traits<void *,false> row_index_hook_traits;
19
+ typedef row_index_hook_traits::node row_index_hook;
20
+ typedef boost::intrusive::rbtree_algorithms<row_index_hook_traits> treealg;
21
+
22
+ struct prow_shape_t
23
+ {
24
+ // Number of bytes allocated for column data.
25
+ uint32 column_bytes;
26
+
27
+ // This is the number of index entries included in the pending_row
28
+ // allocation. This should be >= the number of active indexes.
29
+ // The entire allocation need not be used, and the allocation usage
30
+ // need not be dense. If an index is deleted from the table,
31
+ // it may leave a hole.
32
+ index_idx_t idx_count;
33
+
34
+ bool contains(const prow_shape_t &other) const {
35
+ return (column_bytes >= other.column_bytes) && (idx_count >= other.idx_count);
36
+ }
37
+
38
+ static prow_shape_t compute_reserve_shape(prow_shape_t min) {
39
+ prow_shape_t newshape;
40
+ newshape.idx_count = compute_reserve_size(min.idx_count);
41
+ newshape.column_bytes = (compute_reserve_size(min.column_bytes) + 7) & ~7;
42
+ return newshape;
43
+ }
44
+
45
+ // When reserving arrays, we over-alloc by some margin. Given a minimum
46
+ // size, this computes the actual size we allocate.
47
+ template<typename T>
48
+ static inline T compute_reserve_size(T min) {
49
+ uint32 k = ((min * 5) + 7) >> 2;
50
+ // Ensure the cast doesn't cause us to return something too small.
51
+ T v = (T) k;
52
+ if (v < min) {
53
+ return min;
54
+ } else {
55
+ return v;
56
+ }
57
+ }
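+ // Worked example (illustrative): compute_reserve_size(10) yields
+ // ((10 * 5) + 7) >> 2 == 14, i.e. roughly 1.25x the requested minimum
+ // plus rounding. compute_reserve_shape additionally rounds column_bytes
+ // up to a multiple of 8.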
58
+ };
59
+
60
+ // The row itself is kept in an intrusive tree. The rows are not sorted in their
61
+ // primary index.
62
+ template<typename TABLE_CTX>
63
+ class pending_row : public row_tree_hook, boost::noncopyable
64
+ {
65
+ public:
66
+ typedef TABLE_CTX table_ctx;
67
+ typedef index_ctx_t<table_ctx> index_ctx;
68
+ typedef pending_row<table_ctx> pending_row_t;
69
+
70
+ table_ctx *table() const { return tbl; }
71
+ uint32 crs_ref_count() const { return cursor_ref_count; }
72
+ bool is_inactive() const { return pending_destruction || ls.is_deleted(); }
73
+
74
+ // insert/delete markers for this row.
75
+ //
76
+ // There is no such thing as update. update ==> delete;insert
77
+ ls_marker ls;
78
+ private:
79
+ // Parent table.
80
+ table_ctx *tbl;
81
+
82
+ // The row is partitioned into several arrays. This keeps track of
83
+ // how large each array is, and where each begins.
84
+ prow_shape_t shape;
85
+
86
+ // How many cursors refer to this row?
87
+ // This is more or less the only reference counting we do anywhere.
88
+ uint32 cursor_ref_count : 31;
89
+ // Did we intend to destroy this object? If the cursor_ref_count
90
+ // was nonzero, we might have had to delay destruction, in which
91
+ // case we'll do it when the cursor_ref_count reaches zero.
92
+ // This never gets unset once it's set.
93
+ uint32 pending_destruction : 1;
94
+
95
+ public:
96
+ // This is set during serialization. It is the rowkey that the row will
97
+ // be assigned in the newly serialized table. It would be nice to combine
98
+ // this with the sequence_number below, but that's more difficult than
99
+ // it seems.
100
+ row_idx_t new_rowkey;
101
+
102
+ // This is the sequence number of insertion into the table. It is the
103
+ // secondary sort key for all indices, after the index values themselves.
104
+ // Even if the row gets updated or grown, the sequence number will remain
105
+ // the same, allowing the cursor position to behave consistently in
106
+ // non-unique indices. Some mainstream DBs permit cursoring over unique
107
+ // indices only, but that's a bit obnoxious. This sequence number is
108
+ // unique to every row insertion for a particular table, over the lifetime
109
+ // of that table. (It could probably be a uint32.) We never serialize it,
110
+ // and it restarts at zero with each mount.
111
+ //
112
+ // It need not increase monotonically, but it does need to be unique over the table.
113
+ uint64 sequence_number;
114
+
115
+ // The live tree is ordered by sequence number.
116
+ struct live_tree_compare
117
+ {
118
+ bool operator()(uint64 lseq, const pending_row_t &right) const {
119
+ return right.sequence_number < lseq;
120
+ }
121
+ bool operator()(const pending_row_t &left, uint64 rseq) const {
122
+ return left.sequence_number < rseq;
123
+ }
124
+ bool operator()(const pending_row_t &left, const pending_row_t &right) const {
125
+ return left.sequence_number < right.sequence_number;
126
+ }
127
+ };
128
+ typedef boost::intrusive::rbtree<pending_row_t,
129
+ boost::intrusive::compare<live_tree_compare> > row_tree_t;
130
+
131
+ // An interlock is not necessary, since we are still strictly non-reentrant,
132
+ // even in reference counting. Dereference is never implicit.
133
+ void cursor_reference() {
134
+ // If it's pending destruction or deleted, the only way we can be adding
135
+ // a reference is by cloning a cursor.
136
+ OINKY_ASSERT(!pending_destruction || cursor_ref_count);
137
+ OINKY_ASSERT(!ls.is_deleted() || cursor_ref_count);
138
+ cursor_ref_count += 1;
139
+ }
140
+ void cursor_dereference() {
141
+ OINKY_ASSERT(cursor_ref_count);
142
+ cursor_ref_count -= 1;
143
+ if (!cursor_ref_count && pending_destruction) {
144
+ do_delete();
145
+ }
146
+ }
147
+
148
+ inline const prow_shape_t &get_shape() const { return shape; }
149
+
150
+ public:
151
+ // These are the table column_values. The index values are not
152
+ // stored redundantly. The index contexts use the index definition to
153
+ // peek back into these source values.
154
+ char *column_data() {
155
+ return (char *) hooks_end();
156
+ }
157
+ const char *column_data() const {
158
+ return (const char *) hooks_end();
159
+ }
160
+ const char *column_data_end() const {
161
+ return column_data() + shape.column_bytes;
162
+ }
163
+
164
+ // The index hooks are stored immediately after the pending_row
165
+ // structure itself. This is the only way we can backtrack to get
166
+ // from the index hook to the row pointer. The column values are stored
167
+ // after the index hooks.
168
+ row_index_hook *hooks_begin() {
169
+ return (row_index_hook *) (this + 1);
170
+ }
171
+ row_index_hook *hooks_end() {
172
+ return hooks_begin() + shape.idx_count;
173
+ }
174
+ const row_index_hook *hooks_begin() const {
175
+ return (row_index_hook *) (this + 1);
176
+ }
177
+ const row_index_hook *hooks_end() const {
178
+ return hooks_begin() + shape.idx_count;
179
+ }
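+
+ // Illustrative layout sketch of a single pending_row allocation, as implied
+ // by comp_size() and the accessors above:
+ //
+ //   [ pending_row_t ][ row_index_hook x idx_count ][ column_bytes bytes ]
+ //   ^this             ^hooks_begin()                ^column_data()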
180
+
181
+ // Convert an index entry to a row object.
182
+ static const pending_row_t *from_index_hook(const row_index_hook *hook, const index_ctx *idx) {
183
+ OINKY_ASSERT(hook && (hook != &idx->head));
184
+ index_idx_t index_idx = idx->index_idx;
185
+ const pending_row_t *row = (const pending_row_t *)(hook - index_idx) - 1;
186
+ OINKY_ASSERT(idx->table == row->tbl);
187
+ OINKY_ASSERT(index_idx < row->shape.idx_count);
188
+ return row;
189
+ }
190
+
191
+ static pending_row_t *from_index_hook(row_index_hook *hook, const index_ctx *idx) {
192
+ OINKY_ASSERT(hook && (hook != &idx->head));
193
+ index_idx_t index_idx = idx->index_idx;
194
+ pending_row_t *row = (pending_row_t *)(hook - index_idx) - 1;
195
+ OINKY_ASSERT(idx->table == row->tbl);
196
+ OINKY_ASSERT(index_idx < row->shape.idx_count);
197
+ return row;
198
+ }
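+
+ // Illustrative example of the pointer arithmetic above: the hooks array
+ // starts at H = (row_index_hook *)(row + 1), so the hook for index i is
+ // H + i, and (pending_row_t *)(hook - i) - 1 recovers the row pointer.
+ // This only works because the hooks live directly after the pending_row_t
+ // in the same allocation.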
199
+
200
+ row_index_hook *get_hook(index_idx_t index_idx) {
201
+ OINKY_ASSERT(index_idx < shape.idx_count);
202
+ return hooks_begin() + index_idx;
203
+ }
204
+
205
+ static size_t comp_size(const prow_shape_t &rs) {
206
+ return
207
+ sizeof(pending_row_t) +
208
+ rs.column_bytes +
209
+ (rs.idx_count * sizeof(row_index_hook));
210
+ }
211
+
212
+ private:
213
+ // The DB handle tells us which pool to use. This gives better cache
214
+ // locality as well as a lock-free pool implementation.
215
+ //
216
+ // I've never overloaded 'operator new' before, and I'm pretty
217
+ // sure I don't want to. I just make the constructor
218
+ // private and create this static factory.
219
+ //
220
+ pending_row(table_ctx *_tbl, const prow_shape_t &_shape) :
221
+ tbl(_tbl),
222
+ shape(_shape),
223
+ cursor_ref_count(0),
224
+ pending_destruction(0)
225
+ {}
226
+ ~pending_row() {
227
+ OINKY_ASSERT(!cursor_ref_count);
228
+ }
229
+
230
+ public:
231
+ static pending_row_t *make_new(table_ctx *tbl) {
232
+ prow_shape_t shape = tbl->get_minimum_prow_shape();
233
+ return make_new(tbl, shape);
234
+ }
235
+
236
+ static pending_row_t *make_new(table_ctx *tbl, const prow_shape_t &_shape) {
237
+ pending_row_t *row = (pending_row_t *)
238
+ tbl->db->allocator.malloc(pending_row_t::comp_size(_shape));
239
+
240
+ // Invoke the placement constructor.
241
+ ::new(row) pending_row_t(
242
+ tbl,
243
+ _shape);
244
+ return row;
245
+ }
246
+
247
+ void try_delete()
248
+ {
249
+ if (cursor_ref_count) {
250
+ pending_destruction = 1;
251
+ } else {
252
+ do_delete();
253
+ }
254
+ }
255
+
256
+ private:
257
+ void do_delete()
258
+ {
259
+ table_ctx *table = tbl;
260
+ // destroy
261
+ this->~pending_row();
262
+ // free
263
+ table->db->allocator.free(this);
264
+ }
265
+
266
+ void operator delete(void *_row) {
267
+ // Should not be calling this.
268
+ OINKY_ASSERT(false);
269
+ }
270
+
271
+ public:
272
+ // If an index is added subsequent to the creation of this row, we may
273
+ // need to expand this row's allocation to fit the new index entry.
274
+ // This method does that check, and if necessary, allocates a new object
275
+ // to replace this one, preserving all existing indexes.
276
+ //
277
+ // NOTE: This routine can delete this! If it does, it returns its
278
+ // replacement.
279
+ pending_row_t *cond_grow_self(const prow_shape_t &_minshape, const prow_shape_t &_newshape)
280
+ {
281
+ // Why grow a dead entry???
282
+ // Also we depend on the fact that we're on the live list below.
283
+ OINKY_ASSERT( ls.delete_sp == 0 );
284
+ OINKY_ASSERT(!pending_destruction);
285
+
286
+ // Logic.
287
+ OINKY_ASSERT( _newshape.contains(_minshape) );
288
+ OINKY_ASSERT( _newshape.column_bytes >= tbl->prow_byte_limit );
289
+ OINKY_ASSERT( (uint32) _newshape.idx_count >= tbl->indexes_by_position.limit() );
290
+
291
+ // _newshape is what we allocate, but specified mins are what we need.
292
+ if (shape.contains(_minshape)) {
293
+ return this;
294
+ }
295
+
296
+ pending_row_t *nr = make_new(tbl, _newshape);
297
+ nr->ls = ls;
298
+
299
+ // Copy the sequence number.
300
+ nr->sequence_number = sequence_number;
301
+
302
+ // Copy all the column data.
303
+ memcpy(nr->column_data(), column_data(), MIN(shape.column_bytes, _newshape.column_bytes));
304
+
305
+ // Replace every index entry with the new node, fully unlinking this node
306
+ // in the process. Make sure we only replace the index nodes that are
307
+ // actually valid.
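+ // (replace_node relinks the replacement node in place without rebalancing;
+ // that is safe here because the new row carries the same column data and
+ // sequence number, so its position in every index is unchanged.)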
308
+ typedef typename table_ctx::index_positional_itr_t tbl_idx_itr_t;
309
+ tbl_idx_itr_t idx_end = tbl->indexes_by_position.end();
310
+ for (tbl_idx_itr_t i = tbl->indexes_by_position.begin(); i != idx_end; ++i) {
311
+ // Not all index trees are valid. Unused ones are uninitialized.
312
+ if (*i) {
313
+ index_idx_t index_idx = (*i)->index_idx;
314
+ treealg::replace_node(get_hook(index_idx), &(*i)->head, nr->get_hook(index_idx));
315
+ }
316
+ }
317
+ // Now swap the primary list entry.
318
+ typename row_tree_t::iterator i = row_tree_t::s_iterator_to(*this);
319
+ tbl->live_pending.insert_equal(i, *nr);
320
+ tbl->live_pending.erase(i);
321
+ // Delete the old and return the new.
322
+ try_delete();
323
+ return nr;
324
+ }
325
+
326
+ template<template<typename T> class CB, typename CB_CC>
327
+ bool dispatch_value(const column_ctx &cc, CB_CC *ctx) const {
328
+ const char *data = column_data() + cc.prow_byte_offset;
329
+ switch (cc.ctype) {
330
+ case column_types::Bit : {
331
+ uint8 mask = 1 << cc.prow_bit_offset;
332
+ bool v = (*data & mask) != 0;
333
+ return CB<bool>::call(cc, v,ctx);
334
+ }
335
+ //int
336
+ case column_types::Int8 :
337
+ return CB<int8>::call(cc, *(const int8 *)data,ctx);
338
+ case column_types::Int16 :
339
+ return CB<int16>::call(cc, *(const int16 *)data,ctx);
340
+ case column_types::Int32 :
341
+ return CB<int32>::call(cc, *(const int32 *)data,ctx);
342
+ case column_types::Int64 :
343
+ return CB<int64>::call(cc, *(const int64 *)data,ctx);
344
+ //uint
345
+ case column_types::Uint8 :
346
+ return CB<uint8>::call(cc, *(const uint8 *)data,ctx);
347
+ case column_types::Uint16 :
348
+ return CB<uint16>::call(cc, *(const uint16 *)data,ctx);
349
+ case column_types::Uint32 :
350
+ return CB<uint32>::call(cc, *(const uint32 *)data,ctx);
351
+ case column_types::Uint64 :
352
+ return CB<uint64>::call(cc, *(const uint64 *)data,ctx);
353
+ //float
354
+ case column_types::Float32 :
355
+ return CB<float32>::call(cc, *(const float32 *)data,ctx);
356
+ case column_types::Float64 :
357
+ return CB<float64>::call(cc, *(const float64 *)data,ctx);
358
+
359
+ case column_types::Datetime :
360
+ return CB<datetime_t>::call(cc, *(const datetime_t *)data,ctx);
361
+ // Variants and strings are stored as safe_cv_t values.
362
+ case column_types::String :
363
+ case column_types::Variant :
364
+ return CB<safe_cv_t>::call(cc, *(const safe_cv_t *)data,ctx);
365
+ }
366
+ throw_error(implementation_bug());
367
+ }
368
+
369
+ safe_cv_t get_value(const column_ctx &ctx) const {
370
+ safe_cv_t cv;
371
+ dispatch_value<get_value_cb>(ctx, &cv);
372
+ return cv;
373
+ }
374
+
375
+ template<typename T>
376
+ struct get_value_cb {
377
+ static inline bool call(const column_ctx &ctx, const T &x, safe_cv_t *val) {
378
+ *val = x;
379
+ return true;
380
+ }
381
+ };
382
+
383
+ private:
384
+ template<typename T>
385
+ inline void priv_set_value(const column_ctx &ctx, const T &v) {
386
+ char *data = column_data() + ctx.prow_byte_offset;
387
+ *(T*)data = v;
388
+ }
389
+ public:
390
+ // These avoid the extra switch if we get direct dispatch from the
391
+ // fixed enumerator, for example.
392
+ //int
393
+ void set_value(const column_ctx &cc, int8 val) {
394
+ OINKY_ASSERT(cc.ctype == column_types::Int8);
395
+ priv_set_value(cc,val);
396
+ }
397
+ void set_value(const column_ctx &cc, int16 val) {
398
+ OINKY_ASSERT(cc.ctype == column_types::Int16);
399
+ priv_set_value(cc,val);
400
+ }
401
+ void set_value(const column_ctx &cc, int32 val) {
402
+ OINKY_ASSERT(cc.ctype == column_types::Int32);
403
+ priv_set_value(cc,val);
404
+ }
405
+ void set_value(const column_ctx &cc, int64 val) {
406
+ OINKY_ASSERT(cc.ctype == column_types::Int64);
407
+ priv_set_value(cc,val);
408
+ }
409
+ //uint
410
+ void set_value(const column_ctx &cc, uint8 val) {
411
+ OINKY_ASSERT(cc.ctype == column_types::Uint8);
412
+ priv_set_value(cc,val);
413
+ }
414
+ void set_value(const column_ctx &cc, uint16 val) {
415
+ OINKY_ASSERT(cc.ctype == column_types::Uint16);
416
+ priv_set_value(cc,val);
417
+ }
418
+ void set_value(const column_ctx &cc, uint32 val) {
419
+ OINKY_ASSERT(cc.ctype == column_types::Uint32);
420
+ priv_set_value(cc,val);
421
+ }
422
+ void set_value(const column_ctx &cc, uint64 val) {
423
+ OINKY_ASSERT(cc.ctype == column_types::Uint64);
424
+ priv_set_value(cc,val);
425
+ }
426
+ //float
427
+ void set_value(const column_ctx &cc, float32 val) {
428
+ OINKY_ASSERT(cc.ctype == column_types::Float32);
429
+ priv_set_value(cc,val);
430
+ }
431
+ void set_value(const column_ctx &cc, float64 val) {
432
+ OINKY_ASSERT(cc.ctype == column_types::Float64);
433
+ priv_set_value(cc,val);
434
+ }
435
+
436
+ void set_value(const column_ctx &cc, datetime_t val) {
437
+ OINKY_ASSERT(cc.ctype == column_types::Datetime);
438
+ priv_set_value(cc,val);
439
+ }
440
+ void set_value(const column_ctx &cc, bool v) {
441
+ OINKY_ASSERT(cc.ctype == column_types::Bit);
442
+ char *data = column_data() + cc.prow_byte_offset;
443
+ uint8 mask = 1 << cc.prow_bit_offset;
444
+ if (v) {
445
+ *data |= mask;
446
+ } else {
447
+ *data &= ~mask;
448
+ }
449
+ }
450
+
451
+ void set_value(const column_ctx &cc, const safe_cv_t &val) {
452
+ const char *data = column_data() + cc.prow_byte_offset;
453
+ switch (cc.ctype) {
454
+ case column_types::Bit :
455
+ set_value(cc, val.bit_value());
456
+ return;
457
+ //int
458
+ case column_types::Int8 :
459
+ priv_set_value<int8>(cc, val.int_value());
460
+ return;
461
+ case column_types::Int16 :
462
+ priv_set_value<int16>(cc, val.int_value());
463
+ return;
464
+ case column_types::Int32 :
465
+ priv_set_value<int32>(cc, val.int_value());
466
+ return;
467
+ case column_types::Int64 :
468
+ priv_set_value<int64>(cc, val.int_value());
469
+ return;
470
+ //uint
471
+ case column_types::Uint8 :
472
+ priv_set_value<uint8>(cc, val.uint_value());
473
+ return;
474
+ case column_types::Uint16 :
475
+ priv_set_value<uint16>(cc, val.uint_value());
476
+ return;
477
+ case column_types::Uint32 :
478
+ priv_set_value<uint32>(cc, val.uint_value());
479
+ return;
480
+ case column_types::Uint64 :
481
+ priv_set_value<uint64>(cc, val.uint_value());
482
+ return;
483
+ //float
484
+ case column_types::Float32 :
485
+ priv_set_value<float32>(cc, val.f32_value());
486
+ return;
487
+ case column_types::Float64 :
488
+ priv_set_value<float64>(cc, val.f64_value());
489
+ return;
490
+
491
+ case column_types::Datetime :
492
+ priv_set_value<datetime_t>(cc, val.dt_value());
493
+ return;
494
+ // Variants and strings are stored as safe_cv_t values.
495
+ case column_types::String :
496
+ case column_types::Variant :
497
+ priv_set_value<safe_cv_t>(cc, val);
498
+ return;
499
+ }
500
+ throw_error(implementation_bug());
501
+ }
502
+
503
+ template<template<typename T> class CB, typename CTX>
504
+ void each_raw_value(const column_ctx *const* cols_begin, const column_ctx *const* cols_end, CTX *ctx) const {
505
+ for (const column_ctx *const* i = cols_begin; i != cols_end; ++i) {
506
+ const column_ctx &col(**i);
507
+ if (!dispatch_value<CB>(col, ctx)) {
508
+ return;
509
+ }
510
+ }
511
+ }
512
+
513
+ template<template<typename T> class CB, typename SELECTOR, typename CTX>
514
+ void each_raw_value(const SELECTOR &_selector, CTX *ctx) const {
515
+ _selector.check_valid(tbl);
516
+ column_selector_accessor<SELECTOR> selector(_selector);
517
+ uint32 l = selector.colcount();
518
+ const column_ctx *const * cols_begin = selector.colrefs();
519
+ const column_ctx *const * cols_end = cols_begin + l;
520
+ each_raw_value<CB>(cols_begin, cols_end, ctx);
521
+ }
522
+
523
+ template<typename FN>
524
+ struct variant_cb {
525
+ template<typename T>
526
+ struct fn {
527
+ inline static bool call(const column_ctx &ctx, T value, const FN *fn) {
528
+ safe_cv_t v(value);
529
+ return (*fn)(ctx, v);
530
+ }
531
+ };
532
+ };
533
+
534
+ // Parameters are a column definition and a value
535
+ template<typename SELECTOR, typename FN>
536
+ void each_column_value(const SELECTOR &_selector, FN fn) const {
537
+ // This does nothing at runtime. But it does verify the FN signature,
538
+ // and if in error, it generates an error with fewer levels of
539
+ // template indirection than the next line.
540
+ check_function_concept<bool(const column_ctx &, const safe_cv_t &cv)>(fn);
541
+
542
+ // Map the raw callback values to safe_cv_t values before passing to
543
+ // the callback.
544
+ each_raw_value<variant_cb<FN>::template fn>(_selector, &fn);
545
+ }
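+
+ // Hypothetical usage sketch (placeholder names, not part of this file):
+ // enumerate a row's values with a functor matching
+ // bool(const column_ctx &, const safe_cv_t &).
+ //
+ //   struct print_cols {
+ //       bool operator()(const column_ctx &cc, const safe_cv_t &v) const {
+ //           /* inspect cc and v here */
+ //           return true; // keep enumerating; returning false stops early
+ //       }
+ //   };
+ //   row->each_column_value(some_column_selector, print_cols());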
546
+ };
547
+
548
+ //
549
+ // The default traits just assume that the context can be cast to
550
+ // context_base.
551
+ //
552
+ template<typename CONTEXT_T, typename INDEX_T, typename ALLOCATOR_T>
553
+ struct default_ctx_map_traits
554
+ {
555
+ typedef CONTEXT_T context_t;
556
+ typedef INDEX_T index_t;
557
+ typedef ALLOCATOR_T allocator_t;
558
+
559
+ // These are template methods so that they can be individually overriden,
560
+ // and nonsense implementations will be ignored until they are instantiated.
561
+ template<typename CTX_T>
562
+ static const mstring_safe &key_from_ctx(const CTX_T *ctx) {
563
+ return ctx->name;
564
+ }
565
+ template<typename CTX_T>
566
+ static ls_marker &ls_from_ctx(CTX_T *ctx) {
567
+ return ctx->ls;
568
+ }
569
+ template<typename CTX_T>
570
+ static const ls_marker &ls_from_ctx(const CTX_T *ctx) {
571
+ return ctx->ls;
572
+ }
573
+ template<typename CTX_T>
574
+ static void on_deactivation(CTX_T *ctx) {
575
+ ctx->on_deactivation();
576
+ }
577
+ };
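+
+ // A hypothetical override (placeholder names, not part of this file) can
+ // derive from the defaults and replace a single hook:
+ //
+ //   struct my_traits : default_ctx_map_traits<my_ctx, uint16, my_allocator>
+ //   {
+ //       template<typename CTX_T>
+ //       static const mstring_safe &key_from_ctx(const CTX_T *ctx) {
+ //           return ctx->alt_name;
+ //       }
+ //   };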
578
+
579
+ template<typename TRAITS_T>
580
+ class context_map_t
581
+ {
582
+ typedef context_map_t<TRAITS_T> context_map;
583
+
584
+ typedef typename TRAITS_T::allocator_t allocator_t;
585
+ typedef typename TRAITS_T::index_t index_t;
586
+ typedef typename TRAITS_T::context_t context_t;
587
+
588
+ meta_stringtable_t *stringtable;
589
+ db_fixed_vector<context_t> f_contexts;
590
+ uint32 _live_count;
591
+
592
+ context_t *fbegin() { return f_contexts.begin(); }
593
+ context_t *fend() { return f_contexts.end(); }
594
+ const context_t *fbegin() const { return f_contexts.begin(); }
595
+ const context_t *fend() const { return f_contexts.end(); }
596
+
597
+ //
598
+ // NOTE:
599
+ // We use the global heap for the context hosts. It's not a big deal.
600
+ // They only get allocated when the schema's being modified anyway.
601
+ // This is more expediency than any good reason.
602
+ //
603
+ // Note also that there is no indirection in the vector. The contexts
604
+ // are allocated contiguously. This can't be done with the dynamic
605
+ // object because the contexts may be noncopyable.
606
+
607
+ // Define an intrusive set, keyed by context name, using the base hook.
608
+
609
+ typedef boost::intrusive::avl_set_base_hook<> tree_hook;
610
+ struct context_host : tree_hook
611
+ {
612
+ context_t ctx;
613
+ int in_live : 1;
614
+ int in_deleted : 1;
615
+
616
+ const mstring_safe &key() const {
617
+ return TRAITS_T::key_from_ctx(&ctx);
618
+ }
619
+ ls_marker &ls() {
620
+ return TRAITS_T::ls_from_ctx(&ctx);
621
+ }
622
+ const ls_marker &ls() const {
623
+ return TRAITS_T::ls_from_ctx(&ctx);
624
+ }
625
+
626
+ context_host() :
627
+ in_live(0),
628
+ in_deleted(0)
629
+ {}
630
+ };
631
+
632
+ struct host_compare_live
633
+ {
634
+ template<typename KEY>
635
+ bool operator()(const KEY &lkey, const context_host &right) const
636
+ {
637
+ return right.key().compare_to(lkey) > 0; // i.e. lkey < right.key()
638
+ }
639
+ template<typename KEY>
640
+ bool operator()(const context_host &left, const KEY &rkey) const
641
+ {
642
+ OINKY_ASSERT(!left.ls().is_deleted());
643
+ OINKY_ASSERT(left.in_live);
644
+ return left.key().compare_to(rkey) < 0;
645
+ }
646
+ bool operator()(const context_host &left, const context_host &right) const
647
+ {
648
+ OINKY_ASSERT(right.in_live && left.in_live);
649
+ OINKY_ASSERT(!right.in_deleted && !left.in_deleted);
650
+ return left.key().compare_to(right.key()) < 0;
651
+ }
652
+ };
653
+
654
+ struct host_compare_deleted
655
+ {
656
+ bool operator()(const context_host &left, const context_host &right) const
657
+ {
658
+ OINKY_ASSERT(left.ls().is_deleted());
659
+ OINKY_ASSERT(right.ls().is_deleted());
660
+ OINKY_ASSERT(!right.in_live && !left.in_live);
661
+ OINKY_ASSERT(right.in_deleted && left.in_deleted);
662
+ int k = left.key().compare_to(right.key());
663
+ if (k) return (k < 0);
664
+ // We compare the insertion SP, not the delete SP. The
665
+ // insertion SP should never match, because an intervening
666
+ // delete would have been required, which would also have
667
+ // the same SP as the inserts, which should have caused
668
+ // a hard delete of the first.
669
+ OINKY_ASSERT(left.ls().insert_sp != right.ls().insert_sp);
670
+ return left.ls().insert_sp < right.ls().insert_sp;
671
+ }
672
+ };
673
+
674
+ // The live contexts.
675
+ typedef boost::intrusive::avl_set<
676
+ context_host,
677
+ boost::intrusive::compare<host_compare_live> > live_tree_t;
678
+ typedef typename live_tree_t::iterator lt_itr_t;
679
+ typedef typename live_tree_t::const_iterator lt_citr_t;
680
+
681
+ // The deleted contexts. (Preserved for savepoint rollback.)
682
+ typedef boost::intrusive::avl_set<
683
+ context_host,
684
+ boost::intrusive::compare<host_compare_deleted> > deleted_tree_t;
685
+ typedef typename deleted_tree_t::iterator dt_itr_t;
686
+ typedef typename deleted_tree_t::const_iterator dt_citr_t;
687
+
688
+ allocator_t *allocator;
689
+ live_tree_t live;
690
+ deleted_tree_t deleted;
691
+
692
+ uint32 computed_packed_header_bytes;
693
+
694
+ public:
695
+ context_map_t(allocator_t *_allocator, meta_stringtable_t *_stringtable) :
696
+ stringtable(_stringtable),
697
+ f_contexts(_allocator),
698
+ allocator(_allocator),
699
+ _live_count(0)
700
+ {}
701
+
702
+ ~context_map_t() {
703
+ destroy_dynamic();
704
+ }
705
+
706
+ private:
707
+ static void destroy_item(context_host *h) {
708
+ delete h;
709
+ }
710
+
711
+ void destroy_dynamic() {
712
+ live.clear_and_dispose(&destroy_item);
713
+ deleted.clear_and_dispose(&destroy_item);
714
+ }
715
+ public:
716
+ struct get_name_ref
717
+ {
718
+ typedef m_strtable_ref result_type;
719
+ m_strtable_ref operator()(const context_t *ref) const { return key(ref).ref; }
720
+ };
721
+ typedef boost::transform_iterator<get_name_ref,context_t *> fixed_name_ref_itr;
722
+
723
+ // This is the fastest way of finding a specific table.
724
+ context_t *find(const db_string &name)
725
+ {
726
+ // Search the dynamic table.
727
+ lt_itr_t i = live.find(name);
728
+ if (i != live.end()) {
729
+ // This should have been moved to the deleted list if it
730
+ // was deleted.
731
+ OINKY_ASSERT(!i->ls().is_deleted());
732
+ return &i->ctx;
733
+ }
734
+ // Before we can search the fixed table, must convert to a ref.
735
+ m_strtable_ref mref;
736
+ typename meta_stringtable_t::iterator smi = stringtable->find(name);
737
+ if (smi == stringtable->end()) {
738
+ // Not in the string table, which means an object with that name
739
+ // can't exist in serialized form yet.
740
+ return NULL;
741
+ }
742
+ mref = *smi;
743
+ // Find the ref in the fixed table.
744
+ fixed_name_ref_itr fni = std::find(fixed_name_ref_itr(fbegin()), fixed_name_ref_itr(fend()), mref);
745
+ // Convert back to a context pointer.
746
+ context_t *k = fni.base();
747
+ if (k == fend()) {
748
+ // Not in the fixed set.
749
+ return NULL;
750
+ }
751
+ // It's in the fixed set, but we now have to check if it was deleted.
752
+ if (k->ls().is_deleted()) {
753
+ return NULL;
754
+ }
755
+ return k;
756
+ }
757
+ private:
758
+ context_host *host_from_ctx(context_t *ctx)
759
+ {
760
+ if ((ctx >= fbegin()) && (ctx < fend())) {
761
+ // Here we don't free the context explicitly.
762
+ return NULL;
763
+ } else {
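+ // Hand-rolled offsetof(context_host, ctx): compute the byte offset of
+ // the ctx member and step back from it to recover the enclosing host.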
764
+ int offset = ((char *) &((context_host *)(5))->ctx) - ((char *) 5);
765
+ context_host *host = (context_host *) (((char *)ctx) - offset);
766
+ return host;
767
+ }
768
+ }
769
+
770
+ // Move from the live to the deleted tree.
771
+ void delete_ctx(context_t *ctx)
772
+ {
773
+ OINKY_ASSERT(ls(ctx).is_deleted());
774
+ context_host *h = host_from_ctx(ctx);
775
+ OINKY_ASSERT(h);
776
+ if (h) {
777
+ delete h;
778
+ }
779
+ }
780
+
781
+ static const mstring_safe &key(const context_t *ctx) {
782
+ return TRAITS_T::key_from_ctx(ctx);
783
+ }
784
+ static const ls_marker &ls(const context_t *ctx) {
785
+ return TRAITS_T::ls_from_ctx(ctx);
786
+ }
787
+ static ls_marker &ls(context_t *ctx) {
788
+ return TRAITS_T::ls_from_ctx(ctx);
789
+ }
790
+
791
+ lt_itr_t insert_live(context_host *h)
792
+ {
793
+ OINKY_ASSERT(h->ls().insert_sp > 0);
794
+ OINKY_ASSERT(!h->ls().is_deleted());
795
+ OINKY_ASSERT(!h->in_live && !h->in_deleted);
796
+
797
+ h->in_live = 1;
798
+ std::pair<lt_itr_t, bool> r = live.insert(*h);
799
+
800
+ // A conflicting name is an exception, which at this level generally
801
+ // points to an internal implementation error.
802
+ if (!r.second) {
803
+ h->in_live = 0;
804
+ OINKY_ASSERT(false);
805
+ throw_error(object_exists());
806
+ }
807
+
808
+ return r.first;
809
+ }
810
+
811
+ static inline bool notmodified_cb(const context_t *ctx, sp_marker_t tsp) { return false; }
812
+ public:
813
+ template<typename CB>
814
+ bool modified_since(sp_marker_t tsp, CB cb) const
815
+ {
816
+ check_function_concept<bool(const context_t *ctx, sp_marker_t tsp)>(cb);
817
+
818
+ lt_citr_t lend = live.end();
819
+ for (lt_citr_t i = live.begin(); i != lend; ++i) {
820
+ if (i->ls().insert_sp > tsp) {
821
+ return true;
822
+ }
823
+ const context_host *h = &(*i);
824
+ if ((i->ls().delete_sp == 0) && cb(&h->ctx, tsp)) {
825
+ return true;
826
+ }
827
+ }
828
+
829
+ for (const context_t * i = fbegin(); i != fend(); ++i) {
830
+ OINKY_ASSERT(ls(i).insert_sp == 0);
831
+ if (ls(i).delete_sp > tsp) {
832
+ return true;
833
+ }
834
+ if ((ls(i).delete_sp == 0) && cb(i, tsp)) {
835
+ return true;
836
+ }
837
+ }
838
+ dt_citr_t dend = deleted.end();
839
+ for (dt_citr_t i = deleted.begin(); i != dend; ++i) {
840
+ const context_host *h = &(*i);
841
+ OINKY_ASSERT(h->ls().insert_sp > 0);
842
+ if (h->ls().delete_sp > tsp) {
843
+ return true;
844
+ }
845
+ }
846
+ return false;
847
+ }
848
+
849
+ bool modified_since(sp_marker_t tsp) const {
850
+ return modified_since(tsp, &notmodified_cb);
851
+ }
852
+
853
+ // roll back all drops/creates which have an SP marker higher than target_sp.
854
+ template<typename UNCREATE_CB, typename UNDROP_CB, typename UNCREATE_DROP_CB>
855
+ void sp_rollback(sp_marker_t target_sp, UNCREATE_CB uncreate_cb, UNDROP_CB undrop_cb, UNCREATE_DROP_CB uncreate_drop_cb)
856
+ {
857
+ // To avoid namespace collisions, we first do all the uncreates.
858
+ lt_itr_t lend = live.end();
859
+ for (lt_itr_t i = live.begin(); i != lend; ) {
860
+ if (i->ls().insert_sp > target_sp) {
861
+ context_host *h = &(*i);
862
+ _live_count -= 1;
863
+ uncreate_cb(&h->ctx, target_sp);
864
+ TRAITS_T::on_deactivation(&h->ctx);
865
+ lt_itr_t tmp = i;
866
+ ++i;
867
+ live.erase(tmp);
868
+ h->in_live = 0;
869
+ delete h;
870
+ } else {
871
+ ++i;
872
+ }
873
+ }
874
+ // Then we do the undrops. Here we have to be sure to check the
875
+ // fixed set as well, which we don't have to worry about un-creating.
876
+ for (context_t * i = fbegin(); i != fend(); ++i) {
877
+ OINKY_ASSERT(ls(i).insert_sp == 0);
878
+ if (ls(i).delete_sp > target_sp) {
879
+ ls(i).delete_sp = 0;
880
+ _live_count += 1;
881
+ undrop_cb(i, target_sp);
882
+ }
883
+ }
884
+ dt_itr_t dend = deleted.end();
885
+ for (dt_itr_t i = deleted.begin(); i != dend; ) {
886
+ context_host *h = &(*i);
887
+ OINKY_ASSERT(h->ls().insert_sp > 0);
888
+ if (h->ls().insert_sp > target_sp) {
889
+ // Here we can hard-delete if we advance beyond create
890
+ dt_itr_t tmp = i;
891
+ ++i;
892
+ deleted.erase(tmp);
893
+ h->in_deleted = 0;
894
+ // callback for rolling back both the create and the drop.
895
+ uncreate_drop_cb(&h->ctx, target_sp);
896
+ delete h;
897
+ } else if (h->ls().delete_sp > target_sp) {
898
+ // Otherwise, we are rolling back to between create
899
+ // and drop, which means we un-drop
900
+ dt_itr_t tmp = i;
901
+ ++i;
902
+ deleted.erase(tmp);
903
+ h->in_deleted = 0;
904
+ h->in_live = 1;
905
+ h->ls().delete_sp = 0;
906
+ live.insert(*h);
907
+ _live_count += 1;
908
+ undrop_cb(&h->ctx, target_sp);
909
+ } else {
910
+ ++i;
911
+ }
912
+ }
913
+ }
914
+
915
+ // Serialization and user-accesses always iterate in order, for different
916
+ // reasons. Context sequences are always ordered by name in serialization.
917
+ // User accesses are ordered for simplicity and consistency.
918
+ class by_name_t
919
+ {
920
+ // The left iterator is over the fixed contexts...skipping the deleted
921
+ // ones, and returning pointers to the context objects.
922
+ class fixed_nod_itr_t : public
923
+ boost::iterator_facade<
924
+ fixed_nod_itr_t,
925
+ context_t *,
926
+ boost::random_access_traversal_tag,
927
+ context_t *
928
+ >
929
+ {
930
+ typedef fixed_nod_itr_t this_t;
931
+
932
+ context_t *dereference() const { return ptr; }
933
+ bool equal(const this_t &other) const { return ptr == other.ptr; }
934
+ void increment() { ++ptr; }
935
+ void decrement() { --ptr; }
936
+ void advance(int x) { ptr += x; }
937
+ int distance_to(const this_t &other) const { return other.ptr - ptr; }
938
+
939
+ context_t *ptr;
940
+ friend class boost::iterator_core_access;
941
+ public:
942
+ fixed_nod_itr_t() : ptr(NULL) {}
943
+ fixed_nod_itr_t(context_t *_ptr) : ptr(_ptr) {}
944
+ };
945
+
946
+ struct skipdeleted {
947
+ bool operator()(const fixed_nod_itr_t &i) const {
948
+ return !ls(*i).is_deleted();
949
+ }
950
+ };
951
+
952
+ typedef boost::filter_iterator<skipdeleted, fixed_nod_itr_t> bn_fixed;
953
+
954
+ // The right iterator returns pointers to context objects from the live
955
+ // tree. This does not require skipping anything, but it does
956
+ // still require a transform from the context_host object.
957
+ struct from_host {
958
+ typedef context_t *result_type;
959
+ context_t *operator()(context_host &i) const {
960
+ return &(i.ctx);
961
+ }
962
+ };
963
+ typedef boost::transform_iterator<from_host, lt_itr_t> bn_dynamic;
964
+
965
+ bn_dynamic make_dynamic_itr(const lt_itr_t &where) const {
966
+ return bn_dynamic(where, from_host());
967
+ }
968
+
969
+ // The merge iterator zips the two
970
+ BOOST_STATIC_ASSERT((boost::is_same<typename bn_fixed::value_type, context_t *>::value));
971
+ BOOST_STATIC_ASSERT((boost::is_same<typename bn_dynamic::value_type, context_t *>::value));
972
+
973
+ struct compare_fd {
974
+ template<typename LEFT, typename RIGHT>
975
+ int operator()(const LEFT &left, const RIGHT &right) const {
976
+ return key(*left).compare_to(key(*right));
977
+ }
978
+ };
979
+
980
+ typedef symmetric_iterator<bn_fixed> fixed_range_t;
981
+ typedef symmetric_iterator<bn_dynamic> dynamic_range_t;
982
+ typedef merge_iterator<bn_fixed, bn_dynamic, compare_fd> itr_base_t;
983
+
984
+ public:
985
+ class iterator : public boost::iterator_adaptor<
986
+ iterator,
987
+ itr_base_t
988
+ >
989
+ {
990
+ typedef boost::iterator_adaptor<iterator,itr_base_t> base_t;
991
+
992
+ public:
993
+ iterator() {}
994
+
995
+ iterator(const fixed_range_t &f, const dynamic_range_t &d) :
996
+ base_t(itr_base_t(f,d))
997
+ {}
998
+
999
+ private:
1000
+ const bn_fixed &fixed() const { return base_t::base().left(); }
1001
+ const bn_dynamic &dynamic() const { return base_t::base().right(); }
1002
+
1003
+ friend class by_name_t;
1004
+ friend class context_map_t<TRAITS_T>;
1005
+ };
1006
+
1007
+ private:
1008
+ context_map &map;
1009
+ by_name_t(context_map &_map) : map(_map) {}
1010
+ friend class context_map_t<TRAITS_T>;
1011
+
1012
+ bn_fixed bn_begin() { return make_fixed(map.fbegin()); }
1013
+ bn_fixed bn_end() { return make_fixed(map.fend()); }
1014
+
1015
+ bn_fixed make_fixed(context_t *where) {
1016
+ OINKY_ASSERT(where >= map.fbegin());
1017
+ OINKY_ASSERT(where <= map.fend());
1018
+ return bn_fixed(fixed_nod_itr_t(where), fixed_nod_itr_t(map.fend()));
1019
+ }
1020
+
1021
+ iterator make(
1022
+ const bn_fixed &fi,
1023
+ const bn_dynamic &di)
1024
+ {
1025
+ return iterator(
1026
+ fixed_range_t(fi, bn_begin(), bn_end()),
1027
+ dynamic_range_t(di, bn_dynamic(map.live.begin()), bn_dynamic(map.live.end())));
1028
+ }
1029
+
1030
+ struct get_name
1031
+ {
1032
+ typedef mstring_safe result_type;
1033
+ const mstring_safe &operator()(const context_t &ref) const { return key(&ref); }
1034
+ };
1035
+ typedef boost::transform_iterator<get_name,context_t *> fixed_name_itr;
1036
+
1037
+ public:
1038
+ iterator make(const bn_fixed &fi, const lt_itr_t &li) {
1039
+ return make(fi, bn_dynamic(li));
1040
+ }
1041
+
1042
+ iterator begin() {
1043
+ return make(bn_begin(), bn_dynamic(map.live.begin()));
1044
+ }
1045
+ iterator end() {
1046
+ return make(bn_end(), bn_dynamic(map.live.end()));
1047
+ }
1048
+
1049
+ iterator lower_bound(const db_string &name) {
1050
+ if (!name.validate_metastring()) {
1051
+ throw_error(invalid_argument());
1052
+ }
1053
+ // FNI is a random-access iterator, which is useful for search.
1054
+ fixed_name_itr fnb = fixed_name_itr(map.fbegin());
1055
+ fixed_name_itr fni = std::lower_bound(fnb,fixed_name_itr(map.fend()), name);
1056
+ // bn_fixed is a traversal iterator, which we can obtain from the above random-access
1057
+ // iterator once we know the offset.
1058
+ bn_fixed fitr = make_fixed(map.fbegin() + (fni - fnb));
1059
+ return make(
1060
+ fitr,
1061
+ bn_dynamic(map.live.lower_bound(name, host_compare_live())));
1062
+ }
1063
+ iterator upper_bound(const db_string &name) {
1064
+ if (!name.validate_metastring()) {
1065
+ throw_error(invalid_argument());
1066
+ }
1067
+ // FNI is a random-access iterator, which is useful for search.
1068
+ fixed_name_itr fnb = fixed_name_itr(map.fbegin());
1069
+ fixed_name_itr fni = std::upper_bound(fnb,fixed_name_itr(map.fend()), name);
1070
+ // bn_fixed is a traversal iterator, which we can obtain from the above random-access
1071
+ // iterator once we know the offset.
1072
+ bn_fixed fitr = make_fixed(map.fbegin() + (fni - fnb));
1073
+ return make(
1074
+ fitr,
1075
+ bn_dynamic(map.live.upper_bound(name, host_compare_live())));
1076
+ }
1077
+ iterator find(const db_string &name) {
1078
+ // Find is tricky. Can't just invoke find on both iterators, as we do
1079
+ // in the case of upper/lower bound, because one may be valid, and
1080
+ // the other at the end. A merge_iterator cannot be constructed from
1081
+ // two such misaligned iterators. Iterating from that point won't
1082
+ // increment or decrement correctly.
1083
+ iterator i = lower_bound(name);
1084
+ if ((i == end()) || (key(*i) != name)) {
1085
+ return end();
1086
+ }
1087
+ return i;
1088
+ }
1089
+
1090
+ template<typename FN>
1091
+ iterator insert(const FN &fn, const mstring_safe &name, sp_marker_t sp)
1092
+ {
1093
+ return map.insert(fn, name, sp);
1094
+ }
1095
+ };
1096
+ typedef typename by_name_t::iterator by_name_iterator;
1097
+
1098
+ by_name_t by_name() { return by_name_t(*this); }
1099
+
1100
+ inline uint32 hard_context_count_limit() const {
1101
+ return (uint32) (index_t) -1;
1102
+ }
1103
+
1104
+ private:
1105
+
1106
+ // Allocates a context and returns iterator to it. The context is
1107
+ // constructed from a copy of the given context. No reference to the
1108
+ // given context is retained.
1109
+ template<typename FN>
1110
+ by_name_iterator insert(const FN &fn, const mstring_safe &name, sp_marker_t sp)
1111
+ {
1112
+ by_name_iterator existing = by_name().lower_bound(name);
1113
+ if ((existing != by_name().end()) && (key(*existing) == name)) {
1114
+ // Caller is still internal, and should have checked this before
1115
+ // (potentially) extending the index range.
1116
+ OINKY_ASSERT(false);
1117
+ throw_error(object_exists());
1118
+ }
1119
+
1120
+ // Check that we haven't reached a max-live-count.
1121
+ if (_live_count == hard_context_count_limit()) {
1122
+ throw_error(limit_exceeded());
1123
+ }
1124
+
1125
+ // This can raise.
1126
+ std::auto_ptr<context_host> h(new context_host());
1127
+ context_t *ctx = &(h->ctx);
1128
+
1129
+ // And so can this.
1130
+ fn(ctx, name);
1131
+ // But nothing else.
1132
+
1133
+ // The prototype should set the name.
1134
+ OINKY_ASSERT(TRAITS_T::key_from_ctx(ctx) == name);
1135
+ // We set this here, not in the prototype
1136
+ OINKY_ASSERT(TRAITS_T::ls_from_ctx(ctx).insert_sp == 0);
1137
+ OINKY_ASSERT(TRAITS_T::ls_from_ctx(ctx).delete_sp == 0);
1138
+ TRAITS_T::ls_from_ctx(ctx).insert_sp = sp;
1139
+
1140
+ // Now that we've set the SP and initialized everything, we
1141
+ // insert in the live tree.
1142
+ lt_itr_t li = insert_live(h.get());
1143
+ _live_count += 1;
1144
+
1145
+ // Now we're linked, so better not free.
1146
+ h.release();
1147
+
1148
+ // The fixed position will be the same (one beyond) but the dynamic
1149
+ // will now point precisely to the new entry.
1150
+ by_name_iterator new_bn = by_name().make(existing.fixed(), li);
1151
+ by_name_iterator tmp = new_bn; (void) tmp;
1152
+ OINKY_ASSERT( ++tmp == existing );
1153
+ return new_bn;
1154
+ }
1155
+
1156
+ // To drop (column/index/table), we must specify the savepoint responsible
1157
+ // for the delete, so we know when to roll back.
1158
+ void drop(context_t *ctx, sp_marker_t sp) {
1159
+ // If this context is being deleted in the same SP in which it was
1160
+ // inserted, we will do a hard-delete.
1161
+ OINKY_ASSERT(ls(ctx).delete_sp == 0);
1162
+ OINKY_ASSERT(sp >= ls(ctx).insert_sp);
1163
+ OINKY_ASSERT(sp > 0);
1164
+
1165
+ // Do the target callback
1166
+ TRAITS_T::on_deactivation(ctx);
1167
+ _live_count -= 1;
1168
+
1169
+ // Remove from the name index
1170
+ context_host *h = host_from_ctx(ctx);
1171
+ if (h) {
1172
+ // This context was created dynamically.
1173
+ OINKY_ASSERT(h->in_live && !h->in_deleted);
1174
+ OINKY_ASSERT(ls(ctx).insert_sp != 0);
1175
+ live.erase(*h);
1176
+ ls(ctx).delete_sp = sp;
1177
+ h->in_live = 0;
1178
+
1179
+ // If we are dropping in same SP as we deleted, then we
1180
+ // can hard-delete.
1181
+ if (ls(ctx).delete_sp == ls(ctx).insert_sp) {
1182
+ delete_ctx(ctx);
1183
+ } else {
1184
+ h->in_deleted = 1;
1185
+ std::pair<dt_itr_t, bool> r = deleted.insert(*h);
1186
+ OINKY_ASSERT(r.second);
1187
+ }
1188
+ } else {
1189
+ // This context was unpacked from serialized form.
1190
+ OINKY_ASSERT(ls(ctx).insert_sp == 0);
1191
+ ls(ctx).delete_sp = sp;
1192
+ }
1193
+ }
1194
+ private:
1195
+
1196
+ template<typename T, typename SCHEME>
1197
+ friend class ::Oinky::Serialization::Serializer;
1198
+
1199
+ template<typename BASE>
1200
+ struct set_accessor_identity : public BASE
1201
+ {
1202
+ set_accessor_identity(context_map &_map) : BASE(_map) {}
1203
+ };
1204
+
1205
+ public:
1206
+ template<typename ITR_VALUE,
1207
+ template<typename BASE> class ACCESSOR = set_accessor_identity>
1208
+ class set_accessor_t
1209
+ {
1210
+ class accessor_base
1211
+ {
1212
+ protected:
1213
+ context_map &map;
1214
+ public:
1215
+ class iterator : public boost::iterator_adaptor<
1216
+ iterator,
1217
+ by_name_iterator,
1218
+ ITR_VALUE
1219
+ >
1220
+ {
1221
+ typedef boost::iterator_adaptor<iterator,by_name_iterator,ITR_VALUE> base_t;
1222
+ friend class accessor_base;
1223
+
1224
+ const by_name_iterator &source_itr() const { return base_t::base(); }
1225
+
1226
+ public:
1227
+ iterator() {}
1228
+ iterator(const by_name_iterator &b) : base_t(b) {}
1229
+
1230
+ const ITR_VALUE &operator*() { return dereference(); }
1231
+ const ITR_VALUE *operator->() { return &dereference(); }
1232
+
1233
+ private:
1234
+ // Obviously, we do NOT want to return the raw context_t ptr
1235
+ // to the user.
1236
+ //
1237
+ // The context thus gets passed to the constructor of the
1238
+ // external type before the user can do anything with it.
1239
+ //
1240
+ ITR_VALUE v;
1241
+ const ITR_VALUE &dereference() { return v = ITR_VALUE(*base_t::base()); }
1242
+ friend class boost::iterator_core_access;
1243
+ };
1244
+
1245
+ accessor_base(context_map &_map) : map(_map) {}
1246
+
1247
+ iterator begin() const { return iterator(map.by_name().begin()); }
1248
+ iterator end() const { return iterator(map.by_name().end()); }
1249
+
1250
+ iterator lower_bound(const db_string &name) const {
1251
+ return iterator(map.by_name().lower_bound(name));
1252
+ }
1253
+ iterator upper_bound(const db_string &name) const {
1254
+ return iterator(map.by_name().upper_bound(name));
1255
+ }
1256
+ iterator find(const db_string &name) const {
1257
+ return iterator(map.by_name().find(name));
1258
+ }
1259
+
1260
+ ITR_VALUE operator[](const char *name) const {
1261
+ db_string str(name);
1262
+ return (*this)[str];
1263
+ }
1264
+ ITR_VALUE operator[](const db_string &name) const {
1265
+ iterator i = find(name);
1266
+ if (i == end()) {
1267
+ throw_error(object_not_found());
1268
+ }
1269
+ return *i;
1270
+ }
1271
+
1272
+ protected:
1273
+ template<typename FN>
1274
+ iterator create(const db_string &name, sp_marker_t sp, const FN &fn) {
1275
+ std::pair<iterator, bool> res = create_if(name, sp, fn);
1276
+ if (!res.second) {
1277
+ throw_error(object_exists());
1278
+ }
1279
+ return res.first;
1280
+ }
1281
+
1282
+ // First field of the result-pair is true if the object is new.
1283
+ template<typename FN>
1284
+ std::pair<iterator, bool> create_if(const db_string &name, sp_marker_t sp, const FN &fn)
1285
+ {
1286
+ if (!name.validate_metastring()) {
1287
+ throw_error(invalid_argument());
1288
+ }
1289
+ iterator i = find(name);
1290
+ if (i != end()) {
1291
+ return std::make_pair(i, false);
1292
+ }
1293
+
1294
+ // Make a safe metastring to store the object name permanently.
1295
+ mstring_safe mstr(map.stringtable->make_safestring(name));
1296
+
1297
+ i = iterator(map.by_name().insert(fn, mstr, sp));
1298
+ return std::make_pair(i,true);
1299
+ }
1300
+
1301
+ context_t *extract_ctx(const iterator &itr) const {
1302
+ return *(itr.source_itr());
1303
+ }
1304
+
1305
+ void drop(iterator &itr, sp_marker_t sp)
1306
+ {
1307
+ context_t *ctx = extract_ctx(itr);
1308
+ map.drop(ctx, sp);
1309
+ itr = end();
1310
+ }
1311
+ };
1312
+ public:
1313
+ typedef ACCESSOR<accessor_base> type;
1314
+ };
1315
+ };
1316
+
1317
+
1318
+ // Maps positional indexes to context pointers. The REUSE_SLOTS parameter
+ // controls whether vacated slots are reused when assigning new positions.
1320
+ template<typename context_t, bool REUSE_SLOTS>
1321
+ class position_map;
1322
+
1323
+ template<typename context_t>
1324
+ class position_map_base
1325
+ {
1326
+ protected:
1327
+ db_vector<context_t *> ptrs;
1328
+ uint32 _limit;
1329
+ uint32 _live_count;
1330
+
1331
+ public:
1332
+ typedef typename context_t::position_idx_t position_idx_t;
1333
+
1334
+ uint32 capacity() const { return ptrs.size(); }
1335
+
1336
+ position_map_base(nky_allocator_t *_alloc) :
1337
+ ptrs(_alloc),
1338
+ _limit(0),
1339
+ _live_count(0)
1340
+ {}
1341
+
1342
+ //
1343
+ void assign_sequential(context_t *begin, context_t *end) {
1344
+ if (ptrs.size() < end - begin) {
1345
+ ptrs.resize(end - begin);
1346
+ }
1347
+ context_t *s = begin;
1348
+ int i=0;
1349
+ for (;s<end;++s,++i) {
1350
+ ptrs[i] = s;
1351
+ }
1352
+ _live_count = _limit = i;
1353
+ for (;i < ptrs.size();++i) {
1354
+ ptrs[i] = NULL;
1355
+ }
1356
+ }
1357
+
1358
+ void resize(uint32 newsize) {
1359
+ OINKY_ASSERT(newsize >= _limit);
1360
+ ptrs.resize(newsize);
1361
+ for (int i=_limit;i<newsize;++i) {
1362
+ ptrs[i] = NULL;
1363
+ }
1364
+ }
1365
+
1366
+ void drop(position_idx_t position, context_t *ctx) {
1367
+ OINKY_ASSERT(ptrs.size() > (uint32) position);
1368
+ OINKY_ASSERT(ptrs[position] == ctx);
1369
+ ptrs[position] = NULL;
1370
+ _live_count -= 1;
1371
+ }
1372
+
1373
+ context_t * const *begin() const { return ptrs.begin(); }
1374
+ context_t * const *end() const { return ptrs.end(); }
1375
+ context_t *operator[](position_idx_t x) const { return begin()[x]; }
1376
+
1377
+ void cond_grow(uint32 min_size)
1378
+ {
1379
+ if (min_size <= capacity()) {
1380
+ return;
1381
+ }
1382
+ uint32 newsize = prow_shape_t::compute_reserve_size(min_size);
1383
+ resize(newsize);
1384
+ }
1385
+
1386
+ uint32 limit() const { return _limit; }
1387
+ uint32 live_count() const { return _live_count; }
1388
+ };
1389
+
1390
+ template<typename context_t>
1391
+ class position_map<context_t, false> : public position_map_base<context_t>
1392
+ {
1393
+ protected:
1394
+ using position_map_base<context_t>::ptrs;
1395
+ using position_map_base<context_t>::_limit;
1396
+ using position_map_base<context_t>::_live_count;
1397
+ public:
1398
+ using position_map_base<context_t>::resize;
1399
+ typedef typename context_t::position_idx_t position_idx_t;
1400
+
1401
+ position_map(nky_allocator_t *_alloc) : position_map_base<context_t>(_alloc) {}
1402
+
1403
+ void uncreate(uint32 position, context_t *ctx)
1404
+ {
1405
+ OINKY_ASSERT(ptrs[position] == ctx);
1406
+ ptrs[position] = NULL;
1407
+ _live_count -= 1;
1408
+ // This is the only time we roll back the limit. If we
1409
+ // are rolling back the create, then anything
1410
+ // referencing it must be getting destroyed also.
1411
+ //
1412
+ // This test is necessary, since uncreates may be performed
1413
+ // in batches, and not necessarily in perfect reverse-order.
1414
+ if (_limit > position) {
1415
+ _limit = position;
1416
+ }
1417
+ }
1418
+ void undrop(uint32 position, context_t *ctx)
1419
+ {
1420
+ OINKY_ASSERT(ptrs[position] == NULL);
1421
+ ptrs[position] = ctx;
1422
+ _live_count += 1;
1423
+ // Unlike the case for uncreate, we know that the create stamp of
1424
+ // this context precedes anything being uncreated, thus we can
1425
+ // assert that the position is lower (still reserved).
1426
+ OINKY_ASSERT(_limit > position);
1427
+ }
1428
+
1429
+ void create(uint32 position, context_t *ctx) {
1430
+ OINKY_ASSERT(ptrs.size() > position);
1431
+ OINKY_ASSERT(ptrs[position] == NULL);
1432
+ OINKY_ASSERT(position == _limit);
1433
+ _limit = position + 1;
1434
+
1435
+ ptrs[position] = ctx;
1436
+ _live_count += 1;
1437
+ }
1438
+
1439
+ uint32 next_position() {
1440
+ if (_limit == ptrs.size()) {
1441
+ resize(((_limit * 5) >> 2) + 3);
1442
+ }
1443
+ OINKY_ASSERT(!ptrs[_limit]);
1444
+ return _limit;
1445
+ }
1446
+ };
1447
+
1448
+ template<typename context_t>
1449
+ class position_map<context_t, true> : public position_map_base<context_t>
1450
+ {
1451
+ protected:
1452
+ using position_map_base<context_t>::ptrs;
1453
+ using position_map_base<context_t>::_limit;
1454
+ using position_map_base<context_t>::_live_count;
1455
+ using position_map_base<context_t>::resize;
1456
+ public:
1457
+ typedef typename context_t::position_idx_t position_idx_t;
1458
+
1459
+ position_map(nky_allocator_t *_alloc) : position_map_base<context_t>(_alloc) {}
1460
+
1461
+ void uncreate(position_idx_t position, context_t *ctx)
1462
+ {
1463
+ OINKY_ASSERT(ptrs[position] == ctx);
1464
+ ptrs[position] = NULL;
1465
+ _live_count -= 1;
1466
+ // Unlike the un-reusing implementation, we do not allocate in
1467
+ // monotonically increasing positions, so we can't drop the
1468
+ // limit, EVER.
1469
+ }
1470
+ void undrop(position_idx_t position, context_t *ctx)
1471
+ {
1472
+ OINKY_ASSERT(_limit > (uint32) position);
1473
+ OINKY_ASSERT(ptrs[position] == NULL);
1474
+ ptrs[position] = ctx;
1475
+ _live_count += 1;
1476
+ }
1477
+
1478
+ void create(position_idx_t position, context_t *ctx) {
1479
+ OINKY_ASSERT(ptrs.size() > (uint32) position);
1480
+ OINKY_ASSERT(ptrs[position] == NULL);
1481
+ if (_limit <= (uint32) position) {
1482
+ _limit = (uint32) position + 1;
1483
+ }
1484
+ ptrs[position] = ctx;
1485
+ _live_count += 1;
1486
+ }
1487
+
1488
+ position_idx_t next_position() {
1489
+ uint32 i;
1490
+ if (_live_count == _limit) {
1491
+ if (_limit == ptrs.size()) {
1492
+ // Conservative overallocation.
1493
+ resize(((_limit * 5) >> 2) + 3);
1494
+ }
1495
+ i = _limit;
1496
+ } else {
1497
+ for (i=0;ptrs[i];++i) {
1498
+ //NOTHING
1499
+ }
1500
+ OINKY_ASSERT(i < _limit);
1501
+ }
1502
+ OINKY_ASSERT(!ptrs[i]);
1503
+ // Check that the index is within range of the index type.
1504
+ position_idx_t pos = (position_idx_t) i;
1505
+ if (i != (uint32) pos) {
1506
+ // We should have checked this earlier before trying to assign a
1507
+ // position.
1508
+ OINKY_ASSERT(false);
1509
+ throw_error(limit_exceeded());
1510
+ }
1511
+ return pos;
1512
+ }
1513
+ };
1514
+
1515
+ template<typename ROW>
1516
+ struct row_value_extractor
1517
+ {
1518
+ // This class must be specialized.
1519
+ BOOST_STATIC_ASSERT(sizeof(ROW) != sizeof(ROW));
1520
+ };
1521
+
1522
+ template<typename TBL>
1523
+ struct row_value_extractor<pending_row<TBL> >
1524
+ {
1525
+ typedef pending_row<TBL> pending_row_t;
1526
+ typedef safe_cv_t ex_value_t;
1527
+
1528
+ static inline ex_value_t get_column_value(const pending_row_t &row, const column_ctx &col, const TBL *tbl) {
1529
+ OINKY_ASSERT(row.table() == tbl);
1530
+ return row.get_value(col);
1531
+ }
1532
+ };
1533
+
1534
+ template<typename INDEX, typename ROW>
1535
+ struct index_row_compare_t
1536
+ {
1537
+ const INDEX *idx;
1538
+ typedef typename INDEX::idx_column_iterator idx_column_iterator;
1539
+ typedef row_value_extractor<ROW> extractor_t;
1540
+ typedef typename extractor_t::ex_value_t ex_value_t;
1541
+
1542
+ index_row_compare_t(const INDEX *_idx) : idx(_idx) {}
1543
+
1544
+ template<typename ITR_T>
1545
+ int compare(const ROW &left, const simple_sequence<ITR_T> &right) const
1546
+ {
1547
+ // An index spec is just a list of column refs and corresponding
1548
+ // orderings. The refs are indexes into the row's normal column
1549
+ // set.
1550
+ idx_column_iterator i = idx->coldefs_begin();
1551
+ idx_column_iterator end = idx->coldefs_end();
1552
+ ITR_T ri(right.begin());
1553
+ for (;i != end;++i, ++ri) {
1554
+ if (ri == right.end()) {
1555
+ // Premature end of seq means seq<itr
1556
+ //
1557
+ // NOTE: !!!
1558
+ // This semantic does not change depending on the
1559
+ // sort-order of the index column.
1560
+ return 1;
1561
+ }
1562
+ idx_column_ctx icc(*i);
1563
+ ex_value_t lv(extractor_t::get_column_value(left, *icc.column, idx->table));
1564
+ int k = lv.compare_to(*ri);
1565
+ if (k != 0) {
1566
+ return icc.ascending ? k : -k;
1567
+ }
1568
+ }
1569
+ // If seq has not yet reached end, we do not treat it as greater.
1570
+ // Values beyond the set we require are ignored completely.
1571
+ return 0;
1572
+ }
1573
+ template<typename ITR_T>
1574
+ int compare(const simple_sequence<ITR_T> &left, const ROW &right) const
1575
+ {
1576
+ return -compare(right, left);
1577
+ }
1578
+ int compare(const ROW &left, const ROW &right) const
1579
+ {
1580
+ OINKY_ASSERT(left.table() == right.table());
1581
+
1582
+ // An index spec is just a list of column refs and corresponding
1583
+ // orderings. The refs are indexes into the row's normal column
1584
+ // set.
1585
+ idx_column_iterator i = idx->coldefs_begin();
1586
+ idx_column_iterator end = idx->coldefs_end();
1587
+ for (;i != end;++i) {
1588
+ idx_column_ctx icc(*i);
1589
+ ex_value_t lv(extractor_t::get_column_value(left, *icc.column, idx->table));
1590
+ ex_value_t rv(extractor_t::get_column_value(right, *icc.column, idx->table));
1591
+ int k = lv.compare_to(rv);
1592
+ if (k != 0) {
1593
+ return icc.ascending ? k : -k;
1594
+ }
1595
+ }
1596
+ return 0;
1597
+ }
1598
+ };
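+
+ // Illustrative note: compare(row, seq) treats a sequence shorter than the
+ // index column list as a key prefix. Once the supplied values compare
+ // equal, the row compares greater than the sequence, so a lower_bound
+ // style seek with a prefix lands on the first row whose leading columns
+ // match it. Values beyond the indexed columns are ignored.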
1599
+
1600
+
1601
+ } //namespace Internal
1602
+ } //namespace Oinky
1603
+