oinky 0.1.0

Files changed (41)
  1. data/LICENSE +22 -0
  2. data/README.md +141 -0
  3. data/ext/extconf.rb +79 -0
  4. data/ext/include/oinky.h +424 -0
  5. data/ext/include/oinky.hpp +63 -0
  6. data/ext/include/oinky/nky_base.hpp +1116 -0
  7. data/ext/include/oinky/nky_core.hpp +1603 -0
  8. data/ext/include/oinky/nky_cursor.hpp +665 -0
  9. data/ext/include/oinky/nky_dialect.hpp +107 -0
  10. data/ext/include/oinky/nky_error.hpp +164 -0
  11. data/ext/include/oinky/nky_fixed_table.hpp +710 -0
  12. data/ext/include/oinky/nky_handle.hpp +334 -0
  13. data/ext/include/oinky/nky_index.hpp +1038 -0
  14. data/ext/include/oinky/nky_log.hpp +15 -0
  15. data/ext/include/oinky/nky_merge_itr.hpp +403 -0
  16. data/ext/include/oinky/nky_model.hpp +110 -0
  17. data/ext/include/oinky/nky_pool.hpp +760 -0
  18. data/ext/include/oinky/nky_public.hpp +808 -0
  19. data/ext/include/oinky/nky_serializer.hpp +1625 -0
  20. data/ext/include/oinky/nky_strtable.hpp +504 -0
  21. data/ext/include/oinky/nky_table.hpp +1996 -0
  22. data/ext/nky_lib.cpp +390 -0
  23. data/ext/nky_lib_core.hpp +212 -0
  24. data/ext/nky_lib_index.cpp +158 -0
  25. data/ext/nky_lib_table.cpp +224 -0
  26. data/lib/oinky.rb +1284 -0
  27. data/lib/oinky/compiler.rb +106 -0
  28. data/lib/oinky/cpp_emitter.rb +311 -0
  29. data/lib/oinky/dsl.rb +167 -0
  30. data/lib/oinky/error.rb +19 -0
  31. data/lib/oinky/modelbase.rb +12 -0
  32. data/lib/oinky/nbuffer.rb +152 -0
  33. data/lib/oinky/normalize.rb +132 -0
  34. data/lib/oinky/oc_builder.rb +44 -0
  35. data/lib/oinky/query.rb +193 -0
  36. data/lib/oinky/rb_emitter.rb +147 -0
  37. data/lib/oinky/shard.rb +40 -0
  38. data/lib/oinky/testsup.rb +104 -0
  39. data/lib/oinky/version.rb +9 -0
  40. data/oinky.gemspec +36 -0
  41. metadata +120 -0
@@ -0,0 +1,1996 @@
1
+ // This source is distributed under the terms of the MIT License. Refer
2
+ // to the 'LICENSE' file for details.
3
+ //
4
+ // Copyright (c) Jacob Lacouture, 2012
5
+
6
+ namespace Oinky
7
+ {
8
+ namespace Internal
9
+ {
10
+ using namespace Oinky::Errors;
11
+
12
+ // This is an accessor which can be used for both select and insert.
13
+ // It defines a column subset and ordering. This is what gives
14
+ // a simple sequence of row values its meaning, by binding those values
15
+ // to specific columns. As with any accessor, the resulting object
16
+ // must be destroyed before the host object (the table) is deallocated.
17
+ //
18
+ // Make a column set from a sequence of column names. This column set
19
+ // will then be used in conjunction with the "insert" method to indicate
20
+ // which columns we're providing values for. This two-stage insert
21
+ // method allows groups of columns to be inserted faster, because
22
+ // we only have to look up the column names once. The same columnset
23
+ // can be used for select.
24
+ template<typename TABLE_CTX>
25
+ class column_selector_template
26
+ {
27
+ protected:
28
+ typedef TABLE_CTX table_ctx;
29
+ typedef index_ctx_t<table_ctx> index_ctx;
30
+ typedef column_selector_template<table_ctx> column_selector_t;
31
+
32
+ friend class table_ctx_t<typename table_ctx::db_t>;
33
+ friend class index_ctx_t<table_ctx>;
34
+ friend class table_handle_t<table_ctx>;
35
+ friend class pending_row<TABLE_CTX>;
36
+ friend struct column_selector_accessor<column_selector_t>;
37
+ template<typename INTERNAL_T, typename HOST_T> friend class cursor_handle_t;
38
+
39
+ table_ctx *table;
40
+ uint32 tbl_cs_schema;
41
+ const column_ctx *const *colrefs;
42
+ uint32 colcount;
43
+
44
+ typedef std::vector<const column_ctx *> vector_t;
45
+ std::tr1::shared_ptr<vector_t> refs;
46
+
47
+ column_selector_template(
48
+ table_ctx *_table,
49
+ const column_ctx **_colrefs,
50
+ uint32 _colcount) :
51
+ table(_table),
52
+ tbl_cs_schema(_table->last_colset_change),
53
+ colrefs(_colrefs),
54
+ colcount(_colcount)
55
+ {}
56
+
57
+ void check_valid(const table_ctx *_table) const {
58
+ if ((table != _table) ||
59
+ (table->last_colset_change != tbl_cs_schema))
60
+ {
61
+ throw_error(invalid_argument());
62
+ }
63
+ }
64
+
65
+ // Most members of this type are hidden because the type is exposed.
66
+ static column_selector_t select_index(index_ctx *index) {
67
+ column_selector_t x;
68
+ x.table = index->table;
69
+ x.tbl_cs_schema = x.table->last_colset_change;
70
+ x.colcount = index->defn.col_count;
71
+ x.colrefs = index->column_refs;
72
+ return x;
73
+ }
74
+
75
+ static column_selector_t select_all(table_ctx *table) {
76
+ column_selector_t x;
77
+ x.table = table;
78
+ x.tbl_cs_schema = table->last_colset_change;
79
+ x.colcount = table->cols_by_position.size();
80
+ x.colrefs = table->cols_by_position.begin();
81
+ return x;
82
+ }
83
+
84
+ // This permits duplicate columns, which we certainly want for select,
85
+ // but probably not for insert.
86
+ template<typename COL_NAME_SEQ>
87
+ column_selector_template(table_ctx *_table, const COL_NAME_SEQ &seq) :
88
+ table(_table),
89
+ tbl_cs_schema(_table->last_colset_change)
90
+ {
91
+ typedef typename table_ctx::cols_by_name_itr_t cols_by_name_itr_t;
92
+
93
+ uint32 len = seq.size();
94
+ refs.reset(new vector_t(len));
95
+
96
+ typename COL_NAME_SEQ::itr_t s = seq.begin();
97
+
98
+ cols_by_name_itr_t bni;
99
+ cols_by_name_itr_t bne = table->columns.by_name().end();
100
+
101
+ for (uint32 i = 0; i < len; ++i, ++s) {
102
+ db_string colname(*s);
103
+
104
+ bni = table->columns.by_name().find(colname);
105
+ if (bni == bne) {
106
+ throw_error(object_not_found());
107
+ }
108
+ column_ctx *ctx = (*bni);
109
+ (*refs)[i] = ctx;
110
+ }
111
+
112
+ // Init base.
113
+ colcount = len;
114
+ colrefs = &((*refs)[0]);
115
+ }
116
+ public:
117
+ column_selector_template() : table(NULL), tbl_cs_schema(0), colrefs(NULL), colcount(0) {}
118
+
119
+ uint32 column_count() const { return colcount; }
120
+
121
+ uint32 last_colset_change() const { return tbl_cs_schema; }
122
+
123
+ template<typename OSTREAM>
124
+ void format(OSTREAM &os) const {
125
+ check_valid(table);
126
+
127
+ // This prints selected columns in selected order.
128
+ for (uint32 i = 0; i < colcount; ++i) {
129
+ const column_ctx *col = colrefs[i];
130
+ if (!col) {
131
+ throw_error(invalid_argument());
132
+ }
133
+ if (i) {
134
+ os << ", ";
135
+ }
136
+ os << col->colname.as_string();
137
+ }
138
+ }
139
+ };
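To make the two-stage pattern above concrete: a caller resolves the column names into a selector once, then reuses that selector for every insert (or select) against the same table. The sketch below is illustrative only; make_selector and insert_row are the handle-level entry points declared later in this file, while the column names, the value container, and the use of C++11 auto (the header itself targets C++03/TR1) are assumptions of the example.

// Hypothetical caller-side sketch, not part of this header.
template<typename TABLE_HANDLE, typename ROWS_T>
void bulk_insert_example(TABLE_HANDLE table, const ROWS_T &rows)
{
    // Column-name lookup happens exactly once, when the selector is built.
    const char *names[] = { "id", "name" };
    auto cs = table.make_selector(names, names + 2);

    // Every insert reuses the cached column references.
    for (typename ROWS_T::const_iterator r = rows.begin(); r != rows.end(); ++r) {
        table.insert_row(cs, r->begin(), r->end());
    }
}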
140
+
141
+
142
+ template<typename T>
143
+ struct explode_callbacks
144
+ {
145
+ template<typename ROW>
146
+ static bool call(const column_ctx &cc, T v, ROW *row) {
147
+ OINKY_ASSERT(cc.ctype != column_types::String);
148
+
149
+ if (cc.ctype == column_types::Variant) {
150
+ row->set_value(cc, safe_cv_t(v));
151
+ } else {
152
+ row->set_value(cc, v);
153
+ }
154
+ return true;
155
+ }
156
+ };
157
+ template<>
158
+ struct explode_callbacks<safe_cv_t>
159
+ {
160
+ template<typename ROW>
161
+ static bool call(const column_ctx &cc, const safe_cv_t &v, ROW *row) {
162
+ row->set_value(cc, v);
163
+ return true;
164
+ }
165
+ };
166
+
167
+
168
+ // All rows are ordered by an internal key. This is not exposed to the
169
+ // user, because we reorder it freely. The key is just an index into the
170
+ // array of rows. All rows are exactly the same length, regardless of
171
+ // content. This is because we can put all variable-length data into
172
+ // the string table.
173
+ //
174
+ // The key is completely local to the table and its indexes. It
175
+ // is never used even as a foreign key to other tables. Foreign key
176
+ // relationships are, of course, defined and used by the user, and
177
+ // therefore require a user-defined key.
178
+
179
+ template<typename DB_T>
180
+ class table_ctx_t
181
+ {
182
+ template<typename X,typename SCHEME> friend class ::Oinky::Serialization::Serializer;
183
+
184
+ // This isn't public in order to expose it to the user, but to share it with the index_ctx,
185
+ // pending_row, fixed_t::iterator, etc.
186
+ public:
187
+ typedef DB_T db_t;
188
+ typedef typename db_t::allocator_t allocator_t;
189
+ typedef table_ctx_t<DB_T> table_ctx;
190
+ typedef index_ctx_t<table_ctx> index_ctx;
191
+ typedef table_handle_t<table_ctx> table_handle;
192
+
193
+ friend class table_handle_t<table_ctx>;
194
+
195
+ struct colmap_traits : default_ctx_map_traits<column_ctx, column_idx_t, allocator_t>
196
+ {
197
+ static const mstring_safe &key_from_ctx(const column_ctx *ctx) {
198
+ return ctx->colname;
199
+ }
200
+ // Hide the default implementation.
201
+ static void on_deactivation(const column_ctx *ctx) {}
202
+ };
203
+ struct indexmap_traits : default_ctx_map_traits<index_ctx, index_idx_t, allocator_t>
204
+ {
205
+ static const mstring_safe &key_from_ctx(const index_ctx *ctx) {
206
+ return ctx->indexname;
207
+ }
208
+ };
209
+
210
+ mstring_safe tablename;
211
+ ls_marker ls;
212
+ db_t *db;
213
+ uint32 last_colset_change;
214
+ uint32 last_ixset_change;
215
+
216
+ // Every time this table's schema gets altered (new/dropped column/index)
217
+ // we mark the update, invalidating schema caches, such as column_selectors.
218
+ void mark_colset_change() {
219
+ last_colset_change = db->mark_schema_change();
220
+ }
221
+ void mark_ixset_change () {
222
+ last_ixset_change = db->mark_schema_change();
223
+ }
224
+
225
+ bool table_modified_since(sp_marker_t tsp) const {
226
+ // Deleted pending rows.
227
+ if (dead_pending && (dead_pending->ls.delete_sp > tsp)) {
228
+ return true;
229
+ }
230
+ // Deleted fixed rows
231
+ if (fixed_dropped_sp > tsp) {
232
+ return true;
233
+ }
234
+ BOOST_FOREACH(pending_deletes_t::value_type di, pending_deletes)
235
+ {
236
+ if (di.second > tsp) { return true; }
237
+ }
238
+ cpitr_t pend = live_pending.end();
239
+ for (
240
+ cpitr_t pi = live_pending.begin();
241
+ pi != pend;
242
+ ++pi
243
+ )
244
+ {
245
+ if (pi->ls.insert_sp > tsp) { return true; }
246
+ }
247
+
248
+ if (columns.modified_since(tsp)) {
249
+ return true;
250
+ }
251
+ if (indexes.modified_since(tsp)) {
252
+ return true;
253
+ }
254
+ return false;
255
+ }
256
+
257
+ // Column definitions
258
+ typedef context_map_t<colmap_traits> columns_t;
259
+ typedef typename columns_t::by_name_iterator cols_by_name_itr_t;
260
+ columns_t columns;
261
+
262
+ // Rows deleted from the fixed set.
263
+ // (Rows deleted from the pending sets are not represented,
264
+ // because we just remove them from the live_pending set.)
265
+ //
266
+ // This structure maps the row Id to the SP timestamp which was active
267
+ // when it was deleted. If we rollback to a point before that savepoint
268
+ // we will remove the entry from this map.
269
+ //
270
+ // NOTE that we're not using a ls_marker object because the insert is
271
+ // implicitly zero. What we store here is the delete timestamp only.
272
+ typedef std::tr1::unordered_map<row_idx_t, sp_marker_t> pending_deletes_t;
273
+ pending_deletes_t pending_deletes;
274
+
275
+ // This is the serialized table data. In the common case, where the
276
+ // table structure is unaltered and the bulk of the data was deserialized
277
+ // (rather than newly inserted) most of the data is stored here.
278
+ //
279
+ // Note that we iterate over the fixed column set only, since the validity
280
+ // of both that set and the fixed values are coincident, and the raw pointer
281
+ // iterator is more efficient in the common case.
282
+ typedef tabledata_fixed<table_ctx> fixed_t;
283
+ fixed_t fixed;
284
+
285
+ // Our set of indices. This structure stores both the serialized indices
286
+ // and any new ones.
287
+ typedef context_map_t<indexmap_traits> indexes_t;
288
+ indexes_t indexes;
289
+
290
+ // The pending_row object stores column values and index hooks within
291
+ // arrays. Each of those positions maps to the relevant index/column
292
+ // descriptor via these maps. Each context object contains an integer
293
+ // position specifier, providing the reverse mapping.
294
+ //
295
+ // Each of these maps is by reference position (column_idx_t and
296
+ // index_idx_t) respectively. These positions are reused. In the case
297
+ // of indexes, the reference position and storage position are the same,
298
+ // and the position is fixed for the lifetime of the index.
299
+ // In the case of columns, the positions change whenever a column is added
300
+ // or removed, but the storage is not reused. The usage of the
301
+ // column storage slots is determined separately, using the prow_xxx_xxx
302
+ // limit values below.
303
+ db_vector<column_ctx *> cols_by_position;
304
+ position_map<index_ctx,true> indexes_by_position;
305
+
306
+ // While the above column map just provides fast access to the context
307
+ // objects by pseudo-index, the following are responsible for marking
308
+ // which column storage slots are reserved.
309
+ //
310
+ // Because we do not reuse these positions, we store them as uint32s
311
+ // rather than as column_idx_t values, which could be easily exhausted
312
+ // if we create/drop many columns.
313
+ uint32 prow_byte_limit;
314
+ // A bit position is an offset and shift.
315
+ uint32 prow_bit_loc;
316
+ uint8 prow_bit_limit;
317
+
318
+ typedef index_ctx *const *index_positional_itr_t;
319
+ typedef column_ctx *const *cols_positional_itr_t;
320
+
321
+ // This is the dynamic row data. If a row is inserted, it goes here.
322
+ // If a row is modified, it gets inserted here, and its index gets
323
+ // inserted into the pending_delete_rows table. We also reinsert it
324
+ // into all indexes.
325
+ typedef pending_row<table_ctx> pending_row_t;
326
+ typedef typename pending_row_t::row_tree_t row_tree_t;
327
+ row_tree_t live_pending;
328
+ pending_row_t *dead_pending;
329
+
330
+ // Iterators over each container type.
331
+ typedef typename fixed_t::iterator fixed_itr_t;
332
+ typedef typename row_tree_t::iterator pitr_t;
333
+ typedef typename row_tree_t::const_iterator cpitr_t;
334
+
335
+ // Primary ordering for tables (consistent enumeration).
336
+ // Secondary ordering for indices (after indexed values, obviously).
337
+ // See comment in pending_row::sequence_number.
338
+ uint64 insert_sequence_limit;
339
+
340
+ // Some statistics we compute in the first phase of serialization,
341
+ // used during the second phase.
342
+ struct computed_packing_data
343
+ {
344
+ uint32 indexset_size;
345
+ uint32 rowdata_bytes;
346
+ uint32 total_size;
347
+ row_offset_accumulator_t row_width;
348
+ } packing;
349
+
350
+ // Constructor must be safely default-constructible. Usually we
351
+ // construct, then deserialize into the constructed target.
352
+ table_ctx_t(db_t *_db = NULL) :
353
+ db(_db),
354
+ last_colset_change(0),
355
+ last_ixset_change(0),
356
+ columns(_db ? &_db->allocator : NULL, _db ? &_db->metastrings : NULL),
357
+ fixed(
358
+ tabular_t(),
359
+ this),
360
+ indexes(_db ? &_db->allocator : NULL, _db ? &_db->metastrings : NULL),
361
+ cols_by_position(_db ? &_db->allocator : NULL),
362
+ indexes_by_position(_db ? &_db->allocator : NULL),
363
+ prow_byte_limit(0),
364
+ prow_bit_loc(0),
365
+ prow_bit_limit(0),
366
+ dead_pending(NULL),
367
+ insert_sequence_limit(0)
368
+ {}
369
+
370
+ ~table_ctx_t() {
371
+ // We have to invalidate cursors on destruction, in addition to
372
+ // uncreate and drop. This is to catch the more common database
373
+ // teardown case.
374
+ invalidate_cursors();
375
+
376
+ // Now delete each pending row. Theoretically we don't even
377
+ // have to clean these up if we're using the pool allocator.
378
+ // However, if we're using a global malloc for any reason,
379
+ // we'll leak very badly unless we clean these things up, so
380
+ // we do. In general, we don't expect this to be a performance
381
+ // problem, since the number of pending rows is generally quite
382
+ // small relative to the size of the instance.
383
+ //
384
+ // This is safe, even though the indices may still reference these
385
+ // rows, because we will next destroy the index ctxs, whose
386
+ // destructors never touch the pending rows.
387
+ uncreate_live_rows(0);
388
+ uncreate_dead_rows(0);
389
+ }
390
+
391
+ void reinit(db_t *_db)
392
+ {
393
+ this->~table_ctx();
394
+ ::new(this) table_ctx(_db);
395
+ }
396
+
397
+ bool is_deleted(row_idx_t idx) const {
398
+ OINKY_ASSERT(idx < fixed.tabular.row_count);
399
+ // If we dropped everything, or if this particular row is in the pending-delete set.
400
+ return fixed_dropped() || (pending_deletes.find(idx) != pending_deletes.end());
401
+ }
402
+ bool fixed_dropped() const { return fixed_dropped_sp != 0; }
403
+
404
+ private:
405
+ // If we delete all rows (or explode), we don't bother creating a
406
+ // delete entry for every record. We just set this value.
407
+ sp_marker_t fixed_dropped_sp;
408
+
409
+ // When the table schema is altered (new column added, column deleted,
410
+ // index added, etc.), the table gets exploded. That means we fully
411
+ // deserialize it into pending_row_t structures.
412
+ //
413
+ // We specify the new row width. This may be larger than the current
414
+ // row width. Preallocating larger rows saves us from having to reallocate
415
+ // them later, since we need to store the columns contiguously.
416
+ //
417
+ // The target row width cannot be less than the current row width. The only
418
+ // way to do that would be to explode and delete the column(s) in a
419
+ // single step.
420
+ void explode(prow_shape_t target_shape)
421
+ {
422
+ // Exploding applies to fixed rows only.
423
+ if (!fixed_dropped()) {
424
+ // How many fixed rows still exist?
425
+ uint32 newrows = fixed.tabular.row_count - pending_deletes.size();
426
+
427
+ if (newrows) {
428
+ uint32 rowcount = row_count();
429
+
430
+ // Because we do not create a new tombstone for each deleted
431
+ // fixed item, we are effectively adding new rows to the table
432
+ // until we batch-delete all the fixed items at the end.
433
+ // As we delete, we add to this list. Then we can process an
434
+ // exception by simply destroying this list. The space we use
435
+ // for this hook is an extra index allocation (currently
436
+ // unused), but able to be reused in the future. Once we
437
+ // finish exploding, of course, we can reuse the hook.
438
+ pending_row_t *undo_head = NULL;
439
+ uint32 fake_index_idx = indexes_by_position.next_position();
440
+
441
+ try {
442
+ // Make sure we have space for our temporary hook.
443
+ if (target_shape.idx_count <= (index_idx_t) fake_index_idx) {
444
+ target_shape.idx_count += 1;
445
+ }
446
+
447
+ // Advise the allocator that we're about to do a lot of
448
+ // allocation.
449
+ db->allocator.prepare(
450
+ newrows,
451
+ newrows * pending_row_t::comp_size(target_shape));
452
+
453
+ // Migrate all undeleted fixed rows to pending.
454
+ fixed_itr_t fi = fixed.begin();
455
+ fixed_itr_t fend = fixed.end();
456
+
457
+ for (;fi != fend; ++fi) {
458
+ if (fi.is_deleted()) {
459
+ continue;
460
+ }
461
+
462
+ pending_row_t *row = pending_row_t::make_new(this, target_shape);
463
+ OINKY_ASSERT((uint32) row->get_shape().idx_count > fake_index_idx);
464
+ // Set the lifespan.
465
+ row->ls.insert_sp = db->active_sp();
466
+ // The sequence number is the original rowkey.
467
+ row->sequence_number = *fi;
468
+ // Copy the column values.
469
+ fixed.template each_raw_value<explode_callbacks>(
470
+ cols_by_position.begin(),
471
+ cols_by_position.end(),
472
+ *fi,
473
+ row);
474
+
475
+ // Link each index entry.
476
+ this->link_row_indexes(row, true);
477
+
478
+ // And into the table list.
479
+ std::pair<pitr_t,bool> ires = live_pending.insert_unique(*row);
480
+ OINKY_ASSERT(ires.second);
481
+
482
+ // Link the fake entry
483
+ * (pending_row_t **) row->get_hook((index_idx_t) fake_index_idx) = undo_head;
484
+ undo_head = row;
485
+ }
486
+ } catch (...) {
487
+ // Unlink and free all the row objects we allocated.
488
+ while (undo_head) {
489
+ pending_row_t *row = undo_head;
490
+ undo_head = * (pending_row_t **) row->get_hook((index_idx_t) fake_index_idx);
491
+
492
+ // There's no reason this try shouldn't succeed, unless
493
+ // the user is failing to synchronize, as we just
494
+ // created this row.
495
+ unlink_row_indexes(row);
496
+ live_pending.erase(row_tree_t::s_iterator_to(*row));
497
+ row->try_delete();
498
+ }
499
+ // re-raise.
500
+ throw;
501
+ } // try
502
+
503
+ // We haven't dropped the remaining fixed rows yet.
504
+ OINKY_ASSERT(rowcount + newrows == row_count());
505
+ } // if (newrows)
506
+
507
+ // Now drop the entire remaining fixed row set.
508
+ // Do this regardless of whether there are any rows to drop, as
509
+ // the fixed rows being dropped is used as a test for whether the
510
+ // table schema has been altered.
511
+ fixed_dropped_sp = db->active_sp();
512
+ }// if (!fixed_dropped())
513
+ }
514
+
515
+ public:
516
+ // Compute the minimum row shape required to represent a row
517
+ // in this table.
518
+ //
519
+ // We generally presume schema changes are rare. So new rows
520
+ // always use the minimum shape. Only when growing existing
521
+ // rows do we over-alloc, and that logic is contained entirely
522
+ // within cond_grow_pending_rows.
523
+ prow_shape_t get_minimum_prow_shape() const
524
+ {
525
+ prow_shape_t shape;
526
+ shape.idx_count = indexes_by_position.limit();
527
+ shape.column_bytes = prow_byte_limit;
528
+ return shape;
529
+ }
530
+
531
+ void cond_grow_pending_rows(const prow_shape_t &_ss)
532
+ {
533
+ // We generally presume schema changes are rare. So until
534
+ // we see the first schema change, we do not over-alloc.
535
+ prow_shape_t newshape = prow_shape_t::compute_reserve_shape(get_minimum_prow_shape());
536
+
537
+ newshape.idx_count = MAX(newshape.idx_count, _ss.idx_count);
538
+ newshape.column_bytes = MAX(newshape.column_bytes, _ss.column_bytes);
539
+
540
+ pitr_t i = live_pending.begin();
541
+ pitr_t end = live_pending.end();
542
+ while (i != end) {
543
+ pending_row_t *pr = &(*i);
544
+ // No point growing deleted rows.
545
+ OINKY_ASSERT(!pr->ls.is_deleted());
546
+ // Iterator may have to be reset if the object gets reallocated.
547
+ pr = pr->cond_grow_self(_ss, newshape);
548
+ // The row may have been replaced, invalidating our iterator.
549
+ i = row_tree_t::s_iterator_to(*pr);
550
+ ++i;
551
+ }
552
+ }
553
+
554
+ void link_row_indexes(pending_row_t *row, bool skip_fixed_rowcheck)
555
+ {
556
+ try {
557
+ index_positional_itr_t end = indexes_by_position.end();
558
+ for (index_positional_itr_t i = indexes_by_position.begin();i != end; ++i) {
559
+ if (*i) {
560
+ (*i)->link_pending(row, skip_fixed_rowcheck);
561
+ }
562
+ }
563
+ } catch (...) {
564
+ // This routine is only intended to be used when we KNOW the
565
+ // index-insertions will succeed. SP rollback, update-rollback, etc.
566
+ OINKY_ASSERT(false);
567
+ throw;
568
+ }
569
+ }
570
+ void unlink_row_indexes(pending_row_t *row)
571
+ {
572
+ index_positional_itr_t end = indexes_by_position.end();
573
+ for (index_positional_itr_t i = indexes_by_position.begin();i != end; ++i) {
574
+ if (*i) {
575
+ (*i)->unlink_pending(row);
576
+ }
577
+ }
578
+ }
579
+
580
+ public:
581
+ //############## Select
582
+ typedef column_selector_template<table_ctx> column_selector_t;
583
+
584
+
585
+ typedef symmetric_iterator<fixed_itr_t> fixed_range_t;
586
+ typedef symmetric_iterator<pitr_t> pending_range_t;
587
+
588
+ struct merge_itr_compare
589
+ {
590
+ // We must always place the fixed before the pending, because we need
591
+ // to iterate over dead fixed iterators, but not by more than one place.
592
+ // Using a generic filtering iterator over the fixed set doesn't allow
593
+ // us to build stable cursors on top of it.
594
+ template<typename INT>
595
+ int compare(INT left, INT right) const {
596
+ if (left < right) return -1;
597
+ if (left > right) return 1;
598
+ return 0;
599
+ }
600
+ int operator()(const fixed_itr_t &left, const fixed_itr_t &right) const {
601
+ // These are both just row indices
602
+ return compare(*left, *right);
603
+ }
604
+ template<typename PITR>
605
+ int operator()(const PITR &left, const PITR &right) const {
606
+ return compare(left->sequence_number, right->sequence_number);
607
+ }
608
+ template<typename PITR>
609
+ int operator()(const PITR &left, const fixed_itr_t &right) const {
610
+ return -(*this)(right,left);
611
+ }
612
+ template<typename PITR>
613
+ int operator()(const fixed_itr_t &left, const PITR &right) const {
614
+ int k = compare((uint64) *left, right->sequence_number);
615
+ if (k) {
616
+ return k;
617
+ }
618
+ OINKY_ASSERT(left.is_deleted());
619
+ return -1;
620
+ }
621
+ };
622
+
623
+ struct key_pitr_compare
624
+ {
625
+ bool operator()(uint64 left, const pending_row_t &right) const {
626
+ return merge_itr_compare().compare(left, right.sequence_number) < 0;
627
+ }
628
+ bool operator()(const pending_row_t &left, uint64 right) const {
629
+ return merge_itr_compare().compare(left.sequence_number, right) < 0;
630
+ }
631
+ };
632
+
633
+ struct pending_to_row {
634
+ pending_row_t *operator()(const pitr_t &pitr) const {
635
+ pending_row_t &pr(*pitr);
636
+ return &pr;
637
+ }
638
+ template<typename SAFE_PITR>
639
+ pending_row_t *operator()(const SAFE_PITR &pitr) const {
640
+ return pitr.to_row();
641
+ }
642
+ };
643
+ typedef typename iterator_builder<
644
+ table_ctx,
645
+ fixed_itr_t,
646
+ fixed_range_t,
647
+ pitr_t,
648
+ pending_range_t,
649
+ merge_itr_compare,
650
+ pending_to_row>::iterator row_iterator;
651
+
652
+ //
653
+ // TBD:
654
+ // A MUCH more efficient way of enumerating would iterate the fixed
655
+ // iterator in parallel with the deleted iterator. Sort of a subtracting-
656
+ // merge iterator. The current implementation does a random-lookup in the
657
+ // deleted list for each row. The reason I haven't fixed it is that the
658
+ // behavior is unavoidable for index enumerations. If it's that big a
659
+ // problem for table enumerations, then a better solution is needed for
660
+ // both. I'm just hoping it doesn't actually matter at all.
661
+ //
662
+
663
+ row_iterator row_iterator_at(const pitr_t &pi) {
664
+ // This is only used to create an iterator aligned with a particular
665
+ // pending row.
666
+ OINKY_ASSERT( pi != live_pending.end() );
667
+ row_iterator itr(
668
+ this,
669
+ fixed_range_t(fixed.itr_at(pi->sequence_number), fixed.begin(), fixed.end()),
670
+ pending_range_t(pi, live_pending.begin(), live_pending.end()));
671
+ // The fixed itr of the same position should be deleted, or nonexistent.
672
+ OINKY_ASSERT(itr.pending_active());
673
+ return itr;
674
+ }
675
+
676
+ row_iterator rows_begin() {
677
+ return row_iterator(
678
+ this,
679
+ fixed_range_t(fixed.begin(), fixed.begin(), fixed.end()),
680
+ pending_range_t(live_pending.begin(), live_pending.begin(), live_pending.end()));
681
+ }
682
+
683
+ row_iterator rows_end() {
684
+ return row_iterator(
685
+ this,
686
+ fixed_range_t(fixed.end(), fixed.begin(), fixed.end()),
687
+ pending_range_t(live_pending.end(), live_pending.begin(), live_pending.end()));
688
+ }
689
+
690
+ uint32 row_count() const {
691
+ uint32 fixed_count = fixed_dropped() ?
692
+ 0 :
693
+ fixed.tabular.row_count - pending_deletes.size();
694
+ return fixed_count + live_pending.size();
695
+ }
696
+
697
+ private:
698
+ void uncreate_idx(index_ctx *idx, sp_marker_t sp)
699
+ {
700
+ indexes_by_position.uncreate(idx->index_idx, idx);
701
+ // The index has to unlink itself from live rows.
702
+ idx->uncreate(sp);
703
+ mark_ixset_change();
704
+ }
705
+ void undrop_idx(index_ctx *idx, sp_marker_t sp)
706
+ {
707
+ indexes_by_position.undrop(idx->index_idx, idx);
708
+ // The index has to relink itself to all live rows.
709
+ idx->undrop(sp);
710
+ mark_ixset_change();
711
+ }
712
+ // This means that we are undoing both the create and drop of the
713
+ // index. For us that's a no-op.
714
+ void uncreate_drop_idx(index_ctx *idx, sp_marker_t sp)
715
+ {}
716
+
717
+ void rollback_prow_data_allocation(column_ctx *col)
718
+ {
719
+ // Roll back our storage limits.
720
+ if (col->is_bit()) {
721
+ // The bit-byte-limit (bit_loc) is on a separate stack from
722
+ // the byte-limit (byte_limit). We have to roll back the
723
+ // bit limit only.
724
+ if (col->prow_byte_offset <= prow_bit_loc)
725
+ {
726
+ // Under byte_offset equality, we need a more strict test
727
+ // for moving the bit limit. If the byte offset is less,
728
+ // then we always move the bit limit.
729
+ if ((col->prow_byte_offset < prow_bit_loc) || (col->prow_bit_offset < prow_bit_limit)) {
730
+ prow_bit_limit = col->prow_bit_offset;
731
+ }
732
+
733
+ prow_bit_loc = col->prow_byte_offset;
734
+
735
+ if (col->prow_bit_offset == 0) {
736
+ // The stacks align for bit-columns where the bit_offset
737
+ // is zero. In this case, the bit allocation was done at
738
+ // the end of the byte stack, so we can roll that back also
739
+ // (as always, assuming it hasn't been done already).
740
+ if (col->prow_byte_offset < prow_byte_limit) {
741
+ prow_byte_limit = col->prow_byte_offset;
742
+ } else {
743
+ // Equality here should NEVER occur.
744
+ OINKY_ASSERT(col->prow_byte_offset != prow_byte_limit);
745
+ }
746
+ }
747
+ }
748
+ } else {
749
+ if (col->prow_byte_offset < prow_byte_limit) {
750
+ prow_byte_limit = col->prow_byte_offset;
751
+ }
752
+ }
753
+ }
754
+
755
+ void uncreate_col(column_ctx *col, sp_marker_t sp)
756
+ {
757
+ // Roll back our storage limits.
758
+ rollback_prow_data_allocation(col);
759
+ mark_colset_change();
760
+ }
761
+ void undrop_col(column_ctx *col, sp_marker_t sp)
762
+ {
763
+ mark_colset_change();
764
+ }
765
+ // Roll back both the create and drop, in reverse order. The undrop
766
+ // is nothing for us. We just have to roll back the allocation.
767
+ void uncreate_drop_col(column_ctx *col, sp_marker_t sp)
768
+ {
769
+ rollback_prow_data_allocation(col);
770
+ }
771
+
772
+ // This deletes live rows that were created since the target SP
773
+ void uncreate_live_rows(sp_marker_t tsp)
774
+ {
775
+ pitr_t pend = live_pending.end();
776
+ for (
777
+ pitr_t pi = live_pending.begin();
778
+ pi != pend;
779
+ //NOTHING
780
+ )
781
+ {
782
+ pending_row_t *row(&(*pi));
783
+ OINKY_ASSERT(row->ls.delete_sp == 0);
784
+
785
+ // If the row is newer than the SP, delete it outright.
786
+ if (row->ls.insert_sp > tsp) {
787
+ ++pi;
788
+ unlink_row_indexes(row);
789
+
790
+ // Remove the core list entry.
791
+ live_pending.erase(row_tree_t::s_iterator_to(*row));
792
+ row->try_delete();
793
+ continue;
794
+ }
795
+ // next.
796
+ ++pi;
797
+ }
798
+ }
799
+
800
+ // This permanently deletes erased or overwritten rows that were
801
+ // originally created after the target SP.
802
+ void uncreate_dead_rows(sp_marker_t tsp)
803
+ {
804
+ pending_row_t **parent = &dead_pending;
805
+ while (*parent) {
806
+ pending_row_t *row = *parent;
807
+
808
+ OINKY_ASSERT(row->ls.delete_sp != 0);
809
+
810
+ // If the row is newer than the SP, delete it outright.
811
+ if (row->ls.insert_sp > tsp) {
812
+ // Remove this item from the list.
813
+ *parent = *(pending_row_t **)row;
814
+ row->try_delete();
815
+ continue;
816
+ } else if (row->ls.delete_sp <= tsp) {
817
+ // We insert into the dead list at the front, during row delete.
818
+ // During SP rollback, we can stop as soon as we encounter a
819
+ // record which was deleted before the SP target.
820
+ break;
821
+ } else {
822
+ // We can't early-out in this case. There may be elements
823
+ // beyond this one that need to be deallocated.
824
+ parent = (pending_row_t **)row;
825
+ }
826
+ }
827
+ }
828
+
829
+ // This moves rows from the dead list back to the live list. The
830
+ // dead-list is reverse-ordered by deletion SP, so we undelete everything
831
+ // up to the limit and then stop. No need to go to the end.
832
+ void undelete_dead_rows(sp_marker_t tsp)
833
+ {
834
+ while (dead_pending) {
835
+ pending_row_t *row = dead_pending;
836
+
837
+ OINKY_ASSERT(row->ls.insert_sp <= tsp);
838
+
839
+ // If the row is newer than the SP, delete it outright.
840
+ if (row->ls.delete_sp > tsp) {
841
+ // Undelete.
842
+ row->ls.delete_sp = 0;
843
+
844
+ // Remove from the dead-list.
845
+ // Extract the next item and then reinit the row hook, which we
846
+ // corrupted by sticking it on our list.
847
+ dead_pending = *(pending_row_t **)row;
848
+ row_tree_hook *hook = row;
849
+ ::new(hook) row_tree_hook();
850
+
851
+ // Put it on the live list.
852
+ std::pair<pitr_t,bool> ires = live_pending.insert_unique(*row);
853
+ OINKY_ASSERT(ires.second);
854
+
855
+ // Put it back in the indexes.
856
+ link_row_indexes(row, true);
857
+
858
+ continue;
859
+ } else {
860
+ // Early out because dead-list is sorted on delete_sp.
861
+ break;
862
+ }
863
+ }
864
+ }
865
+
866
+ public:
867
+ // We rollback to a target sp. Anything marked later than this sp gets
868
+ // deleted. rolling back to 0 deletes anything marked >= 1, which is
869
+ // everything but the original data.
870
+ //
871
+ // Rolling back to a savepoint never causes allocation.
872
+ void sp_rollback(sp_marker_t tsp)
873
+ {
874
+ // Nothing within this block should raise. The catch is just
875
+ // to confirm that.
876
+ try {
877
+ // First we un-insert rows, then we modify the indexes, and then
878
+ // we un-delete rows. We do things in this order to
879
+ // avoid generating index uniqueness violations with intermediate
880
+ // row state which never existed. This is a challenge because
881
+ // row rollback and index are processed in separate batches, rather
882
+ // than in strictly reverse-chronological order.
883
+ uncreate_live_rows(tsp);
884
+ // And do the same for the dead list.
885
+ uncreate_dead_rows(tsp);
886
+
887
+ // Now un-delete the fixed entries.
888
+ pending_deletes_t::iterator dend = pending_deletes.end();
889
+ for (
890
+ pending_deletes_t::iterator di = pending_deletes.begin();
891
+ di != dend;
892
+ //NOTHING
893
+ )
894
+ {
895
+ if (di->second > tsp) {
896
+ pending_deletes_t::iterator tmp = di;
897
+ ++di;
898
+ pending_deletes.erase(tmp);
899
+ } else {
900
+ ++di;
901
+ }
902
+ }
903
+ // Conditionally un-explode the fixed data.
904
+ if (fixed_dropped_sp > tsp) {
905
+ fixed_dropped_sp = 0;
906
+ }
907
+
908
+ // Now that we have deleted all the pending rows (and unlinked their
909
+ // index hooks) we can uncreate/undrop any indexes that have been
910
+ // created/dropped since the SP.
911
+ indexes.sp_rollback(
912
+ tsp,
913
+ boost::bind(&table_ctx::uncreate_idx, this, _1, _2),
914
+ boost::bind(&table_ctx::undrop_idx, this, _1, _2),
915
+ boost::bind(&table_ctx::uncreate_drop_idx, this, _1, _2));
916
+
917
+ // Now rollback the indexes themselves. This includes the set that
918
+ // has been undropped, but not those that have been uncreated. It
919
+ // also includes those which have been neither undropped nor uncreated.
920
+ index_positional_itr_t idxend = indexes_by_position.end();
921
+ for (index_positional_itr_t idxi = indexes_by_position.begin(); idxi != idxend; ++idxi) {
922
+ if (*idxi) {
923
+ (*idxi)->sp_rollback(tsp);
924
+ }
925
+ }
926
+
927
+ // Bring back deleted rows.
928
+ undelete_dead_rows(tsp);
929
+
930
+ // Now undrop/uncreate columns.
931
+ // This can really be done at any point in the rollback process.
932
+ // It's orthogonal to row/index rollback, since none of those
933
+ // processes touch the column contexts.
934
+ //
935
+ // Unlike the index rollback, these callbacks do not update
936
+ // cols_by_position. We just rebuild that from scratch after
937
+ // the columns are all restored.
938
+ columns.sp_rollback(
939
+ tsp,
940
+ boost::bind(&table_ctx::uncreate_col, this, _1, _2),
941
+ boost::bind(&table_ctx::undrop_col, this, _1, _2),
942
+ boost::bind(&table_ctx::uncreate_drop_col, this, _1, _2));
943
+
944
+ // Now we reassign the colmap from scratch.
945
+ rebuild_cols_by_position();
946
+ } catch (...) {
947
+ // An exception here is an internal bug.
948
+ //
949
+ // Nothing should throw within this block. We're not allocating
950
+ // memory, and theoretically we're not creating any state which
951
+ // didn't exist before. Furthermore, we do all the deletions
952
+ // prior to all the insertions, so we should not violate any
953
+ // uniqueness constraints during the intermediate states.
954
+ OINKY_ASSERT(false);
955
+ throw;
956
+ }
957
+ }
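sp_rollback above delegates the per-row work to uncreate_live_rows, uncreate_dead_rows, and undelete_dead_rows, which together implement one decision table over a row's two lifespan markers (ls.insert_sp and ls.delete_sp). The standalone function below restates that table; the type and enum names are invented for the illustration, but the conditions mirror those three member functions.

#include <stdint.h>

// Fate of a pending row under sp_rollback(tsp), given its lifespan markers.
// delete_sp == 0 means the row is still on the live list.
enum rollback_action { keep_live, free_row, keep_dead, restore_to_live };

inline rollback_action pending_row_fate(uint64_t insert_sp,
                                        uint64_t delete_sp,
                                        uint64_t tsp)
{
    if (insert_sp > tsp) {
        // Inserted after the target SP: destroyed outright, whether it is
        // currently on the live list or the dead list.
        return free_row;
    }
    if (delete_sp == 0) {
        return keep_live;        // untouched by the rollback
    }
    if (delete_sp > tsp) {
        return restore_to_live;  // undelete_dead_rows relinks it
    }
    return keep_dead;            // deleted at or before tsp: stays deleted
}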
958
+
959
+ private:
960
+ void rebuild_cols_by_position()
961
+ {
962
+ cols_by_name_itr_t cn_i = columns.by_name().begin();
963
+ cols_by_name_itr_t cn_end = columns.by_name().end();
964
+ cols_by_position.resize(std::distance(cn_i, cn_end));
965
+ column_ctx **cbp_i = cols_by_position.begin();
966
+ for (;cn_i != cn_end; ++cn_i, ++cbp_i) {
967
+ *cbp_i = *cn_i;
968
+ }
969
+ }
970
+
971
+ void erase_pending(pending_row_t *row, bool unlink_all)
972
+ {
973
+ // Should not yet be deleted.
974
+ OINKY_ASSERT(row->ls.insert_sp > 0);
975
+ OINKY_ASSERT(row->ls.delete_sp == 0);
976
+
977
+ // Double-delete is a very dangerous misuse by the caller, likely
978
+ // caused by using an invalid iterator. It's one of the few such
979
+ // usage errors we check for even in production builds.
980
+ if (row->ls.delete_sp != 0) {
981
+ throw_error(invalid_argument());
982
+ }
983
+
984
+ // Set the delete SP and optionally unlink index entries.
985
+ row->ls.delete_sp = db->active_sp();
986
+ OINKY_ASSERT(row->ls.insert_sp <= row->ls.delete_sp);
987
+ if (unlink_all) {
988
+ unlink_row_indexes(row);
989
+
990
+ // Remove from the active-list.
991
+ live_pending.erase(row_tree_t::s_iterator_to(*row));
992
+ }
993
+
994
+ // If we insert/delete in the same savepoint, we can remove aggressively.
995
+ if (row->ls.insert_sp == row->ls.delete_sp) {
996
+ row->try_delete();
997
+ return;
998
+ }
999
+
1000
+ // Otherwise, we'll keep the row around on the dead-list, so
1001
+ // we can rollback if we need to.
1002
+ *(pending_row_t **)row = dead_pending;
1003
+ dead_pending = row;
1004
+ }
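The dead list managed by erase_pending above needs no extra allocation: it reuses the first pointer-sized bytes of the (already unlinked) row as the next link, and undelete_dead_rows, earlier in this file, later repairs the clobbered tree hook with placement new. The fragment below restates that intrusive push/pop idiom on a generic node type; it is an illustration of the pattern, not code from this header.

#include <cstddef>

// A stand-in node whose leading bytes can be reused as a link, as the
// pending rows are reused here.
struct node { void *storage[4]; };

// Push: overwrite the head of the node's storage with the current list head.
inline void dead_list_push(node *&head, node *n)
{
    *(node **) n = head;
    head = n;
}

// Pop: read the link back out; the caller must then re-initialize the
// clobbered bytes (the header does this with placement new on the row hook).
inline node *dead_list_pop(node *&head)
{
    node *n = head;
    if (n) { head = *(node **) n; }
    return n;
}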
1005
+
1006
+ void erase_fixed(row_idx_t offset)
1007
+ {
1008
+ // Should not yet be deleted.
1009
+ OINKY_ASSERT(pending_deletes.find(offset) == pending_deletes.end());
1010
+ // Where asserts are not active, but there is still a usage error
1011
+ // causing a repeated delete, this will not alter the delete timestamp.
1012
+ // So a sp rollback to before current will not un-delete, which is
1013
+ // the desired behavior. Redundant-deletes are consistent no-ops.
1014
+ pending_deletes.insert(std::make_pair(offset, db->active_sp()));
1015
+ }
1016
+ void unerase_fixed(row_idx_t offset)
1017
+ {
1018
+ // Should be deleted.
1019
+ pending_deletes_t::iterator i = pending_deletes.find(offset);
1020
+ OINKY_ASSERT(i != pending_deletes.end());
1021
+ pending_deletes.erase(i);
1022
+ }
1023
+
1024
+ public:
1025
+ // This invalidates the iterator, and any other iterator to this object,
1026
+ // obviously.
1027
+ void erase(const row_iterator &pos)
1028
+ {
1029
+ if (pos.pending_active()) {
1030
+ erase_pending(&(*pos.pending()), true);
1031
+ } else {
1032
+ erase_fixed(*pos.fixed());
1033
+ }
1034
+ }
1035
+
1036
+ // Replace the row identified by itr with a row defined by the new values.
1037
+ // All existing iterators to this row are invalidated. The parameter iterator
1038
+ // gets reassigned to the new row. Iterators to any other row are unaffected.
1039
+ template<typename SEQ_T>
1040
+ void update_row(row_iterator &itr, const column_selector_t &cols, const SEQ_T &values)
1041
+ {
1042
+ if (itr.pending_active()) {
1043
+ pitr_t pitr = itr.pending();
1044
+ pending_row_t *row = update_dynamic_row(&(*pitr), cols, values);
1045
+ itr = row_iterator_at(row_tree_t::s_iterator_to(*row));
1046
+ } else {
1047
+ fixed_itr_t fitr = itr.fixed();
1048
+ itr = update_fixed_row(*fitr, cols, values);
1049
+ }
1050
+ }
1051
+
1052
+ private:
1053
+ template<typename SEQ_T>
1054
+ pending_row_t *update_dynamic_row(pending_row_t *row, const column_selector_t &cols, const SEQ_T &values)
1055
+ {
1056
+ // Verify that the selector is still current.
1057
+ cols.check_valid(this);
1058
+
1059
+ try {
1060
+ // First unlink the existing row.
1061
+ unlink_row_indexes(row);
1062
+
1063
+ // If we can reuse the row, then we will. Otherwise, we'll
1064
+ // simply call insert_row to do the work of creating
1065
+ // and initializing a new one.
1066
+ //
1067
+ // We can only reuse the row if it's large enough, and if the
1068
+ // sp hasn't advanced. Also, if there is no existing cursor
1069
+ // on the row.
1070
+ if ((row->ls.insert_sp == db->active_sp()) &&
1071
+ (row->get_shape().column_bytes >= prow_byte_limit) &&
1072
+ (row->crs_ref_count() == 0))
1073
+ {
1074
+ // This will save the values and insert the row into each
1075
+ // of the indexes, based on these new values. If insertion
1076
+ // fails, it will unwind all index insertions and restore
1077
+ // the original column values to the row.
1078
+ //
1079
+ // We will then restore the previous index links in the
1080
+ // exception handler below.
1081
+ store_and_index_row_values(row, true, cols, values);
1082
+
1083
+ // Return the original row.
1084
+ return row;
1085
+ } else {
1086
+ pitr_t hint = row_tree_t::s_iterator_to(*row);
1087
+ pitr_t old = hint;
1088
+ ++hint;
1089
+ live_pending.erase(old);
1090
+
1091
+ try {
1092
+ // This can fail due to allocation error, uniqueness constraint, etc...
1093
+ pitr_t pi = insert_row(
1094
+ cols,
1095
+ values,
1096
+ hint,
1097
+ boost::bind(&table_ctx::fill_row_clone_pending,this,row,_1),
1098
+ &row->sequence_number
1099
+ );
1100
+
1101
+ // Preserve the sequence number, since this is the same logical row.
1102
+ pending_row_t &nr = *pi;
1103
+ OINKY_ASSERT(nr.sequence_number == row->sequence_number);
1104
+
1105
+ // Erase the old row. Do NOT unlink the index entries
1106
+ // or the live_pending links again.
1107
+ //
1108
+ // This will preserve the row on the deleted list if the SP
1109
+ // has moved since it was inserted.
1110
+ erase_pending(row, false);
1111
+
1112
+ // Return the new row.
1113
+ return &nr;
1114
+ } catch (...) {
1115
+ pitr_t p = live_pending.insert_unique(hint, *row);
1116
+ OINKY_ASSERT(&(*p) == row);
1117
+ throw;
1118
+ }
1119
+ }
1120
+ } catch (...) {
1121
+ // Either store_and_index_row_values's index insertion or
1122
+ // insert_row's allocation/index insertion raised.
1123
+ //
1124
+ // The old row data remains valid. Simply restore the old row hooks.
1125
+ link_row_indexes(row, true);
1126
+ throw;
1127
+ }
1128
+ }
1129
+
1130
+ template<typename SEQ_T>
1131
+ row_iterator update_fixed_row(row_idx_t fixedpos, const column_selector_t &cols, const SEQ_T &values)
1132
+ {
1133
+ try {
1134
+ // We erase the existing row before trying to insert, to avoid
1135
+ // spurious uniqueness violations.
1136
+
1137
+ // This allocation can fail.
1138
+ erase_fixed(fixedpos);
1139
+
1140
+ // This can fail due to allocation error, uniqueness constraint, etc...
1141
+ uint64 seq = (uint64) fixedpos;
1142
+ pitr_t pi = insert_row(
1143
+ cols,
1144
+ values,
1145
+ live_pending.begin(),
1146
+ boost::bind(&table_ctx::fill_row_clone_fixed, this, fixedpos, _1),
1147
+ &seq
1148
+ );
1149
+ return row_iterator_at(pi);
1150
+ } catch (...) {
1151
+ // Restore previous state.
1152
+ unerase_fixed(fixedpos);
1153
+ throw;
1154
+ }
1155
+ }
1156
+
1157
+ private:
1158
+ template<typename SEQ_T>
1159
+ void store_and_index_row_values(pending_row_t *row, bool save_values, const column_selector_t &cols, const SEQ_T &values)
1160
+ {
1161
+ uint32 save_bytes = row->get_shape().column_bytes;
1162
+ void *saved_values = NULL;
1163
+ if (save_values) {
1164
+ saved_values = alloca(save_bytes);
1165
+ memcpy(saved_values, row->column_data(), save_bytes);
1166
+ }
1167
+
1168
+ index_positional_itr_t begin = indexes_by_position.begin();
1169
+ index_positional_itr_t end = indexes_by_position.end();
1170
+ index_positional_itr_t i = begin;
1171
+
1172
+ try {
1173
+ // Now we'll iterate through the row values as passed to us and
1174
+ // check the type/save the value.
1175
+ typename SEQ_T::itr_t rvi = values.begin();
1176
+ const column_ctx *const *ci = cols.colrefs;
1177
+ for (; rvi != values.end(); ++rvi, ++ci) {
1178
+ OINKY_ASSERT(*ci);
1179
+ const column_ctx &col(**ci);
1180
+ // The given value should be variant.
1181
+ variant_cv_t cv(*rvi);
1182
+ // Save the value. This does a type-check/cast too.
1183
+ row->set_value(col, safe_cv_t(col.type(), cv, &db->userstrings));
1184
+ }
1185
+
1186
+ // Insert a row for each index.
1187
+ for (i = begin; i != end; ++i) {
1188
+ if (*i) {
1189
+ (*i)->link_pending(row, false);
1190
+ }
1191
+ }
1192
+ } catch (...) {
1193
+ // On exception, we must remove the row from each index.
1194
+ for (;i != begin;) {
1195
+ // The exception was for the current i position. So we
1196
+ // advance to the previous.
1197
+ --i;
1198
+ if (*i) {
1199
+ (*i)->unlink_pending(row);
1200
+ }
1201
+ }
1202
+
1203
+ // Restore original values, if saved.
1204
+ if (saved_values) {
1205
+ memcpy(row->column_data(), saved_values, save_bytes);
1206
+ }
1207
+
1208
+ // Pass the exception to caller.
1209
+ throw;
1210
+ }
1211
+ }
1212
+
1213
+ public:
1214
+ void fill_row_defaults(pending_row_t *target) const
1215
+ {
1216
+ // Now assign to each column the default value, since the
1217
+ // row given to us by the caller is not complete.
1218
+ cols_positional_itr_t ci = cols_by_position.begin();
1219
+ cols_positional_itr_t endcols = cols_by_position.end();
1220
+ for (;ci != endcols;++ci) {
1221
+ target->set_value(**ci, (*ci)->default_value);
1222
+ }
1223
+ }
1224
+
1225
+ void fill_row_clone_pending(const pending_row_t *source, pending_row_t *target) const
1226
+ {
1227
+ // cp_bytes are the amount currently in use. Either row may be
1228
+ // over-allocated.
1229
+ uint32 cp_bytes = get_minimum_prow_shape().column_bytes;
1230
+ OINKY_ASSERT(target->get_shape().column_bytes >= cp_bytes);
1231
+ OINKY_ASSERT(source->get_shape().column_bytes >= cp_bytes);
1232
+
1233
+ memcpy(target->column_data(), source->column_data(), cp_bytes);
1234
+ }
1235
+
1236
+ void fill_row_clone_fixed(row_idx_t fixedpos, pending_row_t *target) const
1237
+ {
1238
+ uint32 cp_bytes = get_minimum_prow_shape().column_bytes;
1239
+ OINKY_ASSERT(target->get_shape().column_bytes <= cp_bytes);
1240
+
1241
+ // We basically just explode the fixed row into the target, and then
1242
+ // modify the exploded row.
1243
+ fixed.template each_raw_value<explode_callbacks>(
1244
+ cols_by_position.begin(),
1245
+ cols_by_position.end(),
1246
+ fixedpos,
1247
+ target);
1248
+ }
1249
+
1250
+ template<typename SEQ_T, typename TEMPLATE>
1251
+ pitr_t insert_row(
1252
+ const column_selector_t &cols,
1253
+ const SEQ_T &values,
1254
+ const pitr_t &hint,
1255
+ TEMPLATE templatefn,
1256
+ const uint64 *sequence_number = NULL)
1257
+ {
1258
+ // Verify that the selector is still current.
1259
+ cols.check_valid(this);
1260
+
1261
+ // Verify that we have enough values.
1262
+ if (cols.colcount != values.size()) {
1263
+ throw_error(invalid_argument());
1264
+ }
1265
+
1266
+ // This will create the pending-row object with sufficient column
1267
+ // entries and index hooks.
1268
+ pending_row_t *row = pending_row_t::make_new(this);
1269
+
1270
+ // Try-catch to deallocate.
1271
+ // Can't use auto-ptr because delete is private.
1272
+ try {
1273
+ row->ls.insert_sp = db->active_sp();
1274
+
1275
+ // Store the default values in the row. (These can come from
1276
+ // the column-defaults, or from the row we are overwriting).
1277
+ templatefn(row);
1278
+
1279
+ // Assign the row the given sequence number, or a new one.
1280
+ if (sequence_number) {
1281
+ row->sequence_number = *sequence_number;
1282
+ } else {
1283
+ row->sequence_number = insert_sequence_limit;
1284
+ ++insert_sequence_limit;
1285
+ }
1286
+
1287
+ // This will raise an exception if any of the indexes can't be
1288
+ // joined due to a uniqueness constraint. It is guaranteed to
1289
+ // be complete either way. All indexes will be joined or none.
1290
+ //
1291
+ // We do this FIRST because it's the only thing that can fail, and
1292
+ // so we don't have to worry about undoing anything else if it does.
1293
+ store_and_index_row_values(row, false, cols, values);
1294
+
1295
+ // Nothing further will fail.
1296
+
1297
+ // Ordering is important here. We don't have to undo this
1298
+ // in the exception handler if it's the last thing we do.
1299
+ pitr_t ires = live_pending.insert_unique(hint, *row);
1300
+ OINKY_ASSERT(&(*ires) == row);
1301
+
1302
+ // Return an iterator to the new row.
1303
+ return ires;
1304
+ } catch (...) {
1305
+ row->try_delete();
1306
+ throw;
1307
+ }
1308
+ }
1309
+
1310
+
1311
+ struct safe_tbl_cursor_host
1312
+ {
1313
+ table_ctx *tbl;
1314
+ safe_tbl_cursor_host(table_ctx *_tbl) : tbl(_tbl) {}
1315
+ safe_tbl_cursor_host() : tbl(NULL) {}
1316
+
1317
+ bool operator==(const safe_tbl_cursor_host &other) const {
1318
+ return tbl == other.tbl;
1319
+ }
1320
+ bool operator!=(const safe_tbl_cursor_host &other) const {
1321
+ return tbl != other.tbl;
1322
+ }
1323
+
1324
+ allocator_t &allocator() const { return tbl->db->allocator; }
1325
+
1326
+ table_ctx *table() const { return tbl; }
1327
+
1328
+ void check_state() const
1329
+ {
1330
+ if (tbl->ls.is_deleted()) {
1331
+ throw_error(object_deleted());
1332
+ }
1333
+ }
1334
+
1335
+ // Alternatives are lower/upper bound. Depends on fwd/reverse
1336
+ pending_row_t *seek_pending(bool use_lower_bound, pending_row_t *row) const
1337
+ {
1338
+ OINKY_ASSERT(row->is_inactive());
1339
+
1340
+ // The table cursor just searches by sequence number.
1341
+ pitr_t h;
1342
+ if (use_lower_bound) {
1343
+ h = tbl->live_pending.lower_bound(row->sequence_number, key_pitr_compare());
1344
+ } else {
1345
+ h = tbl->live_pending.upper_bound(row->sequence_number, key_pitr_compare());
1346
+ }
1347
+ if (h == tbl->live_pending.end()) {
1348
+ return NULL;
1349
+ }
1350
+ return &(*h);
1351
+ }
1352
+
1353
+ pending_row_t *seek_prev(pending_row_t *row) const {
1354
+ pitr_t h;
1355
+ if (row == NULL) {
1356
+ h = tbl->live_pending.end();
1357
+ } else {
1358
+ h = row_tree_t::s_iterator_to(*row);
1359
+ }
1360
+
1361
+ if (h == tbl->live_pending.begin()) {
1362
+ return NULL;
1363
+ } else {
1364
+ --h;
1365
+ return &(*h);
1366
+ }
1367
+ }
1368
+ pending_row_t *seek_next(pending_row_t *row) const {
1369
+ pitr_t h;
1370
+ if (row == NULL) {
1371
+ h = tbl->live_pending.begin();
1372
+ } else {
1373
+ h = row_tree_t::s_iterator_to(*row);
1374
+ ++h;
1375
+ }
1376
+
1377
+ if (h == tbl->live_pending.end()) {
1378
+ return NULL;
1379
+ } else {
1380
+ return &(*h);
1381
+ }
1382
+ }
1383
+
1384
+ // Link/Unlink the internal cursor object from the host list.
1385
+ template<typename INTERNAL>
1386
+ void unlink_cursor(INTERNAL *i) {
1387
+ tbl->active_cursors.erase(table_ctx::active_cursors_t::s_iterator_to(*i));
1388
+ }
1389
+ template<typename INTERNAL>
1390
+ void link_cursor(INTERNAL *i) {
1391
+ tbl->active_cursors.push_back(*i);
1392
+ }
1393
+
1394
+ inline void assert_row_valid(pending_row_t *row) const {
1395
+ OINKY_ASSERT(row && (row->table() == tbl));
1396
+ }
1397
+ };
1398
+
1399
+ class safe_pending_cursor : public safe_pending_cursor_t<pending_row_t, safe_tbl_cursor_host>
1400
+ {
1401
+ typedef safe_pending_cursor_t<pending_row_t, safe_tbl_cursor_host> base_t;
1402
+ public:
1403
+ using base_t::host;
1404
+ using base_t::to_row;
1405
+
1406
+ safe_pending_cursor() {}
1407
+
1408
+ safe_pending_cursor(const safe_tbl_cursor_host &_host, pending_row_t *_row, bool _bbegin) :
1409
+ base_t(_host, _row, _bbegin)
1410
+ {}
1411
+
1412
+ const safe_pending_cursor &i() const { return *this; }
1413
+ };
1414
+
1415
+ typedef typename iterator_builder<
1416
+ table_ctx,
1417
+ fixed_itr_t,
1418
+ fixed_range_t,
1419
+ safe_pending_cursor,
1420
+ safe_pending_cursor,
1421
+ merge_itr_compare,
1422
+ pending_to_row>::iterator safe_itr_t;
1423
+
1424
+ typedef cursor_internal_t<safe_itr_t> cursor_internal;
1425
+ typedef cursor_handle_t<cursor_internal, safe_tbl_cursor_host> cursor_handle;
1426
+
1427
+ //
1428
+ // We keep track of all the active cursors on this table (excluding the
1429
+ // indices). We invalidate them when the table gets dropped or uncreated.
1430
+ //
1431
+ typedef boost::intrusive::list<cursor_internal> active_cursors_t;
1432
+ active_cursors_t active_cursors;
1433
+
1434
+ void invalidate_cursors()
1435
+ {
1436
+ while (active_cursors.begin() != active_cursors.end()) {
1437
+ typename active_cursors_t::iterator i = active_cursors.begin();
1438
+ i->invalidate();
1439
+ active_cursors.erase(i);
1440
+ }
1441
+ }
1442
+
1443
+ // Invoked when the table is dropped from the DB, or when a create is rolled back via SP.
1444
+ void on_deactivation() {
1445
+ invalidate_cursors();
1446
+ db->mark_schema_change();
1447
+ }
1448
+
1449
+ void make_column(column_ctx *target, const mstring_safe &colname, table_column_def def)
1450
+ {
1451
+ // Set the column name
1452
+ target->colname = colname;
1453
+
1454
+ // Must set the type before calling internalize.
1455
+ target->ctype = def.column_type;
1456
+ // This can throw if internalization requires an allocation.
1457
+ target->default_value = target->internalize(def.default_value, &db->userstrings);
1458
+ // This can throw if we can't obtain a reference position.
1459
+ cols_by_position.resize(cols_by_position.size() + 1);
1460
+ // ** Nothing beyond here should throw.
1461
+
1462
+ // Determine which bytes in the pending_row will be reserved for
1463
+ // these column values.
1464
+ allocate_pending_column_data(target);
1465
+
1466
+ mark_colset_change();
1467
+ }
1468
+
1469
+ void allocate_pending_column_data(column_ctx *col)
1470
+ {
1471
+ // Assign storage in the prow objects.
1472
+ if (col->is_bit()) {
1473
+ if (prow_bit_limit == 0) {
1474
+ col->prow_byte_offset = prow_bit_loc = prow_byte_limit;
1475
+ col->prow_bit_offset = 0;
1476
+ prow_byte_limit += 1;
1477
+ prow_bit_limit = 1;
1478
+ } else {
1479
+ col->prow_byte_offset = prow_bit_loc;
1480
+ col->prow_bit_offset = prow_bit_limit;
1481
+ prow_bit_limit = (prow_bit_limit + 1) & 7;
1482
+ }
1483
+ } else {
1484
+ col->prow_byte_offset = prow_byte_limit;
1485
+ col->prow_bit_offset = 0;
1486
+ prow_byte_limit += compute_pending_width(col->ctype);
1487
+ }
1488
+ }
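As a concrete trace of the slot assignment above, suppose columns are created in the order int32 a, bit b, bit c, int32 d on a fresh table, and assume compute_pending_width returns 4 for an int32 (the width is an assumption of this example, not something this header states).

// Illustrative trace of allocate_pending_column_data (int32 width assumed = 4):
//   a (int32): prow_byte_offset 0                  -> prow_byte_limit = 4
//   b (bit)  : prow_bit_limit was 0, so the bit claims a fresh byte at the
//              end of the byte stack: byte_offset 4, bit_offset 0
//              -> prow_bit_loc = 4, prow_byte_limit = 5, prow_bit_limit = 1
//   c (bit)  : packs into that same byte: byte_offset 4, bit_offset 1
//              -> prow_bit_limit = 2
//   d (int32): prow_byte_offset 5                  -> prow_byte_limit = 9
// Once eight bit columns share a byte, prow_bit_limit wraps to 0 and the next
// bit column claims a new byte, exactly as the first one did.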
1489
+
1490
+ template<typename ITR_T>
1491
+ void make_index(
1492
+ index_ctx *index,
1493
+ const mstring_safe &name,
1494
+ index_uniqueness_spec_t uniq,
1495
+ ITR_T cbegin,
1496
+ ITR_T cend)
1497
+ {
1498
+ // This must be an iterator over public column defs, which reference
1499
+ // columns by name.
1500
+ BOOST_STATIC_ASSERT((boost::is_same<
1501
+ typename boost::iterator_value<ITR_T>::type,
1502
+ index_column_def>::value));
1503
+
1504
+ // Determine which set of hooks the index will use. Any slot vacated
1505
+ // by a dropped index is fair game, but we may have to grow the map,
1506
+ // which can raise.
1507
+ index_idx_t index_idx = (index_idx_t) indexes_by_position.next_position();
1508
+
1509
+ // Clone the index definition.
1510
+ index->reinit_dynamic(
1511
+ this,
1512
+ index_idx,
1513
+ uniq,
1514
+ cbegin,
1515
+ cend);
1516
+
1517
+ index->indexname = name;
1518
+
1519
+ bool mightfail = true;
1520
+
1521
+ // We have to free that slot if anything fails.
1522
+ try {
1523
+ prow_shape_t newshape = get_minimum_prow_shape();
1524
+ if (newshape.idx_count == index_idx) {
1525
+ newshape.idx_count = (index_idx_t)(newshape.idx_count + 1);
1526
+ }
1527
+
1528
+ if (!fixed_dropped()) {
1529
+ // Before we can index fixed rows, we have to create pending objects
1530
+ // for them. Explode does this.
1531
+ // This could throw if there's an allocation failure.
1532
+ explode(prow_shape_t::compute_reserve_shape(newshape));
1533
+ }
1534
+
1535
+ // We must always check that the rows are sufficiently large if this
1536
+ // is a new extremum.
1537
+ bool newlimit = true;
1538
+ for (
1539
+ index_ctx *const*i = indexes_by_position.begin() + index_idx + 1;
1540
+ i != indexes_by_position.end();
1541
+ i++)
1542
+ {
1543
+ // Any existing larger index saves us the trouble.
1544
+ if (*i) {
1545
+ newlimit = false;
1546
+ break;
1547
+ }
1548
+ }
1549
+ if (newlimit) {
1550
+ // This could throw if there's an allocation failure.
1551
+ cond_grow_pending_rows(newshape);
1552
+ }
1553
+
1554
+ // Now link all the rows.
1555
+ //
1556
+ // This could throw if the index creates a constraint that's not
1557
+ // met by existing rows. If the index has no uniqueness constraint,
1558
+ // this should not fail.
1559
+ if (!index->defn.require_unique) {
1560
+ mightfail = false;
1561
+ }
1562
+ index->link_all();
1563
+ } catch (...) {
1564
+ OINKY_ASSERT(mightfail);
1565
+ throw;
1566
+ }
1567
+
1568
+ // Now that the index is valid, insert it.
1569
+ indexes_by_position.create(index->index_idx, index);
1570
+
1571
+ mark_ixset_change();
1572
+ }
1573
+ };
1574
+
1575
+ // This is the public/user interface to the table_ctx.
1576
+ template<typename TABLE_CTX>
1577
+ class table_handle_t
1578
+ {
1579
+ typedef TABLE_CTX table_ctx;
1580
+ table_ctx *table;
1581
+
1582
+ typedef typename table_ctx::index_ctx index_ctx;
1583
+ typedef typename table_ctx::column_selector_t column_selector_t;
1584
+ typedef typename table_ctx::indexes_t indexes_t;
1585
+ typedef typename table_ctx::columns_t columns_t;
1586
+ typedef typename table_ctx::safe_itr_t safe_itr_t;
1587
+ typedef typename table_ctx::pending_row_t pending_row_t;
1588
+ typedef typename table_ctx::safe_pending_cursor safe_pending_cursor;
1589
+ typedef typename table_ctx::safe_tbl_cursor_host safe_tbl_cursor_host;
1590
+
1591
+ public:
1592
+ table_handle_t() : table(NULL) {}
1593
+ table_handle_t(table_ctx *_table) : table(_table) {}
1594
+
1595
+ // This tells if the handle value is uninitialized. If this returns false,
1596
+ // it does NOT mean the handle is valid. It may still be an expired handle
1597
+ // or just invalid. It only tests whether the handle was initialized at all.
1598
+ bool is_null() const { return table == NULL; }
1599
+
1600
+ //###############
1601
+ // For the C API. The type is completely opaque, but an equivalent
1602
+ // handle object can be recovered from it.
1603
+ class raw_handle_t {};
1604
+ raw_handle_t *raw() const { return (raw_handle_t *) table; }
1605
+ table_handle_t(raw_handle_t *_table) : table((table_ctx *) _table) {}
1606
+
1607
+ //###############
1608
+ // Handle OPS
1609
+ db_string name() const { return table->tablename.as_string(); }
1610
+ uint32 last_colset_change() const { return table->last_colset_change; }
1611
+ uint32 last_ixset_change() const { return table->last_ixset_change; }
1612
+ uint32 last_schema_change() const {
1613
+ return MAX(last_ixset_change(), last_colset_change());
1614
+ }
1615
+
1616
+ bool table_modified_since(sp_marker_t sp) const {
1617
+ return table->table_modified_since(sp);
1618
+ }
1619
+
1620
+ typedef index_handle_t<index_ctx> index_handle;
1621
+ typedef typename column_ctx::column_handle column_handle;
1622
+ typedef typename table_ctx::row_iterator row_iterator;
1623
+ typedef typename index_ctx::iterator index_row_iterator;
1624
+ typedef typename table_ctx::cursor_handle cursor_handle;
1625
+ typedef typename table_ctx::index_positional_itr_t index_positional_itr_t;
1626
+ typedef typename table_ctx::cols_by_name_itr_t cols_by_name_itr_t;
1627
+
1628
+ // Column selectors choose sets of columns on which to operate
1629
+ // (insert/select/etc.)
1630
+ template<typename NAME_ITR_T>
1631
+ column_selector_t make_selector(NAME_ITR_T begin, NAME_ITR_T end) const {
1632
+ return make_selector(simple_sequence<NAME_ITR_T>(begin,end));
1633
+ }
1634
+
1635
+ // TBD:
1636
+ // At the moment we return this rather than exposing the constructor
1637
+ // to the user. This is more in line with the iterator model.
1638
+ // It implies the lifetime requirements: the object is generated by,
1639
+ // and therefore owned by, the table.
1640
+ template<typename SEQ_T>
1641
+ column_selector_t make_selector(const SEQ_T &cols) const {
1642
+ return column_selector_t(table, cols);
1643
+ }
1644
+
1645
+ column_selector_t all_columns() const {
1646
+ return column_selector_t::select_all(table);
1647
+ }
1648
+
1649
+ // Insert data
1650
+ template<typename VAL_ITR_T>
1651
+ row_iterator insert_row(const column_selector_t &cs, VAL_ITR_T begin, VAL_ITR_T end) const {
1652
+ return insert_row(cs,simple_sequence<VAL_ITR_T>(begin,end));
1653
+ }
1654
+ template<typename SEQ_T>
1655
+ row_iterator insert_row(const column_selector_t &cs, const SEQ_T &seq) const {
1656
+ typename table_ctx::pitr_t pi = table->insert_row(
1657
+ cs,
1658
+ seq,
1659
+ table->live_pending.end(),
1660
+ boost::bind(&table_ctx::fill_row_defaults,table,_1));
1661
+ return table->row_iterator_at(pi);
1662
+ }
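+
+        // Editorial usage sketch (hypothetical handle/value names; the exact
+        // name and value types accepted by the iterators are assumptions, not
+        // taken verbatim from this header):
+        //
+        //   const char *names[] = { "id", "name" };
+        //   column_selector_t cs = th.make_selector(names, names + 2);
+        //   variant_cv_t vals[] = { variant_cv_t(1), variant_cv_t("alice") };
+        //   row_iterator ri = th.insert_row(cs, vals, vals + 2);
+        //
+        // Columns not named in the selector are populated by
+        // fill_row_defaults, as bound in insert_row above.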
1663
+
1664
+ // Update
1665
+ template<typename VAL_ITR_T>
1666
+ void update_row(row_iterator &itr, const column_selector_t &cs, VAL_ITR_T begin, VAL_ITR_T end) const {
1667
+ update_row(itr, cs, simple_sequence<VAL_ITR_T>(begin,end));
1668
+ }
1669
+ template<typename SEQ_T>
1670
+ void update_row(row_iterator &itr, const column_selector_t &cs, const SEQ_T &newrow) const {
1671
+ table->update_row(itr, cs, newrow);
1672
+ }
1673
+
1674
+ template<typename CURSOR, typename VAL_ITR_T>
1675
+ void update_row(const CURSOR &crs, const column_selector_t &cs, VAL_ITR_T begin, VAL_ITR_T end) const {
1676
+ update_row(crs, cs, simple_sequence<VAL_ITR_T>(begin,end));
1677
+ }
1678
+ template<typename CURSOR, typename SEQ_T>
1679
+ void update_row(const CURSOR &crs, const column_selector_t &cs, const SEQ_T &newrow) const {
1680
+
1681
+ row_idx_t fixed = (row_idx_t) -1;
1682
+ pending_row_t *row = NULL;
1683
+
1684
+ // This will throw if cursor's target-row has been modified or
1685
+ // deleted since the cursor position was last set.
1686
+ crs.unpack_check_active(table, &fixed, &row);
1687
+
1688
+ if (row) {
1689
+ table->update_dynamic_row(row, cs, newrow);
1690
+ } else {
1691
+ table->update_fixed_row(fixed, cs, newrow);
1692
+ }
1693
+ }
1694
+
1695
+ // Erase row
1696
+ void erase(row_iterator &row) const {
1697
+ table->erase(row);
1698
+ }
1699
+
1700
+ template<typename CURSOR>
1701
+ void erase(const CURSOR &crs) const {
1702
+ row_idx_t fixed = (row_idx_t) -1;
1703
+ pending_row_t *row = NULL;
1704
+
1705
+ // This will throw if cursor's target-row has been modified or
1706
+ // deleted since the cursor position was last set.
1707
+ crs.unpack_check_active(table, &fixed, &row);
1708
+
1709
+ if (row) {
1710
+ table->erase_pending(row, true);
1711
+ } else {
1712
+ table->erase_fixed(fixed);
1713
+ }
1714
+ }
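+
+        // Editorial sketch of cursor-based mutation (assumes a cursor obtained
+        // from new_cursor() further below and a selector/value pair as in the
+        // insert sketch above):
+        //
+        //   cursor_handle crs = th.new_cursor(th.rows_begin());
+        //   th.update_row(crs, cs, vals, vals + 2); // throws if the target row
+        //                                           // changed since positioning
+        //   th.erase(crs);
+        //   th.delete_cursor(crs);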
1715
+
1716
+ template<typename BASE>
1717
+ class columns_accessor_spec : public BASE
1718
+ {
1719
+ table_ctx *table;
1720
+ using BASE::extract_ctx;
1721
+ public:
1722
+ columns_accessor_spec(table_ctx *_table) :
1723
+ BASE(_table->columns),
1724
+ table(_table)
1725
+ {}
1726
+
1727
+ typedef typename BASE::iterator iterator;
1728
+
1729
+ uint32 count() const { return table->cols_by_position.size(); }
1730
+
1731
+ template<typename DEFAULT_VALUE_T>
1732
+ iterator create(const db_string &colname, column_type_code_t _ct, DEFAULT_VALUE_T default_value) {
1733
+ table_column_def def(colname, _ct, variant_cv_t(default_value));
1734
+ create(def);
1735
+ return BASE::find(colname);
1736
+ }
1737
+
1738
+ iterator create(const table_column_def &def) {
1739
+ create(&def, (&def) + 1);
1740
+ return BASE::find(def.column_name);
1741
+ }
1742
+
1743
+ template<typename SEQ>
1744
+ void create(const SEQ &seq) {
1745
+ create(seq.begin(), seq.end());
1746
+ }
1747
+
1748
+ template<typename ITR>
1749
+ void create(const ITR &_begin, const ITR &_end)
1750
+ {
1751
+ BOOST_STATIC_ASSERT((boost::is_same<
1752
+ table_column_def,
1753
+ typename boost::iterator_value<ITR>::type>::value));
1754
+
1755
+ // How many columns are we adding? Allocate the context.
1756
+ uint32 bytes = 0, bits = 0, cdelta = 0;
1757
+ for (ITR ci = _begin;ci != _end; ++ci) {
1758
+ cdelta += 1;
1759
+ if (ci->column_type == column_types::Bit) {
1760
+ bits += 1;
1761
+ } else {
1762
+ if (ci->column_type > OINKY_VALUE_TYPE_MAX) {
1763
+ throw_error(invalid_argument());
1764
+ }
1765
+ bytes += compute_pending_width(ci->column_type);
1766
+ }
1767
+ }
1768
+
1769
+ // Compute the new minimum row shape.
1770
+ prow_shape_t target_shape = table->get_minimum_prow_shape();
1771
+ target_shape.column_bytes += bytes + ((bits + 7)>>3);
1772
+
1773
+ // This will just ensure allocation. It will not reassign anything.
1774
+ table->cols_by_position.reserve(table->cols_by_position.size() + cdelta);
1775
+
1776
+ // Grow the pending row objects. We have to do this regardless,
1777
+ // since we are less aggressive about reallocating rows than
1778
+ // we are about reallocating the context map.
1779
+ //
1780
+ // Do it before calling explode, which could end up creating
1781
+ // a lot more rows that would then need to be iterated through.
1782
+ table->cond_grow_pending_rows(target_shape);
1783
+
1784
+ // Alter-table causes explode. Do it if we haven't already.
1785
+ table->explode(prow_shape_t::compute_reserve_shape(target_shape));
1786
+
1787
+ // Record the current savepoint in case we need to roll back.
1788
+ sp_marker_t sp = table->db->active_sp();
1789
+
1790
+ // If there's any chance this can non-atomically fail, then
1791
+ // advance the savepoint. Such is the case if we are creating
1792
+ // more than one column.
1793
+ if (cdelta > 1) {
1794
+ table->db->new_sp();
1795
+ }
1796
+
1797
+ // Save the new column contexts so we can set default values later.
1798
+ uint32 ccount = 0;
1799
+ column_ctx **newcols = (column_ctx **) alloca(cdelta * sizeof(column_ctx *));
1800
+
1801
+ try {
1802
+ // Now ready to create columns.
1803
+ for (ITR i = _begin; i != _end; ++i, ++ccount) {
1804
+ const table_column_def &def(*i);
1805
+
1806
+ // Create the column.
1807
+ iterator colsitr = BASE::create(
1808
+ def.column_name,
1809
+ table->db->active_sp(),
1810
+ boost::bind(
1811
+ &table_ctx::make_column,
1812
+ table,
1813
+ _1,
1814
+ _2,
1815
+ def));
1816
+
1817
+ newcols[ccount] = extract_ctx(colsitr);
1818
+ }
1819
+ OINKY_ASSERT(ccount == cdelta);
1820
+
1821
+ // Now set all the default values. Everything here is safe
1822
+ // against exceptions, not that it matters.
1823
+ typename table_ctx::pitr_t ri = table->live_pending.begin();
1824
+ typename table_ctx::pitr_t rend = table->live_pending.end();
1825
+ for (;ri != rend; ++ri) {
1826
+ OINKY_ASSERT(!ri->ls.is_deleted());
1827
+ for (uint32 ci = 0; ci < cdelta; ++ci) {
1828
+ const column_ctx &cc(*newcols[ci]);
1829
+ ri->set_value(cc, cc.default_value);
1830
+ }
1831
+ }
1832
+ } catch (...) {
1833
+ table->sp_rollback(sp);
1834
+ throw;
1835
+ }
1836
+
1837
+ // At this point we've done everything but update the ordered
1838
+ // direct column map. There was no point in doing it incrementally.
1839
+ // Easier to just rebuild it now all at once.
1840
+ table->rebuild_cols_by_position();
1841
+ }
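+
+            // Editorial sketch of adding a single column with a default value.
+            // column_types::Int32 and the int32 cast are assumptions by
+            // analogy with column_types::Bit used above:
+            //
+            //   th.columns().create("age", column_types::Int32,
+            //                       variant_cv_t((int32) 0));
+            //
+            // Existing rows pick up the default value, and creating more than
+            // one column at once advances the savepoint so a partial failure
+            // rolls the whole batch back.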
1842
+
1843
+ void drop(iterator &itr)
1844
+ {
1845
+ // Get the context object.
1846
+ column_ctx *ctx = BASE::extract_ctx(itr);
1847
+
1848
+ // Check to see if any indexes reference the column. If so, we
1849
+ // can't drop the column.
1850
+ index_positional_itr_t idx = table->indexes_by_position.begin();
1851
+ index_positional_itr_t idxend = table->indexes_by_position.end();
1852
+ for (;idx != idxend; ++idx) {
1853
+ if (!*idx) continue;
1854
+ index_ctx &ix(**idx);
1855
+
1856
+ const column_ctx *const *i = ix.column_refs;
1857
+ const column_ctx *const *iend = i + ix.defn.col_count;
1858
+ for (;i != iend; ++i) {
1859
+ if (*i == ctx) {
1860
+ throw_error(object_still_referenced());
1861
+ }
1862
+ }
1863
+ }
1864
+
1865
+ // Clear to drop.
1866
+ BASE::drop(itr, table->db->active_sp());
1867
+ table->rebuild_cols_by_position();
1868
+
1869
+ table->mark_colset_change();
1870
+ }
1871
+ };
1872
+
1873
+ template<typename BASE>
1874
+ class indices_accessor_spec : public BASE
1875
+ {
1876
+ table_ctx *table;
1877
+ public:
1878
+ indices_accessor_spec(table_ctx *_table) :
1879
+ BASE(_table->indexes),
1880
+ table(_table)
1881
+ {}
1882
+
1883
+ size_t count() const { return table->indexes_by_position.live_count(); }
1884
+
1885
+ typedef typename BASE::iterator iterator;
1886
+
1887
+ template<typename ITR_T>
1888
+ iterator create(const db_string &name, index_uniqueness_spec_t uniq, ITR_T cbegin, ITR_T cend)
1889
+ {
1890
+ BOOST_STATIC_ASSERT((boost::is_same<
1891
+ typename boost::iterator_value<ITR_T>::type,
1892
+ index_column_def>::value));
1893
+
1894
+ return BASE::create(
1895
+ name,
1896
+ table->db->active_sp(),
1897
+ boost::bind(
1898
+ boost::type<void>(),
1899
+ &table_ctx::template make_index<ITR_T>,
1900
+ table,
1901
+ _1,
1902
+ _2,
1903
+ uniq,
1904
+ cbegin,
1905
+ cend));
1906
+ }
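+
+            // Editorial sketch: building a single-column index. How an
+            // index_column_def and index_uniqueness_spec_t are populated is an
+            // assumption; only the create() signature above is taken from the
+            // source:
+            //
+            //   index_column_def defs[1] = { /* column name + sort order */ };
+            //   th.indices().create("by_id", uniq /* uniqueness spec */,
+            //                       defs, defs + 1);
+            //
+            // Creation throws if a uniqueness requirement is violated by rows
+            // already in the table (see make_index above).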
1907
+
1908
+ void drop(iterator &itr)
1909
+ {
1910
+ // Must do this before we remove it from the context map.
1911
+ index_ctx *ctx = BASE::extract_ctx(itr);
1912
+ BASE::drop(itr, table->db->active_sp());
1913
+ table->indexes_by_position.drop(ctx->index_idx, ctx);
1914
+ ctx->drop(table->db->active_sp());
1915
+ table->mark_ixset_change();
1916
+ }
1917
+ };
1918
+
1919
+ // Enumerate table metadata
1920
+ typedef typename indexes_t::template set_accessor_t<index_handle, indices_accessor_spec>::type indices_accessor_t;
1921
+ indices_accessor_t indices() const { return indices_accessor_t(table); }
1922
+
1923
+ typedef typename columns_t::template set_accessor_t<column_handle, columns_accessor_spec>::type columns_accessor_t;
1924
+ columns_accessor_t columns() const { return columns_accessor_t(table); }
1925
+
1926
+ // Enumerate rows.
1927
+ row_iterator rows_begin() const {
1928
+ return table->rows_begin();
1929
+ }
1930
+
1931
+ row_iterator rows_end() const {
1932
+ return table->rows_end();
1933
+ }
1934
+
1935
+ uint32 row_count() const {
1936
+ return table->row_count();
1937
+ }
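+
+        // Editorial sketch of a full-table scan over the merged (fixed plus
+        // pending) row set exposed by the iterators above:
+        //
+        //   for (row_iterator i = th.rows_begin(); i != th.rows_end(); ++i) {
+        //       // read the row, or mutate it via update_row(i, cs, ...)
+        //   }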
1938
+
1939
+ cursor_handle new_cursor(const row_iterator &where) const {
1940
+ if (table != where.table) {
1941
+ // Invalid iterator, or iterator over a different table.
1942
+ throw_error(invalid_argument());
1943
+ }
1944
+
1945
+ return cursor_handle(safe_tbl_cursor_host(table), make_safe_itr(where));
1946
+ }
1947
+ void delete_cursor(cursor_handle &where) const {
1948
+ where.free();
1949
+ }
1950
+ void move_cursor(cursor_handle &crs, const row_iterator &where) const {
1951
+ if ((table != crs.host.tbl) ||
1952
+ (table != where.table))
1953
+ {
1954
+ // Cursor/Iterator/TbHandle do not match.
1955
+ throw_error(invalid_argument());
1956
+ }
1957
+
1958
+ crs.iterator() = make_safe_itr(where);
1959
+ }
1960
+ private:
1961
+ safe_itr_t make_safe_itr(const row_iterator &where) const
1962
+ {
1963
+ bool p_bbegin = where.pending_range().before_begin();
1964
+
1965
+ pending_row_t *p_row = where.pending_range().is_valid() ?
1966
+ typename table_ctx::pending_to_row()(where.pending()) :
1967
+ NULL;
1968
+
1969
+ return safe_itr_t(
1970
+ table,
1971
+ where.fixed_range(),
1972
+ safe_pending_cursor(safe_tbl_cursor_host(table), p_row, p_bbegin));
1973
+ }
1974
+ };
1975
+
1976
+ } //namespace Internal
1977
+ } //namespace Oinky
1978
+
1979
+
1980
+ namespace std
1981
+ {
1982
+
1983
+ template<class _Traits, class TABLE_CTX>
1984
+ inline std::basic_ostream<char, _Traits>&
1985
+ operator<<(
1986
+ std::basic_ostream<char, _Traits> &os,
1987
+ const Oinky::Internal::column_selector_template<TABLE_CTX> &selector
1988
+ )
1989
+ {
1990
+ selector.format(os);
1991
+ return os;
1992
+ }
1993
+
1994
+
1995
+ }//namespace std
1996
+