oinky 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. data/LICENSE +22 -0
  2. data/README.md +141 -0
  3. data/ext/extconf.rb +79 -0
  4. data/ext/include/oinky.h +424 -0
  5. data/ext/include/oinky.hpp +63 -0
  6. data/ext/include/oinky/nky_base.hpp +1116 -0
  7. data/ext/include/oinky/nky_core.hpp +1603 -0
  8. data/ext/include/oinky/nky_cursor.hpp +665 -0
  9. data/ext/include/oinky/nky_dialect.hpp +107 -0
  10. data/ext/include/oinky/nky_error.hpp +164 -0
  11. data/ext/include/oinky/nky_fixed_table.hpp +710 -0
  12. data/ext/include/oinky/nky_handle.hpp +334 -0
  13. data/ext/include/oinky/nky_index.hpp +1038 -0
  14. data/ext/include/oinky/nky_log.hpp +15 -0
  15. data/ext/include/oinky/nky_merge_itr.hpp +403 -0
  16. data/ext/include/oinky/nky_model.hpp +110 -0
  17. data/ext/include/oinky/nky_pool.hpp +760 -0
  18. data/ext/include/oinky/nky_public.hpp +808 -0
  19. data/ext/include/oinky/nky_serializer.hpp +1625 -0
  20. data/ext/include/oinky/nky_strtable.hpp +504 -0
  21. data/ext/include/oinky/nky_table.hpp +1996 -0
  22. data/ext/nky_lib.cpp +390 -0
  23. data/ext/nky_lib_core.hpp +212 -0
  24. data/ext/nky_lib_index.cpp +158 -0
  25. data/ext/nky_lib_table.cpp +224 -0
  26. data/lib/oinky.rb +1284 -0
  27. data/lib/oinky/compiler.rb +106 -0
  28. data/lib/oinky/cpp_emitter.rb +311 -0
  29. data/lib/oinky/dsl.rb +167 -0
  30. data/lib/oinky/error.rb +19 -0
  31. data/lib/oinky/modelbase.rb +12 -0
  32. data/lib/oinky/nbuffer.rb +152 -0
  33. data/lib/oinky/normalize.rb +132 -0
  34. data/lib/oinky/oc_builder.rb +44 -0
  35. data/lib/oinky/query.rb +193 -0
  36. data/lib/oinky/rb_emitter.rb +147 -0
  37. data/lib/oinky/shard.rb +40 -0
  38. data/lib/oinky/testsup.rb +104 -0
  39. data/lib/oinky/version.rb +9 -0
  40. data/oinky.gemspec +36 -0
  41. metadata +120 -0
data/ext/include/oinky/nky_handle.hpp
@@ -0,0 +1,334 @@
+ // This source is distributed under the terms of the MIT License. Refer
+ // to the 'LICENSE' file for details.
+ //
+ // Copyright (c) Jacob Lacouture, 2012
+
+ namespace Oinky
+ {
+ using namespace Oinky::Utils;
+ using namespace Oinky::Errors;
+
+ namespace Internal
+ {
+
+ // Internally, DB references look like this.
+ template<typename ALLOCATOR_T>
+ class db_handle_internal_t
+ {
+ public:
+     // NOTE: This MUST be defined FIRST in the class. That way it gets
+     // destroyed last. Destructors for other members depend on the allocator.
+     //
+     // Each database instance has its own private allocator. This can be
+     // accessed without any mutex cover because accesses to the database
+     // itself are externally serialized.
+     typedef ALLOCATOR_T allocator_t;
+     allocator_t allocator;
+
+     typedef db_handle_internal_t<ALLOCATOR_T> db_handle_internal;
+     typedef table_ctx_t<db_handle_internal> table_ctx;
+
+     struct tablemap_traits : default_ctx_map_traits<table_ctx, uint32, allocator_t>
+     {
+         static const mstring_safe &key_from_ctx(const table_ctx *ctx) {
+             return ctx->tablename;
+         }
+     };
+
+     // String tables.
+     meta_stringtable_t metastrings;
+     user_stringtable_t userstrings;
+
+     // Maps table name to table manager. Both static and dynamic.
+     typedef context_map_t<tablemap_traits> tables_t;
+     tables_t tables;
+
+     // Some statistics we compute in the first phase of serialization,
+     // used during the second phase.
+     struct computed_packing_data
+     {
+         uint32 total_bytes;
+         uint32 tableset_size;
+
+         // We have to remap fixed entries. In general this will be the
+         // size of the largest table with any deleted entries. If the
+         // table has no deleted entries, we have nothing to remap.
+         // We build the map itself at the beginning of the serialization
+         // phase of each table, and we do it nearly for free, as we are
+         // serializing the rows themselves. We then use it while serializing
+         // the indices, and then rebuild/reuse it for the next table.
+         uint32 max_fixed_table_remap_size;
+         db_vector<row_idx_t> row_remap_vector;
+
+         computed_packing_data() :
+             // This only grows. Should start at zero.
+             max_fixed_table_remap_size(0),
+             // We'll reinit with a real allocator if we ever use this.
+             row_remap_vector(NULL)
+         {}
+     } packing;
+
+     // Current savepoint value. Initializes to 1. Never gets decremented,
+     // even on rollback.
+     sp_marker_t _active_sp;
+     // The sp that was active at the time the last rollback was initiated
+     // (regardless of rollback target).
+     sp_marker_t _sp_active_last_rollback;
+
+     sp_marker_t active_sp() const { return _active_sp; }
+     sp_marker_t new_sp() {
+         // We increment the value, but return the previous value. That way
+         // rolling back to the value returned yields the current state.
+         sp_marker_t x = _active_sp;
+         _active_sp.increment();
+         return x;
+     }
+
+     // Each time any table's schema changes (row/index create/drop, or
+     // sp_rollback of such an action), the local count (in the table) gets
+     // updated, as does this global count. Sometimes the increment is more
+     // than one. We just guarantee that it changes.
+     uint32 last_schema_change;
+
+     static void rollback_null(table_ctx *table, sp_marker_t sp) {}
+
+     // We rollback to a target sp. Anything marked later than this sp gets
+     // deleted. Rolling back to 0 deletes anything marked >= 1, which is
+     // everything but the original data.
+     //
+     // Rolling back to a savepoint never causes allocation.
+     void sp_rollback(sp_marker_t tsp)
+     {
+         // Nothing within this block should raise. The catch is just
+         // to confirm that.
+         try {
+             _sp_active_last_rollback = _active_sp;
+
+             // We really don't have to do much when a table gets uncreated/
+             // undropped. The context map takes charge of destroying it
+             // when the time comes.
+             tables.sp_rollback(
+                 tsp,
+                 &rollback_null,
+                 &rollback_null,
+                 &rollback_null);
+
+             // Now rollback the tables themselves. Even if the table itself
+             // isn't being undropped/uncreated, it might still have a lot of
+             // work to roll back.
+             typename tables_t::by_name_iterator bnend = tables.by_name().end();
+             typename tables_t::by_name_iterator bni = tables.by_name().begin();
+             for (; bni != bnend; ++bni) {
+                 (*bni)->sp_rollback(tsp);
+             }
+         } catch (...) {
+             // An exception here is either an internal bug or some sort of
+             // heap corruption.
+             OINKY_ASSERT(false);
+             throw;
+         }
+     }
+
+     // This invalidates the handle. Invoking this prior to destruction
+     // can make destruction faster, since we stop tracking memory allocation,
+     // etc., knowing we're about to reclaim everything en masse.
+     void dismount()
+     {
+         allocator.teardown();
+     }
+
+     void reset()
+     {
+         // In general this kind of thing is dangerous in C++, because any
+         // constructor may encounter an allocation failure.
+         // In the worst case we can trigger double-destruction,
+         // but all of our destructors are defensive against that.
+         // So this is safe, as long as no-one is allowed to derive from
+         // this type.
+         this->~db_handle_internal();
+         ::new(this) db_handle_internal();
+     }
+
+     db_handle_internal_t() :
+         allocator(),
+         metastrings(&allocator),
+         userstrings(&allocator),
+         tables(&allocator, &metastrings),
+         _active_sp(1),
+         _sp_active_last_rollback(0),
+         last_schema_change(0)
+     {}
+
+     ~db_handle_internal_t()
+     {
+         dismount();
+         // Member destructors do all the actual teardown work. We simply
+         // needed to invoke dismount to optimize (no-op) deallocation
+         // beforehand.
+     }
+
+     uint32 mark_schema_change() { return last_schema_change += 1; }
+
+     // This reports on modifications of data, not structure. Creating a
+     // savepoint does not count as a modification; deleting a row does.
+     // The question is: does the operation affect serialization?
+     //
+     // This result is conservative. It may return true even when nothing
+     // has changed. If we make a change and then roll it back, this will
+     // still return true. However, this state does not persist unless the
+     // sequence of events repeats. If a new SP is created, and this is
+     // invoked with the new SP value, then it will return false, as it should.
+     bool modified_since(sp_marker_t sp) const {
+         return (_sp_active_last_rollback > sp) ||
+             tables.modified_since(
+                 sp,
+                 boost::bind(&table_ctx::table_modified_since, _1, _2));
+     }
+
+     template<typename BASE>
+     class tables_accessor_spec : public BASE
+     {
+         static void make_table(table_ctx *target, db_handle_internal *db, const mstring_safe &name) {
+             target->reinit(db);
+             target->tablename = name;
+             db->mark_schema_change();
+         }
+
+         db_handle_internal *db;
+     public:
+         tables_accessor_spec(db_handle_internal *_db) :
+             BASE(_db->tables),
+             db(_db)
+         {}
+
+         typedef typename BASE::iterator iterator;
+
+         iterator create(const db_string &name) {
+             return BASE::create(name, db->active_sp(), boost::bind(make_table, _1, db, _2));
+         }
+
+         // Second field of the result-pair is true if the object is new.
+         std::pair<iterator, bool> create_if(const db_string &name) {
+             return BASE::create_if(name, db->active_sp(), boost::bind(make_table, _1, db, _2));
+         }
+
+         void drop(iterator &itr) {
+             BASE::drop(itr, db->active_sp());
+         }
+     };
+
+     // This is effectively the user interface to the DB. You can get a
+     // handle to a table/column/index, from which you can alter/insert/drop/
+     // iterate, etc.
+     typedef typename table_ctx::table_handle table_handle;
+     typedef typename table_ctx::column_selector_t column_selector_t;
+     typedef typename table_ctx::row_iterator row_iterator;
+     typedef typename tables_t::template set_accessor_t<table_handle, tables_accessor_spec>::type tables_accessor_t;
+     tables_accessor_t tables_accessor() { return tables_accessor_t(this); }
+ };
+
+ // The user gets one of these.
+ template<typename ALLOCATOR_T>
+ class db_instance_t
+ {
+     typedef db_handle_internal_t<ALLOCATOR_T> db_handle_internal;
+     typedef db_instance_t<ALLOCATOR_T> db_instance;
+
+     // This has to be a member instead of a base because of the
+     // reset command. Reset recreates a handle object, regardless of what
+     // a user may have derived.
+     //
+     // This way, a user can safely derive from db_instance_t, not that
+     // there's any reason to.
+     db_handle_internal_t<ALLOCATOR_T> db;
+
+ public:
+     typedef typename db_handle_internal::column_selector_t column_selector_t;
+
+     typedef typename db_handle_internal::tables_accessor_t tables_accessor_t;
+     typedef typename db_handle_internal::table_handle table_handle;
+     typedef typename db_handle_internal::row_iterator row_iterator;
+     typedef typename tables_accessor_t::iterator table_itr;
+
+     typedef typename table_handle::columns_accessor_t columns_accessor_t;
+     typedef typename table_handle::column_handle column_handle;
+     typedef typename table_handle::cursor_handle table_cursor_handle;
+     typedef typename columns_accessor_t::iterator column_itr;
+
+     typedef typename table_handle::indices_accessor_t indices_accessor_t;
+     typedef typename table_handle::index_handle index_handle;
+     typedef typename indices_accessor_t::iterator index_itr;
+
+     typedef typename index_handle::iterator index_row_iterator;
+     typedef typename index_handle::cursor_handle index_cursor_handle;
+     typedef typename index_handle::column_defs_accessor index_columns_accessor_t;
+     typedef typename index_columns_accessor_t::column_itr index_column_itr;
+
+     typedef Oinky::index_column_def index_column_def;
+
+     // typedef typename index_handle::column_def index_column_def;
+     // typedef typename index_handle::column_iterator index_column_iterator;
+
+     tables_accessor_t tables() { return db.tables_accessor(); }
+
+     // This is on a private timeline. The caller is only guaranteed
+     // that it increases monotonically.
+     uint32 last_schema_change() const { return db.last_schema_change; }
+
+     bool modified_since(sp_marker_t sp) const {
+         return db.modified_since(sp);
+     }
+
+     // Takes the const bytestring and mounts it as a database. Returns a
+     // handle to the DB on successful mount. Exception if mount fails.
+     //
+     // The mount process does not exhaustively verify encoding. Subsequent
+     // accesses to the handle may throw an exception if they decode invalid
+     // data.
+     //
+     // The same flattened bytes can be mounted several times, which will
+     // produce unique handles. Modifications to any handle will be invisible
+     // to the others.
+     static void mount(db_instance *h, const char *buffer, uint32 buflen) {
+         Serialization::Serializer<
+             db_handle_internal,
+             Serialization::v1_sformat_tag
+         >::mount(&h->db, buffer, buflen);
+     }
+
+     uint32 prepare_pack() {
+         return Serialization::Serializer<
+             db_handle_internal,
+             Serialization::v1_sformat_tag
+         >::prepare_pack(db);
+     }
+
+     uint32 complete_pack(char *target_buffer, uint32 buflen) {
+         char *end = target_buffer;
+
+         Serialization::Serializer<
+             db_handle_internal,
+             Serialization::v1_sformat_tag
+         >::complete_pack(db, end, target_buffer + buflen);
+
+         // How many bytes were written.
+         return end - target_buffer;
+     }
+
+     sp_marker_t new_sp() { return db.new_sp(); }
+     void sp_rollback(sp_marker_t tsp) { db.sp_rollback(tsp); }
+
+     // The DB instance itself is noncopyable. For that reason,
+     // we implement a custom reset() method. This is equivalent to doing
+     //     db_t db;
+     //     ...<modify DB>
+     //     db = db_t(); // <<---- This is the reset operation we simulate.
+     void reset() { db.reset(); }
+ };
+
+ } //namespace Internal
+
+ typedef ::Oinky::Internal::db_instance_t<nky_allocator_t> db_t;
+
+ } //namespace Oinky
+
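
Taken together, the members above support a simple round-trip: mount a flattened image, guard edits with a savepoint, then serialize in two phases. The following is a minimal sketch, not code shipped with the gem; the umbrella header name, the implicit db_string conversion from a C string literal, and sp_marker_t being visible at this scope are assumptions, while the member calls themselves match the declarations in this hunk.

// Sketch only -- not part of the gem sources.
#include <vector>
#include "oinky.hpp"   // assumed umbrella header

using namespace Oinky;

void roundtrip(const char *bytes, uint32 buflen)
{
    db_t db;
    db_t::mount(&db, bytes, buflen);    // throws if the image is malformed

    sp_marker_t sp = db.new_sp();       // returns the pre-increment marker
    db.tables().create("scratch");      // hypothetical table name
    db.sp_rollback(sp);                 // undoes the create

    // Conservative, per the comments above: still true for the old marker...
    bool dirty_old = db.modified_since(sp);
    // ...but false against a marker taken after the rollback.
    bool dirty_new = db.modified_since(db.new_sp());

    // Two-phase serialization: compute the size, then write.
    uint32 need = db.prepare_pack();
    std::vector<char> buf(need);
    uint32 written = db.complete_pack(&buf[0], need);
    (void) dirty_old; (void) dirty_new; (void) written;
}
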
data/ext/include/oinky/nky_index.hpp
@@ -0,0 +1,1038 @@
+ // This source is distributed under the terms of the MIT License. Refer
+ // to the 'LICENSE' file for details.
+ //
+ // Copyright (c) Jacob Lacouture, 2012
+
+ namespace Oinky
+ {
+ namespace Internal
+ {
+ using namespace Oinky::Errors;
+ using namespace Oinky::Utils;
+
+ struct index_def_t
+ {
+     // Column width of the index.
+     column_idx_t col_count;
+
+     // unique?
+     bool require_unique;
+
+     const uint8 *cols_ascending;
+ };
+
+ template<typename TABLE_CTX>
+ class idx_column_iterator_t : public
+     boost::iterator_facade<
+         idx_column_iterator_t<TABLE_CTX>,
+         idx_column_ctx,
+         std::random_access_iterator_tag,
+         const idx_column_ctx &
+     >
+ {
+     typedef idx_column_iterator_t<TABLE_CTX> itr_t;
+
+     const TABLE_CTX *table;
+     const index_def_t *defn;
+     const column_ctx *const *column_refs;
+     column_idx_t position;
+
+     void set_value() {
+         OINKY_ASSERT(position <= defn->col_count);
+         if (position < defn->col_count) {
+             val.column = column_refs[position];
+             val.ascending = (defn->cols_ascending[position >> 3] & (1 << (position & 7))) != 0;
+             val.index_pos = position;
+         }
+     }
+ public:
+     typedef idx_column_ctx value_type;
+
+     idx_column_iterator_t() : table(NULL), defn(NULL), position(0) {}
+
+     idx_column_iterator_t(
+         const TABLE_CTX *_table,
+         const index_def_t *_defn,
+         const column_ctx *const *_column_refs,
+         column_idx_t _position) :
+         table(_table),
+         defn(_defn),
+         column_refs(_column_refs),
+         position(_position)
+     {
+         set_value();
+     }
+
+     index_column_def make_def() const {
+         index_column_def d;
+         d.column_name = dereference().column->colname;
+         d.sort_ascending = dereference().ascending;
+         return d;
+     }
+
+ private:
+     const value_type &dereference() const {
+         OINKY_ASSERT(position < defn->col_count);
+         return val;
+     }
+     bool equal(const itr_t &other) const {
+         OINKY_ASSERT(table == other.table);
+         return position == other.position;
+     }
+     void increment() { ++position; set_value(); }
+     void decrement() { --position; set_value(); }
+     void advance(int x) { position += x; set_value(); }
+     int distance_to(const itr_t &other) const {
+         OINKY_ASSERT(table == other.table);
+         return (int) other.position - position;
+     }
+
+     idx_column_ctx val;
+
+     friend class boost::iterator_core_access;
+ };
+
+ // This manages a mixed index. It provides a differential, mutable view
+ // against a fixed serialized version of the index.
+ template<typename TABLE_CTX>
+ class index_ctx_t
+ {
+ public:
+     typedef TABLE_CTX table_ctx;
+     typedef index_ctx_t<table_ctx> index_ctx;
+     typedef idx_column_iterator_t<table_ctx> idx_column_iterator;
+     typedef pending_row<table_ctx> pending_row_t;
+     typedef index_idx_t position_idx_t;
+
+     mstring_safe indexname;
+
+     // sp marker of the creation/deletion of this index.
+     ls_marker ls;
+
+     // Parent table.
+     table_ctx *table;
+
+     // Which of the table's indexes is this?
+     index_idx_t index_idx;
+
+     // Head of the index tree.
+     row_index_hook head;
+
+     // Definition of the index.
+     index_def_t defn;
+
+     // We never access this. It merely exists to free the memory
+     // when the index is destroyed.
+     db_vector<uint8> __definition_data;
+
+     // This specifies which columns are indexed, in which order.
+     const column_ctx *const *column_refs;
+
+     // The serialized portion of the index.
+     typedef indexdata_fixed<table_ctx> fixed_t;
+     typedef typename fixed_t::iterator fixed_itr_t;
+     fixed_t fixed;
+
+     typedef column_selector_template<table_ctx> column_selector_t;
+
+     index_ctx_t(table_ctx *_table = NULL, index_idx_t _index_idx = (index_idx_t) 0) :
+         table(_table),
+         index_idx(_index_idx),
+         __definition_data(_table ? &_table->db->allocator : NULL),
+         column_refs(NULL),
+         fixed(
+             tabular_t(),
+             _table,
+             this)
+     {
+         treealg::init_header(&head);
+     }
+
+     ~index_ctx_t()
+     {
+         // Clear the tree head. Otherwise the destructor will traverse and
+         // unlink everything, which is a waste of time.
+         treealg::init_header(&head);
+     }
+
+     void reinit_serial(table_ctx *_table, index_idx_t _index_idx)
+     {
+         this->~index_ctx();
+         ::new(this) index_ctx(_table, _index_idx);
+     }
+
+     // Allocate a vector and copy the values into it. The index owns
+     // responsibility for releasing it.
+     template<typename ITR_T>
+     void reinit_dynamic(table_ctx *_table, index_idx_t _index_idx, index_uniqueness_spec_t uniq, ITR_T col_begin, ITR_T col_end)
+     {
+         BOOST_STATIC_ASSERT((boost::is_same<
+             typename boost::iterator_value<ITR_T>::type,
+             index_column_def>::value));
+
+         uint32 colcount = std::distance(col_begin, col_end);
+
+         reinit_serial(_table, _index_idx);
+
+         // Allocate memory to hold our column arrays. We use a bit per column
+         // for the cols_ascending value.
+         uint32 ascending_bytes = (colcount + 7) >> 3;
+         __definition_data.resize((sizeof(column_ctx *) * colcount) + ascending_bytes);
+         column_ctx **wcols = (column_ctx **) __definition_data.begin();
+         column_refs = wcols;
+         uint8 *cols_ascending = (uint8 *) (column_refs + colcount);
+         memset(cols_ascending, 0, ascending_bytes);
+
+         // Fill definition data from source iterators.
+         uint32 pos = 0;
+         for (ITR_T i = col_begin; i != col_end; ++i, ++pos) {
+             typename table_ctx::cols_by_name_itr_t col = table->columns.by_name().find(i->column_name);
+             if (col == table->columns.by_name().end()) {
+                 // The specified column does not exist.
+                 throw_error(object_not_found());
+             }
+             if ((*col)->ctype == column_types::Variant) {
+                 // We do not index variant column types, because there is no natural
+                 // ordering among values of different types. (We could obviously
+                 // declare one, but the declaration would be arbitrary, and arbitrary
+                 // semantics are better excluded.)
+                 throw_error(column_not_indexable());
+             }
+             if (i->sort_ascending) {
+                 cols_ascending[pos >> 3] |= 1 << (pos & 7);
+             }
+             wcols[pos] = *col;
+         }
+
+         defn.cols_ascending = cols_ascending;
+         defn.col_count = (column_idx_t) colcount;
+         defn.require_unique = (uniq == Oinky::Unique);
+
+         // The above setting of defn is enough to make coldefs_begin valid.
+         fixed = fixed_t(
+             tabular_t(),
+             _table,
+             this);
+     }
+
+     // Compares two index nodes.
+     typedef index_row_compare_t<index_ctx, pending_row_t> index_row_compare;
+     struct hook_compare
+     {
+         index_row_compare irc;
+
+         hook_compare(const index_ctx *_idx) : irc(_idx) {}
+
+         const index_ctx *index() const { return irc.idx; }
+
+         template<typename ITR_T>
+         int compare(const row_index_hook *left_n, const simple_sequence<ITR_T> &right) const
+         {
+             const pending_row_t *left = pending_row_t::from_index_hook(left_n, index());
+             return irc.compare(*left, right);
+         }
+         template<typename ITR_T>
+         int compare(const simple_sequence<ITR_T> &left, const row_index_hook *right_n) const
+         {
+             return -compare(right_n, left);
+         }
+         int compare(const row_index_hook *left_n, const row_index_hook *right_n) const
+         {
+             const pending_row_t *left = pending_row_t::from_index_hook(left_n, index());
+             const pending_row_t *right = pending_row_t::from_index_hook(right_n, index());
+             return irc.compare(*left, *right);
+         }
+     };
+     struct index_less
+     {
+         hook_compare c;
+         bool suborder_duplicates;
+         bool is_cursor_seek;
+
+         index_less(const index_ctx *_idx, bool _suborder_duplicates, bool _is_cursor_seek = false) :
+             c(_idx),
+             suborder_duplicates(_suborder_duplicates),
+             is_cursor_seek(_is_cursor_seek)
+         {}
+
+         template<typename ITR_T>
+         bool operator()(const simple_sequence<ITR_T> &left, const row_index_hook *right_n) const
+         {
+             // Searching for sequences, we should never be subordering duplicates.
+             // We only do that on insert.
+             OINKY_ASSERT(!suborder_duplicates);
+             return c.compare(left, right_n) < 0;
+         }
+         template<typename ITR_T>
+         bool operator()(const row_index_hook *left_n, const simple_sequence<ITR_T> &right) const
+         {
+             OINKY_ASSERT(!suborder_duplicates);
+             return c.compare(left_n, right) < 0;
+         }
+
+         bool operator()(const row_index_hook *left_n, const row_index_hook *right_n) const
+         {
+             if (left_n == right_n) {
+                 return false;
+             }
+             const pending_row_t *left = pending_row_t::from_index_hook(left_n, c.index());
+             const pending_row_t *right = pending_row_t::from_index_hook(right_n, c.index());
+             int i = c.irc.compare(*left, *right);
+             if (i || !suborder_duplicates) return i < 0;
+             // Secondary ordering is the sequence number.
+             OINKY_ASSERT((left->sequence_number != right->sequence_number) || is_cursor_seek);
+             return left->sequence_number < right->sequence_number;
+         }
+     };
+
+     template<typename PITR>
+     struct merge_itr_compare
+     {
+         // TBD: These symmetric comparisons should only be necessary for the
+         // pending_iterator, not the cursor. These are only used when
+         // comparing two merge-iterators to each other.
+
+         // Comparing identical iterator types is much faster than comparing values.
+         int operator()(const fixed_itr_t &left, const fixed_itr_t &right) const {
+             // These are just row indices.
+             return (int) *left - (int) *right;
+         }
+         int operator()(const PITR &left, const PITR &right) const {
+             // Compare the two nodes.
+             const index_ctx *idx = left.index_ptr();
+             OINKY_ASSERT(idx == right.index_ptr());
+             const pending_row_t *lr = left.to_row();
+             const pending_row_t *rr = right.to_row();
+             int i = index_row_compare(idx).compare(*lr, *rr);
+             if (i || (lr == rr)) return i;
+
+             // Similar to below, where we must treat fixed and pending objects
+             // differently: because this index may not be unique, we must
+             // consider that there may be multiple pending rows with the same
+             // indexed values, and we need to treat them differently. We
+             // differentiate them by the node's sequence number.
+
+             // See note below about this not being possible unless the index
+             // is non-unique, or this is an expired iterator (or an internal bug).
+             OINKY_ASSERT(!idx->defn.require_unique);
+
+             // We should have generated unique values.
+             if (lr->sequence_number < rr->sequence_number) return -1;
+             if (lr->sequence_number > rr->sequence_number) return 1;
+
+             // This should be impossible for pending_iterator. Only the
+             // safe_cursor could be pointing to an old row object, which
+             // shares the sequence number and index position with its new
+             // version.
+             OINKY_ASSERT(!(boost::is_same<PITR, pending_iterator>::value));
+             return 0;
+         }
+
+         // Comparing different iterator types requires actual comparison.
+         int operator()(const PITR &left, const fixed_itr_t &right) const {
+             return -(*this)(right, left);
+         }
+         int operator()(const fixed_itr_t &left, const PITR &right) const {
+             // The accessor comparison method doesn't work because
+             // it is a value-based comparison. It lacks the context
+             // of knowing whether the columns are indexed in ascending
+             // or descending order.
+             index_ctx *idx = right.index_ptr();
+             uint32 col_count = idx->defn.col_count;
+             variant_cv_t *lvals = (variant_cv_t *) alloca(sizeof(variant_cv_t) * col_count);
+             left.indexed_values().copy_to(lvals, col_count);
+             simple_sequence<const variant_cv_t *> lseq(lvals, lvals + col_count);
+
+             int i = -index_row_compare(idx).compare(*right.to_row(), lseq);
+             // Here we are enumerating two sets which we know to be disjoint in
+             // terms of rowkeys. It's possible that the index is non-unique,
+             // in which case we might still have value overlap between the
+             // two sets. We need to be sure not to drop either value,
+             // so in the event of a tie, the fixed iterator always wins.
+             //
+             // Note that we do NOT do this in the comparison function that we
+             // use for insertion, because that does need to discover duplicate
+             // values in some cases.
+             //
+             // Note also, however, that this IS the comparison function used to
+             // compare two iterators. If they refer to different storage
+             // (pending vs. fixed) we know they're not equivalent.
+             if (i == 0) {
+                 // Should not have been permitted to insert a pending node
+                 // that shadowed a fixed node. Possibly this is a user
+                 // iterator that has since been invalidated by a savepoint
+                 // rollback.
+                 OINKY_ASSERT(left.is_deleted() || !left.index()->defn.require_unique);
+
+                 // Secondary sort by sequence number/rowkey.
+                 if ((uint64) *left < right->sequence_number) {
+                     return -1;
+                 }
+                 if ((uint64) *left > right->sequence_number) {
+                     return 1;
+                 }
+                 // Otherwise this is literally the same row.
+                 // One MUST be deleted.
+                 OINKY_ASSERT(left.is_deleted());
+                 // These must still be sequenced, so that we can skip over the
+                 // deleted item to reach the most current value (in the pending).
+                 return -1;
+             }
+             return i;
+         }
+     };
+
+     // Nothing to do. Indexes are merely projections of table data, and for
+     // now their definitions are immutable.
+     void sp_rollback(sp_marker_t target_sp) {}
+
+     void link_all()
+     {
+         typedef typename table_ctx::pitr_t pitr_t;
+
+         OINKY_ASSERT(treealg::size(&head) == 0);
+         pitr_t end = table->live_pending.end();
+         // This amounts to a full sort.
+         for (
+             pitr_t i = table->live_pending.begin();
+             i != end;
+             ++i)
+         {
+             pending_row_t &row(*i);
+             OINKY_ASSERT(row.ls.delete_sp == 0);
+             link_pending(&row, true);
+         }
+     }
+
+     void unlink_all()
+     {
+         // This is easy. The member nodes will just be treated as
+         // uninitialized hooks. No point unlinking them.
+         treealg::init_header(&head);
+     }
+
+     void drop(sp_marker_t sp)
+     {
+         unlink_all();
+     }
+
+     // Uncreate the index itself.
+     void uncreate(sp_marker_t tsp)
+     {
+         // Remove all remaining pending rows from this index.
+         unlink_all();
+     }
+     void undrop(sp_marker_t tsp)
+     {
+         // Relink all rows.
+         link_all();
+     }
+
+     struct values_accessor_base
+     {
+         column_selector_t cs;
+         const pending_row_t *prow;
+
+         values_accessor_base(
+             column_selector_t _cs,
+             const pending_row_t *_prow) :
+             cs(_cs),
+             prow(_prow)
+         {}
+
+         uint32 column_count() const { return cs.column_count(); }
+
+         // Parameters are a column definition and a value.
+         template<typename FN>
+         void each_column_value(FN fn) const {
+             prow->each_column_value(cs, fn);
+         }
+     };
+
+     typedef multicolumn_value_accessor<values_accessor_base> values_accessor_t;
+
+     values_accessor_t indexed_values(pending_row_t *row) {
+         return values_accessor_t(index_columns(), row);
+     }
+
+     column_selector_t index_columns() {
+         return column_selector_t::select_index(this);
+     }
+
+     // Extract the indexed columns from the pending row and insert.
+     void link_pending(pending_row_t *pr, bool skip_fixed_check)
+     {
+         row_index_hook *hook = pr->get_hook(index_idx);
+
+         // If we care about uniqueness, then check the fixed table first.
+         if (!skip_fixed_check && defn.require_unique && !table->fixed_dropped()) {
+             // Copy the relevant index values into a sequence before searching.
+             variant_cv_t *values = (variant_cv_t *) alloca(sizeof(variant_cv_t) * defn.col_count);
+             indexed_values(pr).copy_to(values, defn.col_count);
+
+             fixed_itr_t fi = fixed.find(simple_sequence<const variant_cv_t *>(values, values + defn.col_count));
+             if ((fi != fixed.end()) && !fi.is_deleted()) {
+                 throw_error(index_entry_not_unique());
+             }
+         }
+
+         // Check whether the insertion would violate uniqueness, and if we care.
+         // There's no additional overhead to checking, even if we're not
+         // enforcing uniqueness, since we have to insert anyway.
+         treealg::insert_commit_data commit_data;
+         std::pair<row_index_hook *, bool> result_pair = treealg::insert_unique_check(
+             &head,
+             hook,
+             // The second parameter adds a secondary ordering (the sequence
+             // number) to duplicate entries. This is because all orderings
+             // must be strict, even if entries are identical, which can
+             // happen for non-unique indices.
+             index_less(this, !defn.require_unique),
+             commit_data);
+         if (!result_pair.second) // Entry exists in index.
+         {
+             // If this is showing up as existing, then it must be using the unique check.
+             OINKY_ASSERT(defn.require_unique);
+             throw_error(index_entry_not_unique());
+         }
+
+         // Otherwise do the insert.
+         treealg::insert_unique_commit(&head, hook, commit_data);
+     }
+
+     // Just extract from the tree.
+     void unlink_pending(pending_row_t *pr)
+     {
+         row_index_hook *hook = pr->get_hook(index_idx);
+         treealg::erase(&head, hook);
+     }
+ private:
+     struct pending_iterator_base
+     {
+         index_ctx *idx;
+         row_index_hook *hook;
+
+         pending_iterator_base() : idx(NULL), hook(NULL) {}
+
+         pending_iterator_base(index_ctx *_index, row_index_hook *_hook) :
+             idx(_index),
+             hook(_hook)
+         {}
+
+         values_accessor_t indexed_values() const {
+             return idx->indexed_values(to_row());
+         }
+
+         pending_row_t *to_row() const {
+             if (hook == treealg::end_node(&idx->head)) {
+                 return NULL;
+             }
+             return pending_row_t::from_index_hook(hook, idx);
+         }
+
+         index_ctx *index_ptr() const { return idx; }
+
+         int compare_to(const pending_iterator_base &other) const {
+             pending_row_t *l = to_row();
+             pending_row_t *r = other.to_row();
+             if (l == r) return 0;
+             if (l == NULL) return 1;
+             if (r == NULL) return -1;
+             return index_row_compare(idx).compare(*l, *r);
+         }
+
+         template<typename ITR_T>
+         int compare_to(const simple_sequence<ITR_T> &seq) const {
+             pending_row_t *l = to_row();
+             if (l == NULL) return 1;
+             return index_row_compare(idx).compare(*l, seq);
+         }
+     };
+
+     // Compared to the table iterator: the index iterator must merge values,
+     // and it must also check every fixed value against the delete set,
+     // since the delete set and index are not in the same order.
+     //
+     // This iterator is bidirectional. It iterates over the pending items.
+     class pending_iterator :
+         public pending_iterator_base,
+         public boost::iterator_facade<
+             pending_iterator,
+             const pending_row_t &,
+             boost::bidirectional_traversal_tag,
+             const pending_row_t &>
+     {
+         friend class boost::iterator_core_access;
+
+         void increment() {
+             hook = treealg::next_node(hook);
+         }
+         void decrement() {
+             hook = treealg::prev_node(hook);
+         }
+         const pending_row_t &dereference() const { return *to_row(); }
+         bool equal(const pending_iterator &other) const {
+             return hook == other.hook;
+         }
+
+     public:
+         using pending_iterator_base::idx;
+         using pending_iterator_base::hook;
+         using pending_iterator_base::to_row;
+
+         pending_iterator() : pending_iterator_base() {}
+
+         pending_iterator(index_ctx *_idx, row_index_hook *_hook) :
+             pending_iterator_base(_idx, _hook)
+         {}
+
+         pending_iterator(index_ctx *_idx, pending_row_t *_row) :
+             pending_iterator_base(_idx, _row->get_hook(_idx->index_idx))
+         {}
+
+         bool at_end() const { return hook == treealg::end_node(&idx->head); }
+
+         static pending_iterator begin_from(index_ctx *idx) {
+             return pending_iterator(idx, treealg::begin_node(&idx->head));
+         }
+         static pending_iterator end_from(index_ctx *idx) {
+             return pending_iterator(idx, treealg::end_node(&idx->head));
+         }
+         template<typename SEQ_T>
+         static pending_iterator lower_bound_from(index_ctx *idx, const SEQ_T &key) {
+             // The comparer we use will recognize equivalent entries as matches.
+             return pending_iterator(idx, treealg::lower_bound(&idx->head, key, index_less(idx, false)));
+         }
+         template<typename SEQ_T>
+         static pending_iterator upper_bound_from(index_ctx *idx, const SEQ_T &key) {
+             return pending_iterator(idx, treealg::upper_bound(&idx->head, key, index_less(idx, false)));
+         }
+         template<typename SEQ_T>
+         static pending_iterator find_from(index_ctx *idx, const SEQ_T &key) {
+             return pending_iterator(idx, treealg::find(&idx->head, key, index_less(idx, false)));
+         }
+     };
+
+     pending_iterator pending_begin() { return pending_iterator::begin_from(this); }
+     pending_iterator pending_end() { return pending_iterator::end_from(this); }
+
+ public:
+     idx_column_iterator coldefs_begin() const {
+         return idx_column_iterator(table, &defn, column_refs, (column_idx_t) 0);
+     }
+     idx_column_iterator coldefs_end() const {
+         return idx_column_iterator(table, &defn, column_refs, defn.col_count);
+     }
+
+     typedef symmetric_iterator<fixed_itr_t> fixed_range_t;
+     typedef symmetric_iterator<pending_iterator> pending_range_t;
+
+     struct pending_to_row {
+         template<typename PITR>
+         pending_row_t *operator()(const PITR &pitr) const {
+             return pitr.to_row();
+         }
+     };
+
+     typedef typename iterator_builder<
+         table_ctx,
+         fixed_itr_t,
+         fixed_range_t,
+         pending_iterator,
+         pending_range_t,
+         merge_itr_compare<pending_iterator>,
+         pending_to_row>::iterator iterator;
+
+     template<typename SEQ_T>
+     inline void check_key_type(const SEQ_T &key) const {
+         // We support untyped value comparison internally, because we trust
+         // the internals. However, we require that the user-key be safely
+         // typed. The effects of passing a mistyped column value
+         // would be difficult to diagnose, and could potentially segfault.
+         BOOST_STATIC_ASSERT((boost::is_same<
+             typename boost::iterator_value<typename SEQ_T::itr_t>::type,
+             variant_cv_t>::value));
+     }
+
+     // Indices can be iterated forward and backward.
+     // The position given describes the values of the indexed columns. It
+     // need not describe all the indexed columns. A partial value specifies an
+     // exclusive lower bound on the resulting iterator.
+     template<typename SEQ_T>
+     iterator lower_bound(const SEQ_T &key) {
+         check_key_type(key);
+
+         fixed_itr_t fi = fixed.lower_bound(key);
+         pending_iterator pi = pending_iterator::lower_bound_from(this, key);
+         return iterator(
+             table,
+             fixed_range_t(fi, fixed.begin(), fixed.end()),
+             pending_range_t(pi, pending_begin(), pending_end())
+         );
+     }
+     template<typename SEQ_T>
+     iterator upper_bound(const SEQ_T &key) {
+         check_key_type(key);
+
+         fixed_itr_t fi = fixed.upper_bound(key);
+         pending_iterator pi = pending_iterator::upper_bound_from(this, key);
+         return iterator(
+             table,
+             fixed_range_t(fi, fixed.begin(), fixed.end()),
+             pending_range_t(pi, pending_begin(), pending_end())
+         );
+     }
+     template<typename SEQ_T>
+     iterator find(const SEQ_T &key) {
+         check_key_type(key);
+
+         fixed_itr_t fi = fixed.find(key);
+         pending_iterator pi = pending_iterator::find_from(this, key);
+         return iterator(
+             table,
+             fixed_range_t(fi, fixed.begin(), fixed.end()),
+             pending_range_t(pi, pending_begin(), pending_end())
+         );
+     }
+     iterator begin() {
+         fixed_itr_t fi(fixed.begin());
+         pending_iterator pi(pending_begin());
+         return iterator(
+             table,
+             fixed_range_t(fi, fi, fixed.end()),
+             pending_range_t(pi, pi, pending_end())
+         );
+     }
+     iterator end() {
+         fixed_itr_t fi(fixed.end());
+         pending_iterator pi(pending_end());
+         return iterator(
+             table,
+             fixed_range_t(fi, fixed.begin(), fi),
+             pending_range_t(pi, pending_begin(), pi)
+         );
+     }
+
+     struct safe_idx_cursor_host
+     {
+         index_ctx *idx;
+         safe_idx_cursor_host(index_ctx *_idx) : idx(_idx) {}
+         safe_idx_cursor_host() : idx(NULL) {}
+
+         bool operator==(const safe_idx_cursor_host &other) const {
+             return idx == other.idx;
+         }
+         bool operator!=(const safe_idx_cursor_host &other) const {
+             return idx != other.idx;
+         }
+
+         typedef typename table_ctx::allocator_t allocator_t;
+         allocator_t &allocator() const { return idx->table->db->allocator; }
+
+         table_ctx *table() const { return idx->table; }
+
+         void check_state() const
+         {
+             if (idx->ls.is_deleted()) {
+                 throw_error(object_deleted());
+             }
+         }
+
+         // Alternatives are lower/upper bound, depending on forward/reverse.
+         pending_row_t *seek_pending(bool use_lower_bound, pending_row_t *row) const
+         {
+             OINKY_ASSERT(row->is_inactive());
+             row_index_hook *hook = row->get_hook(idx->index_idx);
+
+             // Our existing row isn't indexed, but we can use the familiar
+             // comparer to compute its insert position, which is where
+             // we should continue enumeration from. This works whether
+             // a matching entry is found or not.
+             //
+             // We do this rather than just making a sequence out of the
+             // positional values (using alloca/select/copy_to) because
+             // we also want to seek to a particular sequence number.
+             //
+             // Suborder non-unique items; otherwise we would always seek
+             // to the first entry matching the indexed values. There's
+             // no harm doing this even when the index is unique. We
+             // specify too that we are in cursor_seek, which disables the
+             // assert we would get from finding an existing node
+             // matching another node in every way, including the sequence
+             // number. (It's not a problem because the node we're
+             // searching with is not in the tree.)
+             index_less comparer(idx, true, true);
+
+             row_index_hook *h;
+             if (use_lower_bound) {
+                 h = treealg::lower_bound(
+                     &idx->head,
+                     hook,
+                     comparer);
+             } else {
+                 h = treealg::upper_bound(
+                     &idx->head,
+                     hook,
+                     comparer);
+             }
+             if (h == treealg::end_node(&idx->head)) {
+                 return NULL;
+             }
+             return pending_row_t::from_index_hook(h, idx);
+         }
+
+         pending_row_t *seek_prev(pending_row_t *row) const {
+             row_index_hook *h;
+             if (row == NULL) {
+                 h = treealg::end_node(&idx->head);
+             } else {
+                 h = row->get_hook(idx->index_idx);
+             }
+             if (h == treealg::begin_node(&idx->head)) {
+                 return NULL;
+             } else {
+                 return pending_row_t::from_index_hook(treealg::prev_node(h), idx);
+             }
+         }
+         pending_row_t *seek_next(pending_row_t *row) const {
+             row_index_hook *h;
+             if (row == NULL) {
+                 h = treealg::begin_node(&idx->head);
+             } else {
+                 h = row->get_hook(idx->index_idx);
+                 h = treealg::next_node(h);
+             }
+
+             if (h == treealg::end_node(&idx->head)) {
+                 return NULL;
+             } else {
+                 return pending_row_t::from_index_hook(h, idx);
+             }
+         }
+
+         // Link/Unlink the internal cursor object from the host list.
+         template<typename INTERNAL>
+         void unlink_cursor(INTERNAL *i) {
+             idx->active_cursors.erase(index_ctx::active_cursors_t::s_iterator_to(*i));
+         }
+         template<typename INTERNAL>
+         void link_cursor(INTERNAL *i) {
+             idx->active_cursors.push_back(*i);
+         }
+
+         inline void assert_row_valid(pending_row_t *row) const {
+             OINKY_ASSERT(row && (row->table() == idx->table));
+         }
+     };
+
+     class safe_pending_cursor : public safe_pending_cursor_t<pending_row_t, safe_idx_cursor_host>
+     {
+         typedef safe_pending_cursor_t<pending_row_t, safe_idx_cursor_host> base_t;
+     public:
+         using base_t::host;
+         using base_t::to_row;
+
+         safe_pending_cursor() {}
+
+         safe_pending_cursor(const safe_idx_cursor_host &_host, pending_row_t *_row, bool _bbegin) :
+             base_t(_host, _row, _bbegin)
+         {}
+
+         const safe_pending_cursor &i() const { return *this; }
+
+         values_accessor_t indexed_values() const {
+             return values_accessor_t(host.idx->index_columns(), to_row());
+         }
+         index_ctx *index_ptr() const { return host.idx; }
+     };
+
+     typedef typename iterator_builder<
+         table_ctx,
+         fixed_itr_t,
+         fixed_range_t,
+         safe_pending_cursor,
+         safe_pending_cursor,
+         merge_itr_compare<safe_pending_cursor>,
+         pending_to_row>::iterator safe_itr_t;
+
+     typedef cursor_internal_t<safe_itr_t> cursor_internal;
+     typedef cursor_handle_t<cursor_internal, safe_idx_cursor_host> cursor_handle;
+
+     //
+     // We keep track of all the active cursors on this index. We invalidate
+     // them when the index gets dropped or uncreated.
+     //
+     typedef boost::intrusive::list<cursor_internal> active_cursors_t;
+     active_cursors_t active_cursors;
+
+     void invalidate_cursors()
+     {
+         while (active_cursors.begin() != active_cursors.end()) {
+             typename active_cursors_t::iterator i = active_cursors.begin();
+             i->invalidate();
+             active_cursors.erase(i);
+         }
+     }
+
+     // Invoked when the table is dropped from the DB, or when a create is
+     // rolled back via SP.
+     void on_deactivation() {
+         invalidate_cursors();
+     }
+ };
+
+ // This is the public/user interface to the index_ctx.
+ template<typename INDEX_CTX>
+ class index_handle_t
+ {
+     typedef INDEX_CTX index_ctx;
+     typedef typename index_ctx::table_ctx table_ctx;
+     typedef typename index_ctx::safe_itr_t safe_itr_t;
+     typedef typename table_ctx::pending_row_t pending_row_t;
+     typedef typename index_ctx::safe_idx_cursor_host safe_idx_cursor_host;
+     typedef typename index_ctx::safe_pending_cursor safe_pending_cursor;
+     typedef typename index_ctx::idx_column_iterator idx_column_iterator;
+
+     index_ctx *index;
+ public:
+     typedef typename index_ctx::cursor_handle cursor_handle;
+     typedef typename index_ctx::iterator iterator;
+     typedef typename table_ctx::column_selector_t column_selector_t;
+
+     index_handle_t() : index(NULL) {}
+     index_handle_t(index_ctx *_index) : index(_index) {}
+
+     // This tells whether the handle value is uninitialized. If this returns
+     // false, it does NOT mean the handle is valid. It may still be an expired
+     // handle or just invalid. But it is a test as to whether it's initialized
+     // at all.
+     bool is_null() const { return index == NULL; }
+
+     //###############
+     // For the C API. The type is completely opaque, but an equivalent
+     // handle object can be recovered from it.
+     class raw_handle_t {};
+     raw_handle_t *raw() const { return (raw_handle_t *) index; }
+     index_handle_t(raw_handle_t *_index) : index((index_ctx *) _index) {}
+
+     //###############
+     // Handle OPS
+     db_string name() const { return index->indexname.as_string(); }
+
+     bool is_unique() const { return index->defn.require_unique; }
+
+     uint32 column_count() const { return (uint32) index->defn.col_count; }
+
+     column_selector_t index_columns() const {
+         return index->index_columns();
+     }
+
+     class column_defs_accessor
+     {
+         index_ctx *index;
+     public:
+         column_defs_accessor() : index(NULL) {}
+         column_defs_accessor(index_ctx *_index) : index(_index) {}
+
+         class column_itr : public boost::iterator_adaptor<
+             column_itr,
+             idx_column_iterator,
+             index_column_def,
+             boost::bidirectional_traversal_tag,
+             index_column_def
+         >
+         {
+             typedef boost::iterator_adaptor<
+                 column_itr,
+                 idx_column_iterator,
+                 index_column_def,
+                 boost::bidirectional_traversal_tag,
+                 index_column_def
+             > base_t;
+
+             friend class boost::iterator_core_access;
+         public:
+             column_itr() {}
+             column_itr(const idx_column_iterator &_i) : base_t(_i) {}
+
+         private:
+             index_column_def dereference() const { return base_t::base().make_def(); }
+         };
+
+         size_t column_count() const { return index->defn.col_count; }
+         column_itr begin() const { return column_itr(index->coldefs_begin()); }
+         column_itr end() const { return column_itr(index->coldefs_end()); }
+     };
+
+     column_defs_accessor columns() const { return column_defs_accessor(index); }
+
+     typename table_ctx::table_handle table() const {
+         return typename table_ctx::table_handle(index->table);
+     }
+
+     // Indices can be iterated forward and backward.
+     // The position given describes the values of the indexed columns. It
+     // need not describe all the indexed columns. A partial value specifies an
+     // exclusive lower bound on the resulting iterator.
+     template<typename ITR_T>
+     iterator lower_bound(ITR_T key_begin, ITR_T key_end) const {
+         return index->lower_bound(simple_sequence<ITR_T>(key_begin, key_end));
+     }
+     template<typename ITR_T>
+     iterator upper_bound(ITR_T key_begin, ITR_T key_end) const {
+         return index->upper_bound(simple_sequence<ITR_T>(key_begin, key_end));
+     }
+     template<typename ITR_T>
+     iterator find(ITR_T key_begin, ITR_T key_end) const {
+         return index->find(simple_sequence<ITR_T>(key_begin, key_end));
+     }
+     iterator begin() const {
+         return index->begin();
+     }
+     iterator end() const {
+         return index->end();
+     }
+
+     cursor_handle new_cursor(const iterator &where) const {
+         if (index != where.pending().idx) {
+             // Invalid iterator, or iterator over a different index.
+             throw_error(invalid_argument());
+         }
+
+         return cursor_handle(safe_idx_cursor_host(index), make_safe_itr(where));
+     }
+     void delete_cursor(cursor_handle &where) const {
+         where.free();
+     }
+
+     // Reset the cursor position to that defined by the given iterator.
+     // This is more efficient than deleting and creating a new cursor.
+     void move_cursor(cursor_handle &crs, const iterator &where) const {
+         if ((index != crs.host.idx) ||
+             (index != where.pending().idx))
+         {
+             // Cursor/Iterator/IxHandle do not match.
+             throw_error(invalid_argument());
+         }
+
+         crs.iterator() = make_safe_itr(where);
+     }
+
+     uint32 row_count() const {
+         return index->table->row_count();
+     }
+ private:
+     safe_itr_t make_safe_itr(const iterator &where) const {
+         bool p_bbegin = where.pending_range().before_begin();
+         pending_row_t *p_row = where.pending_range().is_valid() ? where.pending().to_row() : NULL;
+
+         return safe_itr_t(
+             index->table,
+             where.fixed_range(),
+             safe_pending_cursor(safe_idx_cursor_host(index), p_row, p_bbegin));
+     }
+ };
+
+ } //namespace Internal
+ } //namespace Oinky
+
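
The index handle above merges the serialized (fixed) image and the pending modifications behind a single iterator. The following lookup sketch is not code shipped with the gem: how an index_handle is obtained (e.g. from a table handle's indices accessor), the visibility of variant_cv_t and index_column_def at this scope, and the comparability of the merged iterator are assumptions; the typedefs and member calls themselves come from the declarations above.

// Sketch only -- not part of the gem sources.
using namespace Oinky;

uint32 count_matches(db_t::index_handle ix,
                     const variant_cv_t *key, uint32 keylen)
{
    // Keys are passed as [begin, end) ranges of variant_cv_t; a partial
    // key (fewer values than indexed columns) bounds a whole subrange.
    db_t::index_row_iterator i = ix.lower_bound(key, key + keylen);
    db_t::index_row_iterator e = ix.upper_bound(key, key + keylen);

    uint32 n = 0;
    for (; i != e; ++i) {
        ++n;    // each step may come from fixed or pending storage
    }
    return n;
}

void list_index_columns(db_t::index_handle ix)
{
    // Column definitions round-trip through index_column_def.
    db_t::index_columns_accessor_t cols = ix.columns();
    for (db_t::index_column_itr ci = cols.begin(); ci != cols.end(); ++ci) {
        index_column_def d = *ci;   // d.column_name, d.sort_ascending
        (void) d;
    }
}
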