@herdwatch/lokijs 1.5.8-dev.7 → 1.5.12-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -13,6 +13,9 @@
13
13
  return (function() {
14
14
  "use strict";
15
15
 
16
+ /* jshint -W030 */
17
+ var DEBUG = typeof window !== 'undefined' && !!window.__loki_incremental_idb_debug;
18
+
16
19
  /**
17
20
  * An improved Loki persistence adapter for IndexedDB (not compatible with LokiIndexedAdapter)
18
21
  * Unlike LokiIndexedAdapter, the database is saved not as one big JSON blob, but split into
@@ -30,14 +33,37 @@
30
33
  * @constructor IncrementalIndexedDBAdapter
31
34
  *
32
35
  * @param {object=} options Configuration options for the adapter
33
- * @param {boolean} options.onversionchange Function to call on `IDBDatabase.onversionchange` event
36
+ * @param {function} options.onversionchange Function to call on `IDBDatabase.onversionchange` event
34
37
  * (most likely database deleted from another browser tab)
38
+ * @param {function} options.onFetchStart Function to call once IDB load has begun.
39
+ * Use this as an opportunity to execute code concurrently while IDB does work on a separate thread
40
+ * @param {function} options.onDidOverwrite Called when this adapter is forced to overwrite contents
41
+ * of IndexedDB. This happens if there's another open tab of the same app that's making changes.
42
+ * You might use it as an opportunity to alert the user to the potential loss of data
43
+ * @param {function} options.serializeChunk Called with a chunk (array of Loki documents) before
44
+ * it's saved to IndexedDB. You can use it to manually compress on-disk representation
45
+ * for faster database loads. Hint: Hand-written conversion of objects to arrays is very
46
+ * profitable for performance. If you use this, you must also pass options.deserializeChunk.
47
+ * @param {function} options.deserializeChunk Called with a chunk serialized with options.serializeChunk
48
+ * Expects an array of Loki documents as the return value
49
+ * @param {number} options.megachunkCount Number of parallel requests for data when loading database.
50
+ * Can be tuned for a specific application
51
+ * @param {array} options.lazyCollections Names of collections that should be deserialized lazily
52
+ * Only use this for collections that aren't used at launch
35
53
  */
36
54
  function IncrementalIndexedDBAdapter(options) {
37
55
  this.mode = "incremental";
38
56
  this.options = options || {};
39
57
  this.chunkSize = 100;
58
+ this.megachunkCount = this.options.megachunkCount || 24;
59
+ this.lazyCollections = this.options.lazyCollections || [];
40
60
  this.idb = null; // will be lazily loaded on first operation that needs it
61
+ this._prevLokiVersionId = null;
62
+ this._prevCollectionVersionIds = {};
63
+
64
+ if (!(this.megachunkCount >= 4 && this.megachunkCount % 2 === 0)) {
65
+ throw new Error('megachunkCount must be >=4 and divisible by 2');
66
+ }
41
67
  }
42
68
 
43
69
  // chunkId - index of the data chunk - e.g. chunk 0 will be lokiIds 0-99
@@ -47,6 +73,7 @@
47
73
  var maxId = minId + this.chunkSize - 1;
48
74
 
49
75
  // use idIndex to find first collection.data position within the $loki range
76
+ collection.ensureId();
50
77
  var idIndex = collection.idIndex;
51
78
 
52
79
  var firstDataPosition = null;
@@ -86,7 +113,7 @@
86
113
  }
87
114
  }
88
115
 
89
- // TODO: remove sanity checks when everything is fully tested
116
+ // verify
90
117
  var firstElement = collection.data[firstDataPosition];
91
118
  if (!(firstElement && firstElement.$loki >= minId && firstElement.$loki <= maxId)) {
92
119
  throw new Error("broken invariant firstelement");
@@ -101,7 +128,6 @@
101
128
  // will have holes when data is deleted)
102
129
  var chunkData = collection.data.slice(firstDataPosition, lastDataPosition + 1);
103
130
 
104
- // TODO: remove sanity checks when everything is fully tested
105
131
  if (chunkData.length > this.chunkSize) {
106
132
  throw new Error("broken invariant - chunk size");
107
133
  }
@@ -120,55 +146,241 @@
120
146
  * db.saveDatabase();
121
147
  *
122
148
  * @param {string} dbname - the name to give the serialized database
123
- * @param {object} dbcopy - copy of the Loki database
149
+ * @param {function} getLokiCopy - returns copy of the Loki database
124
150
  * @param {function} callback - (Optional) callback passed obj.success with true or false
125
151
  * @memberof IncrementalIndexedDBAdapter
126
152
  */
127
- IncrementalIndexedDBAdapter.prototype.saveDatabase = function(dbname, loki, callback) {
153
+ IncrementalIndexedDBAdapter.prototype.saveDatabase = function(dbname, getLokiCopy, callback) {
128
154
  var that = this;
129
- console.log("exportDatabase - begin");
130
- console.time("exportDatabase");
131
155
 
132
- var chunksToSave = [];
156
+ if (!this.idb) {
157
+ this._initializeIDB(dbname, callback, function() {
158
+ that.saveDatabase(dbname, getLokiCopy, callback);
159
+ });
160
+ return;
161
+ }
162
+
163
+ if (this.operationInProgress) {
164
+ throw new Error("Error while saving to database - another operation is already in progress. Please use throttledSaves=true option on Loki object");
165
+ }
166
+ this.operationInProgress = true;
167
+
168
+ DEBUG && console.log("saveDatabase - begin");
169
+ DEBUG && console.time("saveDatabase");
170
+ function finish(e) {
171
+ DEBUG && e && console.error(e);
172
+ DEBUG && console.timeEnd("saveDatabase");
173
+ that.operationInProgress = false;
174
+ callback(e);
175
+ }
176
+
177
+ // try..catch is required, e.g.:
178
+ // InvalidStateError: Failed to execute 'transaction' on 'IDBDatabase': The database connection is closing.
179
+ // (this may happen if another tab has called deleteDatabase)
180
+ try {
181
+ var updatePrevVersionIds = function () {
182
+ console.error('Unexpected successful tx - cannot update previous version ids');
183
+ };
184
+ var didOverwrite = false;
185
+
186
+ var tx = this.idb.transaction(['LokiIncrementalData'], "readwrite");
187
+ tx.oncomplete = function() {
188
+ updatePrevVersionIds();
189
+ finish();
190
+ if (didOverwrite && that.options.onDidOverwrite) {
191
+ that.options.onDidOverwrite();
192
+ }
193
+ };
194
+
195
+ tx.onerror = function(e) {
196
+ finish(e);
197
+ };
198
+
199
+ tx.onabort = function(e) {
200
+ finish(e);
201
+ };
202
+
203
+ var store = tx.objectStore('LokiIncrementalData');
204
+
205
+ var performSave = function (maxChunkIds) {
206
+ try {
207
+ var incremental = !maxChunkIds;
208
+ var chunkInfo = that._putInChunks(store, getLokiCopy(), incremental, maxChunkIds);
209
+ // Update last seen version IDs, but only after the transaction is successful
210
+ updatePrevVersionIds = function() {
211
+ that._prevLokiVersionId = chunkInfo.lokiVersionId;
212
+ chunkInfo.collectionVersionIds.forEach(function (collectionInfo) {
213
+ that._prevCollectionVersionIds[collectionInfo.name] = collectionInfo.versionId;
214
+ });
215
+ };
216
+ tx.commit && tx.commit();
217
+ } catch (error) {
218
+ console.error('idb performSave failed: ', error);
219
+ tx.abort();
220
+ }
221
+ };
222
+
223
+ // Incrementally saving changed chunks breaks down if there is more than one writer to IDB
224
+ // (multiple tabs of the same web app), leading to data corruption. To fix that, we save all
225
+ // metadata chunks (loki + collections) with a unique ID on each save and remember it. Before
226
+ // the subsequent save, we read loki from IDB to check if its version ID changed. If not, we're
227
+ // guaranteed that persisted DB is consistent with our diff. Otherwise, we fall back to the slow
228
+ // path and overwrite *all* database chunks with our version. Both reading and writing must
229
+ // happen in the same IDB transaction for this to work.
230
+ // TODO: We can optimize the slow path by fetching collection metadata chunks and comparing their
231
+ // version IDs with those last seen by us. Since any change in collection data requires a metadata
232
+ // chunk save, we're guaranteed that if the IDs match, we don't need to overwrite chunks of this collection
233
+ var getAllKeysThenSave = function() {
234
+ // NOTE: We must fetch all keys to protect against a case where another tab has written more
235
+ // chunks than we did -- if so, we must delete them.
236
+ idbReq(store.getAllKeys(), function(e) {
237
+ var maxChunkIds = getMaxChunkIds(e.target.result);
238
+ performSave(maxChunkIds);
239
+ }, function(e) {
240
+ console.error('Getting all keys failed: ', e);
241
+ tx.abort();
242
+ });
243
+ };
244
+
245
+ var getLokiThenSave = function() {
246
+ idbReq(store.get('loki'), function(e) {
247
+ if (lokiChunkVersionId(e.target.result) === that._prevLokiVersionId) {
248
+ performSave();
249
+ } else {
250
+ DEBUG && console.warn('Another writer changed Loki IDB, using slow path...');
251
+ didOverwrite = true;
252
+ getAllKeysThenSave();
253
+ }
254
+ }, function(e) {
255
+ console.error('Getting loki chunk failed: ', e);
256
+ tx.abort();
257
+ });
258
+ };
259
+
260
+ getLokiThenSave();
261
+ } catch (error) {
262
+ finish(error);
263
+ }
264
+ };
265
+
266
+ // gets current largest chunk ID for each collection
267
+ function getMaxChunkIds(allKeys) {
268
+ var maxChunkIds = {};
269
+
270
+ allKeys.forEach(function (key) {
271
+ var keySegments = key.split(".");
272
+ // table.chunk.2317
273
+ if (keySegments.length === 3 && keySegments[1] === "chunk") {
274
+ var collection = keySegments[0];
275
+ var chunkId = parseInt(keySegments[2]) || 0;
276
+ var currentMax = maxChunkIds[collection];
277
+
278
+ if (!currentMax || chunkId > currentMax) {
279
+ maxChunkIds[collection] = chunkId;
280
+ }
281
+ }
282
+ });
283
+ return maxChunkIds;
284
+ }
133
285
 
134
- loki.collections.forEach(function(collection, i) {
286
+ function lokiChunkVersionId(chunk) {
287
+ try {
288
+ if (chunk) {
289
+ var loki = JSON.parse(chunk.value);
290
+ return loki.idbVersionId || null;
291
+ } else {
292
+ return null;
293
+ }
294
+ } catch (e) {
295
+ console.error('Error while parsing loki chunk', e);
296
+ return null;
297
+ }
298
+ }
299
+
300
+ IncrementalIndexedDBAdapter.prototype._putInChunks = function(idbStore, loki, incremental, maxChunkIds) {
301
+ var that = this;
302
+ var collectionVersionIds = [];
303
+ var savedSize = 0;
304
+
305
+ var prepareCollection = function (collection, i) {
135
306
  // Find dirty chunk ids
136
307
  var dirtyChunks = new Set();
137
- collection.dirtyIds.forEach(function(lokiId) {
308
+ incremental && collection.dirtyIds.forEach(function(lokiId) {
138
309
  var chunkId = (lokiId / that.chunkSize) | 0;
139
310
  dirtyChunks.add(chunkId);
140
311
  });
141
312
  collection.dirtyIds = [];
142
313
 
143
314
  // Serialize chunks to save
144
- dirtyChunks.forEach(function(chunkId) {
315
+ var prepareChunk = function (chunkId) {
145
316
  var chunkData = that._getChunk(collection, chunkId);
317
+ if (that.options.serializeChunk) {
318
+ chunkData = that.options.serializeChunk(collection.name, chunkData);
319
+ }
146
320
  // we must stringify now, because IDB is asynchronous, and underlying objects are mutable
147
- chunksToSave.push({
321
+ // In general, it's also faster to stringify, because we need serialization anyway, and
322
+ // JSON.stringify is much better optimized than IDB's structured clone
323
+ chunkData = JSON.stringify(chunkData);
324
+ savedSize += chunkData.length;
325
+ DEBUG && incremental && console.log('Saving: ' + collection.name + ".chunk." + chunkId);
326
+ idbStore.put({
148
327
  key: collection.name + ".chunk." + chunkId,
149
- value: JSON.stringify(chunkData),
328
+ value: chunkData,
150
329
  });
151
- });
330
+ };
331
+ if (incremental) {
332
+ dirtyChunks.forEach(prepareChunk);
333
+ } else {
334
+ // add all chunks
335
+ var maxChunkId = (collection.maxId / that.chunkSize) | 0;
336
+ for (var j = 0; j <= maxChunkId; j += 1) {
337
+ prepareChunk(j);
338
+ }
152
339
 
153
- collection.data = [];
154
- // this is recreated on load anyway, so we can make metadata smaller
155
- collection.isIndex = [];
340
+ // delete chunks with larger ids than what we have
341
+ // NOTE: we don't have to delete metadata chunks as they will be absent from loki anyway
342
+ // NOTE: failures are silently ignored, so we don't have to worry about holes
343
+ var persistedMaxChunkId = maxChunkIds[collection.name] || 0;
344
+ for (var k = maxChunkId + 1; k <= persistedMaxChunkId; k += 1) {
345
+ var deletedChunkName = collection.name + ".chunk." + k;
346
+ idbStore.delete(deletedChunkName);
347
+ DEBUG && console.warn('Deleted chunk: ' + deletedChunkName);
348
+ }
349
+ }
156
350
 
157
- // save collection metadata as separate chunk, leave only names in loki
158
- // TODO: To reduce IO, we should only save this chunk when it has changed
159
- chunksToSave.push({
160
- key: collection.name + ".metadata",
161
- value: JSON.stringify(collection),
162
- });
351
+ // save collection metadata as separate chunk (but only if changed)
352
+ if (collection.dirty || dirtyChunks.size || !incremental) {
353
+ collection.idIndex = []; // this is recreated lazily
354
+ collection.data = [];
355
+ collection.idbVersionId = randomVersionId();
356
+ collectionVersionIds.push({ name: collection.name, versionId: collection.idbVersionId });
357
+
358
+ var metadataChunk = JSON.stringify(collection);
359
+ savedSize += metadataChunk.length;
360
+ DEBUG && incremental && console.log('Saving: ' + collection.name + ".metadata");
361
+ idbStore.put({
362
+ key: collection.name + ".metadata",
363
+ value: metadataChunk,
364
+ });
365
+ }
366
+
367
+ // leave only names in the loki chunk
163
368
  loki.collections[i] = { name: collection.name };
164
- });
369
+ };
370
+ loki.collections.forEach(prepareCollection);
165
371
 
372
+ loki.idbVersionId = randomVersionId();
166
373
  var serializedMetadata = JSON.stringify(loki);
167
- loki = null; // allow GC of the DB copy
374
+ savedSize += serializedMetadata.length;
168
375
 
169
- chunksToSave.push({ key: "loki", value: serializedMetadata });
376
+ DEBUG && incremental && console.log('Saving: loki');
377
+ idbStore.put({ key: "loki", value: serializedMetadata });
170
378
 
171
- that._saveChunks(dbname, chunksToSave, callback);
379
+ DEBUG && console.log("saved size: " + savedSize);
380
+ return {
381
+ lokiVersionId: loki.idbVersionId,
382
+ collectionVersionIds: collectionVersionIds,
383
+ };
172
384
  };
173
385
 
174
386
  /**
@@ -188,131 +400,137 @@
188
400
  */
189
401
  IncrementalIndexedDBAdapter.prototype.loadDatabase = function(dbname, callback) {
190
402
  var that = this;
191
- console.log("loadDatabase - begin");
192
- console.time("loadDatabase");
193
- this._getAllChunks(dbname, function(chunks) {
194
- if (!Array.isArray(chunks)) {
195
- // we got an error
196
- console.timeEnd("loadDatabase");
197
- callback(chunks);
198
- }
199
403
 
200
- if (!chunks.length) {
201
- console.timeEnd("loadDatabase");
202
- callback(null);
203
- return;
204
- }
404
+ if (this.operationInProgress) {
405
+ throw new Error("Error while loading database - another operation is already in progress. Please use throttledSaves=true option on Loki object");
406
+ }
205
407
 
206
- console.log("Found chunks:", chunks.length);
408
+ this.operationInProgress = true;
207
409
 
208
- that._sortChunksInPlace(chunks);
410
+ DEBUG && console.log("loadDatabase - begin");
411
+ DEBUG && console.time("loadDatabase");
209
412
 
210
- // repack chunks into a map
211
- var loki;
212
- var chunkCollections = {};
413
+ var finish = function (value) {
414
+ DEBUG && console.timeEnd("loadDatabase");
415
+ that.operationInProgress = false;
416
+ callback(value);
417
+ };
213
418
 
214
- chunks.forEach(function(object) {
215
- var key = object.key;
216
- var value = object.value;
217
- if (key === "loki") {
218
- loki = value;
219
- return;
220
- } else if (key.includes(".")) {
221
- var keySegments = key.split(".");
222
- if (keySegments.length === 3 && keySegments[1] === "chunk") {
223
- var colName = keySegments[0];
224
- if (chunkCollections[colName]) {
225
- chunkCollections[colName].dataChunks.push(value);
226
- } else {
227
- chunkCollections[colName] = {
228
- metadata: null,
229
- dataChunks: [value],
230
- };
231
- }
232
- return;
233
- } else if (keySegments.length === 2 && keySegments[1] === "metadata") {
234
- var name = keySegments[0];
235
- if (chunkCollections[name]) {
236
- chunkCollections[name].metadata = value;
237
- } else {
238
- chunkCollections[name] = { metadata: value, dataChunks: [] };
239
- }
240
- return;
241
- }
419
+ this._getAllChunks(dbname, function(chunks) {
420
+ try {
421
+ if (!Array.isArray(chunks)) {
422
+ throw chunks; // we have an error
242
423
  }
243
424
 
244
- console.error("Unknown chunk " + key);
245
- callback(new Error("Invalid database - unknown chunk found"));
246
- });
247
- chunks = null;
425
+ if (!chunks.length) {
426
+ return finish(null);
427
+ }
248
428
 
249
- if (!loki) {
250
- callback(new Error("Invalid database - missing database metadata"));
251
- }
429
+ DEBUG && console.log("Found chunks:", chunks.length);
252
430
 
253
- // parse Loki object
254
- loki = JSON.parse(loki);
431
+ // repack chunks into a map
432
+ chunks = chunksToMap(chunks);
433
+ var loki = chunks.loki;
434
+ chunks.loki = null; // gc
255
435
 
256
- // populate collections with data
257
- that._populate(loki, chunkCollections);
258
- chunkCollections = null;
436
+ // populate collections with data
437
+ populateLoki(loki, chunks.chunkMap, that.options.deserializeChunk, that.lazyCollections);
438
+ chunks = null; // gc
259
439
 
260
- console.timeEnd("loadDatabase");
261
- callback(loki);
440
+ // remember previous version IDs
441
+ that._prevLokiVersionId = loki.idbVersionId || null;
442
+ that._prevCollectionVersionIds = {};
443
+ loki.collections.forEach(function (collection) {
444
+ that._prevCollectionVersionIds[collection.name] = collection.idbVersionId || null;
445
+ });
446
+
447
+ return finish(loki);
448
+ } catch (error) {
449
+ that._prevLokiVersionId = null;
450
+ that._prevCollectionVersionIds = {};
451
+ return finish(error);
452
+ }
262
453
  });
263
454
  };
264
455
 
265
- IncrementalIndexedDBAdapter.prototype._sortChunksInPlace = function(chunks) {
266
- // sort chunks in place to load data in the right order (ascending loki ids)
267
- // on both Safari and Chrome, we'll get chunks in order like this: 0, 1, 10, 100...
268
- var getSortKey = function(object) {
269
- var key = object.key;
270
- if (key.includes(".")) {
271
- var segments = key.split(".");
272
- if (segments.length === 3 && segments[1] === "chunk") {
273
- return parseInt(segments[2], 10);
456
+ function chunksToMap(chunks) {
457
+ var loki;
458
+ var chunkMap = {};
459
+
460
+ sortChunksInPlace(chunks);
461
+
462
+ chunks.forEach(function(chunk) {
463
+ var type = chunk.type;
464
+ var value = chunk.value;
465
+ var name = chunk.collectionName;
466
+ if (type === "loki") {
467
+ loki = value;
468
+ } else if (type === "data") {
469
+ if (chunkMap[name]) {
470
+ chunkMap[name].dataChunks.push(value);
471
+ } else {
472
+ chunkMap[name] = {
473
+ metadata: null,
474
+ dataChunks: [value],
475
+ };
476
+ }
477
+ } else if (type === "metadata") {
478
+ if (chunkMap[name]) {
479
+ chunkMap[name].metadata = value;
480
+ } else {
481
+ chunkMap[name] = { metadata: value, dataChunks: [] };
274
482
  }
483
+ } else {
484
+ throw new Error("unreachable");
275
485
  }
276
-
277
- return -1; // consistent type must be returned
278
- };
279
- chunks.sort(function(a, b) {
280
- var aKey = getSortKey(a),
281
- bKey = getSortKey(b);
282
- if (aKey < bKey) return -1;
283
- if (aKey > bKey) return 1;
284
- return 0;
285
486
  });
286
- };
287
487
 
288
- IncrementalIndexedDBAdapter.prototype._populate = function(loki, chunkCollections) {
289
- loki.collections.forEach(function(collectionStub, i) {
290
- var chunkCollection = chunkCollections[collectionStub.name];
488
+ if (!loki) {
489
+ throw new Error("Corrupted database - missing database metadata");
490
+ }
491
+
492
+ return { loki: loki, chunkMap: chunkMap };
493
+ }
291
494
 
495
+ function populateLoki(loki, chunkMap, deserializeChunk, lazyCollections) {
496
+ loki.collections.forEach(function populateCollection(collectionStub, i) {
497
+ var name = collectionStub.name;
498
+ var chunkCollection = chunkMap[name];
292
499
  if (chunkCollection) {
293
- // TODO: What if metadata is missing?
294
- var collection = JSON.parse(chunkCollection.metadata);
500
+ if (!chunkCollection.metadata) {
501
+ throw new Error("Corrupted database - missing metadata chunk for " + name);
502
+ }
503
+ var collection = chunkCollection.metadata;
295
504
  chunkCollection.metadata = null;
296
-
297
505
  loki.collections[i] = collection;
298
506
 
299
- var dataChunks = chunkCollection.dataChunks;
300
- dataChunks.forEach(function(chunkObj, i) {
301
- var chunk = JSON.parse(chunkObj);
302
- chunkObj = null; // make string available for GC
303
- dataChunks[i] = null;
304
-
305
- chunk.forEach(function(doc) {
306
- collection.data.push(doc);
507
+ var isLazy = lazyCollections.includes(name);
508
+ var lokiDeserializeCollectionChunks = function () {
509
+ DEBUG && isLazy && console.log("lazy loading " + name);
510
+ var data = [];
511
+ var dataChunks = chunkCollection.dataChunks;
512
+ dataChunks.forEach(function populateChunk(chunk, i) {
513
+ if (isLazy) {
514
+ chunk = JSON.parse(chunk);
515
+ if (deserializeChunk) {
516
+ chunk = deserializeChunk(name, chunk);
517
+ }
518
+ }
519
+ chunk.forEach(function(doc) {
520
+ data.push(doc);
521
+ });
522
+ dataChunks[i] = null;
307
523
  });
308
- });
524
+ return data;
525
+ };
526
+ collection.getData = lokiDeserializeCollectionChunks;
309
527
  }
310
528
  });
311
- };
529
+ }
312
530
 
313
531
  IncrementalIndexedDBAdapter.prototype._initializeIDB = function(dbname, onError, onSuccess) {
314
532
  var that = this;
315
- console.log("initializing idb");
533
+ DEBUG && console.log("initializing idb");
316
534
 
317
535
  if (this.idbInitInProgress) {
318
536
  throw new Error("Cannot open IndexedDB because open is already in progress");
@@ -323,7 +541,7 @@
323
541
 
324
542
  openRequest.onupgradeneeded = function(e) {
325
543
  var db = e.target.result;
326
- console.log('onupgradeneeded, old version: ' + e.oldVersion);
544
+ DEBUG && console.log('onupgradeneeded, old version: ' + e.oldVersion);
327
545
 
328
546
  if (e.oldVersion < 1) {
329
547
  // Version 1 - Initial - Create database
@@ -336,19 +554,25 @@
336
554
 
337
555
  openRequest.onsuccess = function(e) {
338
556
  that.idbInitInProgress = false;
339
- that.idb = e.target.result;
557
+ var db = e.target.result;
558
+ that.idb = db;
340
559
 
341
- if (!that.idb.objectStoreNames.contains('LokiIncrementalData')) {
560
+ if (!db.objectStoreNames.contains('LokiIncrementalData')) {
342
561
  onError(new Error("Missing LokiIncrementalData"));
343
562
  // Attempt to recover (after reload) by deleting database, since it's damaged anyway
344
563
  that.deleteDatabase(dbname);
345
564
  return;
346
565
  }
347
566
 
348
- console.log("init success");
567
+ DEBUG && console.log("init success");
568
+
569
+ db.onversionchange = function(versionChangeEvent) {
570
+ // Ignore if database was deleted and recreated in the meantime
571
+ if (that.idb !== db) {
572
+ return;
573
+ }
349
574
 
350
- that.idb.onversionchange = function(versionChangeEvent) {
351
- console.log('IDB version change', versionChangeEvent);
575
+ DEBUG && console.log('IDB version change', versionChangeEvent);
352
576
  // This function will be called if another connection changed DB version
353
577
  // (Most likely database was deleted from another browser tab, unless there's a new version
354
578
  // of this adapter, or someone makes a connection to IDB outside of this adapter)
@@ -356,6 +580,7 @@
356
580
  // The database will be unusable after this. Be sure to supply `onversionchange` option
357
581
  // to force logout
358
582
  that.idb.close();
583
+ that.idb = null;
359
584
  if (that.options.onversionchange) {
360
585
  that.options.onversionchange(versionChangeEvent);
361
586
  }
@@ -371,79 +596,148 @@
371
596
 
372
597
  openRequest.onerror = function(e) {
373
598
  that.idbInitInProgress = false;
374
- console.error("IndexeddB open error", e);
599
+ console.error("IndexedDB open error", e);
375
600
  onError(e);
376
601
  };
377
602
  };
378
603
 
379
- IncrementalIndexedDBAdapter.prototype._saveChunks = function(dbname, chunks, callback) {
604
+ IncrementalIndexedDBAdapter.prototype._getAllChunks = function(dbname, callback) {
380
605
  var that = this;
381
606
  if (!this.idb) {
382
607
  this._initializeIDB(dbname, callback, function() {
383
- that._saveChunks(dbname, chunks, callback);
608
+ that._getAllChunks(dbname, callback);
384
609
  });
385
610
  return;
386
611
  }
387
612
 
388
- if (this.operationInProgress) {
389
- throw new Error("Error while saving to database - another operation is already in progress. Please use throttledSaves=true option on Loki object");
390
- }
613
+ var tx = this.idb.transaction(['LokiIncrementalData'], "readonly");
614
+ var store = tx.objectStore('LokiIncrementalData');
391
615
 
392
- this.operationInProgress = true;
616
+ var deserializeChunk = this.options.deserializeChunk;
617
+ var lazyCollections = this.lazyCollections;
618
+
619
+ // If there are a lot of chunks (>100), don't request them all in one go, but in multiple
620
+ // "megachunks" (chunks of chunks). This improves concurrency, as main thread is already busy
621
+ // while IDB process is still fetching data. Details: https://github.com/techfort/LokiJS/pull/874
622
+ function getMegachunks(keys) {
623
+ var megachunkCount = that.megachunkCount;
624
+ var keyRanges = createKeyRanges(keys, megachunkCount);
625
+
626
+ var allChunks = [];
627
+ var megachunksReceived = 0;
628
+
629
+ function processMegachunk(e, megachunkIndex, keyRange) {
630
+ // var debugMsg = 'processing chunk ' + megachunkIndex + ' (' + keyRange.lower + ' -- ' + keyRange.upper + ')'
631
+ // DEBUG && console.time(debugMsg);
632
+ var megachunk = e.target.result;
633
+ megachunk.forEach(function (chunk, i) {
634
+ parseChunk(chunk, deserializeChunk, lazyCollections);
635
+ allChunks.push(chunk);
636
+ megachunk[i] = null; // gc
637
+ });
638
+ // DEBUG && console.timeEnd(debugMsg);
393
639
 
394
- var tx = this.idb.transaction(['LokiIncrementalData'], "readwrite");
395
- tx.oncomplete = function() {
396
- that.operationInProgress = false;
397
- console.timeEnd("exportDatabase");
398
- callback();
399
- };
640
+ megachunksReceived += 1;
641
+ if (megachunksReceived === megachunkCount) {
642
+ callback(allChunks);
643
+ }
644
+ }
400
645
 
401
- tx.onerror = function(e) {
402
- that.operationInProgress = false;
403
- callback(e);
404
- };
646
+ // Stagger megachunk requests - first one half, then request the second when first one comes
647
+ // back. This further improves concurrency.
648
+ var megachunkWaves = 2;
649
+ var megachunksPerWave = megachunkCount / megachunkWaves;
650
+ function requestMegachunk(index, wave) {
651
+ var keyRange = keyRanges[index];
652
+ idbReq(store.getAll(keyRange), function(e) {
653
+ if (wave < megachunkWaves) {
654
+ requestMegachunk(index + megachunksPerWave, wave + 1);
655
+ }
405
656
 
406
- tx.onabort = function(e) {
407
- that.operationInProgress = false;
408
- callback(e);
409
- };
657
+ processMegachunk(e, index, keyRange);
658
+ }, function(e) {
659
+ callback(e);
660
+ });
661
+ }
410
662
 
411
- var store = tx.objectStore('LokiIncrementalData');
663
+ for (var i = 0; i < megachunksPerWave; i += 1) {
664
+ requestMegachunk(i, 1);
665
+ }
666
+ }
412
667
 
413
- chunks.forEach(function(object) {
414
- store.put(object);
415
- });
416
- };
668
+ function getAllChunks() {
669
+ idbReq(store.getAll(), function(e) {
670
+ var allChunks = e.target.result;
671
+ allChunks.forEach(function (chunk) {
672
+ parseChunk(chunk, deserializeChunk, lazyCollections);
673
+ });
674
+ callback(allChunks);
675
+ }, function(e) {
676
+ callback(e);
677
+ });
678
+ }
417
679
 
418
- IncrementalIndexedDBAdapter.prototype._getAllChunks = function(dbname, callback) {
419
- var that = this;
420
- if (!this.idb) {
421
- this._initializeIDB(dbname, callback, function() {
422
- that._getAllChunks(dbname, callback);
680
+ function getAllKeys() {
681
+ function onDidGetKeys(keys) {
682
+ keys.sort();
683
+ if (keys.length > 100) {
684
+ getMegachunks(keys);
685
+ } else {
686
+ getAllChunks();
687
+ }
688
+ }
689
+
690
+ idbReq(store.getAllKeys(), function(e) {
691
+ onDidGetKeys(e.target.result);
692
+ }, function(e) {
693
+ callback(e);
423
694
  });
424
- return;
695
+
696
+ if (that.options.onFetchStart) {
697
+ that.options.onFetchStart();
698
+ }
425
699
  }
426
700
 
427
- if (this.operationInProgress) {
428
- throw new Error("Error while loading database - another operation is already in progress. Please use throttledSaves=true option on Loki object");
701
+ getAllKeys();
702
+ };
703
+
704
+ function classifyChunk(chunk) {
705
+ var key = chunk.key;
706
+
707
+ if (key === 'loki') {
708
+ chunk.type = 'loki';
709
+ return;
710
+ } else if (key.includes('.')) {
711
+ var keySegments = key.split(".");
712
+ if (keySegments.length === 3 && keySegments[1] === "chunk") {
713
+ chunk.type = 'data';
714
+ chunk.collectionName = keySegments[0];
715
+ chunk.index = parseInt(keySegments[2], 10);
716
+ return;
717
+ } else if (keySegments.length === 2 && keySegments[1] === "metadata") {
718
+ chunk.type = 'metadata';
719
+ chunk.collectionName = keySegments[0];
720
+ return;
721
+ }
429
722
  }
430
723
 
431
- this.operationInProgress = true;
724
+ console.error("Unknown chunk " + key);
725
+ throw new Error("Corrupted database - unknown chunk found");
726
+ }
432
727
 
433
- var tx = this.idb.transaction(['LokiIncrementalData'], "readonly");
728
+ function parseChunk(chunk, deserializeChunk, lazyCollections) {
729
+ classifyChunk(chunk);
434
730
 
435
- var request = tx.objectStore('LokiIncrementalData').getAll();
436
- request.onsuccess = function(e) {
437
- that.operationInProgress = false;
438
- var chunks = e.target.result;
439
- callback(chunks);
440
- };
731
+ var isData = chunk.type === 'data';
732
+ var isLazy = lazyCollections.includes(chunk.collectionName);
441
733
 
442
- request.onerror = function(e) {
443
- that.operationInProgress = false;
444
- callback(e);
445
- };
446
- };
734
+ if (!(isData && isLazy)) {
735
+ chunk.value = JSON.parse(chunk.value);
736
+ }
737
+ if (deserializeChunk && isData && !isLazy) {
738
+ chunk.value = deserializeChunk(chunk.collectionName, chunk.value);
739
+ }
740
+ }
447
741
 
448
742
  /**
449
743
  * Deletes a database from IndexedDB
@@ -467,8 +761,11 @@
467
761
  this.operationInProgress = true;
468
762
 
469
763
  var that = this;
470
- console.log("deleteDatabase - begin");
471
- console.time("deleteDatabase");
764
+ DEBUG && console.log("deleteDatabase - begin");
765
+ DEBUG && console.time("deleteDatabase");
766
+
767
+ this._prevLokiVersionId = null;
768
+ this._prevCollectionVersionIds = {};
472
769
 
473
770
  if (this.idb) {
474
771
  this.idb.close();
@@ -479,7 +776,7 @@
479
776
 
480
777
  request.onsuccess = function() {
481
778
  that.operationInProgress = false;
482
- console.timeEnd("deleteDatabase");
779
+ DEBUG && console.timeEnd("deleteDatabase");
483
780
  callback({ success: true });
484
781
  };
485
782
 
@@ -496,6 +793,54 @@
496
793
  };
497
794
  };
498
795
 
796
+ function randomVersionId() {
797
+ // Appears to have enough entropy for chunk version IDs
798
+ // (Only has to be different than enough of its own previous versions that there's no writer
799
+ // that thinks a new version is the same as an earlier one, not globally unique)
800
+ return Math.random().toString(36).substring(2);
801
+ }
802
+
803
+ function sortChunksInPlace(chunks) {
804
+ // sort chunks in place to load data in the right order (ascending loki ids)
805
+ // on both Safari and Chrome, we'll get chunks in order like this: 0, 1, 10, 100...
806
+ chunks.sort(function(a, b) {
807
+ return (a.index || 0) - (b.index || 0);
808
+ });
809
+ }
810
+
811
+ function createKeyRanges(keys, count) {
812
+ var countPerRange = Math.floor(keys.length / count);
813
+ var keyRanges = [];
814
+ var minKey, maxKey;
815
+ for (var i = 0; i < count; i += 1) {
816
+ minKey = keys[countPerRange * i];
817
+ maxKey = keys[countPerRange * (i + 1)];
818
+ if (i === 0) {
819
+ // ... < maxKey
820
+ keyRanges.push(IDBKeyRange.upperBound(maxKey, true));
821
+ } else if (i === count - 1) {
822
+ // >= minKey
823
+ keyRanges.push(IDBKeyRange.lowerBound(minKey));
824
+ } else {
825
+ // >= minKey && < maxKey
826
+ keyRanges.push(IDBKeyRange.bound(minKey, maxKey, false, true));
827
+ }
828
+ }
829
+ return keyRanges;
830
+ }
831
+
832
+ function idbReq(request, onsuccess, onerror) {
833
+ request.onsuccess = function (e) {
834
+ try {
835
+ return onsuccess(e);
836
+ } catch (error) {
837
+ onerror(error);
838
+ }
839
+ };
840
+ request.onerror = onerror;
841
+ return request;
842
+ }
843
+
499
844
  return IncrementalIndexedDBAdapter;
500
845
  })();
501
846
  });