@luvio/environments 0.126.2 → 0.128.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/es/es2018/DurableStore.d.ts +1 -1
- package/dist/es/es2018/environments.js +31 -31
- package/dist/umd/es2018/DurableStore.d.ts +1 -1
- package/dist/umd/es2018/environments.js +31 -31
- package/dist/umd/es5/DurableStore.d.ts +1 -1
- package/dist/umd/es5/environments.js +108 -89
- package/package.json +2 -2

package/dist/es/es2018/DurableStore.d.ts
CHANGED

@@ -58,7 +58,7 @@ export interface DurableStoreChange {
      */
     isExternalChange?: boolean;
 }
-export declare type OnDurableStoreChangedListener = (changes: DurableStoreChange[]) => void;
+export declare type OnDurableStoreChangedListener = (changes: DurableStoreChange[]) => Promise<void>;
 /**
  * The DurableStore implementation must have the following behaviors:
  * - It must handle read/write synchronization (ie: reads should wait for any
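
The only change in this declaration file is the listener type: OnDurableStoreChangedListener now returns Promise<void> instead of void, so a change handler can perform asynchronous work before it is considered finished. Below is a minimal TypeScript sketch of what that means for a listener and for a store that emits changes; the notifyListeners helper is hypothetical and the import path assumes the types are re-exported from the package root. Only the two type names and the isExternalChange/segment/ids fields come from this diff.

    import type { DurableStoreChange, OnDurableStoreChangedListener } from '@luvio/environments';

    // A listener can now await async work before it resolves.
    const onChanged: OnDurableStoreChangedListener = async (changes: DurableStoreChange[]) => {
        for (const change of changes) {
            if (change.isExternalChange === true) {
                // e.g. re-read the changed keys from the durable store (illustrative only)
            }
        }
    };

    // Hypothetical emit path inside a DurableStore implementation: with the new
    // signature it can await its listeners instead of firing and forgetting.
    async function notifyListeners(
        listeners: OnDurableStoreChangedListener[],
        changes: DurableStoreChange[]
    ): Promise<void> {
        await Promise.all(listeners.map((l) => l(changes)));
    }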

package/dist/es/es2018/environments.js
CHANGED

@@ -452,7 +452,7 @@ function makeDurable(environment, { durableStore, instrumentation }) {
             throw new Error('This makeDurable instance has been disposed');
         }
     };
-    const unsubscribe = durableStore.registerOnChangedListener((changes) => {
+    const unsubscribe = durableStore.registerOnChangedListener(async (changes) => {
         const defaultSegmentKeys = [];
         const adapterContextSegmentKeys = [];
         for (let i = 0, len = changes.length; i < len; i++) {
@@ -481,11 +481,8 @@ function makeDurable(environment, { durableStore, instrumentation }) {
             }
         }
         if (adapterContextKeysFromDifferentInstance.length > 0) {
-            // change handlers are sync, so kick off L2 read of the changed
-            // segment keys but we can't await it
-            durableStore
-                .getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment)
-                .then((entries) => {
+            try {
+                const entries = await durableStore.getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment);
                 if (entries !== undefined) {
                     const entryKeys = keys(entries);
                     for (let i = 0, len = entryKeys.length; i < len; i++) {
@@ -494,8 +491,10 @@ function makeDurable(environment, { durableStore, instrumentation }) {
                         contextStores[entryKey] = entry.data;
                     }
                 }
-            }
-
+            }
+            catch (error) {
+                durableStoreErrorHandler(error);
+            }
         }
         // process default segment changes
         const defaultSegmentKeysLength = defaultSegmentKeys.length;
@@ -513,7 +512,7 @@ function makeDurable(environment, { durableStore, instrumentation }) {
                 // call base environment storeEvict so this evict is not tracked for durable deletion
                 environment.storeEvict(key);
             }
-            environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable);
+            await environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable);
         }
     });
     const dispose = function () {
@@ -558,9 +557,10 @@ function makeDurable(environment, { durableStore, instrumentation }) {
     };
     const storeBroadcast = function (_rebuildSnapshot, _snapshotDataAvailable) {
         validateNotDisposed();
-        //
+        // return resolved promise here and wait for the L2 flush to happen in handleSuccessResponse,
        // that flush will cause the onChanged handler to fire which will revive
        // records to the main L1 store and call the base storeBroadcast
+        return Promise.resolve();
     };
     const publishChangesToDurableStore = function () {
         validateNotDisposed();
@@ -603,28 +603,28 @@ function makeDurable(environment, { durableStore, instrumentation }) {
         }
         return environment.wrapNormalizedGraphNode(normalized, ingestStagingStore);
     };
-    const rebuildSnapshot = function (snapshot,
+    const rebuildSnapshot = function (snapshot, onRebuild) {
         validateNotDisposed();
         // try rebuilding from memory
-
-
-
-
-
-
-
-
-
-
-
+        environment.rebuildSnapshot(snapshot, (rebuilt) => {
+            // only try reviving from durable store if snapshot is unfulfilled
+            if (rebuilt.state !== 'Unfulfilled') {
+                onRebuild(rebuilt);
+                return;
+            }
+            // Do an L2 revive and emit to subscriber using the callback.
+            reviveSnapshot(environment, durableStore, rebuilt, durableStoreErrorHandler, () => {
+                // reviveSnapshot will revive into L1, and since "records" is a reference
+                // (and not a copy) to the L1 records we can use it for rebuild
+                let rebuiltSnap;
+                environment.rebuildSnapshot(snapshot, (rebuilt) => {
+                    rebuiltSnap = rebuilt;
+                });
+                return rebuiltSnap;
+            }).then((result) => {
+                onRebuild(result.snapshot);
+            });
         });
-        // synchronously return the base snapshot as Pending if not already
-        return snapshot.state === 'Pending'
-            ? snapshot
-            : {
-                ...snapshot,
-                state: 'Pending',
-            };
     };
     const withContext = function (adapter, options) {
         validateNotDisposed();
@@ -773,7 +773,7 @@ function makeDurable(environment, { durableStore, instrumentation }) {
             },
             // we don't need to prime metadata
             () => { });
-            snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+            snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
             await publishChangesToDurableStore();
         })();
         for (const key of keysAsArray) {
@@ -801,7 +801,7 @@ function makeDurable(environment, { durableStore, instrumentation }) {
             // underlying DurableStore implementation takes care of R/W sync
             // so all we have to do is ingest then write to L2
             ingestStagingStore = buildIngestStagingStore(environment);
-            snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+            snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
             await publishChangesToDurableStore();
         }
         if (snapshotFromMemoryIngest === undefined) {
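
The compiled change follows one pattern throughout: the registered change handler is now async (the L2 getEntries read is awaited inside try/catch and routed to durableStoreErrorHandler), storeBroadcast resolves a Promise that is awaited, the in-memory ingest (ingestAndBroadcastFunc) is awaited before publishing to the durable store, and rebuildSnapshot no longer returns a Pending copy of the snapshot synchronously; instead it hands the rebuilt snapshot to an onRebuild callback, after an L2 revive when the in-memory rebuild is Unfulfilled. A minimal sketch of that callback convention follows, using a simplified stand-in for the engine's snapshot type and a hypothetical caller (neither is part of the package):

    // Simplified stand-in for the engine's snapshot shape (illustration only).
    type Snapshot<D> = {
        state: 'Fulfilled' | 'Unfulfilled' | 'Pending' | 'Stale' | 'Error';
        data?: D;
    };

    // Shape implied by the diff: the rebuilt snapshot arrives via a callback,
    // possibly asynchronously after a durable-store (L2) revive.
    type RebuildSnapshot = <D>(
        snapshot: Snapshot<D>,
        onRebuild: (rebuilt: Snapshot<D>) => void
    ) => void;

    // Hypothetical caller: code that previously used the synchronous Pending
    // return value now waits for onRebuild to fire.
    function emitRebuilt<D>(
        rebuildSnapshot: RebuildSnapshot,
        snapshot: Snapshot<D>,
        emit: (s: Snapshot<D>) => void
    ): void {
        rebuildSnapshot(snapshot, (rebuilt) => emit(rebuilt));
    }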

package/dist/umd/es2018/DurableStore.d.ts
CHANGED

@@ -58,7 +58,7 @@ export interface DurableStoreChange {
      */
     isExternalChange?: boolean;
 }
-export declare type OnDurableStoreChangedListener = (changes: DurableStoreChange[]) => void;
+export declare type OnDurableStoreChangedListener = (changes: DurableStoreChange[]) => Promise<void>;
 /**
  * The DurableStore implementation must have the following behaviors:
  * - It must handle read/write synchronization (ie: reads should wait for any

package/dist/umd/es2018/environments.js
CHANGED

@@ -456,7 +456,7 @@
             throw new Error('This makeDurable instance has been disposed');
         }
     };
-    const unsubscribe = durableStore.registerOnChangedListener((changes) => {
+    const unsubscribe = durableStore.registerOnChangedListener(async (changes) => {
         const defaultSegmentKeys = [];
         const adapterContextSegmentKeys = [];
         for (let i = 0, len = changes.length; i < len; i++) {
@@ -485,11 +485,8 @@
             }
         }
         if (adapterContextKeysFromDifferentInstance.length > 0) {
-            // change handlers are sync, so kick off L2 read of the changed
-            // segment keys but we can't await it
-            durableStore
-                .getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment)
-                .then((entries) => {
+            try {
+                const entries = await durableStore.getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment);
                 if (entries !== undefined) {
                     const entryKeys = keys(entries);
                     for (let i = 0, len = entryKeys.length; i < len; i++) {
@@ -498,8 +495,10 @@
                         contextStores[entryKey] = entry.data;
                     }
                 }
-            }
-
+            }
+            catch (error) {
+                durableStoreErrorHandler(error);
+            }
         }
         // process default segment changes
         const defaultSegmentKeysLength = defaultSegmentKeys.length;
@@ -517,7 +516,7 @@
                 // call base environment storeEvict so this evict is not tracked for durable deletion
                 environment.storeEvict(key);
             }
-            environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable);
+            await environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable);
         }
     });
     const dispose = function () {
@@ -562,9 +561,10 @@
     };
     const storeBroadcast = function (_rebuildSnapshot, _snapshotDataAvailable) {
         validateNotDisposed();
-        //
+        // return resolved promise here and wait for the L2 flush to happen in handleSuccessResponse,
         // that flush will cause the onChanged handler to fire which will revive
         // records to the main L1 store and call the base storeBroadcast
+        return Promise.resolve();
     };
     const publishChangesToDurableStore = function () {
         validateNotDisposed();
@@ -607,28 +607,28 @@
         }
         return environment.wrapNormalizedGraphNode(normalized, ingestStagingStore);
     };
-    const rebuildSnapshot = function (snapshot,
+    const rebuildSnapshot = function (snapshot, onRebuild) {
         validateNotDisposed();
         // try rebuilding from memory
-
-
-
-
-
-
-
-
-
-
-
+        environment.rebuildSnapshot(snapshot, (rebuilt) => {
+            // only try reviving from durable store if snapshot is unfulfilled
+            if (rebuilt.state !== 'Unfulfilled') {
+                onRebuild(rebuilt);
+                return;
+            }
+            // Do an L2 revive and emit to subscriber using the callback.
+            reviveSnapshot(environment, durableStore, rebuilt, durableStoreErrorHandler, () => {
+                // reviveSnapshot will revive into L1, and since "records" is a reference
+                // (and not a copy) to the L1 records we can use it for rebuild
+                let rebuiltSnap;
+                environment.rebuildSnapshot(snapshot, (rebuilt) => {
+                    rebuiltSnap = rebuilt;
+                });
+                return rebuiltSnap;
+            }).then((result) => {
+                onRebuild(result.snapshot);
+            });
         });
-        // synchronously return the base snapshot as Pending if not already
-        return snapshot.state === 'Pending'
-            ? snapshot
-            : {
-                ...snapshot,
-                state: 'Pending',
-            };
     };
     const withContext = function (adapter, options) {
         validateNotDisposed();
@@ -777,7 +777,7 @@
             },
             // we don't need to prime metadata
             () => { });
-            snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+            snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
             await publishChangesToDurableStore();
         })();
         for (const key of keysAsArray) {
@@ -805,7 +805,7 @@
             // underlying DurableStore implementation takes care of R/W sync
             // so all we have to do is ingest then write to L2
             ingestStagingStore = buildIngestStagingStore(environment);
-            snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+            snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
             await publishChangesToDurableStore();
         }
         if (snapshotFromMemoryIngest === undefined) {

package/dist/umd/es5/DurableStore.d.ts
CHANGED

@@ -58,7 +58,7 @@ export interface DurableStoreChange {
      */
     isExternalChange?: boolean;
 }
-export declare type OnDurableStoreChangedListener = (changes: DurableStoreChange[]) => void;
+export declare type OnDurableStoreChangedListener = (changes: DurableStoreChange[]) => Promise<void>;
 /**
  * The DurableStore implementation must have the following behaviors:
  * - It must handle read/write synchronization (ie: reads should wait for any

package/dist/umd/es5/environments.js
CHANGED

@@ -535,6 +535,7 @@
  * @param instrumentation An instrumentation function implementation
  */
 function makeDurable(environment, _a) {
+    var _this = this;
     var durableStore = _a.durableStore, instrumentation = _a.instrumentation;
     var ingestStagingStore = null;
     var durableTTLStore = new DurableTTLStore(durableStore);
@@ -559,71 +560,82 @@
             throw new Error('This makeDurable instance has been disposed');
         }
     };
-    var unsubscribe = durableStore.registerOnChangedListener(function (changes) {
-        var defaultSegmentKeys
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            var key = adapterContextSegmentKeys_1[_i];
-            if (pendingContextStoreKeys.has(key)) {
-                // if this instance caused the L2 write then remove from the
-                // "pending" Set and move on
-                pendingContextStoreKeys.delete(key);
-            }
-            else {
-                // else it came from another luvio instance and we need to
-                // read from L2
-                adapterContextKeysFromDifferentInstance.push(key);
-            }
-        }
-        if (adapterContextKeysFromDifferentInstance.length > 0) {
-            // change handlers are sync, so kick off L2 read of the changed
-            // segment keys but we can't await it
-            durableStore
-                .getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment)
-                .then(function (entries) {
-                if (entries !== undefined) {
-                    var entryKeys = keys(entries);
-                    for (var i = 0, len = entryKeys.length; i < len; i++) {
-                        var entryKey = entryKeys[i];
-                        var entry = entries[entryKey];
-                        contextStores[entryKey] = entry.data;
+    var unsubscribe = durableStore.registerOnChangedListener(function (changes) { return __awaiter(_this, void 0, void 0, function () {
+        var defaultSegmentKeys, adapterContextSegmentKeys, i, len, change, adapterContextKeysFromDifferentInstance, _i, adapterContextSegmentKeys_1, key, entries, entryKeys, i, len, entryKey, entry, error_2, defaultSegmentKeysLength, i, key, canonical;
+        return __generator(this, function (_a) {
+            switch (_a.label) {
+                case 0:
+                    defaultSegmentKeys = [];
+                    adapterContextSegmentKeys = [];
+                    for (i = 0, len = changes.length; i < len; i++) {
+                        change = changes[i];
+                        // we only care about changes to the data which is stored in the default
+                        // segment or the adapter context
+                        if (change.segment === DefaultDurableSegment) {
+                            defaultSegmentKeys.push.apply(defaultSegmentKeys, change.ids);
+                        }
+                        else if (change.segment === AdapterContextSegment) {
+                            adapterContextSegmentKeys.push.apply(adapterContextSegmentKeys, change.ids);
+                        }
                     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    adapterContextKeysFromDifferentInstance = [];
+                    for (_i = 0, adapterContextSegmentKeys_1 = adapterContextSegmentKeys; _i < adapterContextSegmentKeys_1.length; _i++) {
+                        key = adapterContextSegmentKeys_1[_i];
+                        if (pendingContextStoreKeys.has(key)) {
+                            // if this instance caused the L2 write then remove from the
+                            // "pending" Set and move on
+                            pendingContextStoreKeys.delete(key);
+                        }
+                        else {
+                            // else it came from another luvio instance and we need to
+                            // read from L2
+                            adapterContextKeysFromDifferentInstance.push(key);
+                        }
+                    }
+                    if (!(adapterContextKeysFromDifferentInstance.length > 0)) return [3 /*break*/, 4];
+                    _a.label = 1;
+                case 1:
+                    _a.trys.push([1, 3, , 4]);
+                    return [4 /*yield*/, durableStore.getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment)];
+                case 2:
+                    entries = _a.sent();
+                    if (entries !== undefined) {
+                        entryKeys = keys(entries);
+                        for (i = 0, len = entryKeys.length; i < len; i++) {
+                            entryKey = entryKeys[i];
+                            entry = entries[entryKey];
+                            contextStores[entryKey] = entry.data;
+                        }
+                    }
+                    return [3 /*break*/, 4];
+                case 3:
+                    error_2 = _a.sent();
+                    durableStoreErrorHandler(error_2);
+                    return [3 /*break*/, 4];
+                case 4:
+                    defaultSegmentKeysLength = defaultSegmentKeys.length;
+                    if (!(defaultSegmentKeysLength > 0)) return [3 /*break*/, 6];
+                    for (i = 0; i < defaultSegmentKeysLength; i++) {
+                        key = defaultSegmentKeys[i];
+                        canonical = environment.storeGetCanonicalKey(key);
+                        if (canonical !== key) {
+                            continue;
+                        }
+                        // TODO: W-8909393 If expiration is the only thing that changed we should not evict the data... so
+                        // if we stored expiration and data at different keys (or same keys in different segments)
+                        // then we could know if only the expiration has changed and we wouldn't need to evict
+                        // and go through an entire broadcast/revive cycle for unchanged data
+                        // call base environment storeEvict so this evict is not tracked for durable deletion
+                        environment.storeEvict(key);
+                    }
+                    return [4 /*yield*/, environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable)];
+                case 5:
+                    _a.sent();
+                    _a.label = 6;
+                case 6: return [2 /*return*/];
             }
-
-
-        });
+        });
+    }); });
     var dispose = function () {
         validateNotDisposed();
         disposed = true;
@@ -666,9 +678,10 @@
     };
     var storeBroadcast = function (_rebuildSnapshot, _snapshotDataAvailable) {
        validateNotDisposed();
-        //
+        // return resolved promise here and wait for the L2 flush to happen in handleSuccessResponse,
        // that flush will cause the onChanged handler to fire which will revive
        // records to the main L1 store and call the base storeBroadcast
+        return Promise.resolve();
     };
     var publishChangesToDurableStore = function () {
         validateNotDisposed();
@@ -711,26 +724,28 @@
         }
         return environment.wrapNormalizedGraphNode(normalized, ingestStagingStore);
     };
-    var rebuildSnapshot = function (snapshot,
+    var rebuildSnapshot = function (snapshot, onRebuild) {
         validateNotDisposed();
         // try rebuilding from memory
-
-
-
-
-
-
-
-
-
-
-
-
+        environment.rebuildSnapshot(snapshot, function (rebuilt) {
+            // only try reviving from durable store if snapshot is unfulfilled
+            if (rebuilt.state !== 'Unfulfilled') {
+                onRebuild(rebuilt);
+                return;
+            }
+            // Do an L2 revive and emit to subscriber using the callback.
+            reviveSnapshot(environment, durableStore, rebuilt, durableStoreErrorHandler, function () {
+                // reviveSnapshot will revive into L1, and since "records" is a reference
+                // (and not a copy) to the L1 records we can use it for rebuild
+                var rebuiltSnap;
+                environment.rebuildSnapshot(snapshot, function (rebuilt) {
+                    rebuiltSnap = rebuilt;
+                });
+                return rebuiltSnap;
+            }).then(function (result) {
+                onRebuild(result.snapshot);
+            });
         });
-        // synchronously return the base snapshot as Pending if not already
-        return snapshot.state === 'Pending'
-            ? snapshot
-            : __assign(__assign({}, snapshot), { state: 'Pending' });
     };
     var withContext = function (adapter, options) {
         validateNotDisposed();
@@ -899,9 +914,11 @@
                     },
                     // we don't need to prime metadata
                     function () { });
-
-                    return [4 /*yield*/, publishChangesToDurableStore()];
+                    return [4 /*yield*/, ingestAndBroadcastFunc()];
                 case 3:
+                    snapshotFromMemoryIngest = _a.sent();
+                    return [4 /*yield*/, publishChangesToDurableStore()];
+                case 4:
                     _a.sent();
                     return [2 /*return*/];
             }
@@ -932,18 +949,20 @@
                     }
                 }
                 return [7 /*endfinally*/];
-            case 4: return [3 /*break*/,
+            case 4: return [3 /*break*/, 8];
             case 5:
                 // we aren't doing any merging so we don't have to synchronize, the
                 // underlying DurableStore implementation takes care of R/W sync
                 // so all we have to do is ingest then write to L2
                 ingestStagingStore = buildIngestStagingStore(environment);
-
-                return [4 /*yield*/, publishChangesToDurableStore()];
+                return [4 /*yield*/, ingestAndBroadcastFunc()];
             case 6:
-                _d.sent();
-
+                snapshotFromMemoryIngest = _d.sent();
+                return [4 /*yield*/, publishChangesToDurableStore()];
             case 7:
+                _d.sent();
+                _d.label = 8;
+            case 8:
                 if (snapshotFromMemoryIngest === undefined) {
                     return [2 /*return*/, undefined];
                 }
@@ -952,7 +971,7 @@
                 }
                 _c = snapshotFromMemoryIngest, select = _c.select, refresh = _c.refresh;
                 return [4 /*yield*/, reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, function () { return environment.storeLookup(select, environment.createSnapshot, refresh); })];
-            case
+            case 9:
                 result = _d.sent();
                 return [2 /*return*/, result.snapshot];
             }
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@luvio/environments",
-  "version": "0.126.2",
+  "version": "0.128.0",
   "description": "Luvio Environments",
   "repository": {
     "type": "git",
@@ -23,7 +23,7 @@
     "watch": "yarn build --watch"
   },
   "dependencies": {
-    "@luvio/engine": "0.
+    "@luvio/engine": "0.128.0"
   },
   "bundlesize": [
     {