@luvio/environments 0.106.0 → 0.108.0

@@ -419,6 +419,7 @@ function isUnfulfilledSnapshot(cachedSnapshotResult) {
 function makeDurable(environment, { durableStore, instrumentation }) {
     let ingestStagingStore = null;
     const durableTTLStore = new DurableTTLStore(durableStore);
+    const mergeKeysPromiseMap = new Map();
     let initializationPromise = new Promise((resolve) => {
         const finish = () => {
             resolve();
@@ -677,52 +678,84 @@ function makeDurable(environment, { durableStore, instrumentation }) {
         }
         return {};
     };
-    const handleSuccessResponse = function (ingestAndBroadcastFunc, getResponseCacheKeysFunc, existingRecords) {
+    const handleSuccessResponse = async function (ingestAndBroadcastFunc, getResponseCacheKeysFunc) {
         validateNotDisposed();
         const cacheKeySet = getResponseCacheKeysFunc();
-        const cacheKeys = new Set(keys(cacheKeySet));
+        const cacheKeySetKeys = keys(cacheKeySet);
         const keysToRevive = {};
-        cacheKeys.forEach((cacheKey) => {
-            const key = cacheKeySet[cacheKey];
-            if (key.mergeable === true &&
-                (existingRecords === undefined || existingRecords[cacheKey] === undefined)) {
-                keysToRevive[cacheKey] = true;
+        for (const cacheKeySetKey of cacheKeySetKeys) {
+            const cacheKey = cacheKeySet[cacheKeySetKey];
+            if (cacheKey.mergeable === true) {
+                keysToRevive[cacheKeySetKey] = true;
             }
-        });
-        const ingestAndPublish = (revivedRecords) => {
-            const toPrime = existingRecords !== undefined
-                ? { ...revivedRecords, ...existingRecords }
-                : revivedRecords;
-            ingestStagingStore = buildIngestStagingStore(environment);
-            ingestStagingStore.records = toPrime;
-            const snapshotFromMemoryIngest = ingestAndBroadcastFunc();
-            return publishChangesToDurableStore().then(() => {
-                if (snapshotFromMemoryIngest === undefined) {
-                    return undefined;
+        }
+        let snapshotFromMemoryIngest = undefined;
+        const keysAsArray = keys(keysToRevive);
+        if (keysAsArray.length > 0) {
+            // if we need to do an L2 read then L2 write then we need to synchronize
+            // our read/merge/ingest/write Promise based on the keys so we don't
+            // stomp over any data
+            const readWritePromise = (async () => {
+                const pendingPromises = [];
+                for (const key of keysAsArray) {
+                    const pendingPromise = mergeKeysPromiseMap.get(key);
+                    if (pendingPromise !== undefined) {
+                        // IMPORTANT: while on the synchronous code path we get a
+                        // handle to pendingPromise and push it onto the array.
+                        // This is important because later in this synchronous code
+                        // path we will upsert readWritePromise into the
+                        // mergeKeysPromiseMap (essentially overwriting pendingPromise
+                        // in the map).
+                        pendingPromises.push(pendingPromise);
+                    }
                 }
-                if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
-                    return snapshotFromMemoryIngest;
+                await Promise.all(pendingPromises);
+                const entries = await durableStore.getEntries(keysAsArray, DefaultDurableSegment);
+                ingestStagingStore = buildIngestStagingStore(environment);
+                publishDurableStoreEntries(entries, ingestStagingStore.publish.bind(ingestStagingStore),
+                // we don't need to prime metadata
+                () => { });
+                snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+                await publishChangesToDurableStore();
+            })();
+            for (const key of keysAsArray) {
+                // we are overwriting the previous promise at this key, but that
+                // is ok because we got a handle to it earlier (see the IMPORTANT
+                // comment about 35 lines up)
+                mergeKeysPromiseMap.set(key, readWritePromise);
+            }
+            try {
+                await readWritePromise;
+            }
+            finally {
+                for (const key of keysAsArray) {
+                    const pendingPromise = mergeKeysPromiseMap.get(key);
+                    // cleanup the entry from the map if this is the last promise
+                    // for that key
+                    if (pendingPromise === readWritePromise) {
+                        mergeKeysPromiseMap.delete(key);
+                    }
                 }
-                // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
-                return reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, () => environment.storeLookup(snapshotFromMemoryIngest.select, environment.createSnapshot, snapshotFromMemoryIngest.refresh)).then((result) => {
-                    return result.snapshot;
-                });
-            });
-        };
-        if (keys(keysToRevive).length === 0) {
-            return ingestAndPublish({});
+            }
         }
-        return durableStore
-            .getEntries(keys(keysToRevive), DefaultDurableSegment)
-            .then((entries) => {
-            const existingL2Records = create(null);
-            publishDurableStoreEntries(entries, (key, record) => {
-                existingL2Records[key] = record;
-            },
-            // we don't need to prime metadata
-            () => { });
-            return ingestAndPublish(existingL2Records);
-        });
+        else {
+            // we aren't doing any merging so we don't have to synchronize, the
+            // underlying DurableStore implementation takes care of R/W sync
+            // so all we have to do is ingest then write to L2
+            ingestStagingStore = buildIngestStagingStore(environment);
+            snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+            await publishChangesToDurableStore();
+        }
+        if (snapshotFromMemoryIngest === undefined) {
+            return undefined;
+        }
+        if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
+            return snapshotFromMemoryIngest;
+        }
+        // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
+        const { select, refresh } = snapshotFromMemoryIngest;
+        const result = await reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, () => environment.storeLookup(select, environment.createSnapshot, refresh));
+        return result.snapshot;
     };
     const handleErrorResponse = function (ingestAndBroadcastFunc) {
         validateNotDisposed();
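
The new mergeKeysPromiseMap above serializes overlapping read/merge/ingest/write cycles per cache key: each call captures whatever promise is already registered for its mergeable keys, waits for it, revives the current L2 entries, ingests, writes back, and only removes its own promise from the map if it is still the latest one. A minimal standalone TypeScript sketch of that keyed-serialization pattern follows; readL2, mergeIntoStaging, and writeL2 are hypothetical placeholders for the durable-store steps, not @luvio APIs.

// Sketch of per-key read/merge/write serialization, modeled on the hunk above.
// readL2 / mergeIntoStaging / writeL2 are hypothetical stand-ins for the
// durable-store read, staging-store merge, and durable-store write steps.
type Records = Record<string, unknown>;

const inFlightByKey = new Map<string, Promise<void>>();

async function serializedMergeWrite(
    mergeKeys: string[],
    readL2: (keys: string[]) => Promise<Records>,
    mergeIntoStaging: (existing: Records) => Records,
    writeL2: (records: Records) => Promise<void>
): Promise<void> {
    const readWritePromise = (async () => {
        // Runs synchronously up to the first await, so any prior promises are
        // captured before the loop below overwrites the map entries.
        const pending = mergeKeys
            .map((key) => inFlightByKey.get(key))
            .filter((p): p is Promise<void> => p !== undefined);
        await Promise.all(pending);
        const existing = await readL2(mergeKeys);      // revive current L2 state
        await writeL2(mergeIntoStaging(existing));     // ingest, then publish back to L2
    })();
    for (const key of mergeKeys) {
        inFlightByKey.set(key, readWritePromise);      // last writer owns the map slot
    }
    try {
        await readWritePromise;
    } finally {
        for (const key of mergeKeys) {
            // only clean up if no newer cycle has replaced this promise
            if (inFlightByKey.get(key) === readWritePromise) {
                inFlightByKey.delete(key);
            }
        }
    }
}

If a later caller registers for the same key before this one finishes, the cleanup is skipped and the newer promise stays in the map, which is the same behaviour as the `pendingPromise === readWritePromise` check in the diff.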
@@ -1,4 +1,4 @@
-import type { CacheKeySet, Environment, RecordSource, Snapshot, InMemoryStore } from '@luvio/engine';
+import type { Environment, RecordSource, InMemoryStore } from '@luvio/engine';
 import type { DurableStore } from './DurableStore';
 import type { InstrumentationFunction } from './makeDurable/error';
 import type { TTLOverridesMap } from './DurableTTLStore';
@@ -25,12 +25,6 @@ export interface DurableEnvironment extends Environment {
      * flow, otherwise returns an empty object.
      */
     getIngestStagingStoreMetadata(): InMemoryStore['metadata'];
-    /**
-     * Overload of Environment.handleSuccessResponse that takes in an optional
-     * RecordSource to "prime" the ingest staging store with before calling
-     * ingest. Useful for merge-able record types
-     */
-    handleSuccessResponse<IngestionReturnType extends Snapshot<D, V> | undefined, D, V = unknown>(ingestAndBroadcastFunc: () => IngestionReturnType, getResponseCacheKeysFunc: () => CacheKeySet, existingRecords?: RecordSource): IngestionReturnType | Promise<IngestionReturnType>;
 }
 export declare const AdapterContextSegment = "ADAPTER-CONTEXT";
 export declare const ADAPTER_CONTEXT_ID_SUFFIX = "__NAMED_CONTEXT";
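
The declaration change above removes the three-argument overload that let adapters pre-seed the staging store with existingRecords; only the two-argument shape inherited from Environment remains, and mergeable keys are now revived from L2 inside handleSuccessResponse itself. A compile-time sketch of a 0.108.0 caller, assuming DurableEnvironment is re-exported from the package root (the adapter helper names are hypothetical):

import type { CacheKeySet, Snapshot } from '@luvio/engine';
import type { DurableEnvironment } from '@luvio/environments';

// Hypothetical adapter plumbing; only the call shape below is the point.
declare const durableEnvironment: DurableEnvironment;
declare function ingestAndBroadcast(): Snapshot<unknown> | undefined;
declare function getResponseCacheKeys(): CacheKeySet;

async function onNetworkSuccess() {
    // 0.106.0 also accepted a third `existingRecords?: RecordSource` argument;
    // on 0.108.0 only the two-argument base signature is declared.
    return durableEnvironment.handleSuccessResponse(
        () => ingestAndBroadcast(),
        () => getResponseCacheKeys()
    );
}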
@@ -516,6 +516,7 @@
     var durableStore = _a.durableStore, instrumentation = _a.instrumentation;
     var ingestStagingStore = null;
     var durableTTLStore = new DurableTTLStore(durableStore);
+    var mergeKeysPromiseMap = new Map();
     var initializationPromise = new Promise(function (resolve) {
         var finish = function () {
             resolve();
@@ -779,52 +780,114 @@
         }
         return {};
     };
-    var handleSuccessResponse = function (ingestAndBroadcastFunc, getResponseCacheKeysFunc, existingRecords) {
-        validateNotDisposed();
-        var cacheKeySet = getResponseCacheKeysFunc();
-        var cacheKeys = new Set(keys(cacheKeySet));
-        var keysToRevive = {};
-        cacheKeys.forEach(function (cacheKey) {
-            var key = cacheKeySet[cacheKey];
-            if (key.mergeable === true &&
-                (existingRecords === undefined || existingRecords[cacheKey] === undefined)) {
-                keysToRevive[cacheKey] = true;
-            }
-        });
-        var ingestAndPublish = function (revivedRecords) {
-            var toPrime = existingRecords !== undefined
-                ? __assign(__assign({}, revivedRecords), existingRecords) : revivedRecords;
-            ingestStagingStore = buildIngestStagingStore(environment);
-            ingestStagingStore.records = toPrime;
-            var snapshotFromMemoryIngest = ingestAndBroadcastFunc();
-            return publishChangesToDurableStore().then(function () {
-                if (snapshotFromMemoryIngest === undefined) {
-                    return undefined;
-                }
-                if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
-                    return snapshotFromMemoryIngest;
+    var handleSuccessResponse = function (ingestAndBroadcastFunc, getResponseCacheKeysFunc) {
+        return __awaiter(this, void 0, void 0, function () {
+            var cacheKeySet, cacheKeySetKeys, keysToRevive, _i, cacheKeySetKeys_1, cacheKeySetKey, cacheKey, snapshotFromMemoryIngest, keysAsArray, readWritePromise, _a, keysAsArray_1, key, _b, keysAsArray_2, key, pendingPromise, _c, select, refresh, result;
+            var _this = this;
+            return __generator(this, function (_d) {
+                switch (_d.label) {
+                    case 0:
+                        validateNotDisposed();
+                        cacheKeySet = getResponseCacheKeysFunc();
+                        cacheKeySetKeys = keys(cacheKeySet);
+                        keysToRevive = {};
+                        for (_i = 0, cacheKeySetKeys_1 = cacheKeySetKeys; _i < cacheKeySetKeys_1.length; _i++) {
+                            cacheKeySetKey = cacheKeySetKeys_1[_i];
+                            cacheKey = cacheKeySet[cacheKeySetKey];
+                            if (cacheKey.mergeable === true) {
+                                keysToRevive[cacheKeySetKey] = true;
+                            }
+                        }
+                        snapshotFromMemoryIngest = undefined;
+                        keysAsArray = keys(keysToRevive);
+                        if (!(keysAsArray.length > 0)) return [3 /*break*/, 5];
+                        readWritePromise = (function () { return __awaiter(_this, void 0, void 0, function () {
+                            var pendingPromises, _i, keysAsArray_3, key, pendingPromise, entries;
+                            return __generator(this, function (_a) {
+                                switch (_a.label) {
+                                    case 0:
+                                        pendingPromises = [];
+                                        for (_i = 0, keysAsArray_3 = keysAsArray; _i < keysAsArray_3.length; _i++) {
+                                            key = keysAsArray_3[_i];
+                                            pendingPromise = mergeKeysPromiseMap.get(key);
+                                            if (pendingPromise !== undefined) {
+                                                // IMPORTANT: while on the synchronous code path we get a
+                                                // handle to pendingPromise and push it onto the array.
+                                                // This is important because later in this synchronous code
+                                                // path we will upsert readWritePromise into the
+                                                // mergeKeysPromiseMap (essentially overwriting pendingPromise
+                                                // in the map).
+                                                pendingPromises.push(pendingPromise);
+                                            }
+                                        }
+                                        return [4 /*yield*/, Promise.all(pendingPromises)];
+                                    case 1:
+                                        _a.sent();
+                                        return [4 /*yield*/, durableStore.getEntries(keysAsArray, DefaultDurableSegment)];
+                                    case 2:
+                                        entries = _a.sent();
+                                        ingestStagingStore = buildIngestStagingStore(environment);
+                                        publishDurableStoreEntries(entries, ingestStagingStore.publish.bind(ingestStagingStore),
+                                        // we don't need to prime metadata
+                                        function () { });
+                                        snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+                                        return [4 /*yield*/, publishChangesToDurableStore()];
+                                    case 3:
+                                        _a.sent();
+                                        return [2 /*return*/];
+                                }
+                            });
+                        }); })();
+                        for (_a = 0, keysAsArray_1 = keysAsArray; _a < keysAsArray_1.length; _a++) {
+                            key = keysAsArray_1[_a];
+                            // we are overwriting the previous promise at this key, but that
+                            // is ok because we got a handle to it earlier (see the IMPORTANT
+                            // comment about 35 lines up)
+                            mergeKeysPromiseMap.set(key, readWritePromise);
+                        }
+                        _d.label = 1;
+                    case 1:
+                        _d.trys.push([1, , 3, 4]);
+                        return [4 /*yield*/, readWritePromise];
+                    case 2:
+                        _d.sent();
+                        return [3 /*break*/, 4];
+                    case 3:
+                        for (_b = 0, keysAsArray_2 = keysAsArray; _b < keysAsArray_2.length; _b++) {
+                            key = keysAsArray_2[_b];
+                            pendingPromise = mergeKeysPromiseMap.get(key);
+                            // cleanup the entry from the map if this is the last promise
+                            // for that key
+                            if (pendingPromise === readWritePromise) {
+                                mergeKeysPromiseMap.delete(key);
+                            }
+                        }
+                        return [7 /*endfinally*/];
+                    case 4: return [3 /*break*/, 7];
+                    case 5:
+                        // we aren't doing any merging so we don't have to synchronize, the
+                        // underlying DurableStore implementation takes care of R/W sync
+                        // so all we have to do is ingest then write to L2
+                        ingestStagingStore = buildIngestStagingStore(environment);
+                        snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+                        return [4 /*yield*/, publishChangesToDurableStore()];
+                    case 6:
+                        _d.sent();
+                        _d.label = 7;
+                    case 7:
+                        if (snapshotFromMemoryIngest === undefined) {
+                            return [2 /*return*/, undefined];
+                        }
+                        if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
+                            return [2 /*return*/, snapshotFromMemoryIngest];
+                        }
+                        _c = snapshotFromMemoryIngest, select = _c.select, refresh = _c.refresh;
+                        return [4 /*yield*/, reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, function () { return environment.storeLookup(select, environment.createSnapshot, refresh); })];
+                    case 8:
+                        result = _d.sent();
+                        return [2 /*return*/, result.snapshot];
                 }
-                // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
-                return reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, function () {
-                    return environment.storeLookup(snapshotFromMemoryIngest.select, environment.createSnapshot, snapshotFromMemoryIngest.refresh);
-                }).then(function (result) {
-                    return result.snapshot;
-                });
             });
-        };
-        if (keys(keysToRevive).length === 0) {
-            return ingestAndPublish({});
-        }
-        return durableStore
-            .getEntries(keys(keysToRevive), DefaultDurableSegment)
-            .then(function (entries) {
-            var existingL2Records = create(null);
-            publishDurableStoreEntries(entries, function (key, record) {
-                existingL2Records[key] = record;
-            },
-            // we don't need to prime metadata
-            function () { });
-            return ingestAndPublish(existingL2Records);
         });
     };
     var handleErrorResponse = function (ingestAndBroadcastFunc) {
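
The ES5 bundle above is the same change, downleveled through __awaiter/__generator. The comment "so we don't stomp over any data" is the heart of it: without per-key serialization, two overlapping read-modify-write cycles against the same durable key can drop fields. A self-contained TypeScript illustration of that lost-update race, using an in-memory Map in place of the durable store (all names here are illustrative, not @luvio code):

// Demo of the lost-update race the mergeKeysPromiseMap prevents.
type Fields = { [field: string]: string };
const l2 = new Map<string, Fields>();
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function mergeWrite(key: string, fields: Fields): Promise<void> {
    const existing = l2.get(key) ?? {};        // L2 read
    await sleep(10);                           // async gap, as with a real durable store
    l2.set(key, { ...existing, ...fields });   // L2 write
}

async function main(): Promise<void> {
    // Unsynchronized: both cycles read {} before either writes, so the second
    // write overwrites the first and the Name field is lost.
    await Promise.all([
        mergeWrite('acc:1', { Name: 'Acme' }),
        mergeWrite('acc:1', { Phone: '555-0100' }),
    ]);
    console.log(l2.get('acc:1')); // { Phone: '555-0100' }

    // Chained per key (what the promise map effectively does when calls overlap):
    // the second cycle waits for the first, so both fields survive.
    l2.clear();
    const first = mergeWrite('acc:1', { Name: 'Acme' });
    const second = first.then(() => mergeWrite('acc:1', { Phone: '555-0100' }));
    await second;
    console.log(l2.get('acc:1')); // { Name: 'Acme', Phone: '555-0100' }
}

main();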
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@luvio/environments",
-  "version": "0.106.0",
+  "version": "0.108.0",
   "description": "Luvio Environments",
   "repository": {
     "type": "git",
@@ -23,7 +23,7 @@
     "watch": "yarn build --watch"
   },
   "dependencies": {
-    "@luvio/engine": "0.106.0"
+    "@luvio/engine": "0.108.0"
   },
   "bundlesize": [
     {
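
The manifest change is a straight version bump, with the @luvio/engine dependency pinned to the matching release. A consumer picking up this release would typically move both packages in lockstep; a hypothetical consumer package.json fragment:

{
  "dependencies": {
    "@luvio/engine": "0.108.0",
    "@luvio/environments": "0.108.0"
  }
}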