@luvio/environments 0.106.0 → 0.109.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -419,6 +419,7 @@ function isUnfulfilledSnapshot(cachedSnapshotResult) {
  function makeDurable(environment, { durableStore, instrumentation }) {
  let ingestStagingStore = null;
  const durableTTLStore = new DurableTTLStore(durableStore);
+ const mergeKeysPromiseMap = new Map();
  let initializationPromise = new Promise((resolve) => {
  const finish = () => {
  resolve();
@@ -677,52 +678,86 @@ function makeDurable(environment, { durableStore, instrumentation }) {
  }
  return {};
  };
- const handleSuccessResponse = function (ingestAndBroadcastFunc, getResponseCacheKeysFunc, existingRecords) {
+ const handleSuccessResponse = async function (ingestAndBroadcastFunc, getResponseCacheKeysFunc) {
  validateNotDisposed();
  const cacheKeySet = getResponseCacheKeysFunc();
- const cacheKeys = new Set(keys(cacheKeySet));
+ const cacheKeySetKeys = keys(cacheKeySet);
  const keysToRevive = {};
- cacheKeys.forEach((cacheKey) => {
- const key = cacheKeySet[cacheKey];
- if (key.mergeable === true &&
- (existingRecords === undefined || existingRecords[cacheKey] === undefined)) {
- keysToRevive[cacheKey] = true;
+ for (const cacheKeySetKey of cacheKeySetKeys) {
+ const cacheKey = cacheKeySet[cacheKeySetKey];
+ if (cacheKey.mergeable === true) {
+ keysToRevive[cacheKeySetKey] = true;
  }
- });
- const ingestAndPublish = (revivedRecords) => {
- const toPrime = existingRecords !== undefined
- ? { ...revivedRecords, ...existingRecords }
- : revivedRecords;
- ingestStagingStore = buildIngestStagingStore(environment);
- ingestStagingStore.records = toPrime;
- const snapshotFromMemoryIngest = ingestAndBroadcastFunc();
- return publishChangesToDurableStore().then(() => {
- if (snapshotFromMemoryIngest === undefined) {
- return undefined;
+ }
+ let snapshotFromMemoryIngest = undefined;
+ const keysAsArray = keys(keysToRevive);
+ if (keysAsArray.length > 0) {
+ // if we need to do an L2 read then L2 write then we need to synchronize
+ // our read/merge/ingest/write Promise based on the keys so we don't
+ // stomp over any data
+ const readWritePromise = (async () => {
+ const pendingPromises = [];
+ for (const key of keysAsArray) {
+ const pendingPromise = mergeKeysPromiseMap.get(key);
+ if (pendingPromise !== undefined) {
+ // IMPORTANT: while on the synchronous code path we get a
+ // handle to pendingPromise and push it onto the array.
+ // This is important because later in this synchronous code
+ // path we will upsert readWritePromise into the
+ // mergeKeysPromiseMap (essentially overwriting pendingPromise
+ // in the map).
+ pendingPromises.push(pendingPromise);
+ }
  }
- if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
- return snapshotFromMemoryIngest;
+ await Promise.all(pendingPromises);
+ const entries = await durableStore.getEntries(keysAsArray, DefaultDurableSegment);
+ ingestStagingStore = buildIngestStagingStore(environment);
+ publishDurableStoreEntries(entries, (key, record) => {
+ ingestStagingStore.records[key] = record;
+ },
+ // we don't need to prime metadata
+ () => { });
+ snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+ await publishChangesToDurableStore();
+ })();
+ for (const key of keysAsArray) {
+ // we are overwriting the previous promise at this key, but that
+ // is ok because we got a handle to it earlier (see the IMPORTANT
+ // comment about 35 lines up)
+ mergeKeysPromiseMap.set(key, readWritePromise);
+ }
+ try {
+ await readWritePromise;
+ }
+ finally {
+ for (const key of keysAsArray) {
+ const pendingPromise = mergeKeysPromiseMap.get(key);
+ // cleanup the entry from the map if this is the last promise
+ // for that key
+ if (pendingPromise === readWritePromise) {
+ mergeKeysPromiseMap.delete(key);
+ }
  }
- // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
- return reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, () => environment.storeLookup(snapshotFromMemoryIngest.select, environment.createSnapshot, snapshotFromMemoryIngest.refresh)).then((result) => {
- return result.snapshot;
- });
- });
- };
- if (keys(keysToRevive).length === 0) {
- return ingestAndPublish({});
+ }
  }
- return durableStore
- .getEntries(keys(keysToRevive), DefaultDurableSegment)
- .then((entries) => {
- const existingL2Records = create(null);
- publishDurableStoreEntries(entries, (key, record) => {
- existingL2Records[key] = record;
- },
- // we don't need to prime metadata
- () => { });
- return ingestAndPublish(existingL2Records);
- });
+ else {
+ // we aren't doing any merging so we don't have to synchronize, the
+ // underlying DurableStore implementation takes care of R/W sync
+ // so all we have to do is ingest then write to L2
+ ingestStagingStore = buildIngestStagingStore(environment);
+ snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+ await publishChangesToDurableStore();
+ }
+ if (snapshotFromMemoryIngest === undefined) {
+ return undefined;
+ }
+ if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
+ return snapshotFromMemoryIngest;
+ }
+ // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
+ const { select, refresh } = snapshotFromMemoryIngest;
+ const result = await reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, () => environment.storeLookup(select, environment.createSnapshot, refresh));
+ return result.snapshot;
  };
  const handleErrorResponse = function (ingestAndBroadcastFunc) {
  validateNotDisposed();
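
The core change in this hunk is that handleSuccessResponse now serializes its L2 read/merge/ingest/write work per cache key through mergeKeysPromiseMap instead of accepting a pre-fetched existingRecords source. A minimal sketch of that per-key serialization pattern in isolation follows; keyedPromises, runExclusive, and work are illustrative names for this sketch, not part of the package.

// Illustrative sketch only: keyedPromises/runExclusive/work are hypothetical
// names, not exports of @luvio/environments.
const keyedPromises = new Map<string, Promise<void>>();

async function runExclusive(cacheKeys: string[], work: () => Promise<void>): Promise<void> {
    // Synchronously capture any in-flight work for these keys before the map
    // entries are overwritten below (this mirrors the IMPORTANT comment in the diff).
    const pending = cacheKeys
        .map((key) => keyedPromises.get(key))
        .filter((p): p is Promise<void> => p !== undefined);
    const current = (async () => {
        // Wait for prior work on the same keys, then run this read/merge/ingest/write.
        await Promise.all(pending);
        await work();
    })();
    // Publish this promise as the latest work for each key.
    for (const key of cacheKeys) {
        keyedPromises.set(key, current);
    }
    try {
        await current;
    } finally {
        // Clean up only the entries that still point at this promise.
        for (const key of cacheKeys) {
            if (keyedPromises.get(key) === current) {
                keyedPromises.delete(key);
            }
        }
    }
}

Two responses that touch the same mergeable key therefore run their durable-store read and write back to back rather than interleaved, which is what prevents one response from stomping over another's merged records.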
@@ -1,4 +1,4 @@
- import type { CacheKeySet, Environment, RecordSource, Snapshot, InMemoryStore } from '@luvio/engine';
+ import type { Environment, RecordSource, InMemoryStore } from '@luvio/engine';
  import type { DurableStore } from './DurableStore';
  import type { InstrumentationFunction } from './makeDurable/error';
  import type { TTLOverridesMap } from './DurableTTLStore';
@@ -25,12 +25,6 @@ export interface DurableEnvironment extends Environment {
  * flow, otherwise returns an empty object.
  */
  getIngestStagingStoreMetadata(): InMemoryStore['metadata'];
- /**
- * Overload of Environment.handleSuccessResponse that takes in an optional
- * RecordSource to "prime" the ingest staging store with before calling
- * ingest. Useful for merge-able record types
- */
- handleSuccessResponse<IngestionReturnType extends Snapshot<D, V> | undefined, D, V = unknown>(ingestAndBroadcastFunc: () => IngestionReturnType, getResponseCacheKeysFunc: () => CacheKeySet, existingRecords?: RecordSource): IngestionReturnType | Promise<IngestionReturnType>;
  }
  export declare const AdapterContextSegment = "ADAPTER-CONTEXT";
  export declare const ADAPTER_CONTEXT_ID_SUFFIX = "__NAMED_CONTEXT";
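
With the overload removed from DurableEnvironment, callers fall back to the base Environment.handleSuccessResponse shape: an existingRecords source can no longer be passed in, since mergeable keys are now revived from the durable store internally. A hedged sketch of a call site under the new two-argument form follows; the declared handleSuccessResponse shape is an assumption inferred from the removed overload, and ingestResponse/getResponseCacheKeys are hypothetical adapter helpers, not exports of this package.

// Hypothetical adapter call site; the declared environment shape is an
// assumption for this sketch, not the package's actual typings.
import type { CacheKeySet, Snapshot } from '@luvio/engine';

declare const durableEnvironment: {
    handleSuccessResponse<T extends Snapshot<unknown> | undefined>(
        ingestAndBroadcastFunc: () => T,
        getResponseCacheKeysFunc: () => CacheKeySet
    ): T | Promise<T>;
};
declare function ingestResponse(response: unknown): Snapshot<unknown> | undefined;
declare function getResponseCacheKeys(response: unknown): CacheKeySet;

async function onSuccess(response: unknown) {
    // Only the two-argument form remains in 0.109.0; existingRecords is gone.
    return durableEnvironment.handleSuccessResponse(
        () => ingestResponse(response),
        () => getResponseCacheKeys(response)
    );
}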
@@ -423,6 +423,7 @@
  function makeDurable(environment, { durableStore, instrumentation }) {
  let ingestStagingStore = null;
  const durableTTLStore = new DurableTTLStore(durableStore);
+ const mergeKeysPromiseMap = new Map();
  let initializationPromise = new Promise((resolve) => {
  const finish = () => {
  resolve();
@@ -681,52 +682,86 @@
  }
  return {};
  };
- const handleSuccessResponse = function (ingestAndBroadcastFunc, getResponseCacheKeysFunc, existingRecords) {
+ const handleSuccessResponse = async function (ingestAndBroadcastFunc, getResponseCacheKeysFunc) {
  validateNotDisposed();
  const cacheKeySet = getResponseCacheKeysFunc();
- const cacheKeys = new Set(keys(cacheKeySet));
+ const cacheKeySetKeys = keys(cacheKeySet);
  const keysToRevive = {};
- cacheKeys.forEach((cacheKey) => {
- const key = cacheKeySet[cacheKey];
- if (key.mergeable === true &&
- (existingRecords === undefined || existingRecords[cacheKey] === undefined)) {
- keysToRevive[cacheKey] = true;
+ for (const cacheKeySetKey of cacheKeySetKeys) {
+ const cacheKey = cacheKeySet[cacheKeySetKey];
+ if (cacheKey.mergeable === true) {
+ keysToRevive[cacheKeySetKey] = true;
  }
- });
- const ingestAndPublish = (revivedRecords) => {
- const toPrime = existingRecords !== undefined
- ? { ...revivedRecords, ...existingRecords }
- : revivedRecords;
- ingestStagingStore = buildIngestStagingStore(environment);
- ingestStagingStore.records = toPrime;
- const snapshotFromMemoryIngest = ingestAndBroadcastFunc();
- return publishChangesToDurableStore().then(() => {
- if (snapshotFromMemoryIngest === undefined) {
- return undefined;
+ }
+ let snapshotFromMemoryIngest = undefined;
+ const keysAsArray = keys(keysToRevive);
+ if (keysAsArray.length > 0) {
+ // if we need to do an L2 read then L2 write then we need to synchronize
+ // our read/merge/ingest/write Promise based on the keys so we don't
+ // stomp over any data
+ const readWritePromise = (async () => {
+ const pendingPromises = [];
+ for (const key of keysAsArray) {
+ const pendingPromise = mergeKeysPromiseMap.get(key);
+ if (pendingPromise !== undefined) {
+ // IMPORTANT: while on the synchronous code path we get a
+ // handle to pendingPromise and push it onto the array.
+ // This is important because later in this synchronous code
+ // path we will upsert readWritePromise into the
+ // mergeKeysPromiseMap (essentially overwriting pendingPromise
+ // in the map).
+ pendingPromises.push(pendingPromise);
+ }
  }
- if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
- return snapshotFromMemoryIngest;
+ await Promise.all(pendingPromises);
+ const entries = await durableStore.getEntries(keysAsArray, DefaultDurableSegment);
+ ingestStagingStore = buildIngestStagingStore(environment);
+ publishDurableStoreEntries(entries, (key, record) => {
+ ingestStagingStore.records[key] = record;
+ },
+ // we don't need to prime metadata
+ () => { });
+ snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+ await publishChangesToDurableStore();
+ })();
+ for (const key of keysAsArray) {
+ // we are overwriting the previous promise at this key, but that
+ // is ok because we got a handle to it earlier (see the IMPORTANT
+ // comment about 35 lines up)
+ mergeKeysPromiseMap.set(key, readWritePromise);
+ }
+ try {
+ await readWritePromise;
+ }
+ finally {
+ for (const key of keysAsArray) {
+ const pendingPromise = mergeKeysPromiseMap.get(key);
+ // cleanup the entry from the map if this is the last promise
+ // for that key
+ if (pendingPromise === readWritePromise) {
+ mergeKeysPromiseMap.delete(key);
+ }
  }
- // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
- return reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, () => environment.storeLookup(snapshotFromMemoryIngest.select, environment.createSnapshot, snapshotFromMemoryIngest.refresh)).then((result) => {
- return result.snapshot;
- });
- });
- };
- if (keys(keysToRevive).length === 0) {
- return ingestAndPublish({});
+ }
  }
- return durableStore
- .getEntries(keys(keysToRevive), DefaultDurableSegment)
- .then((entries) => {
- const existingL2Records = create(null);
- publishDurableStoreEntries(entries, (key, record) => {
- existingL2Records[key] = record;
- },
- // we don't need to prime metadata
- () => { });
- return ingestAndPublish(existingL2Records);
- });
+ else {
+ // we aren't doing any merging so we don't have to synchronize, the
+ // underlying DurableStore implementation takes care of R/W sync
+ // so all we have to do is ingest then write to L2
+ ingestStagingStore = buildIngestStagingStore(environment);
+ snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+ await publishChangesToDurableStore();
+ }
+ if (snapshotFromMemoryIngest === undefined) {
+ return undefined;
+ }
+ if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
+ return snapshotFromMemoryIngest;
+ }
+ // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
+ const { select, refresh } = snapshotFromMemoryIngest;
+ const result = await reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, () => environment.storeLookup(select, environment.createSnapshot, refresh));
+ return result.snapshot;
  };
  const handleErrorResponse = function (ingestAndBroadcastFunc) {
  validateNotDisposed();
@@ -1,4 +1,4 @@
- import type { CacheKeySet, Environment, RecordSource, Snapshot, InMemoryStore } from '@luvio/engine';
+ import type { Environment, RecordSource, InMemoryStore } from '@luvio/engine';
  import type { DurableStore } from './DurableStore';
  import type { InstrumentationFunction } from './makeDurable/error';
  import type { TTLOverridesMap } from './DurableTTLStore';
@@ -25,12 +25,6 @@ export interface DurableEnvironment extends Environment {
  * flow, otherwise returns an empty object.
  */
  getIngestStagingStoreMetadata(): InMemoryStore['metadata'];
- /**
- * Overload of Environment.handleSuccessResponse that takes in an optional
- * RecordSource to "prime" the ingest staging store with before calling
- * ingest. Useful for merge-able record types
- */
- handleSuccessResponse<IngestionReturnType extends Snapshot<D, V> | undefined, D, V = unknown>(ingestAndBroadcastFunc: () => IngestionReturnType, getResponseCacheKeysFunc: () => CacheKeySet, existingRecords?: RecordSource): IngestionReturnType | Promise<IngestionReturnType>;
  }
  export declare const AdapterContextSegment = "ADAPTER-CONTEXT";
  export declare const ADAPTER_CONTEXT_ID_SUFFIX = "__NAMED_CONTEXT";
@@ -516,6 +516,7 @@
  var durableStore = _a.durableStore, instrumentation = _a.instrumentation;
  var ingestStagingStore = null;
  var durableTTLStore = new DurableTTLStore(durableStore);
+ var mergeKeysPromiseMap = new Map();
  var initializationPromise = new Promise(function (resolve) {
  var finish = function () {
  resolve();
@@ -779,52 +780,116 @@
  }
  return {};
  };
- var handleSuccessResponse = function (ingestAndBroadcastFunc, getResponseCacheKeysFunc, existingRecords) {
- validateNotDisposed();
- var cacheKeySet = getResponseCacheKeysFunc();
- var cacheKeys = new Set(keys(cacheKeySet));
- var keysToRevive = {};
- cacheKeys.forEach(function (cacheKey) {
- var key = cacheKeySet[cacheKey];
- if (key.mergeable === true &&
- (existingRecords === undefined || existingRecords[cacheKey] === undefined)) {
- keysToRevive[cacheKey] = true;
- }
- });
- var ingestAndPublish = function (revivedRecords) {
- var toPrime = existingRecords !== undefined
- ? __assign(__assign({}, revivedRecords), existingRecords) : revivedRecords;
- ingestStagingStore = buildIngestStagingStore(environment);
- ingestStagingStore.records = toPrime;
- var snapshotFromMemoryIngest = ingestAndBroadcastFunc();
- return publishChangesToDurableStore().then(function () {
- if (snapshotFromMemoryIngest === undefined) {
- return undefined;
- }
- if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
- return snapshotFromMemoryIngest;
+ var handleSuccessResponse = function (ingestAndBroadcastFunc, getResponseCacheKeysFunc) {
+ return __awaiter(this, void 0, void 0, function () {
+ var cacheKeySet, cacheKeySetKeys, keysToRevive, _i, cacheKeySetKeys_1, cacheKeySetKey, cacheKey, snapshotFromMemoryIngest, keysAsArray, readWritePromise, _a, keysAsArray_1, key, _b, keysAsArray_2, key, pendingPromise, _c, select, refresh, result;
+ var _this = this;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ validateNotDisposed();
+ cacheKeySet = getResponseCacheKeysFunc();
+ cacheKeySetKeys = keys(cacheKeySet);
+ keysToRevive = {};
+ for (_i = 0, cacheKeySetKeys_1 = cacheKeySetKeys; _i < cacheKeySetKeys_1.length; _i++) {
+ cacheKeySetKey = cacheKeySetKeys_1[_i];
+ cacheKey = cacheKeySet[cacheKeySetKey];
+ if (cacheKey.mergeable === true) {
+ keysToRevive[cacheKeySetKey] = true;
+ }
+ }
+ snapshotFromMemoryIngest = undefined;
+ keysAsArray = keys(keysToRevive);
+ if (!(keysAsArray.length > 0)) return [3 /*break*/, 5];
+ readWritePromise = (function () { return __awaiter(_this, void 0, void 0, function () {
+ var pendingPromises, _i, keysAsArray_3, key, pendingPromise, entries;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ pendingPromises = [];
+ for (_i = 0, keysAsArray_3 = keysAsArray; _i < keysAsArray_3.length; _i++) {
+ key = keysAsArray_3[_i];
+ pendingPromise = mergeKeysPromiseMap.get(key);
+ if (pendingPromise !== undefined) {
+ // IMPORTANT: while on the synchronous code path we get a
+ // handle to pendingPromise and push it onto the array.
+ // This is important because later in this synchronous code
+ // path we will upsert readWritePromise into the
+ // mergeKeysPromiseMap (essentially overwriting pendingPromise
+ // in the map).
+ pendingPromises.push(pendingPromise);
+ }
+ }
+ return [4 /*yield*/, Promise.all(pendingPromises)];
+ case 1:
+ _a.sent();
+ return [4 /*yield*/, durableStore.getEntries(keysAsArray, DefaultDurableSegment)];
+ case 2:
+ entries = _a.sent();
+ ingestStagingStore = buildIngestStagingStore(environment);
+ publishDurableStoreEntries(entries, function (key, record) {
+ ingestStagingStore.records[key] = record;
+ },
+ // we don't need to prime metadata
+ function () { });
+ snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+ return [4 /*yield*/, publishChangesToDurableStore()];
+ case 3:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ }); })();
+ for (_a = 0, keysAsArray_1 = keysAsArray; _a < keysAsArray_1.length; _a++) {
+ key = keysAsArray_1[_a];
+ // we are overwriting the previous promise at this key, but that
+ // is ok because we got a handle to it earlier (see the IMPORTANT
+ // comment about 35 lines up)
+ mergeKeysPromiseMap.set(key, readWritePromise);
+ }
+ _d.label = 1;
+ case 1:
+ _d.trys.push([1, , 3, 4]);
+ return [4 /*yield*/, readWritePromise];
+ case 2:
+ _d.sent();
+ return [3 /*break*/, 4];
+ case 3:
+ for (_b = 0, keysAsArray_2 = keysAsArray; _b < keysAsArray_2.length; _b++) {
+ key = keysAsArray_2[_b];
+ pendingPromise = mergeKeysPromiseMap.get(key);
+ // cleanup the entry from the map if this is the last promise
+ // for that key
+ if (pendingPromise === readWritePromise) {
+ mergeKeysPromiseMap.delete(key);
+ }
+ }
+ return [7 /*endfinally*/];
+ case 4: return [3 /*break*/, 7];
+ case 5:
+ // we aren't doing any merging so we don't have to synchronize, the
+ // underlying DurableStore implementation takes care of R/W sync
+ // so all we have to do is ingest then write to L2
+ ingestStagingStore = buildIngestStagingStore(environment);
+ snapshotFromMemoryIngest = ingestAndBroadcastFunc();
+ return [4 /*yield*/, publishChangesToDurableStore()];
+ case 6:
+ _d.sent();
+ _d.label = 7;
+ case 7:
+ if (snapshotFromMemoryIngest === undefined) {
+ return [2 /*return*/, undefined];
+ }
+ if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
+ return [2 /*return*/, snapshotFromMemoryIngest];
+ }
+ _c = snapshotFromMemoryIngest, select = _c.select, refresh = _c.refresh;
+ return [4 /*yield*/, reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, function () { return environment.storeLookup(select, environment.createSnapshot, refresh); })];
+ case 8:
+ result = _d.sent();
+ return [2 /*return*/, result.snapshot];
  }
- // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
- return reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, function () {
- return environment.storeLookup(snapshotFromMemoryIngest.select, environment.createSnapshot, snapshotFromMemoryIngest.refresh);
- }).then(function (result) {
- return result.snapshot;
- });
  });
- };
- if (keys(keysToRevive).length === 0) {
- return ingestAndPublish({});
- }
- return durableStore
- .getEntries(keys(keysToRevive), DefaultDurableSegment)
- .then(function (entries) {
- var existingL2Records = create(null);
- publishDurableStoreEntries(entries, function (key, record) {
- existingL2Records[key] = record;
- },
- // we don't need to prime metadata
- function () { });
- return ingestAndPublish(existingL2Records);
- });
  });
  };
  var handleErrorResponse = function (ingestAndBroadcastFunc) {
@@ -1,4 +1,4 @@
- import type { CacheKeySet, Environment, RecordSource, Snapshot, InMemoryStore } from '@luvio/engine';
+ import type { Environment, RecordSource, InMemoryStore } from '@luvio/engine';
  import type { DurableStore } from './DurableStore';
  import type { InstrumentationFunction } from './makeDurable/error';
  import type { TTLOverridesMap } from './DurableTTLStore';
@@ -25,12 +25,6 @@ export interface DurableEnvironment extends Environment {
  * flow, otherwise returns an empty object.
  */
  getIngestStagingStoreMetadata(): InMemoryStore['metadata'];
- /**
- * Overload of Environment.handleSuccessResponse that takes in an optional
- * RecordSource to "prime" the ingest staging store with before calling
- * ingest. Useful for merge-able record types
- */
- handleSuccessResponse<IngestionReturnType extends Snapshot<D, V> | undefined, D, V = unknown>(ingestAndBroadcastFunc: () => IngestionReturnType, getResponseCacheKeysFunc: () => CacheKeySet, existingRecords?: RecordSource): IngestionReturnType | Promise<IngestionReturnType>;
  }
  export declare const AdapterContextSegment = "ADAPTER-CONTEXT";
  export declare const ADAPTER_CONTEXT_ID_SUFFIX = "__NAMED_CONTEXT";
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@luvio/environments",
- "version": "0.106.0",
+ "version": "0.109.0",
  "description": "Luvio Environments",
  "repository": {
  "type": "git",
@@ -23,7 +23,7 @@
  "watch": "yarn build --watch"
  },
  "dependencies": {
- "@luvio/engine": "0.106.0"
+ "@luvio/engine": "0.109.0"
  },
  "bundlesize": [
  {