@luvio/environments 0.138.1 → 0.138.3
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- package/dist/es/es2018/environments.js +858 -858
- package/dist/es/es2018/{DurableStore.d.ts → types/DurableStore.d.ts} +134 -134
- package/dist/{umd/es5 → es/es2018/types}/DurableTTLStore.d.ts +25 -25
- package/dist/{umd/es5 → es/es2018/types}/events.d.ts +18 -18
- package/dist/{umd/es5 → es/es2018/types}/main.d.ts +5 -5
- package/dist/es/es2018/{makeDurable → types/makeDurable}/error.d.ts +11 -11
- package/dist/es/es2018/{makeDurable → types/makeDurable}/flush.d.ts +4 -4
- package/dist/{umd/es5 → es/es2018/types}/makeDurable/revive.d.ts +38 -38
- package/dist/es/es2018/{makeDurable → types/makeDurable}/stagingStore.d.ts +6 -6
- package/dist/es/es2018/{makeDurable → types/makeDurable}/ttl.d.ts +3 -3
- package/dist/es/es2018/{makeDurable → types/makeDurable}/utils.d.ts +2 -2
- package/dist/es/es2018/{makeDurable.d.ts → types/makeDurable.d.ts} +44 -44
- package/dist/es/es2018/{utils → types/utils}/deep-freeze.d.ts +1 -1
- package/dist/es/es2018/{utils → types/utils}/language.d.ts +19 -19
- package/dist/umd/es2018/environments.js +858 -858
- package/dist/umd/es2018/{DurableStore.d.ts → types/DurableStore.d.ts} +134 -134
- package/dist/{es/es2018 → umd/es2018/types}/DurableTTLStore.d.ts +25 -25
- package/dist/{es/es2018 → umd/es2018/types}/events.d.ts +18 -18
- package/dist/umd/es2018/{main.d.ts → types/main.d.ts} +5 -5
- package/dist/umd/{es5 → es2018/types}/makeDurable/error.d.ts +11 -11
- package/dist/umd/es2018/{makeDurable → types/makeDurable}/flush.d.ts +4 -4
- package/dist/{es/es2018 → umd/es2018/types}/makeDurable/revive.d.ts +38 -38
- package/dist/umd/{es5 → es2018/types}/makeDurable/stagingStore.d.ts +6 -6
- package/dist/umd/es2018/{makeDurable → types/makeDurable}/ttl.d.ts +3 -3
- package/dist/umd/es2018/{makeDurable → types/makeDurable}/utils.d.ts +2 -2
- package/dist/umd/{es5 → es2018/types}/makeDurable.d.ts +44 -44
- package/dist/umd/es2018/{utils → types/utils}/deep-freeze.d.ts +1 -1
- package/dist/umd/es2018/{utils → types/utils}/language.d.ts +19 -19
- package/dist/umd/es5/environments.js +942 -942
- package/dist/umd/es5/{DurableStore.d.ts → types/DurableStore.d.ts} +134 -134
- package/dist/umd/{es2018 → es5/types}/DurableTTLStore.d.ts +25 -25
- package/dist/umd/{es2018 → es5/types}/events.d.ts +18 -18
- package/dist/{es/es2018 → umd/es5/types}/main.d.ts +5 -5
- package/dist/umd/{es2018 → es5/types}/makeDurable/error.d.ts +11 -11
- package/dist/umd/es5/{makeDurable → types/makeDurable}/flush.d.ts +4 -4
- package/dist/umd/{es2018 → es5/types}/makeDurable/revive.d.ts +38 -38
- package/dist/umd/{es2018 → es5/types}/makeDurable/stagingStore.d.ts +6 -6
- package/dist/umd/es5/{makeDurable → types/makeDurable}/ttl.d.ts +3 -3
- package/dist/umd/es5/{makeDurable → types/makeDurable}/utils.d.ts +2 -2
- package/dist/umd/{es2018 → es5/types}/makeDurable.d.ts +44 -44
- package/dist/umd/es5/{utils → types/utils}/deep-freeze.d.ts +1 -1
- package/dist/umd/es5/{utils → types/utils}/language.d.ts +19 -19
- package/package.json +4 -4
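
The bulk of this diff is the compiled `environments.js` module reproduced below, whose main export is `makeDurable`. For orientation only, here is a minimal, hypothetical wiring sketch; the `Store`/`Environment` constructors from `@luvio/engine`, the no-op network adapter, and the in-memory `DurableStore` stub are illustrative assumptions and are not part of this package diff:

```js
import { Store, Environment } from '@luvio/engine';
import { makeDurable } from '@luvio/environments';

// Hypothetical in-memory DurableStore stub; the real interface is declared in the
// DurableStore.d.ts files listed above (getEntries, setEntries, getAllEntries,
// evictEntries, batchOperations, registerOnChangedListener).
const backing = new Map();
const durableStore = {
    getEntries: async (ids, segment) => {
        const entries = {};
        for (const id of ids) {
            const entry = backing.get(`${segment}:${id}`);
            if (entry !== undefined) {
                entries[id] = entry;
            }
        }
        return entries;
    },
    getAllEntries: async (_segment) => ({}),
    setEntries: async (entries, segment) => {
        for (const id of Object.keys(entries)) {
            backing.set(`${segment}:${id}`, entries[id]);
        }
    },
    evictEntries: async (ids, segment) => {
        for (const id of ids) {
            backing.delete(`${segment}:${id}`);
        }
    },
    // apply the operation shapes produced by flushInMemoryStoreValuesToDurableStore below
    batchOperations: async (operations) => {
        for (const op of operations) {
            if (op.type === 'setEntries') {
                await durableStore.setEntries(op.entries, op.segment);
            }
            else if (op.type === 'evictEntries') {
                await durableStore.evictEntries(op.ids, op.segment);
            }
        }
    },
    // returns an unsubscribe function, as expected by makeDurable's dispose()
    registerOnChangedListener: (_listener) => () => Promise.resolve(),
};

// Assumed base environment; the durable environment persists ingested data to L2
// and revives it on cache misses, as implemented in environments.js below.
const network = () => Promise.reject(new Error('no network in this sketch'));
const durableEnvironment = makeDurable(new Environment(new Store(), network), {
    durableStore,
    // instrumentation receives a callback that builds the event payload
    // (see handleDurableStoreRejection in the diff)
    instrumentation: (buildEvent) => console.log(buildEvent()),
});
```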

@@ -1,885 +1,885 @@

import { StoreKeySet, serializeStructuredKey, emitAdapterEvent, StoreKeyMap, buildStaleWhileRevalidateImplementation, Reader } from '@luvio/engine';

// the last version the metadata shape was altered
const DURABLE_METADATA_VERSION = '0.111.0';
function isDeprecatedDurableStoreEntry(durableRecord) {
    if (durableRecord.expiration !== undefined) {
        return true;
    }
    const metadata = durableRecord.metadata;
    if (metadata !== undefined) {
        const { metadataVersion } = metadata;
        // eventually we will want to assert that metadataVersion is defined
        if (metadataVersion !== undefined && metadataVersion !== DURABLE_METADATA_VERSION) {
            return true;
        }
    }
    // Add more deprecated shape checks here
    return false;
}
const DefaultDurableSegment = 'DEFAULT';

const { keys, create, assign, freeze } = Object;
const { isArray } = Array;

//Durable store error instrumentation key
const DURABLE_STORE_ERROR = 'durable-store-error';
/**
 * Returns a function that processes errors from durable store promise rejections.
 * If running in a non-production environment, the error is rethrown.
 * When running in production the error is sent to instrumentation.
 * @param instrument Instrumentation function implementation
 */
function handleDurableStoreRejection(instrument) {
    return (error) => {
        if (process.env.NODE_ENV !== 'production') {
            throw error;
        }
        if (instrument !== undefined) {
            instrument(() => {
                return {
                    [DURABLE_STORE_ERROR]: true,
                    error: error,
                };
            });
        }
    };
}

function deepFreeze(value) {
    // No need to freeze primitives
    if (typeof value !== 'object' || value === null) {
        return;
    }
    if (isArray(value)) {
        for (let i = 0, len = value.length; i < len; i += 1) {
            deepFreeze(value[i]);
        }
    }
    else {
        const keys$1 = keys(value);
        for (let i = 0, len = keys$1.length; i < len; i += 1) {
            deepFreeze(value[keys$1[i]]);
        }
    }
    freeze(value);
}

function isStoreEntryError(storeRecord) {
    return storeRecord.__type === 'error';
}

/**
 * Takes a set of entries from DurableStore and publishes them via the passed in funcs.
 * This respects expiration and checks for valid DurableStore data shapes. This should
 * be used over manually parsing DurableStoreEntries
 *
 * @param durableRecords The DurableStoreEntries to parse
 * @param publish A function to call with the data of each DurableStoreEntry
 * @param publishMetadata A function to call with the metadata of each DurableStoreEntry
 * @param pendingWriter the PendingWriter (this is going away soon)
 * @returns
 */
function publishDurableStoreEntries(durableRecords, publish, publishMetadata) {
    const revivedKeys = new StoreKeySet();
    let hadUnexpectedShape = false;
    if (durableRecords === undefined) {
        return { revivedKeys, hadUnexpectedShape };
    }
    const durableKeys = keys(durableRecords);
    if (durableKeys.length === 0) {
        // no records to revive
        return { revivedKeys, hadUnexpectedShape };
    }
    for (let i = 0, len = durableKeys.length; i < len; i += 1) {
        const key = durableKeys[i];
        const durableRecord = durableRecords[key];
        if (isDeprecatedDurableStoreEntry(durableRecord)) {
            // had the old shape, skip reviving this entry.
            hadUnexpectedShape = true;
            continue;
        }
        const { metadata, data } = durableRecord;
        if (data === undefined) {
            // if unexpected data skip reviving
            hadUnexpectedShape = true;
            continue;
        }
        if (metadata !== undefined) {
            const { expirationTimestamp } = metadata;
            if (expirationTimestamp === undefined) {
                // if unexpected expiration data skip reviving
                hadUnexpectedShape = true;
                continue;
            }
            publishMetadata(key, metadata);
        }
        if (isStoreEntryError(data)) {
            // freeze errors on way into L1
            deepFreeze(data.error);
        }
        publish(key, data);
        revivedKeys.add(key);
    }
    return { revivedKeys, hadUnexpectedShape };
}
/**
 * This method returns a Promise to a snapshot that is revived from L2 cache. If
 * L2 does not have the entries necessary to fulfill the snapshot then this method
 * will refresh the snapshot from network, and then run the results from network
 * through L2 ingestion, returning the subsequent revived snapshot.
 */
function reviveSnapshot(baseEnvironment, durableStore,
// TODO [W-10165787]: We should only allow Unfulfilled snapshot be passed in
unavailableSnapshot, durableStoreErrorHandler, buildL1Snapshot, reviveMetrics = { l2Trips: [] }) {
    const { recordId, select, seenRecords, state } = unavailableSnapshot;
    // L2 can only revive Unfulfilled snapshots that have a selector since they have the
    // info needed to revive (like missingLinks) and rebuild. Otherwise return L1 snapshot.
    if (state !== 'Unfulfilled' || select === undefined) {
        return Promise.resolve({
            snapshot: unavailableSnapshot,
            metrics: reviveMetrics,
        });
    }
    // in case L1 store changes/deallocs a record while we are doing the async read
    // we attempt to read all keys from L2 - so combine recordId with any seenRecords
    const keysToReviveSet = new StoreKeySet().add(recordId);
    keysToReviveSet.merge(seenRecords);
    const keysToRevive = keysToReviveSet.keysAsArray();
    const canonicalKeys = keysToRevive.map((x) => serializeStructuredKey(baseEnvironment.storeGetCanonicalKey(x)));
    const start = Date.now();
    const { l2Trips } = reviveMetrics;
    return durableStore.getEntries(canonicalKeys, DefaultDurableSegment).then((durableRecords) => {
        l2Trips.push({
            duration: Date.now() - start,
            keysRequestedCount: canonicalKeys.length,
        });
        const { revivedKeys, hadUnexpectedShape } = publishDurableStoreEntries(durableRecords,
        // TODO [W-10072584]: instead of implicitly using L1 we should take in
        // publish and publishMetadata funcs, so callers can decide where to
        // revive to (like they pass in how to do the buildL1Snapshot)
        baseEnvironment.storePublish.bind(baseEnvironment), baseEnvironment.publishStoreMetadata.bind(baseEnvironment));
        // if the data coming back from DS had an unexpected shape then just
        // return the L1 snapshot
        if (hadUnexpectedShape === true) {
            return { snapshot: unavailableSnapshot, metrics: reviveMetrics };
        }
        if (revivedKeys.size() === 0) {
            // durable store doesn't have what we asked for so return L1 snapshot
            return { snapshot: unavailableSnapshot, metrics: reviveMetrics };
        }
        // try building the snapshot from L1 now that we have revived the missingLinks
        const snapshot = buildL1Snapshot();
        // if snapshot is pending then some other in-flight refresh will broadcast
        // later
        if (snapshot.state === 'Pending') {
            return { snapshot, metrics: reviveMetrics };
        }
        if (snapshot.state === 'Unfulfilled') {
            // have to check if the new snapshot has any additional seenRecords
            // and revive again if so
            const { seenRecords: newSnapshotSeenRecords, recordId: newSnapshotRecordId } = snapshot;
            const newKeysToReviveSet = new StoreKeySet();
            newKeysToReviveSet.add(newSnapshotRecordId);
            newKeysToReviveSet.merge(newSnapshotSeenRecords);
            const newKeys = newKeysToReviveSet.keysAsArray();
            // in case DS returned additional entries we combine the requested
            // and returned keys
            const alreadyRequestedOrRevivedSet = keysToReviveSet;
            alreadyRequestedOrRevivedSet.merge(revivedKeys);
            // if there's any seen keys in the newly rebuilt snapshot that
            // haven't already been requested or returned then revive again
            for (let i = 0, len = newKeys.length; i < len; i++) {
                const newSnapshotSeenKey = newKeys[i];
                if (!alreadyRequestedOrRevivedSet.has(newSnapshotSeenKey)) {
                    return reviveSnapshot(baseEnvironment, durableStore, snapshot, durableStoreErrorHandler, buildL1Snapshot, reviveMetrics);
                }
            }
        }
        return { snapshot, metrics: reviveMetrics };
    }, (error) => {
        durableStoreErrorHandler(error);
        // getEntries failed, return the L1 snapshot
        return { snapshot: unavailableSnapshot, metrics: reviveMetrics };
    });
}

const TTL_DURABLE_SEGMENT = 'TTL_DURABLE_SEGMENT';
const TTL_DEFAULT_KEY = 'TTL_DEFAULT_KEY';
function buildDurableTTLOverrideStoreKey(namespace, representationName) {
    return `${namespace}::${representationName}`;
}
function isEntryDurableTTLOverride(entry) {
    if (typeof entry === 'object' && entry !== undefined && entry !== null) {
        const data = entry.data;
        if (data !== undefined) {
            return (data.namespace !== undefined &&
                data.representationName !== undefined &&
                data.ttl !== undefined);
        }
    }
    return false;
}
function isDefaultDurableTTLOverride(override) {
    return (override.namespace === TTL_DEFAULT_KEY && override.representationName === TTL_DEFAULT_KEY);
}
/**
 * Class to set and get the TTL override values in the Durable Store
 */
class DurableTTLStore {
    constructor(durableStore) {
        this.durableStore = durableStore;
    }
    setDefaultDurableTTLOverrides(ttl) {
        return this.durableStore.setEntries({
            [buildDurableTTLOverrideStoreKey(TTL_DEFAULT_KEY, TTL_DEFAULT_KEY)]: {
                data: {
                    namespace: TTL_DEFAULT_KEY,
                    representationName: TTL_DEFAULT_KEY,
                    ttl,
                },
            },
        }, TTL_DURABLE_SEGMENT);
    }
    setDurableTTLOverride(namespace, representationName, ttl) {
        return this.durableStore.setEntries({
            [buildDurableTTLOverrideStoreKey(namespace, representationName)]: {
                data: { namespace, representationName, ttl },
            },
        }, TTL_DURABLE_SEGMENT);
    }
    getDurableTTLOverrides() {
        return this.durableStore
            .getAllEntries(TTL_DURABLE_SEGMENT)
            .then((entries) => {
            const overrides = [];
            let defaultTTL = undefined;
            if (entries === undefined) {
                return {
                    defaultTTL,
                    overrides,
                };
            }
            const keys$1 = keys(entries);
            for (let i = 0, len = keys$1.length; i < len; i++) {
                const key = keys$1[i];
                const entry = entries[key];
                if (entry !== undefined && isEntryDurableTTLOverride(entry)) {
                    if (isDefaultDurableTTLOverride(entry.data)) {
                        defaultTTL = entry.data;
                    }
                    else {
                        overrides.push(entry.data);
                    }
                }
            }
            return {
                defaultTTL,
                overrides,
            };
        });
    }
}

function copy(source) {
    if (typeof source !== 'object' || source === null) {
        return source;
    }
    if (isArray(source)) {
        // TS doesn't trust that this new array is an array unless we cast it
        return [...source];
    }
    return { ...source };
}
function flushInMemoryStoreValuesToDurableStore(store, durableStore, durableStoreErrorHandler) {
    const durableRecords = create(null);
    const evictedRecords = create(null);
    const { records, metadata: storeMetadata, visitedIds, refreshedIds, } = store.fallbackStringKeyInMemoryStore;
    // TODO: W-8909393 Once metadata is stored in its own segment we need to
    // call setEntries for the visitedIds on default segment and call setEntries
    // on the metadata segment for the refreshedIds
    const keys$1 = keys({ ...visitedIds, ...refreshedIds });
    for (let i = 0, len = keys$1.length; i < len; i += 1) {
        const key = keys$1[i];
        const record = records[key];
        // this record has been evicted, evict from DS
        if (record === undefined) {
            evictedRecords[key] = true;
            continue;
        }
        const metadata = storeMetadata[key];
        durableRecords[key] = {
            // copy the data in case the store is mutated during the
            // async setEntries call
            data: copy(record),
        };
        if (metadata !== undefined) {
            durableRecords[key].metadata = {
                ...metadata,
                metadataVersion: DURABLE_METADATA_VERSION,
            };
        }
    }
    const durableStoreOperations = [];
    // publishes
    const recordKeys = keys(durableRecords);
    if (recordKeys.length > 0) {
        durableStoreOperations.push({
            type: 'setEntries',
            entries: durableRecords,
            segment: DefaultDurableSegment,
        });
    }
    // evicts
    const evictedKeys = keys(evictedRecords);
    if (evictedKeys.length > 0) {
        durableStoreOperations.push({
            type: 'evictEntries',
            ids: evictedKeys,
            segment: DefaultDurableSegment,
        });
    }
    if (durableStoreOperations.length > 0) {
        return durableStore.batchOperations(durableStoreOperations).catch(durableStoreErrorHandler);
    }
    return Promise.resolve();
}

const DurableEnvironmentEventDiscriminator = 'durable';
function isDurableEnvironmentEvent(event) {
    return (event.type === 'environment' && event.environment === DurableEnvironmentEventDiscriminator);
}
function emitDurableEnvironmentAdapterEvent(eventData, observers) {
    emitAdapterEvent({
        type: 'environment',
        timestamp: Date.now(),
        environment: DurableEnvironmentEventDiscriminator,
        data: eventData,
    }, observers);
}

async function reviveTTLOverrides(ttlStore, environment) {
    const map = await ttlStore.getDurableTTLOverrides();
    const { defaultTTL, overrides } = map;
    if (defaultTTL !== undefined) {
        environment.storeSetDefaultTTLOverride(defaultTTL.ttl);
    }
    for (let i = 0, len = overrides.length; i < len; i++) {
        const { namespace, representationName, ttl } = overrides[i];
        environment.storeSetTTLOverride(namespace, representationName, ttl);
    }
}

/**
 * Returns an empty InMemoryStore that can be used for ingestion. Copies over
 * the TTLOverrides from the given Environment's Store.
 */
function buildIngestStagingStore(environment) {
    return environment.storeBuildIngestionStagingStore();
}

const AdapterContextSegment = 'ADAPTER-CONTEXT';
const ADAPTER_CONTEXT_ID_SUFFIX = '__NAMED_CONTEXT';
async function reviveOrCreateContext(adapterId, durableStore, durableStoreErrorHandler, contextStores, pendingContextStoreKeys, onContextLoaded) {
    // initialize empty context store
    contextStores[adapterId] = create(null);
    const context = {
        set(key, value) {
            contextStores[adapterId][key] = value;
            durableStore.setEntries({
                [adapterId]: { data: contextStores[adapterId] },
            }, AdapterContextSegment);
            pendingContextStoreKeys.add(adapterId);
        },
        get(key) {
            return contextStores[adapterId][key];
        },
    };
    const contextReturn = () => {
        if (onContextLoaded !== undefined) {
            return onContextLoaded(context).then(() => {
                return context;
            });
        }
        return context;
    };
    try {
        const entries = await durableStore.getEntries([adapterId], AdapterContextSegment);
        if (entries !== undefined && entries[adapterId] !== undefined) {
            // if durable store has a saved context then load it in the store
            contextStores[adapterId] = entries[adapterId].data;
        }
    }
    catch (error) {
        durableStoreErrorHandler(error);
    }
    return contextReturn();
}
function isUnfulfilledSnapshot(cachedSnapshotResult) {
    if (cachedSnapshotResult === undefined) {
        return false;
    }
    if ('then' in cachedSnapshotResult) {
        return false;
    }
    return cachedSnapshotResult.state === 'Unfulfilled';
}
/**
 * Configures the environment to persist data into a durable store and attempt to resolve
 * data from the persistent store before hitting the network.
 *
 * @param environment The base environment
 * @param durableStore A DurableStore implementation
 * @param instrumentation An instrumentation function implementation
 */
function makeDurable(environment, { durableStore, instrumentation }) {
    let ingestStagingStore = null;
    const durableTTLStore = new DurableTTLStore(durableStore);
    const mergeKeysPromiseMap = new StoreKeyMap();
    // When a context store is mutated we write it to L2, which causes DS on change
    // event. If this instance of makeDurable caused that L2 write we can ignore that
    // on change event. This Set helps us do that.
    const pendingContextStoreKeys = new Set();
    const contextStores = create(null);
    let initializationPromise = new Promise((resolve) => {
        const finish = () => {
            resolve();
            initializationPromise = undefined;
        };
        reviveTTLOverrides(durableTTLStore, environment).then(finish);
    });
    //instrumentation for durable store errors
    const durableStoreErrorHandler = handleDurableStoreRejection(instrumentation);
    let disposed = false;
    const validateNotDisposed = () => {
        if (disposed === true) {
            throw new Error('This makeDurable instance has been disposed');
        }
    };
    const unsubscribe = durableStore.registerOnChangedListener(async (changes) => {
        const defaultSegmentKeys = [];
        const adapterContextSegmentKeys = [];
        for (let i = 0, len = changes.length; i < len; i++) {
            const change = changes[i];
            // we only care about changes to the data which is stored in the default
            // segment or the adapter context
            if (change.segment === DefaultDurableSegment) {
                defaultSegmentKeys.push(...change.ids);
            }
            else if (change.segment === AdapterContextSegment) {
                adapterContextSegmentKeys.push(...change.ids);
            }
        }
        // process adapter context changes
        const adapterContextKeysFromDifferentInstance = [];
        for (const key of adapterContextSegmentKeys) {
            if (pendingContextStoreKeys.has(key)) {
                // if this instance caused the L2 write then remove from the
                // "pending" Set and move on
                pendingContextStoreKeys.delete(key);
            }
            else {
                // else it came from another luvio instance and we need to
                // read from L2
                adapterContextKeysFromDifferentInstance.push(key);
            }
        }
        if (adapterContextKeysFromDifferentInstance.length > 0) {
            try {
                const entries = await durableStore.getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment);
                if (entries !== undefined) {
                    const entryKeys = keys(entries);
                    for (let i = 0, len = entryKeys.length; i < len; i++) {
                        const entryKey = entryKeys[i];
                        const entry = entries[entryKey];
                        contextStores[entryKey] = entry.data;
                    }
                }
            }
            catch (error) {
                durableStoreErrorHandler(error);
            }
        }
        // process default segment changes
        const defaultSegmentKeysLength = defaultSegmentKeys.length;
        if (defaultSegmentKeysLength > 0) {
            for (let i = 0; i < defaultSegmentKeysLength; i++) {
                const key = defaultSegmentKeys[i];
                const canonical = environment.storeGetCanonicalKey(key);
                if (canonical !== key) {
                    continue;
                }
                // TODO: W-8909393 If expiration is the only thing that changed we should not evict the data... so
                // if we stored expiration and data at different keys (or same keys in different segments)
                // then we could know if only the expiration has changed and we wouldn't need to evict
                // and go through an entire broadcast/revive cycle for unchanged data
                // call base environment storeEvict so this evict is not tracked for durable deletion
                environment.storeEvict(key);
            }
            await environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable);
        }
    });
    const dispose = function () {
        validateNotDisposed();
        disposed = true;
        return unsubscribe();
    };
    const storePublish = function (key, data) {
        validateNotDisposed();
        if (ingestStagingStore === null) {
            ingestStagingStore = buildIngestStagingStore(environment);
        }
        ingestStagingStore.publish(key, data);
        // remove record from main luvio L1 cache while we are on the synchronous path
        // because we do not want some other code attempting to use the
        // in-memory values before the durable store onChanged handler
        // calls back and revives the values to in-memory
        environment.storeEvict(key);
    };
    const publishStoreMetadata = function (recordId, storeMetadata) {
        validateNotDisposed();
        if (ingestStagingStore === null) {
            ingestStagingStore = buildIngestStagingStore(environment);
        }
        ingestStagingStore.publishMetadata(recordId, storeMetadata);
    };
    const storeIngest = function (key, ingest, response, luvio) {
        validateNotDisposed();
        // we don't ingest to the luvio L1 store from network directly, we ingest to
        // L2 and let DurableStore on change event revive keys into luvio L1 store
        if (ingestStagingStore === null) {
            ingestStagingStore = buildIngestStagingStore(environment);
        }
        environment.storeIngest(key, ingest, response, luvio, ingestStagingStore);
    };
    const storeIngestError = function (key, errorSnapshot, storeMetadataParams, _storeOverride) {
        validateNotDisposed();
        if (ingestStagingStore === null) {
            ingestStagingStore = buildIngestStagingStore(environment);
        }
        environment.storeIngestError(key, errorSnapshot, storeMetadataParams, ingestStagingStore);
    };
    const storeBroadcast = function (_rebuildSnapshot, _snapshotDataAvailable) {
        validateNotDisposed();
        // publishing to L2 is essentially "broadcasting" because the onChanged
        // handler will fire which will revive records to the main L1 store and
        // call the base storeBroadcast
        return publishChangesToDurableStore();
    };
    const publishChangesToDurableStore = function () {
        validateNotDisposed();
        if (ingestStagingStore === null) {
            return Promise.resolve();
        }
        const promise = flushInMemoryStoreValuesToDurableStore(ingestStagingStore, durableStore, durableStoreErrorHandler);
        ingestStagingStore = null;
        return promise;
    };
    const storeLookup = function (sel, createSnapshot, refresh, ttlStrategy) {
        validateNotDisposed();
        // if this lookup is right after an ingest there will be a staging store
        if (ingestStagingStore !== null) {
            const reader = new Reader(ingestStagingStore, sel.variables, refresh, undefined, ttlStrategy);
            return reader.read(sel);
        }
        // otherwise this is from buildCachedSnapshot and we should use the luvio
        // L1 store
        return environment.storeLookup(sel, createSnapshot, refresh, ttlStrategy);
    };
    const storeEvict = function (key) {
        validateNotDisposed();
        if (ingestStagingStore === null) {
            ingestStagingStore = buildIngestStagingStore(environment);
        }
        ingestStagingStore.evict(key);
    };
    const getNode = function (key) {
        validateNotDisposed();
        if (ingestStagingStore === null) {
            ingestStagingStore = buildIngestStagingStore(environment);
        }
        return environment.getNode(key, ingestStagingStore);
    };
    const wrapNormalizedGraphNode = function (normalized) {
        validateNotDisposed();
        if (ingestStagingStore === null) {
            ingestStagingStore = buildIngestStagingStore(environment);
        }
        return environment.wrapNormalizedGraphNode(normalized, ingestStagingStore);
    };
    const rebuildSnapshot = function (snapshot, onRebuild) {
        validateNotDisposed();
        // try rebuilding from memory
        environment.rebuildSnapshot(snapshot, (rebuilt) => {
            // only try reviving from durable store if snapshot is unfulfilled
            if (rebuilt.state !== 'Unfulfilled') {
                onRebuild(rebuilt);
                return;
            }
            // Do an L2 revive and emit to subscriber using the callback.
            reviveSnapshot(environment, durableStore, rebuilt, durableStoreErrorHandler, () => {
                // reviveSnapshot will revive into L1, and since "records" is a reference
                // (and not a copy) to the L1 records we can use it for rebuild
                let rebuiltSnap;
                environment.rebuildSnapshot(snapshot, (rebuilt) => {
                    rebuiltSnap = rebuilt;
                });
                return rebuiltSnap;
            }).then((result) => {
                onRebuild(result.snapshot);
            });
        });
    };
    const withContext = function (adapter, options) {
        validateNotDisposed();
        const { contextId, contextVersion, onContextLoaded } = options;
        let context = undefined;
        let contextKey = `${contextId}`;
        // if a context version is supplied, key with the version encoded
        if (contextVersion !== undefined) {
            contextKey += `::${contextVersion}`;
        }
        contextKey += ADAPTER_CONTEXT_ID_SUFFIX;
        const contextAsPromise = reviveOrCreateContext(contextKey, durableStore, durableStoreErrorHandler, contextStores, pendingContextStoreKeys, onContextLoaded);
        return (config, requestContext) => {
            if (context === undefined) {
                return contextAsPromise.then((revivedContext) => {
                    context = revivedContext;
                    return adapter(config, context, requestContext); // TODO - remove as any cast after https://github.com/salesforce-experience-platform-emu/luvio/pull/230
                });
            }
            return adapter(config, context, requestContext);
        };
    };
    const storeRedirect = function (existingKey, canonicalKey) {
        validateNotDisposed();
        // call redirect on staging store so "old" keys are removed from L2 on
        // the next publishChangesToDurableStore. NOTE: we don't need to call
        // redirect on the base environment store because staging store and base
        // L1 store share the same redirect and reverseRedirectKeys
        if (ingestStagingStore === null) {
            ingestStagingStore = buildIngestStagingStore(environment);
        }
        ingestStagingStore.redirect(existingKey, canonicalKey);
    };
    const storeSetTTLOverride = function (namespace, representationName, ttl) {
        validateNotDisposed();
        return Promise.all([
            environment.storeSetTTLOverride(namespace, representationName, ttl),
            durableTTLStore.setDurableTTLOverride(namespace, representationName, ttl),
        ]).then();
    };
    const storeSetDefaultTTLOverride = function (ttl) {
        validateNotDisposed();
        return Promise.all([
            environment.storeSetDefaultTTLOverride(ttl),
            durableTTLStore.setDefaultDurableTTLOverrides(ttl),
        ]).then();
    };
    const getDurableTTLOverrides = function () {
        validateNotDisposed();
        return durableTTLStore.getDurableTTLOverrides();
    };
    const dispatchResourceRequest = async function (request, context, eventObservers) {
        validateNotDisposed();
        // non-GET adapters call dispatchResourceRequest before any other luvio
        // function so this is our chance to ensure we're initialized
        if (initializationPromise !== undefined) {
            await initializationPromise;
        }
        return environment.dispatchResourceRequest(request, context, eventObservers);
    };
    // NOTE: we can't use "async" keyword on this function because that would
    // force it to always be an async response. The signature is a union
    // of sync/async so no "awaiting" in this function, just promise-chaining
    const applyCachePolicy = function (luvio, adapterRequestContext, buildSnapshotContext, buildCachedSnapshot, buildNetworkSnapshot) {
        validateNotDisposed();
        const wrappedCacheLookup = (injectedBuildSnapshotContext, injectedStoreLookup) => {
            const snapshot = buildCachedSnapshot(injectedBuildSnapshotContext, injectedStoreLookup, luvio);
            // if the adapter attempted to do an L1 lookup and it was unfulfilled
            // then we can attempt an L2 lookup
            if (isUnfulfilledSnapshot(snapshot)) {
                const start = Date.now();
                emitDurableEnvironmentAdapterEvent({ type: 'l2-revive-start' }, adapterRequestContext.eventObservers);
                const revivedSnapshot = reviveSnapshot(environment, durableStore, snapshot, durableStoreErrorHandler, () => injectedStoreLookup(snapshot.select, snapshot.refresh)).then((result) => {
                    emitDurableEnvironmentAdapterEvent({
                        type: 'l2-revive-end',
                        snapshot: result.snapshot,
                        duration: Date.now() - start,
                        l2Trips: result.metrics.l2Trips,
                    }, adapterRequestContext.eventObservers);
                    return result.snapshot;
                });
                return revivedSnapshot;
            }
            // otherwise just return what buildCachedSnapshot gave us
            return snapshot;
        };
        const wrappedApplyCachePolicy = () => {
            return environment.applyCachePolicy(luvio, adapterRequestContext, buildSnapshotContext, wrappedCacheLookup, buildNetworkSnapshot);
        };
        // GET adapters call applyCachePolicy before any other luvio
        // function so this is our chance to ensure we're initialized
        return initializationPromise !== undefined
            ? initializationPromise.then(wrappedApplyCachePolicy)
            : wrappedApplyCachePolicy();
    };
    const getIngestStagingStoreRecords = function () {
        validateNotDisposed();
        if (ingestStagingStore !== null) {
            return ingestStagingStore.fallbackStringKeyInMemoryStore.records;
        }
        return {};
    };
    const getIngestStagingStoreMetadata = function () {
        validateNotDisposed();
        if (ingestStagingStore !== null) {
            return ingestStagingStore.fallbackStringKeyInMemoryStore.metadata;
        }
        return {};
    };
    const handleSuccessResponse = async function (ingestAndBroadcastFunc, getResponseCacheKeysFunc) {
        validateNotDisposed();
        const cacheKeyMap = getResponseCacheKeysFunc();
        const cacheKeyMapKeys = cacheKeyMap.keysAsArray();
        const keysToRevive = new StoreKeySet();
        for (const cacheKeyMapKey of cacheKeyMapKeys) {
            const cacheKey = cacheKeyMap.get(cacheKeyMapKey);
            if (cacheKey.mergeable === true) {
                keysToRevive.add(cacheKeyMapKey);
            }
        }
        let snapshotFromMemoryIngest = undefined;
        // To-do: Once these are structured keys, will need to support them throughout durable logic W-12356727
        const keysToReviveAsArray = Array.from(keysToRevive.keysAsStrings());
        if (keysToReviveAsArray.length > 0) {
            // if we need to do an L2 read then L2 write then we need to synchronize
            // our read/merge/ingest/write Promise based on the keys so we don't
            // stomp over any data
            const readWritePromise = (async () => {
                const pendingPromises = [];
                for (const key of keysToReviveAsArray) {
                    const pendingPromise = mergeKeysPromiseMap.get(key);
                    if (pendingPromise !== undefined) {
                        // IMPORTANT: while on the synchronous code path we get a
                        // handle to pendingPromise and push it onto the array.
                        // This is important because later in this synchronous code
                        // path we will upsert readWritePromise into the
                        // mergeKeysPromiseMap (essentially overwriting pendingPromise
                        // in the map).
                        pendingPromises.push(pendingPromise);
                    }
                }
                await Promise.all(pendingPromises);
                const entries = await durableStore.getEntries(keysToReviveAsArray, DefaultDurableSegment);
                ingestStagingStore = buildIngestStagingStore(environment);
                publishDurableStoreEntries(entries, (key, record) => {
                    if (typeof key === 'string') {
                        ingestStagingStore.fallbackStringKeyInMemoryStore.records[key] =
                            record;
                    }
                    else {
                        ingestStagingStore.recordsMap.set(key, record);
                    }
                }, (key, metadata) => {
                    if (typeof key === 'string') {
                        ingestStagingStore.fallbackStringKeyInMemoryStore.metadata[key] =
                            metadata;
                    }
                    else {
                        ingestStagingStore.metadataMap.set(key, metadata);
                    }
                });
                snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
            })();
            for (const key of keysToReviveAsArray) {
                // we are overwriting the previous promise at this key, but that
                // is ok because we got a handle to it earlier (see the IMPORTANT
                // comment about 35 lines up)
                mergeKeysPromiseMap.set(key, readWritePromise);
            }
            try {
                await readWritePromise;
            }
            finally {
                for (const key of keysToReviveAsArray) {
                    const pendingPromise = mergeKeysPromiseMap.get(key);
                    // cleanup the entry from the map if this is the last promise
                    // for that key
                    if (pendingPromise === readWritePromise) {
                        mergeKeysPromiseMap.delete(key);
                    }
                }
            }
        }
        else {
            // we aren't doing any merging so we don't have to synchronize, the
            // underlying DurableStore implementation takes care of R/W sync
            // so all we have to do is ingest then write to L2
            ingestStagingStore = buildIngestStagingStore(environment);
            snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
        }
        if (snapshotFromMemoryIngest === undefined) {
            return undefined;
        }
        if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
            return snapshotFromMemoryIngest;
        }
        // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
        const { select, refresh } = snapshotFromMemoryIngest;
        const result = await reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, () => environment.storeLookup(select, environment.createSnapshot, refresh));
        return result.snapshot;
    };
    const handleErrorResponse = async function (ingestAndBroadcastFunc) {
        validateNotDisposed();
        ingestStagingStore = buildIngestStagingStore(environment);
        return ingestAndBroadcastFunc();
    };
    const getNotifyChangeStoreEntries = function (keys) {
        validateNotDisposed();
        return durableStore
            .getEntries(keys.map(serializeStructuredKey), DefaultDurableSegment)
            .then((durableRecords) => {
            const entries = [];
            publishDurableStoreEntries(durableRecords, (key, record) => {
                entries.push({
                    key,
                    record: record,
                });
            }, () => { });
            return entries;
        });
    };
    environment.defaultCachePolicy = {
        type: 'stale-while-revalidate',
        implementation: buildStaleWhileRevalidateImplementation(Number.MAX_SAFE_INTEGER),
    };
    return create(environment, {
        publishStoreMetadata: { value: publishStoreMetadata },
        storeIngest: { value: storeIngest },
        storeIngestError: { value: storeIngestError },
        storeBroadcast: { value: storeBroadcast },
        storeLookup: { value: storeLookup },
        storeEvict: { value: storeEvict },
        wrapNormalizedGraphNode: { value: wrapNormalizedGraphNode },
        getNode: { value: getNode },
        rebuildSnapshot: { value: rebuildSnapshot },
        withContext: { value: withContext },
|
|
868
|
-
storeSetTTLOverride: { value: storeSetTTLOverride },
|
|
869
|
-
storeSetDefaultTTLOverride: { value: storeSetDefaultTTLOverride },
|
|
870
|
-
storePublish: { value: storePublish },
|
|
871
|
-
storeRedirect: { value: storeRedirect },
|
|
872
|
-
dispose: { value: dispose },
|
|
873
|
-
publishChangesToDurableStore: { value: publishChangesToDurableStore },
|
|
874
|
-
getDurableTTLOverrides: { value: getDurableTTLOverrides },
|
|
875
|
-
dispatchResourceRequest: { value: dispatchResourceRequest },
|
|
876
|
-
applyCachePolicy: { value: applyCachePolicy },
|
|
877
|
-
getIngestStagingStoreRecords: { value: getIngestStagingStoreRecords },
|
|
878
|
-
getIngestStagingStoreMetadata: { value: getIngestStagingStoreMetadata },
|
|
879
|
-
handleSuccessResponse: { value: handleSuccessResponse },
|
|
880
|
-
handleErrorResponse: { value: handleErrorResponse },
|
|
881
|
-
getNotifyChangeStoreEntries: { value: getNotifyChangeStoreEntries },
|
|
882
|
-
});
|
|
+ const AdapterContextSegment = 'ADAPTER-CONTEXT';
+ const ADAPTER_CONTEXT_ID_SUFFIX = '__NAMED_CONTEXT';
+ async function reviveOrCreateContext(adapterId, durableStore, durableStoreErrorHandler, contextStores, pendingContextStoreKeys, onContextLoaded) {
+     // initialize empty context store
+     contextStores[adapterId] = create(null);
+     const context = {
+         set(key, value) {
+             contextStores[adapterId][key] = value;
+             durableStore.setEntries({
+                 [adapterId]: { data: contextStores[adapterId] },
+             }, AdapterContextSegment);
+             pendingContextStoreKeys.add(adapterId);
+         },
+         get(key) {
+             return contextStores[adapterId][key];
+         },
+     };
+     const contextReturn = () => {
+         if (onContextLoaded !== undefined) {
+             return onContextLoaded(context).then(() => {
+                 return context;
+             });
+         }
+         return context;
+     };
+     try {
+         const entries = await durableStore.getEntries([adapterId], AdapterContextSegment);
+         if (entries !== undefined && entries[adapterId] !== undefined) {
+             // if durable store has a saved context then load it in the store
+             contextStores[adapterId] = entries[adapterId].data;
+         }
+     }
+     catch (error) {
+         durableStoreErrorHandler(error);
+     }
+     return contextReturn();
+ }
+ function isUnfulfilledSnapshot(cachedSnapshotResult) {
+     if (cachedSnapshotResult === undefined) {
+         return false;
+     }
+     if ('then' in cachedSnapshotResult) {
+         return false;
+     }
+     return cachedSnapshotResult.state === 'Unfulfilled';
+ }
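The added reviveOrCreateContext above rebuilds an adapter's named context from the durable store on first use and writes every set back through to a dedicated segment, remembering which keys this instance wrote. A minimal sketch of that revive/write-through pattern follows; DurableStoreLike, CONTEXT_SEGMENT, and reviveOrCreateAdapterContext are illustrative names assumed for this sketch, not the package's API.

// Sketch only: assumed store shape, not @luvio/environments types.
interface DurableStoreLike {
    getEntries(ids: string[], segment: string): Promise<Record<string, { data: Record<string, unknown> }> | undefined>;
    setEntries(entries: Record<string, { data: Record<string, unknown> }>, segment: string): Promise<void>;
}

const CONTEXT_SEGMENT = 'ADAPTER-CONTEXT';

async function reviveOrCreateAdapterContext(
    adapterId: string,
    durableStore: DurableStoreLike,
    contextStores: Record<string, Record<string, unknown>>,
    pendingSelfWrites: Set<string>
) {
    // start from an empty in-memory copy of the context
    contextStores[adapterId] = {};
    // overlay whatever was persisted by a previous session, if anything
    const entries = await durableStore.getEntries([adapterId], CONTEXT_SEGMENT);
    if (entries !== undefined && entries[adapterId] !== undefined) {
        contextStores[adapterId] = entries[adapterId].data;
    }
    return {
        get(key: string) {
            return contextStores[adapterId][key];
        },
        set(key: string, value: unknown) {
            contextStores[adapterId][key] = value;
            // remember that this instance caused the write so the durable-store
            // change listener can skip it, then write through to the segment
            pendingSelfWrites.add(adapterId);
            return durableStore.setEntries({ [adapterId]: { data: contextStores[adapterId] } }, CONTEXT_SEGMENT);
        },
    };
}

The write-through keeps the persisted context and the in-memory copy in step without forcing readers of get() to await the durable store.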
+ /**
+  * Configures the environment to persist data into a durable store and attempt to resolve
+  * data from the persistent store before hitting the network.
+  *
+  * @param environment The base environment
+  * @param durableStore A DurableStore implementation
+  * @param instrumentation An instrumentation function implementation
+  */
+ function makeDurable(environment, { durableStore, instrumentation }) {
+     let ingestStagingStore = null;
+     const durableTTLStore = new DurableTTLStore(durableStore);
+     const mergeKeysPromiseMap = new StoreKeyMap();
+     // When a context store is mutated we write it to L2, which causes DS on change
+     // event. If this instance of makeDurable caused that L2 write we can ignore that
+     // on change event. This Set helps us do that.
+     const pendingContextStoreKeys = new Set();
+     const contextStores = create(null);
+     let initializationPromise = new Promise((resolve) => {
+         const finish = () => {
+             resolve();
+             initializationPromise = undefined;
+         };
+         reviveTTLOverrides(durableTTLStore, environment).then(finish);
+     });
+     //instrumentation for durable store errors
+     const durableStoreErrorHandler = handleDurableStoreRejection(instrumentation);
+     let disposed = false;
+     const validateNotDisposed = () => {
+         if (disposed === true) {
+             throw new Error('This makeDurable instance has been disposed');
+         }
+     };
+     const unsubscribe = durableStore.registerOnChangedListener(async (changes) => {
+         const defaultSegmentKeys = [];
+         const adapterContextSegmentKeys = [];
+         for (let i = 0, len = changes.length; i < len; i++) {
+             const change = changes[i];
+             // we only care about changes to the data which is stored in the default
+             // segment or the adapter context
+             if (change.segment === DefaultDurableSegment) {
+                 defaultSegmentKeys.push(...change.ids);
+             }
+             else if (change.segment === AdapterContextSegment) {
+                 adapterContextSegmentKeys.push(...change.ids);
+             }
+         }
+         // process adapter context changes
+         const adapterContextKeysFromDifferentInstance = [];
+         for (const key of adapterContextSegmentKeys) {
+             if (pendingContextStoreKeys.has(key)) {
+                 // if this instance caused the L2 write then remove from the
+                 // "pending" Set and move on
+                 pendingContextStoreKeys.delete(key);
+             }
+             else {
+                 // else it came from another luvio instance and we need to
+                 // read from L2
+                 adapterContextKeysFromDifferentInstance.push(key);
+             }
+         }
+         if (adapterContextKeysFromDifferentInstance.length > 0) {
+             try {
+                 const entries = await durableStore.getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment);
+                 if (entries !== undefined) {
+                     const entryKeys = keys(entries);
+                     for (let i = 0, len = entryKeys.length; i < len; i++) {
+                         const entryKey = entryKeys[i];
+                         const entry = entries[entryKey];
+                         contextStores[entryKey] = entry.data;
+                     }
+                 }
+             }
+             catch (error) {
+                 durableStoreErrorHandler(error);
+             }
+         }
+         // process default segment changes
+         const defaultSegmentKeysLength = defaultSegmentKeys.length;
+         if (defaultSegmentKeysLength > 0) {
+             for (let i = 0; i < defaultSegmentKeysLength; i++) {
+                 const key = defaultSegmentKeys[i];
+                 const canonical = environment.storeGetCanonicalKey(key);
+                 if (canonical !== key) {
+                     continue;
+                 }
+                 // TODO: W-8909393 If expiration is the only thing that changed we should not evict the data... so
+                 // if we stored expiration and data at different keys (or same keys in different segments)
+                 // then we could know if only the expiration has changed and we wouldn't need to evict
+                 // and go through an entire broadcast/revive cycle for unchanged data
+                 // call base environment storeEvict so this evict is not tracked for durable deletion
+                 environment.storeEvict(key);
+             }
+             await environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable);
+         }
+     });
+     const dispose = function () {
+         validateNotDisposed();
+         disposed = true;
+         return unsubscribe();
+     };
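The on-changed listener above splits durable-store changes by segment and, for adapter-context keys, skips the writes this instance made itself. A small sketch of that self-write filtering, under assumed shapes (ChangedSegment and partitionContextChanges are hypothetical names for illustration):

// Sketch only: the listener/change shapes here are assumptions.
type ChangedSegment = { segment: string; ids: string[] };

function partitionContextChanges(
    changes: ChangedSegment[],
    contextSegment: string,
    pendingSelfWrites: Set<string>
): string[] {
    const externalKeys: string[] = [];
    for (const change of changes) {
        if (change.segment !== contextSegment) {
            continue;
        }
        for (const id of change.ids) {
            if (pendingSelfWrites.has(id)) {
                // this instance caused the write; consume the marker and skip it
                pendingSelfWrites.delete(id);
            } else {
                // written by another instance; this key needs a fresh L2 read
                externalKeys.push(id);
            }
        }
    }
    return externalKeys;
}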
+     const storePublish = function (key, data) {
+         validateNotDisposed();
+         if (ingestStagingStore === null) {
+             ingestStagingStore = buildIngestStagingStore(environment);
+         }
+         ingestStagingStore.publish(key, data);
+         // remove record from main luvio L1 cache while we are on the synchronous path
+         // because we do not want some other code attempting to use the
+         // in-memory values before the durable store onChanged handler
+         // calls back and revives the values to in-memory
+         environment.storeEvict(key);
+     };
+     const publishStoreMetadata = function (recordId, storeMetadata) {
+         validateNotDisposed();
+         if (ingestStagingStore === null) {
+             ingestStagingStore = buildIngestStagingStore(environment);
+         }
+         ingestStagingStore.publishMetadata(recordId, storeMetadata);
+     };
+     const storeIngest = function (key, ingest, response, luvio) {
+         validateNotDisposed();
+         // we don't ingest to the luvio L1 store from network directly, we ingest to
+         // L2 and let DurableStore on change event revive keys into luvio L1 store
+         if (ingestStagingStore === null) {
+             ingestStagingStore = buildIngestStagingStore(environment);
+         }
+         environment.storeIngest(key, ingest, response, luvio, ingestStagingStore);
+     };
+     const storeIngestError = function (key, errorSnapshot, storeMetadataParams, _storeOverride) {
+         validateNotDisposed();
+         if (ingestStagingStore === null) {
+             ingestStagingStore = buildIngestStagingStore(environment);
+         }
+         environment.storeIngestError(key, errorSnapshot, storeMetadataParams, ingestStagingStore);
+     };
+     const storeBroadcast = function (_rebuildSnapshot, _snapshotDataAvailable) {
+         validateNotDisposed();
+         // publishing to L2 is essentially "broadcasting" because the onChanged
+         // handler will fire which will revive records to the main L1 store and
+         // call the base storeBroadcast
+         return publishChangesToDurableStore();
+     };
+     const publishChangesToDurableStore = function () {
+         validateNotDisposed();
+         if (ingestStagingStore === null) {
+             return Promise.resolve();
+         }
+         const promise = flushInMemoryStoreValuesToDurableStore(ingestStagingStore, durableStore, durableStoreErrorHandler);
+         ingestStagingStore = null;
+         return promise;
+     };
+     const storeLookup = function (sel, createSnapshot, refresh, ttlStrategy) {
+         validateNotDisposed();
+         // if this lookup is right after an ingest there will be a staging store
+         if (ingestStagingStore !== null) {
+             const reader = new Reader(ingestStagingStore, sel.variables, refresh, undefined, ttlStrategy);
+             return reader.read(sel);
+         }
+         // otherwise this is from buildCachedSnapshot and we should use the luvio
+         // L1 store
+         return environment.storeLookup(sel, createSnapshot, refresh, ttlStrategy);
+     };
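The publish/ingest/broadcast functions above all route writes through an in-memory staging store, evict the L1 copy, and only push to the durable store when changes are flushed; revival back into L1 happens later via the on-changed listener. A compact sketch of that flow, with all names and store shapes assumed for illustration only:

// Sketch only: SegmentedWriter and StagingFlow are hypothetical.
interface SegmentedWriter {
    setEntries(entries: Record<string, { data: unknown }>, segment: string): Promise<void>;
}

class StagingFlow {
    private staged = new Map<string, unknown>();
    constructor(
        private l1: Map<string, unknown>,
        private durable: SegmentedWriter,
        private segment: string
    ) {}

    publish(key: string, data: unknown): void {
        this.staged.set(key, data);
        // drop the L1 copy so nothing reads a value L2 has not confirmed yet
        this.l1.delete(key);
    }

    // "broadcast": flush the staged values to L2 and clear the stage
    flush(): Promise<void> {
        const entries: Record<string, { data: unknown }> = {};
        for (const [key, data] of this.staged) {
            entries[key] = { data };
        }
        this.staged.clear();
        return this.durable.setEntries(entries, this.segment);
    }
}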
+     const storeEvict = function (key) {
+         validateNotDisposed();
+         if (ingestStagingStore === null) {
+             ingestStagingStore = buildIngestStagingStore(environment);
+         }
+         ingestStagingStore.evict(key);
+     };
+     const getNode = function (key) {
+         validateNotDisposed();
+         if (ingestStagingStore === null) {
+             ingestStagingStore = buildIngestStagingStore(environment);
+         }
+         return environment.getNode(key, ingestStagingStore);
+     };
+     const wrapNormalizedGraphNode = function (normalized) {
+         validateNotDisposed();
+         if (ingestStagingStore === null) {
+             ingestStagingStore = buildIngestStagingStore(environment);
+         }
+         return environment.wrapNormalizedGraphNode(normalized, ingestStagingStore);
+     };
+     const rebuildSnapshot = function (snapshot, onRebuild) {
+         validateNotDisposed();
+         // try rebuilding from memory
+         environment.rebuildSnapshot(snapshot, (rebuilt) => {
+             // only try reviving from durable store if snapshot is unfulfilled
+             if (rebuilt.state !== 'Unfulfilled') {
+                 onRebuild(rebuilt);
+                 return;
+             }
+             // Do an L2 revive and emit to subscriber using the callback.
+             reviveSnapshot(environment, durableStore, rebuilt, durableStoreErrorHandler, () => {
+                 // reviveSnapshot will revive into L1, and since "records" is a reference
+                 // (and not a copy) to the L1 records we can use it for rebuild
+                 let rebuiltSnap;
+                 environment.rebuildSnapshot(snapshot, (rebuilt) => {
+                     rebuiltSnap = rebuilt;
+                 });
+                 return rebuiltSnap;
+             }).then((result) => {
+                 onRebuild(result.snapshot);
+             });
+         });
+     };
+     const withContext = function (adapter, options) {
+         validateNotDisposed();
+         const { contextId, contextVersion, onContextLoaded } = options;
+         let context = undefined;
+         let contextKey = `${contextId}`;
+         // if a context version is supplied, key with the version encoded
+         if (contextVersion !== undefined) {
+             contextKey += `::${contextVersion}`;
+         }
+         contextKey += ADAPTER_CONTEXT_ID_SUFFIX;
+         const contextAsPromise = reviveOrCreateContext(contextKey, durableStore, durableStoreErrorHandler, contextStores, pendingContextStoreKeys, onContextLoaded);
+         return (config, requestContext) => {
+             if (context === undefined) {
+                 return contextAsPromise.then((revivedContext) => {
+                     context = revivedContext;
+                     return adapter(config, context, requestContext); // TODO - remove as any cast after https://github.com/salesforce-experience-platform-emu/luvio/pull/230
+                 });
+             }
+             return adapter(config, context, requestContext);
+         };
+     };
+     const storeRedirect = function (existingKey, canonicalKey) {
+         validateNotDisposed();
+         // call redirect on staging store so "old" keys are removed from L2 on
+         // the next publishChangesToDurableStore. NOTE: we don't need to call
+         // redirect on the base environment store because staging store and base
+         // L1 store share the same redirect and reverseRedirectKeys
+         if (ingestStagingStore === null) {
+             ingestStagingStore = buildIngestStagingStore(environment);
+         }
+         ingestStagingStore.redirect(existingKey, canonicalKey);
+     };
+     const storeSetTTLOverride = function (namespace, representationName, ttl) {
+         validateNotDisposed();
+         return Promise.all([
+             environment.storeSetTTLOverride(namespace, representationName, ttl),
+             durableTTLStore.setDurableTTLOverride(namespace, representationName, ttl),
+         ]).then();
+     };
+     const storeSetDefaultTTLOverride = function (ttl) {
+         validateNotDisposed();
+         return Promise.all([
+             environment.storeSetDefaultTTLOverride(ttl),
+             durableTTLStore.setDefaultDurableTTLOverrides(ttl),
+         ]).then();
+     };
+     const getDurableTTLOverrides = function () {
+         validateNotDisposed();
+         return durableTTLStore.getDurableTTLOverrides();
+     };
+     const dispatchResourceRequest = async function (request, context, eventObservers) {
+         validateNotDisposed();
+         // non-GET adapters call dispatchResourceRequest before any other luvio
+         // function so this is our chance to ensure we're initialized
+         if (initializationPromise !== undefined) {
+             await initializationPromise;
+         }
+         return environment.dispatchResourceRequest(request, context, eventObservers);
+     };
+     // NOTE: we can't use "async" keyword on this function because that would
+     // force it to always be an async response. The signature is a union
+     // of sync/async so no "awaiting" in this function, just promise-chaining
+     const applyCachePolicy = function (luvio, adapterRequestContext, buildSnapshotContext, buildCachedSnapshot, buildNetworkSnapshot) {
+         validateNotDisposed();
+         const wrappedCacheLookup = (injectedBuildSnapshotContext, injectedStoreLookup) => {
+             const snapshot = buildCachedSnapshot(injectedBuildSnapshotContext, injectedStoreLookup, luvio);
+             // if the adapter attempted to do an L1 lookup and it was unfulfilled
+             // then we can attempt an L2 lookup
+             if (isUnfulfilledSnapshot(snapshot)) {
+                 const start = Date.now();
+                 emitDurableEnvironmentAdapterEvent({ type: 'l2-revive-start' }, adapterRequestContext.eventObservers);
+                 const revivedSnapshot = reviveSnapshot(environment, durableStore, snapshot, durableStoreErrorHandler, () => injectedStoreLookup(snapshot.select, snapshot.refresh)).then((result) => {
+                     emitDurableEnvironmentAdapterEvent({
+                         type: 'l2-revive-end',
+                         snapshot: result.snapshot,
+                         duration: Date.now() - start,
+                         l2Trips: result.metrics.l2Trips,
+                     }, adapterRequestContext.eventObservers);
+                     return result.snapshot;
+                 });
+                 return revivedSnapshot;
+             }
+             // otherwise just return what buildCachedSnapshot gave us
+             return snapshot;
+         };
+         const wrappedApplyCachePolicy = () => {
+             return environment.applyCachePolicy(luvio, adapterRequestContext, buildSnapshotContext, wrappedCacheLookup, buildNetworkSnapshot);
+         };
+         // GET adapters call applyCachePolicy before any other luvio
+         // function so this is our chance to ensure we're initialized
+         return initializationPromise !== undefined
+             ? initializationPromise.then(wrappedApplyCachePolicy)
+             : wrappedApplyCachePolicy();
+     };
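As the NOTE above explains, the cache-policy wrapper keeps the synchronous path synchronous and only goes asynchronous when the in-memory lookup comes back Unfulfilled, in which case it revives from L2 and re-reads. A minimal sketch of that sync/async union, with SketchSnapshot and wrapCacheLookup assumed for illustration:

// Sketch only: snapshot/revive shapes are assumptions, not luvio types.
type SketchSnapshot = { state: 'Fulfilled' | 'Stale' | 'Unfulfilled' };

function wrapCacheLookup(
    lookupL1: () => SketchSnapshot | undefined,
    reviveFromL2AndReread: () => Promise<SketchSnapshot>
): SketchSnapshot | undefined | Promise<SketchSnapshot> {
    const snapshot = lookupL1();
    if (snapshot !== undefined && snapshot.state === 'Unfulfilled') {
        // L1 miss: revive the needed keys from the durable store, then
        // re-run the lookup against the revived in-memory store
        return reviveFromL2AndReread();
    }
    // hit (or no lookup attempted): return the synchronous result untouched
    return snapshot;
}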
+     const getIngestStagingStoreRecords = function () {
+         validateNotDisposed();
+         if (ingestStagingStore !== null) {
+             return ingestStagingStore.fallbackStringKeyInMemoryStore.records;
+         }
+         return {};
+     };
+     const getIngestStagingStoreMetadata = function () {
+         validateNotDisposed();
+         if (ingestStagingStore !== null) {
+             return ingestStagingStore.fallbackStringKeyInMemoryStore.metadata;
+         }
+         return {};
+     };
+     const handleSuccessResponse = async function (ingestAndBroadcastFunc, getResponseCacheKeysFunc) {
+         validateNotDisposed();
+         const cacheKeyMap = getResponseCacheKeysFunc();
+         const cacheKeyMapKeys = cacheKeyMap.keysAsArray();
+         const keysToRevive = new StoreKeySet();
+         for (const cacheKeyMapKey of cacheKeyMapKeys) {
+             const cacheKey = cacheKeyMap.get(cacheKeyMapKey);
+             if (cacheKey.mergeable === true) {
+                 keysToRevive.add(cacheKeyMapKey);
+             }
+         }
+         let snapshotFromMemoryIngest = undefined;
+         // To-do: Once these are structured keys, will need to support them throughout durable logic W-12356727
+         const keysToReviveAsArray = Array.from(keysToRevive.keysAsStrings());
+         if (keysToReviveAsArray.length > 0) {
+             // if we need to do an L2 read then L2 write then we need to synchronize
+             // our read/merge/ingest/write Promise based on the keys so we don't
+             // stomp over any data
+             const readWritePromise = (async () => {
+                 const pendingPromises = [];
+                 for (const key of keysToReviveAsArray) {
+                     const pendingPromise = mergeKeysPromiseMap.get(key);
+                     if (pendingPromise !== undefined) {
+                         // IMPORTANT: while on the synchronous code path we get a
+                         // handle to pendingPromise and push it onto the array.
+                         // This is important because later in this synchronous code
+                         // path we will upsert readWritePromise into the
+                         // mergeKeysPromiseMap (essentially overwriting pendingPromise
+                         // in the map).
+                         pendingPromises.push(pendingPromise);
+                     }
+                 }
+                 await Promise.all(pendingPromises);
+                 const entries = await durableStore.getEntries(keysToReviveAsArray, DefaultDurableSegment);
+                 ingestStagingStore = buildIngestStagingStore(environment);
+                 publishDurableStoreEntries(entries, (key, record) => {
+                     if (typeof key === 'string') {
+                         ingestStagingStore.fallbackStringKeyInMemoryStore.records[key] =
+                             record;
+                     }
+                     else {
+                         ingestStagingStore.recordsMap.set(key, record);
+                     }
+                 }, (key, metadata) => {
+                     if (typeof key === 'string') {
+                         ingestStagingStore.fallbackStringKeyInMemoryStore.metadata[key] =
+                             metadata;
+                     }
+                     else {
+                         ingestStagingStore.metadataMap.set(key, metadata);
+                     }
+                 });
+                 snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
+             })();
+             for (const key of keysToReviveAsArray) {
+                 // we are overwriting the previous promise at this key, but that
+                 // is ok because we got a handle to it earlier (see the IMPORTANT
+                 // comment about 35 lines up)
+                 mergeKeysPromiseMap.set(key, readWritePromise);
+             }
+             try {
+                 await readWritePromise;
+             }
+             finally {
+                 for (const key of keysToReviveAsArray) {
+                     const pendingPromise = mergeKeysPromiseMap.get(key);
+                     // cleanup the entry from the map if this is the last promise
+                     // for that key
+                     if (pendingPromise === readWritePromise) {
+                         mergeKeysPromiseMap.delete(key);
+                     }
+                 }
+             }
+         }
+         else {
+             // we aren't doing any merging so we don't have to synchronize, the
+             // underlying DurableStore implementation takes care of R/W sync
+             // so all we have to do is ingest then write to L2
+             ingestStagingStore = buildIngestStagingStore(environment);
+             snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
+         }
+         if (snapshotFromMemoryIngest === undefined) {
+             return undefined;
+         }
+         if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
+             return snapshotFromMemoryIngest;
+         }
+         // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
+         const { select, refresh } = snapshotFromMemoryIngest;
+         const result = await reviveSnapshot(environment, durableStore, snapshotFromMemoryIngest, durableStoreErrorHandler, () => environment.storeLookup(select, environment.createSnapshot, refresh));
+         return result.snapshot;
+     };
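The IMPORTANT comment in handleSuccessResponse above is the core of the per-key synchronization: a new read/merge/write chains behind any in-flight work for the same keys, registers itself for those keys on the synchronous path, and cleans up only if it is still the latest registration. A self-contained sketch of that pattern, using a plain string-keyed Map rather than the StoreKeyMap keyed by luvio store keys (runSerializedByKeys and inFlightByKey are hypothetical names):

// Sketch only: illustrates the chaining, not the package's implementation.
const inFlightByKey = new Map<string, Promise<void>>();

async function runSerializedByKeys(keys: string[], work: () => Promise<void>): Promise<void> {
    const task = (async () => {
        // capture prior promises synchronously, before we overwrite them below
        const pending = keys
            .map((key) => inFlightByKey.get(key))
            .filter((p): p is Promise<void> => p !== undefined);
        await Promise.all(pending);
        await work();
    })();
    // register synchronously so later callers chain behind this task
    for (const key of keys) {
        inFlightByKey.set(key, task);
    }
    try {
        await task;
    } finally {
        // only clean up if no newer task replaced this entry in the meantime
        for (const key of keys) {
            if (inFlightByKey.get(key) === task) {
                inFlightByKey.delete(key);
            }
        }
    }
}

Because the async body runs synchronously up to its first await, the prior promises are captured before the registration loop overwrites them, which is exactly the ordering the IMPORTANT comment calls out.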
+     const handleErrorResponse = async function (ingestAndBroadcastFunc) {
+         validateNotDisposed();
+         ingestStagingStore = buildIngestStagingStore(environment);
+         return ingestAndBroadcastFunc();
+     };
+     const getNotifyChangeStoreEntries = function (keys) {
+         validateNotDisposed();
+         return durableStore
+             .getEntries(keys.map(serializeStructuredKey), DefaultDurableSegment)
+             .then((durableRecords) => {
+             const entries = [];
+             publishDurableStoreEntries(durableRecords, (key, record) => {
+                 entries.push({
+                     key,
+                     record: record,
+                 });
+             }, () => { });
+             return entries;
+         });
+     };
+     environment.defaultCachePolicy = {
+         type: 'stale-while-revalidate',
+         implementation: buildStaleWhileRevalidateImplementation(Number.MAX_SAFE_INTEGER),
+     };
+     return create(environment, {
+         publishStoreMetadata: { value: publishStoreMetadata },
+         storeIngest: { value: storeIngest },
+         storeIngestError: { value: storeIngestError },
+         storeBroadcast: { value: storeBroadcast },
+         storeLookup: { value: storeLookup },
+         storeEvict: { value: storeEvict },
+         wrapNormalizedGraphNode: { value: wrapNormalizedGraphNode },
+         getNode: { value: getNode },
+         rebuildSnapshot: { value: rebuildSnapshot },
+         withContext: { value: withContext },
+         storeSetTTLOverride: { value: storeSetTTLOverride },
+         storeSetDefaultTTLOverride: { value: storeSetDefaultTTLOverride },
+         storePublish: { value: storePublish },
+         storeRedirect: { value: storeRedirect },
+         dispose: { value: dispose },
+         publishChangesToDurableStore: { value: publishChangesToDurableStore },
+         getDurableTTLOverrides: { value: getDurableTTLOverrides },
+         dispatchResourceRequest: { value: dispatchResourceRequest },
+         applyCachePolicy: { value: applyCachePolicy },
+         getIngestStagingStoreRecords: { value: getIngestStagingStoreRecords },
+         getIngestStagingStoreMetadata: { value: getIngestStagingStoreMetadata },
+         handleSuccessResponse: { value: handleSuccessResponse },
+         handleErrorResponse: { value: handleErrorResponse },
+         getNotifyChangeStoreEntries: { value: getNotifyChangeStoreEntries },
+     });
  }

  export { DURABLE_METADATA_VERSION, DefaultDurableSegment, isDurableEnvironmentEvent, makeDurable, publishDurableStoreEntries };