@salesforce/lds-runtime-bridge 0.1.0-dev1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +82 -0
- package/dist/ldsRuntimeBridge.js +2014 -0
- package/dist/types/__mocks__/@salesforce/lds-network-aura.d.ts +4 -0
- package/dist/types/__mocks__/o11y/activity.d.ts +11 -0
- package/dist/types/__mocks__/o11y/client.d.ts +5 -0
- package/dist/types/__mocks__/o11y/idleDetector.d.ts +18 -0
- package/dist/types/__mocks__/o11y/instrumentation.d.ts +16 -0
- package/dist/types/instrumentation/instrumentMobileAdapter.d.ts +2 -0
- package/dist/types/main.d.ts +4 -0
- package/dist/types/runtime.d.ts +2 -0
- package/package.json +74 -0
@@ -0,0 +1,2014 @@
/**
 * Copyright (c) 2022, Salesforce, Inc.,
 * All rights reserved.
 * For full license text, see the LICENSE.txt file
 */

/*
 * ATTENTION!
 * THIS IS A GENERATED FILE FROM https://github.com/salesforce-experience-platform-emu/lds-lightning-platform
 * If you would like to contribute to LDS, please follow the steps outlined in the git repo.
 * Any changes made to this file in p4 will be automatically overwritten.
 * *******************************************************************************************
 */
import { setDefaultLuvio } from 'force/ldsEngine';
import { setBypassDeepFreeze, StoreKeySet, serializeStructuredKey, StringKeyInMemoryStore, Reader, deepFreeze, emitAdapterEvent, InMemoryStore, Environment, Luvio } from 'force/luvioEngine';
import { setupInstrumentation, instrumentAdapter as instrumentAdapter$1, instrumentLuvio } from 'force/ldsInstrumentation';
import { idleDetector, getInstrumentation } from 'o11y/client';
import { instrument } from 'force/ldsBindings';
import { extractRecordIdFromStoreKey, RECORD_VIEW_ENTITY_ID_PREFIX, isStoreKeyRecordViewEntity, keyBuilderRecord, RECORD_ID_PREFIX, RECORD_FIELDS_KEY_JUNCTION } from 'force/ldsAdaptersUiapi';
import networkAdapter from 'force/ldsNetwork';
import { initializeOneStore, getRuntime as getRuntime$1 } from 'native/ldsRuntimeMobile';
import ldsEngineCreator from 'force/ldsEngineCreator';

// the last version the metadata shape was altered
const DURABLE_METADATA_VERSION = '0.111.0';
function isDeprecatedDurableStoreEntry(durableRecord) {
    if (durableRecord.expiration !== undefined) {
        return true;
    }
    const metadata = durableRecord.metadata;
    if (metadata !== undefined) {
        const { metadataVersion } = metadata;
        // eventually we will want to assert that metadataVersion is defined
        if (metadataVersion !== undefined && metadataVersion !== DURABLE_METADATA_VERSION) {
            return true;
        }
    }
    // Add more deprecated shape checks here
    return false;
}
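
// Example (illustrative sketch, not part of the generated bundle): how the
// shape checks above classify durable entries. The literal shapes are
// assumptions inferred from isDeprecatedDurableStoreEntry, not a published
// schema.
console.assert(isDeprecatedDurableStoreEntry({ expiration: 123 }) === true); // legacy expiration-on-entry shape
console.assert(isDeprecatedDurableStoreEntry({ metadata: { metadataVersion: '0.1.0' } }) === true); // stale metadata version
console.assert(isDeprecatedDurableStoreEntry({ data: {}, metadata: { metadataVersion: DURABLE_METADATA_VERSION } }) === false);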
const DefaultDurableSegment = 'DEFAULT';
const RedirectDurableSegment = 'REDIRECT_KEYS';
const MessagingDurableSegment = 'MESSAGING';
const MessageNotifyStoreUpdateAvailable = 'notifyStoreUpdateAvailable';

const { keys: keys$2, create: create$2, assign: assign$2, freeze: freeze$1 } = Object;

// Durable store error instrumentation key
const DURABLE_STORE_ERROR = 'durable-store-error';
/**
 * Returns a function that processes errors from durable store promise rejections.
 * If running in a non-production environment, the error is rethrown.
 * When running in production the error is sent to instrumentation.
 * @param instrument Instrumentation function implementation
 */
function handleDurableStoreRejection(instrument) {
    return (error) => {
        if (process.env.NODE_ENV !== 'production') {
            throw error;
        }
        if (instrument !== undefined) {
            instrument(() => {
                return {
                    [DURABLE_STORE_ERROR]: true,
                    error: error,
                };
            });
        }
    };
}
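
// Example (illustrative sketch; the logging callback is a stand-in for a real
// instrumentation implementation): in production builds the returned handler
// reports a durable-store-error payload instead of rethrowing, so it can be
// attached directly to a durable store promise chain, e.g.
// durableStore.getEntries(keys, segment).catch(exampleRejectionHandler).
const exampleRejectionHandler = handleDurableStoreRejection((payloadFn) => {
    console.log(payloadFn()); // { 'durable-store-error': true, error: <rejection> }
});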

function isStoreEntryError(storeRecord) {
    if (!storeRecord || typeof storeRecord !== 'object') {
        return false;
    }
    return storeRecord.__type === 'error';
}

/**
 * Takes a set of entries from DurableStore and publishes them via the passed-in funcs.
 * This respects expiration and checks for valid DurableStore data shapes. This should
 * be used over manually parsing DurableStoreEntries.
 *
 * @param durableRecords The DurableStoreEntries to parse
 * @param put A function to call with the data of each DurableStoreEntry
 * @param publishMetadata A function to call with the metadata of each DurableStoreEntry
 */
function publishDurableStoreEntries(durableRecords, put, publishMetadata) {
    const revivedKeys = new StoreKeySet();
    let hadUnexpectedShape = false;
    if (durableRecords === undefined) {
        return { revivedKeys, hadUnexpectedShape };
    }
    const durableKeys = keys$2(durableRecords);
    if (durableKeys.length === 0) {
        // no records to revive
        return { revivedKeys, hadUnexpectedShape };
    }
    for (let i = 0, len = durableKeys.length; i < len; i += 1) {
        const key = durableKeys[i];
        const durableRecord = durableRecords[key];
        if (isDeprecatedDurableStoreEntry(durableRecord)) {
            // had the old shape, skip reviving this entry.
            hadUnexpectedShape = true;
            continue;
        }
        const { metadata, data } = durableRecord;
        if (data === undefined) {
            // if unexpected data skip reviving
            hadUnexpectedShape = true;
            continue;
        }
        if (metadata !== undefined) {
            const { expirationTimestamp } = metadata;
            if (expirationTimestamp === undefined) {
                // if unexpected expiration data skip reviving
                hadUnexpectedShape = true;
                continue;
            }
            publishMetadata(key, metadata);
        }
        if (isStoreEntryError(data)) {
            // freeze errors on way into L1
            deepFreeze(data.error);
        }
        put(key, data);
        revivedKeys.add(key);
    }
    return { revivedKeys, hadUnexpectedShape };
}
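
// Example (illustrative sketch, assuming hypothetical in-memory Map targets):
// callers supply `put` and `publishMetadata` callbacks; entries with a
// deprecated or incomplete shape are skipped and surfaced through
// `hadUnexpectedShape` rather than partially revived.
function exampleReviveIntoMaps(durableRecords) {
    const data = new Map();
    const metadata = new Map();
    const { revivedKeys, hadUnexpectedShape } = publishDurableStoreEntries(durableRecords, (key, record) => data.set(key, record), (key, meta) => metadata.set(key, meta));
    return { data, metadata, revivedKeys, hadUnexpectedShape };
}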
/**
 * This method returns a Promise to a snapshot that is revived from L2 cache. If
 * L2 does not have the entries necessary to fulfill the snapshot then this method
 * will refresh the snapshot from network, and then run the results from network
 * through L2 ingestion, returning the subsequent revived snapshot.
 */
function reviveSnapshot(baseEnvironment, durableStore, unavailableSnapshot, durableStoreErrorHandler, buildL1Snapshot, revivingStore, reviveMetrics = { l2Trips: [] }) {
    const { recordId, select, missingLinks, seenRecords, state } = unavailableSnapshot;
    // L2 can only revive Unfulfilled snapshots that have a selector since they have the
    // info needed to revive (like missingLinks) and rebuild. Otherwise return L1 snapshot.
    if (state !== 'Unfulfilled' || select === undefined) {
        return Promise.resolve({
            snapshot: unavailableSnapshot,
            metrics: reviveMetrics,
        });
    }
    const keysToReviveSet = new StoreKeySet();
    if (revivingStore) {
        // Any stale keys since the last L2 read should be cleared and fetched again
        for (const staleKey of revivingStore.staleEntries) {
            keysToReviveSet.add(staleKey);
        }
        revivingStore.clearStale();
    }
    else {
        // when not using a reviving store:
        // in case L1 store changes/deallocs a record while we are doing the async read
        // we attempt to read all keys from L2 - so combine recordId with any seenRecords
        keysToReviveSet.add(recordId);
        keysToReviveSet.merge(seenRecords);
    }
    keysToReviveSet.merge(missingLinks);
    const keysToRevive = keysToReviveSet.keysAsArray();
    const canonicalKeys = keysToRevive.map((x) => serializeStructuredKey(baseEnvironment.storeGetCanonicalKey(x)));
    const start = Date.now();
    const { l2Trips } = reviveMetrics;
    return durableStore.getEntries(canonicalKeys, DefaultDurableSegment).then((durableRecords) => {
        l2Trips.push({
            duration: Date.now() - start,
            keysRequestedCount: canonicalKeys.length,
        });
        const { revivedKeys, hadUnexpectedShape } = publishDurableStoreEntries(durableRecords,
        // TODO [W-10072584]: instead of implicitly using L1 we should take in
        // publish and publishMetadata funcs, so callers can decide where to
        // revive to (like they pass in how to do the buildL1Snapshot)
        baseEnvironment.storePut.bind(baseEnvironment), baseEnvironment.publishStoreMetadata.bind(baseEnvironment));
        // if the data coming back from DS had an unexpected shape then just
        // return the L1 snapshot
        if (hadUnexpectedShape === true) {
            return { snapshot: unavailableSnapshot, metrics: reviveMetrics };
        }
        if (revivedKeys.size() === 0) {
            // durable store doesn't have what we asked for so return L1 snapshot
            return { snapshot: unavailableSnapshot, metrics: reviveMetrics };
        }
        // try building the snapshot from L1 now that we have revived the missingLinks
        const snapshot = buildL1Snapshot();
        // if snapshot is pending then some other in-flight refresh will broadcast
        // later
        if (snapshot.state === 'Pending') {
            return { snapshot, metrics: reviveMetrics };
        }
        if (snapshot.state === 'Unfulfilled') {
            // have to check if the new snapshot has any additional seenRecords
            // and revive again if so
            const { seenRecords: newSnapshotSeenRecords, missingLinks: newSnapshotMissingLinks, recordId: newSnapshotRecordId, } = snapshot;
            const newKeysToReviveSet = new StoreKeySet();
            newKeysToReviveSet.add(newSnapshotRecordId);
            newKeysToReviveSet.merge(newSnapshotSeenRecords);
            newKeysToReviveSet.merge(newSnapshotMissingLinks);
            const newKeys = newKeysToReviveSet.keysAsArray();
            // in case DS returned additional entries we combine the requested
            // and returned keys
            const alreadyRequestedOrRevivedSet = keysToReviveSet;
            alreadyRequestedOrRevivedSet.merge(revivedKeys);
            // if there's any seen keys in the newly rebuilt snapshot that
            // haven't already been requested or returned then revive again
            for (let i = 0, len = newKeys.length; i < len; i++) {
                const newSnapshotSeenKey = newKeys[i];
                if (!alreadyRequestedOrRevivedSet.has(newSnapshotSeenKey)) {
                    return reviveSnapshot(baseEnvironment, durableStore, snapshot, durableStoreErrorHandler, buildL1Snapshot, revivingStore, reviveMetrics);
                }
            }
        }
        return { snapshot, metrics: reviveMetrics };
    }, (error) => {
        durableStoreErrorHandler(error);
        // getEntries failed, return the L1 snapshot
        return { snapshot: unavailableSnapshot, metrics: reviveMetrics };
    });
}
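
// Example (illustrative sketch): the resolved value pairs the snapshot with
// accumulated metrics; each recursive revive pass appends one entry to
// `l2Trips`, so callers can count durable-store round trips per revive.
function exampleSummarizeReviveMetrics(reviveResult) {
    const { l2Trips } = reviveResult.metrics;
    const totalDuration = l2Trips.reduce((sum, trip) => sum + trip.duration, 0);
    return { trips: l2Trips.length, totalDuration };
}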

const TTL_DURABLE_SEGMENT = 'TTL_DURABLE_SEGMENT';
const TTL_DEFAULT_KEY = 'TTL_DEFAULT_KEY';
function buildDurableTTLOverrideStoreKey(namespace, representationName) {
    return `${namespace}::${representationName}`;
}
function isEntryDurableTTLOverride(entry) {
    if (typeof entry === 'object' && entry !== undefined && entry !== null) {
        const data = entry.data;
        if (data !== undefined) {
            return (data.namespace !== undefined &&
                data.representationName !== undefined &&
                data.ttl !== undefined);
        }
    }
    return false;
}
function isDefaultDurableTTLOverride(override) {
    return (override.namespace === TTL_DEFAULT_KEY && override.representationName === TTL_DEFAULT_KEY);
}
/**
 * Class to set and get the TTL override values in the Durable Store
 */
class DurableTTLStore {
    constructor(durableStore) {
        this.durableStore = durableStore;
    }
    setDefaultDurableTTLOverrides(ttl) {
        return this.durableStore.setEntries({
            [buildDurableTTLOverrideStoreKey(TTL_DEFAULT_KEY, TTL_DEFAULT_KEY)]: {
                data: {
                    namespace: TTL_DEFAULT_KEY,
                    representationName: TTL_DEFAULT_KEY,
                    ttl,
                },
            },
        }, TTL_DURABLE_SEGMENT);
    }
    setDurableTTLOverride(namespace, representationName, ttl) {
        return this.durableStore.setEntries({
            [buildDurableTTLOverrideStoreKey(namespace, representationName)]: {
                data: { namespace, representationName, ttl },
            },
        }, TTL_DURABLE_SEGMENT);
    }
    getDurableTTLOverrides() {
        return this.durableStore
            .getAllEntries(TTL_DURABLE_SEGMENT)
            .then((entries) => {
                const overrides = [];
                let defaultTTL = undefined;
                if (entries === undefined) {
                    return {
                        defaultTTL,
                        overrides,
                    };
                }
                const keys$1 = keys$2(entries);
                for (let i = 0, len = keys$1.length; i < len; i++) {
                    const key = keys$1[i];
                    const entry = entries[key];
                    if (entry !== undefined && isEntryDurableTTLOverride(entry)) {
                        if (isDefaultDurableTTLOverride(entry.data)) {
                            defaultTTL = entry.data;
                        }
                        else {
                            overrides.push(entry.data);
                        }
                    }
                }
                return {
                    defaultTTL,
                    overrides,
                };
            });
    }
}
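
// Example (illustrative sketch; `exampleDurableStore` is a hypothetical
// DurableStore implementation and the TTL values are arbitrary): overrides
// are keyed as `namespace::representationName` in the TTL segment and read
// back as a { defaultTTL, overrides } pair.
async function exampleConfigureTTLs(exampleDurableStore) {
    const ttlStore = new DurableTTLStore(exampleDurableStore);
    await ttlStore.setDefaultDurableTTLOverrides(30000);
    await ttlStore.setDurableTTLOverride('UiApi', 'RecordRepresentation', 60000);
    return ttlStore.getDurableTTLOverrides();
}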

function flushInMemoryStoreValuesToDurableStore(store, durableStore, durableStoreErrorHandler, redirects, shouldFlush, additionalDurableStoreOperations = [], enableDurableMetadataRefresh = false) {
    const durableRecords = create$2(null);
    const refreshedDurableRecords = create$2(null);
    const evictedRecords = create$2(null);
    const { visitedIds, refreshedIds } = store.fallbackStringKeyInMemoryStore;
    // TODO: W-8909393 Once metadata is stored in its own segment we need to
    // call setEntries for the visitedIds on default segment and call setEntries
    // on the metadata segment for the refreshedIds
    const keys$1 = keys$2({ ...visitedIds, ...refreshedIds });
    for (let i = 0, len = keys$1.length; i < len; i += 1) {
        const key = keys$1[i];
        const canonicalKey = store.getCanonicalRecordId(key);
        // this record has been redirected, evict the original
        if (canonicalKey !== key) {
            evictedRecords[key] = true;
            continue;
        }
        const record = store.readEntry(key);
        const wasVisited = visitedIds[key] !== undefined;
        // this record has been evicted, evict from DS
        if (wasVisited && record === undefined) {
            evictedRecords[key] = true;
            continue;
        }
        const metadata = store.readMetadata(key);
        const entries = wasVisited === true || enableDurableMetadataRefresh === false
            ? durableRecords
            : refreshedDurableRecords;
        const { flushValue: flushValue, forceFlushMetadata: flushMetadata } = shouldFlush(key, record);
        if (flushValue) {
            setRecordTo(entries, key, record, metadata);
        }
        else {
            // If the record is not to be flushed, we still need to update the metadata
            if (flushMetadata === true) {
                setRecordTo(refreshedDurableRecords, key, record, metadata);
            }
        }
    }
    const durableStoreOperations = additionalDurableStoreOperations;
    const recordKeys = keys$2(durableRecords);
    if (recordKeys.length > 0) {
        // publishes with data
        durableStoreOperations.push({
            type: 'setEntries',
            entries: durableRecords,
            segment: DefaultDurableSegment,
        });
    }
    const refreshKeys = keys$2(refreshedDurableRecords);
    if (refreshKeys.length > 0) {
        // publishes with only metadata updates
        durableStoreOperations.push({
            type: 'setMetadata',
            entries: refreshedDurableRecords,
            segment: DefaultDurableSegment,
        });
    }
    // redirects
    redirects.forEach((value, key) => {
        durableStoreOperations.push({
            type: 'setEntries',
            entries: {
                [key]: {
                    data: { key, redirect: value },
                },
            },
            segment: RedirectDurableSegment,
        });
    });
    // evicts
    const evictedKeys = keys$2(evictedRecords);
    if (evictedKeys.length > 0) {
        durableStoreOperations.push({
            type: 'evictEntries',
            ids: evictedKeys,
            segment: DefaultDurableSegment,
        });
    }
    if (durableStoreOperations.length > 0) {
        return durableStore.batchOperations(durableStoreOperations).catch(durableStoreErrorHandler);
    }
    return Promise.resolve();
}
function setRecordTo(entries, key, record, metadata) {
    entries[key] = {
        data: record,
    };
    if (metadata !== undefined) {
        entries[key].metadata = {
            ...metadata,
            metadataVersion: DURABLE_METADATA_VERSION,
        };
    }
}
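
// Example (illustrative sketch; keys and record bodies are hypothetical): a
// flush containing one updated record, one redirect, and one eviction batches
// into a single batchOperations payload shaped like this.
const exampleBatchOperations = [
    {
        type: 'setEntries',
        entries: { exampleRecordKey: { data: { id: 'example' }, metadata: { metadataVersion: DURABLE_METADATA_VERSION } } },
        segment: DefaultDurableSegment,
    },
    {
        type: 'setEntries',
        entries: { exampleOldKey: { data: { key: 'exampleOldKey', redirect: 'exampleCanonicalKey' } } },
        segment: RedirectDurableSegment,
    },
    { type: 'evictEntries', ids: ['exampleEvictedKey'], segment: DefaultDurableSegment },
];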

const DurableEnvironmentEventDiscriminator = 'durable';
function emitDurableEnvironmentAdapterEvent(eventData, observers) {
    emitAdapterEvent({
        type: 'environment',
        timestamp: Date.now(),
        environment: DurableEnvironmentEventDiscriminator,
        data: eventData,
    }, observers);
}

async function reviveTTLOverrides(ttlStore, environment) {
    const map = await ttlStore.getDurableTTLOverrides();
    const { defaultTTL, overrides } = map;
    if (defaultTTL !== undefined) {
        environment.storeSetDefaultTTLOverride(defaultTTL.ttl);
    }
    for (let i = 0, len = overrides.length; i < len; i++) {
        const { namespace, representationName, ttl } = overrides[i];
        environment.storeSetTTLOverride(namespace, representationName, ttl);
    }
}

/**
 * Returns an empty InMemoryStore that can be used for ingestion. Copies over
 * the TTLOverrides from the given Environment's Store.
 */
function buildIngestStagingStore(environment) {
    return environment.storeBuildIngestionStagingStore();
}

async function reviveRedirects(durableStore, env) {
    const entries = await durableStore.getAllEntries(RedirectDurableSegment);
    if (entries) {
        for (const durableEntry of Object.keys(entries)) {
            const entry = entries[durableEntry];
            if (entry) {
                const { data: { key, redirect }, } = entry;
                env.storeRedirect(key, redirect);
            }
        }
    }
}

function buildRevivingStagingStore(upstreamStore) {
    const localStore = new StringKeyInMemoryStore();
    const staleEntries = new Set();
    function readEntry(key) {
        if (typeof key !== 'string') {
            return upstreamStore.readEntry(key);
        }
        let storeEntry = localStore.readEntry(key);
        if (!storeEntry) {
            // read from upstream store...
            storeEntry = upstreamStore.readEntry(key);
            // put it in our store to avoid it getting evicted prior to the next durable store read
            localStore.put(key, storeEntry);
        }
        return storeEntry;
    }
    // Entries are marked stale by the durable store change listener. They are not
    // immediately evicted so as to not result in a cache miss during a rebuild.
    // The revive process will clear stale entries and read them from the durable store
    // on the next revive loop.
    function markStale(key) {
        staleEntries.add(key);
    }
    // The revive loop clears stale entries right before reading from the durable store.
    // Any stale entries will be revived to ensure they are present in L1 and match the
    // latest data.
    function clearStale() {
        for (const key of staleEntries) {
            localStore.dealloc(key);
        }
        staleEntries.clear();
    }
    // All functions other than `readEntry` pass through to the upstream store.
    // A reviving store is only "active" during a call to `environment.storeLookup`, and will
    // be used by the reader attempting to build an L1 snapshot. Immediately after the L1 rebuild
    // the reviving store becomes inactive other than receiving change notifications.
    return create$2(upstreamStore, {
        readEntry: { value: readEntry },
        markStale: { value: markStale },
        clearStale: { value: clearStale },
        staleEntries: { value: staleEntries },
    });
}
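
// Example (illustrative sketch): the durable store change listener marks keys
// stale while a revive is in flight; the next revive pass deallocs them from
// the local overlay so they fall through to the upstream store (and then L2)
// instead of being served from a possibly out-of-date local copy.
function exampleStaleCycle(revivingStore, changedKey) {
    revivingStore.markStale(changedKey); // durable store reported a change
    // ...at the top of the next revive pass:
    revivingStore.clearStale(); // drop local copies of stale keys
    return revivingStore.readEntry(changedKey); // re-reads via the upstream store
}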

const AdapterContextSegment = 'ADAPTER-CONTEXT';
const ADAPTER_CONTEXT_ID_SUFFIX = '__NAMED_CONTEXT';
async function reviveOrCreateContext(adapterId, durableStore, durableStoreErrorHandler, contextStores, pendingContextStoreKeys, onContextLoaded) {
    // initialize empty context store
    contextStores[adapterId] = create$2(null);
    const context = {
        set(key, value) {
            contextStores[adapterId][key] = value;
            durableStore.setEntries({
                [adapterId]: { data: contextStores[adapterId] },
            }, AdapterContextSegment);
            pendingContextStoreKeys.add(adapterId);
        },
        get(key) {
            return contextStores[adapterId][key];
        },
    };
    const contextReturn = () => {
        if (onContextLoaded !== undefined) {
            return onContextLoaded(context).then(() => {
                return context;
            });
        }
        return context;
    };
    try {
        const entries = await durableStore.getEntries([adapterId], AdapterContextSegment);
        if (entries !== undefined && entries[adapterId] !== undefined) {
            // if durable store has a saved context then load it in the store
            contextStores[adapterId] = entries[adapterId].data;
        }
    }
    catch (error) {
        durableStoreErrorHandler(error);
    }
    return contextReturn();
}
function isUnfulfilledSnapshot(cachedSnapshotResult) {
    if (cachedSnapshotResult === undefined) {
        return false;
    }
    if ('then' in cachedSnapshotResult) {
        return false;
    }
    return cachedSnapshotResult.state === 'Unfulfilled';
}
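
// Example (illustrative sketch): buildCachedSnapshot may return a snapshot or
// a promise; only a synchronous 'Unfulfilled' result should trigger the L2
// lookup path in makeDurable below.
console.assert(isUnfulfilledSnapshot(undefined) === false);
console.assert(isUnfulfilledSnapshot(Promise.resolve()) === false); // promises are filtered by the 'then' check
console.assert(isUnfulfilledSnapshot({ state: 'Unfulfilled' }) === true);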
/**
 * Configures the environment to persist data into a durable store and attempt to resolve
 * data from the persistent store before hitting the network. Sets the default
 * cache policy to stale-while-revalidate with infinite staleDuration.
 *
 * @param environment The base environment
 * @param durableStore A DurableStore implementation
 * @param instrumentation An instrumentation function implementation
 */
function makeDurable(environment, { durableStore, instrumentation, useRevivingStore, shouldFlush, enableDurableMetadataRefresh = false, disableDeepFreeze = false, }) {
    // runtimes can choose to disable deepFreeze, e.g. headless mobile runtime
    setBypassDeepFreeze(disableDeepFreeze);
    let stagingStore = null;
    const durableTTLStore = new DurableTTLStore(durableStore);
    const mergeKeysPromiseMap = new Map();
    // When a context store is mutated we write it to L2, which causes a DS on-change
    // event. If this instance of makeDurable caused that L2 write we can ignore that
    // on-change event. This Set helps us do that.
    const pendingContextStoreKeys = new Set();
    // Reviving stores are tracked so that they can be notified of durable store change notifications.
    const revivingStores = new Set();
    // redirects that need to be flushed to the durable store
    const pendingStoreRedirects = new Map();
    const contextStores = create$2(null);
    let initializationPromise = new Promise((resolve) => {
        const finish = () => {
            resolve();
            initializationPromise = undefined;
        };
        Promise.all([
            reviveTTLOverrides(durableTTLStore, environment),
            reviveRedirects(durableStore, environment),
        ]).then(finish);
    });
    // instrumentation for durable store errors
    const durableStoreErrorHandler = handleDurableStoreRejection(instrumentation);
    let disposed = false;
    const validateNotDisposed = () => {
        if (disposed === true) {
            throw new Error('This makeDurable instance has been disposed');
        }
    };
    const unsubscribe = durableStore.registerOnChangedListener(async (changes) => {
        const defaultSegmentKeys = [];
        const adapterContextSegmentKeys = [];
        const redirectSegmentKeys = [];
        const metadataRefreshSegmentKeys = [];
        const messagingSegmentKeys = [];
        let shouldBroadcast = false;
        for (let i = 0, len = changes.length; i < len; i++) {
            const change = changes[i];
            // we only care about changes to the data which is stored in the default
            // segment or the adapter context
            if (change.segment === DefaultDurableSegment) {
                if (change.type === 'setMetadata') {
                    metadataRefreshSegmentKeys.push(...change.ids);
                }
                else {
                    defaultSegmentKeys.push(...change.ids);
                }
            }
            else if (change.segment === AdapterContextSegment) {
                adapterContextSegmentKeys.push(...change.ids);
            }
            else if (change.segment === RedirectDurableSegment) {
                redirectSegmentKeys.push(...change.ids);
            }
            else if (change.segment === MessagingDurableSegment) {
                messagingSegmentKeys.push(...change.ids);
            }
        }
        if (redirectSegmentKeys.length > 0) {
            const redirectEntries = await durableStore.getEntries(redirectSegmentKeys, RedirectDurableSegment);
            if (redirectEntries !== undefined) {
                const redirectKeys = Object.keys(redirectEntries);
                for (const key of redirectKeys) {
                    const redirectData = redirectEntries[key];
                    environment.storeRedirect(redirectData.data.key, redirectData.data.redirect);
                    shouldBroadcast = true;
                }
            }
        }
        // process adapter context changes
        const adapterContextKeysFromDifferentInstance = [];
        for (const key of adapterContextSegmentKeys) {
            if (pendingContextStoreKeys.has(key)) {
                // if this instance caused the L2 write then remove from the
                // "pending" Set and move on
                pendingContextStoreKeys.delete(key);
            }
            else {
                // else it came from another luvio instance and we need to
                // read from L2
                adapterContextKeysFromDifferentInstance.push(key);
            }
        }
        if (adapterContextKeysFromDifferentInstance.length > 0) {
            try {
                const entries = await durableStore.getEntries(adapterContextKeysFromDifferentInstance, AdapterContextSegment);
                if (entries !== undefined) {
                    const entryKeys = keys$2(entries);
                    for (let i = 0, len = entryKeys.length; i < len; i++) {
                        const entryKey = entryKeys[i];
                        const entry = entries[entryKey];
                        contextStores[entryKey] = entry.data;
                    }
                }
            }
            catch (error) {
                durableStoreErrorHandler(error);
            }
        }
        // process default segment changes
        const defaultSegmentKeysLength = defaultSegmentKeys.length;
        if (defaultSegmentKeysLength > 0) {
            for (let i = 0; i < defaultSegmentKeysLength; i++) {
                const key = defaultSegmentKeys[i];
                // TODO: W-8909393 If expiration is the only thing that changed we should not evict the data... so
                // if we stored expiration and data at different keys (or same keys in different segments)
                // then we could know if only the expiration has changed and we wouldn't need to evict
                // and go through an entire broadcast/revive cycle for unchanged data
                // call base environment storeEvict so this evict is not tracked for durable deletion
                environment.storeEvict(key);
                for (const revivingStore of revivingStores) {
                    revivingStore.markStale(key);
                }
            }
            shouldBroadcast = true;
        }
        // process metadata-only refreshes
        if (metadataRefreshSegmentKeys.length > 0) {
            const filteredKeys = metadataRefreshSegmentKeys.filter((s) => environment.storeKeyExists(s));
            if (filteredKeys.length > 0) {
                const entries = await durableStore.getMetadata(filteredKeys, DefaultDurableSegment);
                if (entries !== undefined) {
                    const entryKeys = keys$2(entries);
                    for (let i = 0, len = entryKeys.length; i < len; i++) {
                        const entryKey = entryKeys[i];
                        const { metadata } = entries[entryKey];
                        if (metadata !== undefined) {
                            environment.putStoreMetadata(entryKey, metadata, false);
                        }
                    }
                }
            }
        }
        if (shouldBroadcast) {
            await environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable);
        }
        // if a notifyStoreUpdateAvailable message is present, pull the message body
        // out of the store and call environment.notifyStoreUpdateAvailable
        if (messagingSegmentKeys.includes(MessageNotifyStoreUpdateAvailable)) {
            const entries = await durableStore.getEntries([MessageNotifyStoreUpdateAvailable], MessagingDurableSegment);
            if (entries !== undefined) {
                const notifyEntry = entries[MessageNotifyStoreUpdateAvailable];
                if (notifyEntry !== undefined) {
                    const keys = notifyEntry.data;
                    if (keys.length > 0) {
                        environment.notifyStoreUpdateAvailable(keys);
                    }
                }
            }
        }
    });
    const dispose = function () {
        validateNotDisposed();
        disposed = true;
        return unsubscribe();
    };
    const storePublish = function (key, data) {
        validateNotDisposed();
        if (stagingStore === null) {
            stagingStore = buildIngestStagingStore(environment);
        }
        stagingStore.publish(key, data);
        // remove record from main luvio L1 cache while we are on the synchronous path
        // because we do not want some other code attempting to use the
        // in-memory values before the durable store onChanged handler
        // calls back and revives the values to in-memory
        environment.storeDealloc(key);
    };
    const publishStoreMetadata = function (recordId, storeMetadata) {
        validateNotDisposed();
        if (stagingStore === null) {
            stagingStore = buildIngestStagingStore(environment);
        }
        stagingStore.publishMetadata(recordId, storeMetadata);
    };
    const storeIngest = function (key, ingest, response, luvio) {
        validateNotDisposed();
        // we don't ingest to the luvio L1 store from network directly, we ingest to
        // L2 and let DurableStore on change event revive keys into luvio L1 store
        if (stagingStore === null) {
            stagingStore = buildIngestStagingStore(environment);
        }
        environment.storeIngest(key, ingest, response, luvio, stagingStore);
    };
    const storeIngestError = function (key, errorSnapshot, storeMetadataParams, _storeOverride) {
        validateNotDisposed();
        if (stagingStore === null) {
            stagingStore = buildIngestStagingStore(environment);
        }
        environment.storeIngestError(key, errorSnapshot, storeMetadataParams, stagingStore);
    };
    const storeBroadcast = function (_rebuildSnapshot, _snapshotDataAvailable) {
        validateNotDisposed();
        // publishing to L2 is essentially "broadcasting" because the onChanged
        // handler will fire which will revive records to the main L1 store and
        // call the base storeBroadcast
        return publishChangesToDurableStore();
    };
    const publishChangesToDurableStore = function (additionalDurableStoreOperations) {
        validateNotDisposed();
        if (stagingStore === null) {
            return Promise.resolve();
        }
        const promise = flushInMemoryStoreValuesToDurableStore(stagingStore, durableStore, durableStoreErrorHandler, new Map(pendingStoreRedirects), shouldFlush !== null && shouldFlush !== void 0 ? shouldFlush : (() => ({ flushValue: true })), additionalDurableStoreOperations, enableDurableMetadataRefresh);
        pendingStoreRedirects.clear();
        stagingStore = null;
        return promise;
    };
    const storeLookup = function (sel, createSnapshot, refresh, ttlStrategy) {
        validateNotDisposed();
        // if this lookup is right after an ingest or during a revive there will be a staging store
        if (stagingStore !== null) {
            const reader = new Reader(stagingStore, sel.variables, refresh, undefined, ttlStrategy);
            return reader.read(sel);
        }
        // otherwise this is from buildCachedSnapshot and we should use the luvio
        // L1 store
        return environment.storeLookup(sel, createSnapshot, refresh, ttlStrategy);
    };
    const storeEvict = function (key) {
        validateNotDisposed();
        if (stagingStore === null) {
            stagingStore = buildIngestStagingStore(environment);
        }
        stagingStore.evict(key);
    };
    const getNode = function (key) {
        validateNotDisposed();
        if (stagingStore !== null) {
            return environment.getNode(key, stagingStore);
        }
        return environment.getNode(key);
    };
    const wrapNormalizedGraphNode = function (normalized, key) {
        validateNotDisposed();
        if (stagingStore !== null) {
            return environment.wrapNormalizedGraphNode(normalized, key, stagingStore);
        }
        return environment.wrapNormalizedGraphNode(normalized, key);
    };
    const rebuildSnapshot = function (snapshot, onRebuild) {
        validateNotDisposed();
        // try rebuilding from memory
        environment.rebuildSnapshot(snapshot, (rebuilt) => {
            // only try reviving from durable store if snapshot is unfulfilled
            if (rebuilt.state !== 'Unfulfilled') {
                onRebuild(rebuilt);
                return;
            }
            // Do an L2 revive and emit to subscriber using the callback.
            reviveSnapshotWrapper(rebuilt, () => {
                // reviveSnapshot will revive into L1, and since "records" is a reference
                // (and not a copy) to the L1 records we can use it for rebuild
                let rebuiltSnap;
                environment.rebuildSnapshot(snapshot, (rebuilt) => {
                    rebuiltSnap = rebuilt;
                });
                return rebuiltSnap;
            }).then((result) => {
                onRebuild(result.snapshot);
            });
        });
    };
    const withContext = function (adapter, options) {
        validateNotDisposed();
        const { contextId, contextVersion, onContextLoaded } = options;
        let context = undefined;
        let contextKey = `${contextId}`;
        // if a context version is supplied, key with the version encoded
        if (contextVersion !== undefined) {
            contextKey += `::${contextVersion}`;
        }
        contextKey += ADAPTER_CONTEXT_ID_SUFFIX;
        const contextAsPromise = reviveOrCreateContext(contextKey, durableStore, durableStoreErrorHandler, contextStores, pendingContextStoreKeys, onContextLoaded);
        return (config, requestContext) => {
            if (context === undefined) {
                return contextAsPromise.then((revivedContext) => {
                    context = revivedContext;
                    return adapter(config, context, requestContext); // TODO - remove as any cast after https://github.com/salesforce-experience-platform-emu/luvio/pull/230
                });
            }
            return adapter(config, context, requestContext);
        };
    };
    const storeRedirect = function (existingKey, canonicalKey) {
        validateNotDisposed();
        pendingStoreRedirects.set(existingKey, canonicalKey);
        // call redirect on staging store so "old" keys are removed from L2 on
        // the next publishChangesToDurableStore. NOTE: we don't need to call
        // redirect on the base environment store because staging store and base
        // L1 store share the same redirect and reverseRedirectKeys
        if (stagingStore === null) {
            stagingStore = buildIngestStagingStore(environment);
        }
        stagingStore.redirect(existingKey, canonicalKey);
    };
    const storeSetTTLOverride = function (namespace, representationName, ttl) {
        validateNotDisposed();
        return Promise.all([
            environment.storeSetTTLOverride(namespace, representationName, ttl),
            durableTTLStore.setDurableTTLOverride(namespace, representationName, ttl),
        ]).then();
    };
    const storeSetDefaultTTLOverride = function (ttl) {
        validateNotDisposed();
        return Promise.all([
            environment.storeSetDefaultTTLOverride(ttl),
            durableTTLStore.setDefaultDurableTTLOverrides(ttl),
        ]).then();
    };
    const getDurableTTLOverrides = function () {
        validateNotDisposed();
        return durableTTLStore.getDurableTTLOverrides();
    };
    const dispatchResourceRequest = async function (request, context, eventObservers) {
        validateNotDisposed();
        // non-GET adapters call dispatchResourceRequest before any other luvio
        // function so this is our chance to ensure we're initialized
        if (initializationPromise !== undefined) {
            await initializationPromise;
        }
        return environment.dispatchResourceRequest(request, context, eventObservers);
    };
    // NOTE: we can't use "async" keyword on this function because that would
    // force it to always be an async response. The signature is a union
    // of sync/async so no "awaiting" in this function, just promise-chaining
    const applyCachePolicy = function (luvio, adapterRequestContext, buildSnapshotContext, buildCachedSnapshot, buildNetworkSnapshot) {
        validateNotDisposed();
        const wrappedCacheLookup = (injectedBuildSnapshotContext, injectedStoreLookup) => {
            const snapshot = buildCachedSnapshot(injectedBuildSnapshotContext, injectedStoreLookup, luvio);
            // if the adapter attempted to do an L1 lookup and it was unfulfilled
            // then we can attempt an L2 lookup
            if (isUnfulfilledSnapshot(snapshot)) {
                const start = Date.now();
                emitDurableEnvironmentAdapterEvent({ type: 'l2-revive-start' }, adapterRequestContext.eventObservers);
                const revivedSnapshot = reviveSnapshotWrapper(snapshot, () => injectedStoreLookup(snapshot.select, snapshot.refresh)).then((result) => {
                    emitDurableEnvironmentAdapterEvent({
                        type: 'l2-revive-end',
                        snapshot: result.snapshot,
                        duration: Date.now() - start,
                        l2Trips: result.metrics.l2Trips,
                    }, adapterRequestContext.eventObservers);
                    return result.snapshot;
                });
                return revivedSnapshot;
            }
            // otherwise just return what buildCachedSnapshot gave us
            return snapshot;
        };
        const wrappedApplyCachePolicy = () => {
            return environment.applyCachePolicy(luvio, adapterRequestContext, buildSnapshotContext, wrappedCacheLookup, buildNetworkSnapshot);
        };
        // GET adapters call applyCachePolicy before any other luvio
        // function so this is our chance to ensure we're initialized
        return initializationPromise !== undefined
            ? initializationPromise.then(wrappedApplyCachePolicy)
            : wrappedApplyCachePolicy();
    };
    const getIngestStagingStoreRecords = function () {
        validateNotDisposed();
        if (stagingStore !== null) {
            return stagingStore.fallbackStringKeyInMemoryStore.records;
        }
        return {};
    };
    const getIngestStagingStoreMetadata = function () {
        validateNotDisposed();
        if (stagingStore !== null) {
            return stagingStore.fallbackStringKeyInMemoryStore.metadata;
        }
        return {};
    };
    const getIngestStagingStore = function () {
        validateNotDisposed();
        return stagingStore === null || stagingStore === void 0 ? void 0 : stagingStore.fallbackStringKeyInMemoryStore;
    };
    const handleSuccessResponse = async function (ingestAndBroadcastFunc, getResponseCacheKeysFunc) {
        validateNotDisposed();
        const cacheKeyMap = getResponseCacheKeysFunc();
        const cacheKeyMapKeys = cacheKeyMap.keysAsArray();
        const keysToRevive = new StoreKeySet();
        for (const cacheKeyMapKey of cacheKeyMapKeys) {
            const cacheKey = cacheKeyMap.get(cacheKeyMapKey);
            if (cacheKey.mergeable === true) {
                const canonical = environment.storeGetCanonicalKey(cacheKeyMapKey);
                keysToRevive.add(canonical);
            }
        }
        let snapshotFromMemoryIngest = undefined;
        // To-do: Once these are structured keys, will need to support them throughout durable logic W-12356727
        const keysToReviveAsArray = Array.from(keysToRevive.keysAsStrings());
        if (keysToReviveAsArray.length > 0) {
            // if we need to do an L2 read then L2 write then we need to synchronize
            // our read/merge/ingest/write Promise based on the keys so we don't
            // stomp over any data
            const readWritePromise = (async () => {
                const pendingPromises = [];
                for (const key of keysToReviveAsArray) {
                    const pendingPromise = mergeKeysPromiseMap.get(key);
                    if (pendingPromise !== undefined) {
                        // IMPORTANT: while on the synchronous code path we get a
                        // handle to pendingPromise and push it onto the array.
                        // This is important because later in this synchronous code
                        // path we will upsert readWritePromise into the
                        // mergeKeysPromiseMap (essentially overwriting pendingPromise
                        // in the map).
                        pendingPromises.push(pendingPromise);
                    }
                }
                await Promise.all(pendingPromises);
                const entries = await durableStore.getEntries(keysToReviveAsArray, DefaultDurableSegment);
                stagingStore = buildIngestStagingStore(environment);
                publishDurableStoreEntries(entries, (key, record) => {
                    if (typeof key === 'string') {
                        stagingStore.fallbackStringKeyInMemoryStore.records[key] = record;
                    }
                    else {
                        stagingStore.recordsMap.set(key, record);
                    }
                }, (key, metadata) => {
                    if (typeof key === 'string') {
                        stagingStore.fallbackStringKeyInMemoryStore.metadata[key] = metadata;
                    }
                    else {
                        stagingStore.metadataMap.set(key, metadata);
                    }
                });
                snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
            })();
            for (const key of keysToReviveAsArray) {
                // we are overwriting the previous promise at this key, but that
                // is ok because we got a handle to it earlier (see the IMPORTANT
                // comment about 35 lines up)
                mergeKeysPromiseMap.set(key, readWritePromise);
            }
            try {
                await readWritePromise;
            }
            finally {
                for (const key of keysToReviveAsArray) {
                    const pendingPromise = mergeKeysPromiseMap.get(key);
                    // cleanup the entry from the map if this is the last promise
                    // for that key
                    if (pendingPromise === readWritePromise) {
                        mergeKeysPromiseMap.delete(key);
                    }
                }
            }
        }
        else {
            // we aren't doing any merging so we don't have to synchronize, the
            // underlying DurableStore implementation takes care of R/W sync
            // so all we have to do is ingest then write to L2
            stagingStore = buildIngestStagingStore(environment);
            snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
        }
        if (snapshotFromMemoryIngest === undefined) {
            return undefined;
        }
        if (snapshotFromMemoryIngest.state !== 'Unfulfilled') {
            return snapshotFromMemoryIngest;
        }
        // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
        const { select, refresh } = snapshotFromMemoryIngest;
        const result = await reviveSnapshotWrapper(snapshotFromMemoryIngest, () => environment.storeLookup(select, environment.createSnapshot, refresh));
        return result.snapshot;
    };
    const handleErrorResponse = async function (ingestAndBroadcastFunc) {
        validateNotDisposed();
        stagingStore = buildIngestStagingStore(environment);
        return ingestAndBroadcastFunc();
    };
    const getNotifyChangeStoreEntries = function (keys) {
        validateNotDisposed();
        return durableStore
            .getEntries(keys.map(serializeStructuredKey), DefaultDurableSegment)
            .then((durableRecords) => {
                const entries = [];
                publishDurableStoreEntries(durableRecords, (key, record) => {
                    entries.push({
                        key,
                        record: record,
                    });
                }, () => { });
                return entries;
            });
    };
    // flag entries specified by keys in the durable store as expired.
    // send a notifyStoreUpdateAvailable message through durable store setEntries to
    // indirectly trigger all JS environments to refresh snapshots that overlap with those keys.
    const notifyStoreUpdateAvailable = async function (keys$1) {
        validateNotDisposed();
        const entryKeys = keys$1.map(serializeStructuredKey);
        const entries = await durableStore.getEntries(entryKeys, DefaultDurableSegment);
        if (entries === undefined || keys$2(entries).length === 0) {
            return environment.notifyStoreUpdateAvailable(keys$1);
        }
        const now = Date.now();
        let needWriteBack = false;
        for (let i = 0; i < entryKeys.length; i++) {
            const key = entryKeys[i];
            const entry = entries[key];
            if (entry !== undefined) {
                const storeEntry = entry;
                if (storeEntry.metadata !== undefined) {
                    storeEntry.metadata = {
                        ...storeEntry.metadata,
                        expirationTimestamp: now,
                    };
                }
                needWriteBack = true;
            }
        }
        if (needWriteBack) {
            await durableStore.setEntries(entries, DefaultDurableSegment);
        }
        // push a notifyStoreUpdateAvailable message with entryKeys as data into the messaging segment
        await durableStore.setEntries({ notifyStoreUpdateAvailable: { data: entryKeys } }, MessagingDurableSegment);
        return Promise.resolve(undefined);
    };
    const reviveSnapshotWrapper = function (unavailableSnapshot, buildL1Snapshot) {
        let revivingStore = undefined;
        if (useRevivingStore) {
            // NOTE: `store` is private, there doesn't seem to be a better,
            // cleaner way of accessing it from a derived environment.
            let baseStore = environment.store;
            // If we're rebuilding during an ingest, the existing staging store should be the base store.
            if (stagingStore) {
                baseStore = stagingStore;
            }
            revivingStore = buildRevivingStagingStore(baseStore);
            revivingStores.add(revivingStore);
        }
        return reviveSnapshot(environment, durableStore, unavailableSnapshot, durableStoreErrorHandler, () => {
            const tempStore = stagingStore;
            const result = buildL1Snapshot();
            stagingStore = tempStore;
            return result;
        }, revivingStore).finally(() => {
            // stop tracking the temporary reviving store once the revive settles
            if (revivingStore !== undefined) {
                revivingStores.delete(revivingStore);
            }
        });
    };
    const expirePossibleStaleRecords = async function (keys$1, config, refresh) {
        validateNotDisposed();
        const metadataKeys = keys$1.map(serializeStructuredKey);
        const now = Date.now();
        const entries = await durableStore.getMetadata(metadataKeys, DefaultDurableSegment);
        if (entries === undefined || keys$2(entries).length === 0) {
            return environment.expirePossibleStaleRecords(keys$1);
        }
        let metaDataChanged = false;
        const metadataEntries = metadataKeys.reduce((accu, key) => {
            const metadataEntry = entries[key];
            if (metadataEntry.metadata !== undefined) {
                const metadata = { ...metadataEntry.metadata, expirationTimestamp: now };
                accu[key] = { metadata };
                metaDataChanged = true;
            }
            return accu;
        }, {});
        if (metaDataChanged) {
            await durableStore.setMetadata(metadataEntries, DefaultDurableSegment);
        }
        if (config !== undefined && refresh !== undefined) {
            return environment.refreshPossibleStaleRecords(config, refresh);
        }
        return Promise.resolve();
    };
    // set the default cache policy of the base environment
    environment.setDefaultCachePolicy({
        type: 'stale-while-revalidate',
        staleDurationSeconds: Number.MAX_SAFE_INTEGER,
    });
    return create$2(environment, {
        publishStoreMetadata: { value: publishStoreMetadata },
        storeIngest: { value: storeIngest },
        storeIngestError: { value: storeIngestError },
        storeBroadcast: { value: storeBroadcast },
        storeLookup: { value: storeLookup },
        storeEvict: { value: storeEvict },
        wrapNormalizedGraphNode: { value: wrapNormalizedGraphNode },
        getNode: { value: getNode },
        rebuildSnapshot: { value: rebuildSnapshot },
        withContext: { value: withContext },
        storeSetTTLOverride: { value: storeSetTTLOverride },
        storeSetDefaultTTLOverride: { value: storeSetDefaultTTLOverride },
        storePublish: { value: storePublish },
        storeRedirect: { value: storeRedirect },
        dispose: { value: dispose },
        publishChangesToDurableStore: { value: publishChangesToDurableStore },
        getDurableTTLOverrides: { value: getDurableTTLOverrides },
        dispatchResourceRequest: { value: dispatchResourceRequest },
        applyCachePolicy: { value: applyCachePolicy },
        getIngestStagingStoreRecords: { value: getIngestStagingStoreRecords },
        getIngestStagingStoreMetadata: { value: getIngestStagingStoreMetadata },
        getIngestStagingStore: { value: getIngestStagingStore },
        handleSuccessResponse: { value: handleSuccessResponse },
        handleErrorResponse: { value: handleErrorResponse },
        getNotifyChangeStoreEntries: { value: getNotifyChangeStoreEntries },
        notifyStoreUpdateAvailable: { value: notifyStoreUpdateAvailable },
        expirePossibleStaleRecords: { value: expirePossibleStaleRecords },
    });
}
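
// Example (illustrative sketch; `baseEnvironment` and `exampleDurableStore`
// are hypothetical, and constructing Luvio directly from the result is an
// assumption about the surrounding runtime): makeDurable layers L2
// persistence over a base Environment and returns a derived environment with
// the same surface.
function exampleMakeDurableRuntime(baseEnvironment, exampleDurableStore) {
    const durableEnvironment = makeDurable(baseEnvironment, {
        durableStore: exampleDurableStore,
        instrumentation: (payloadFn) => console.log(payloadFn()),
        useRevivingStore: true,
    });
    return new Luvio(durableEnvironment);
}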

const { keys: keys$1, create: create$1, assign: assign$1, entries: entries$1, values: values$1 } = Object;
const { stringify, parse } = JSON;

function selectColumnsFromTableWhereKeyIn(columnNames, table, keyColumnName, whereIn) {
    const paramList = whereIn.map(() => '?').join(',');
    return `SELECT ${columnNames.join(',')} FROM ${table} WHERE ${keyColumnName} IN (${paramList})`;
}
function selectColumnsFromTableWhereKeyInNamespaced(columnNames, table, keyColumnName, whereIn, namespaceColumnName) {
    const paramList = whereIn.map(() => '?').join(',');
    return `SELECT ${columnNames.join(',')} FROM ${table} WHERE ${namespaceColumnName} = ? AND ${keyColumnName} IN (${paramList})`;
}
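
// Illustrative sketch (values are hypothetical): the two helpers above build
// parameterized SQL text only; callers bind values separately, in placeholder
// order — [namespace, ...keys] for the namespaced variant.
//
//   selectColumnsFromTableWhereKeyIn(['key', 'data'], 'lds_data', 'key', ['a', 'b'])
//   // => "SELECT key,data FROM lds_data WHERE key IN (?,?)"
//
//   selectColumnsFromTableWhereKeyInNamespaced(['key', 'data'], 'lds_internal', 'key', ['a'], 'namespace')
//   // => "SELECT key,data FROM lds_internal WHERE namespace = ? AND key IN (?)"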

// These const values must be in sync with the latest
// @salesforce/nimbus-plugin-lds/sql schema file
const TABLE_NAME$1 = 'lds_data';
const COLUMN_NAME_KEY$2 = 'key';
const COLUMN_NAME_DATA$2 = 'data';
const COLUMN_NAME_METADATA$1 = 'metadata';
class LdsDataTable {
    constructor(plugin) {
        this.tableName = TABLE_NAME$1;
        this.columnNames = [COLUMN_NAME_KEY$2, COLUMN_NAME_DATA$2, COLUMN_NAME_METADATA$1];
        this.conflictColumnNames = [COLUMN_NAME_KEY$2];
        this.getAllQuery = `SELECT ${this.columnNames.join(',')} FROM ${this.tableName}`;
        this.plugin = plugin;
    }
    getByKeys(keys) {
        const getQuery = selectColumnsFromTableWhereKeyIn(this.columnNames, this.tableName, COLUMN_NAME_KEY$2, keys);
        return new Promise((resolve, reject) => {
            this.plugin.query(getQuery, keys, (x) => {
                resolve(this.mapToDurableEntries(x));
            }, reject);
        });
    }
    getMetadataByKeys(keys) {
        const query = selectColumnsFromTableWhereKeyIn([COLUMN_NAME_KEY$2, COLUMN_NAME_METADATA$1], this.tableName, COLUMN_NAME_KEY$2, keys);
        return new Promise((resolve, reject) => {
            this.plugin.query(query, keys, (results) => {
                resolve(results.rows.reduce((entries, row) => {
                    const [key, stringifiedMetadata] = row;
                    if (stringifiedMetadata !== undefined) {
                        entries[key] = {
                            metadata: parse(stringifiedMetadata),
                        };
                    }
                    return entries;
                }, {}));
            }, reject);
        });
    }
    getAll() {
        return new Promise((resolve, reject) => {
            this.plugin.query(this.getAllQuery, [], (x) => {
                resolve(this.mapToDurableEntries(x));
            }, reject);
        });
    }
    entriesToUpsertOperations(entries, segment) {
        return {
            type: 'upsert',
            table: this.tableName,
            keyColumn: COLUMN_NAME_KEY$2,
            context: {
                segment,
            },
            conflictColumns: this.conflictColumnNames,
            columns: this.columnNames,
            rows: keys$1(entries).reduce((rows, key) => {
                const entry = entries[key];
                const { data, metadata } = entry;
                const row = [key, stringify(data), metadata ? stringify(metadata) : null];
                rows.push(row);
                return rows;
            }, []),
        };
    }
    metadataToUpdateOperations(entries, segment) {
        return {
            type: 'update',
            table: this.tableName,
            keyColumn: COLUMN_NAME_KEY$2,
            context: {
                segment,
                type: 'setMetadata',
            },
            columns: [COLUMN_NAME_METADATA$1],
            values: keys$1(entries).reduce((values, key) => {
                const { metadata } = entries[key];
                const row = [metadata ? stringify(metadata) : null];
                values[key] = row;
                return values;
            }, {}),
        };
    }
    mapToDurableEntries(sqliteResult) {
        return sqliteResult.rows.reduce((entries, row) => {
            const [key, stringifiedData, stringifiedMetadata] = row;
            const durableStoreEntry = {
                data: parse(stringifiedData),
            };
            if (stringifiedMetadata !== null) {
                durableStoreEntry.metadata = parse(stringifiedMetadata);
            }
            entries[key] = durableStoreEntry;
            return entries;
        }, {});
    }
}
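
// Illustrative sketch (keys and values are hypothetical): the shape of the
// batch operation LdsDataTable#entriesToUpsertOperations produces for a single
// entry in the DEFAULT segment.
//
//   {
//       type: 'upsert',
//       table: 'lds_data',
//       keyColumn: 'key',
//       context: { segment: 'DEFAULT' },
//       conflictColumns: ['key'],
//       columns: ['key', 'data', 'metadata'],
//       rows: [['UiApi::RecordRepresentation:001x0', '{"id":"001x0"}', null]],
//   }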

// These const values must be in sync with the latest
// @salesforce/nimbus-plugin-lds/sql schema file
const TABLE_NAME = 'lds_internal';
const COLUMN_NAME_KEY$1 = 'key';
const COLUMN_NAME_DATA$1 = 'data';
const COLUMN_NAME_METADATA = 'metadata';
const COLUMN_NAME_NAMESPACE = 'namespace';
class LdsInternalDataTable {
    constructor(plugin) {
        this.tableName = TABLE_NAME;
        this.columnNames = [
            COLUMN_NAME_KEY$1,
            COLUMN_NAME_DATA$1,
            COLUMN_NAME_METADATA,
            COLUMN_NAME_NAMESPACE,
        ];
        this.conflictColumnNames = [COLUMN_NAME_KEY$1, COLUMN_NAME_NAMESPACE];
        this.getAllQuery = `SELECT ${this.columnNames.join(',')} FROM ${this.tableName} WHERE ${COLUMN_NAME_NAMESPACE} = ?`;
        this.plugin = plugin;
    }
    getByKeys(keys, namespace) {
        if (namespace === undefined) {
            throw Error('LdsInternalDataTable requires namespace');
        }
        const getQuery = selectColumnsFromTableWhereKeyInNamespaced(this.columnNames, this.tableName, COLUMN_NAME_KEY$1, keys, COLUMN_NAME_NAMESPACE);
        return new Promise((resolve, reject) => {
            this.plugin.query(getQuery, [namespace].concat(keys), (x) => {
                resolve(this.mapToDurableEntries(x));
            }, reject);
        });
    }
    getMetadataByKeys(keys, namespace) {
        if (namespace === undefined) {
            throw Error('LdsInternalDataTable requires namespace');
        }
        const query = selectColumnsFromTableWhereKeyInNamespaced([COLUMN_NAME_KEY$1, COLUMN_NAME_METADATA], this.tableName, COLUMN_NAME_KEY$1, keys, COLUMN_NAME_NAMESPACE);
        return new Promise((resolve, reject) => {
            this.plugin.query(query, [namespace].concat(keys), (results) => {
                resolve(results.rows.reduce((entries, row) => {
                    const [key, stringifiedMetadata] = row;
                    if (stringifiedMetadata !== undefined) {
                        entries[key] = {
                            metadata: parse(stringifiedMetadata),
                        };
                    }
                    return entries;
                }, {}));
            }, reject);
        });
    }
    getAll(namespace) {
        return new Promise((resolve, reject) => {
            this.plugin.query(this.getAllQuery, [namespace], (x) => {
                resolve(this.mapToDurableEntries(x));
            }, reject);
        });
    }
    entriesToUpsertOperations(entries, segment) {
        return {
            type: 'upsert',
            table: this.tableName,
            keyColumn: COLUMN_NAME_KEY$1,
            context: {
                segment,
            },
            conflictColumns: this.conflictColumnNames,
            columns: this.columnNames,
            rows: keys$1(entries).reduce((rows, key) => {
                const entry = entries[key];
                const { data, metadata } = entry;
                const row = [key, stringify(data)];
                if (metadata) {
                    row.push(stringify(metadata));
                }
                else {
                    row.push(null);
                }
                row.push(segment);
                rows.push(row);
                return rows;
            }, []),
        };
    }
    metadataToUpdateOperations(entries, segment) {
        return {
            type: 'update',
            table: this.tableName,
            keyColumn: COLUMN_NAME_KEY$1,
            context: {
                segment,
                type: 'setMetadata',
            },
            columns: [COLUMN_NAME_METADATA],
            values: keys$1(entries).reduce((values, key) => {
                const { metadata } = entries[key];
                const row = [metadata ? stringify(metadata) : null];
                values[key] = row;
                return values;
            }, {}),
        };
    }
    metadataToUpdateSQLQueries(entries, segment) {
        return keys$1(entries).reduce((accu, key) => {
            const { metadata } = entries[key];
            if (metadata !== undefined) {
                accu.push({
                    sql: `UPDATE ${this.tableName} SET ${COLUMN_NAME_METADATA} = ? WHERE (${COLUMN_NAME_KEY$1} IS ? AND ${COLUMN_NAME_NAMESPACE} IS ?)`,
                    params: [stringify(metadata), key, segment],
                    change: {
                        ids: [key],
                        segment,
                        type: 'setMetadata',
                        isExternalChange: false,
                    },
                });
            }
            return accu;
        }, []);
    }
    mapToDurableEntries(sqliteResult) {
        return sqliteResult.rows.reduce((entries, row) => {
            const [key, stringifiedData, stringifiedMetadata] = row;
            const durableStoreEntry = {
                data: parse(stringifiedData),
            };
            if (stringifiedMetadata !== null) {
                durableStoreEntry.metadata = parse(stringifiedMetadata);
            }
            entries[key] = durableStoreEntry;
            return entries;
        }, {});
    }
}
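
// Illustrative sketch (values are hypothetical): unlike LdsDataTable, rows in
// lds_internal carry a namespace column, and metadataToUpdateSQLQueries emits
// one UPDATE per key, e.g. for key 'k1' in segment 'MESSAGING':
//
//   {
//       sql: "UPDATE lds_internal SET metadata = ? WHERE (key IS ? AND namespace IS ?)",
//       params: ['{"expirationTimestamp":123}', 'k1', 'MESSAGING'],
//       change: { ids: ['k1'], segment: 'MESSAGING', type: 'setMetadata', isExternalChange: false },
//   }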

const tasker = idleDetector.declareNotifierTaskMulti('NimbusSqliteStore');
const instrumentation = getInstrumentation('lds-mobile');
const GRAPHQL_QUERY_ROOT_KEY = 'UiApi::uiapi::Query[uiapi]__uiapi__query';
const GRAPHQL_QUERY_ROOT_METRIC_NAME = 'gql-query-root-object-size';
class NimbusSqliteStore {
    constructor(plugin, additionalTableMap = {}) {
        this.plugin = plugin;
        this.internalDataTable = new LdsInternalDataTable(plugin);
        this.dataTableMap = {
            ...additionalTableMap,
            [DefaultDurableSegment]: new LdsDataTable(plugin),
        };
    }
    isEvalSupported() {
        return true;
    }
    query(sql, params) {
        tasker.add();
        return new Promise((resolve, reject) => {
            this.plugin.query(sql, params, (result) => {
                resolve(result);
            }, (error) => {
                reject(error);
            });
        }).finally(() => tasker.done());
    }
    batchQuery(queries) {
        const promises = queries.map((q) => this.query(q.sql, q.params));
        tasker.add();
        return Promise.all(promises).finally(() => tasker.done());
    }
    async getEntries(entryIds, segment) {
        tasker.add();
        return this.getTable(segment)
            .getByKeys(entryIds, segment)
            .finally(() => tasker.done());
    }
    async getMetadata(entryIds, segment) {
        tasker.add();
        return this.getTable(segment)
            .getMetadataByKeys(entryIds, segment)
            .finally(() => tasker.done());
    }
    getAllEntries(segment) {
        tasker.add();
        return this.getTable(segment)
            .getAll(segment)
            .finally(() => tasker.done());
    }
    setEntries(entries, segment) {
        if (keys$1(entries).length === 0) {
            return Promise.resolve();
        }
        const table = this.getTable(segment);
        const upsertOperation = table.entriesToUpsertOperations(entries, segment);
        if (entries[GRAPHQL_QUERY_ROOT_KEY]) {
            this.trackGraphQLQueryRootSize(upsertOperation);
        }
        return this.batchOperationAsPromise([upsertOperation]);
    }
    setMetadata(entries, segment) {
        if (keys$1(entries).length === 0) {
            return Promise.resolve();
        }
        const table = this.getTable(segment);
        let operation = table.metadataToUpdateOperations(entries, segment);
        return this.batchOperationAsPromise([operation]);
    }
    batchOperations(operations) {
        const sqliteOperations = operations.reduce((acc, cur) => {
            if (cur.type === 'setEntries') {
                if (keys$1(cur.entries).length > 0) {
                    const table = this.getTable(cur.segment);
                    const upsertOperation = table.entriesToUpsertOperations(cur.entries, cur.segment);
                    if (cur.entries[GRAPHQL_QUERY_ROOT_KEY]) {
                        this.trackGraphQLQueryRootSize(upsertOperation);
                    }
                    acc.push(upsertOperation);
                }
            }
            else if (cur.type === 'setMetadata') {
                if (keys$1(cur.entries).length > 0) {
                    const table = this.getTable(cur.segment);
                    acc.push(table.metadataToUpdateOperations(cur.entries, cur.segment));
                }
            }
            else {
                if (cur.ids.length > 0) {
                    acc.push(this.idsToDeleteOperation(cur.ids, cur.segment));
                }
            }
            return acc;
        }, []);
        return sqliteOperations.length === 0
            ? Promise.resolve()
            : this.batchOperationAsPromise(sqliteOperations);
    }
    evictEntries(entryIds, segment) {
        return entryIds.length === 0
            ? Promise.resolve()
            : this.batchOperationAsPromise([this.idsToDeleteOperation(entryIds, segment)]);
    }
    registerOnChangedListener(listener) {
        let unsubscribeId = undefined;
        this.plugin
            .registerOnChangedListener(async (changes) => {
                const durableChanges = changes.map((c) => {
                    let type = c.type === 'upsert' ? 'setEntries' : 'evictEntries';
                    // if the context contains a type then use it as the top-level type;
                    // this allows future updates to specify the kind of segment change
                    // taking place, for example a metadata-only update versus a data update
                    if ((c.type === 'update' || c.type === 'upsert') &&
                        c.context.type !== undefined) {
                        type = c.context.type;
                    }
                    return {
                        type,
                        ids: c.keys,
                        isExternalChange: false,
                        segment: c.context.segment,
                    };
                });
                await listener(durableChanges);
            })
            .then((unsub) => {
                unsubscribeId = unsub;
            });
        return () => {
            if (unsubscribeId) {
                return this.plugin.unsubscribeOnChangedListener(unsubscribeId);
            }
            return Promise.resolve();
        };
    }
    getTable(segment) {
        return this.dataTableMap[segment] ?? this.internalDataTable;
    }
    idsToDeleteOperation(entryIds, segment) {
        const table = this.getTable(segment);
        return {
            type: 'delete',
            table: table.tableName,
            keyColumn: 'key',
            context: {
                segment,
            },
            ids: entryIds,
        };
    }
    batchOperationAsPromise(sqliteOperations) {
        tasker.add();
        return new Promise((resolve, reject) => {
            this.plugin.batchOperations(sqliteOperations, (error) => {
                if (error) {
                    reject(error);
                }
                else {
                    resolve();
                }
            });
        }).finally(() => tasker.done());
    }
    trackGraphQLQueryRootSize(upsertOperation) {
        try {
            if (upsertOperation.type !== 'upsert') {
                return;
            }
            const row = upsertOperation.rows.find((r) => r[0] === GRAPHQL_QUERY_ROOT_KEY);
            if (!(row && row[1] && typeof row[1] === 'string')) {
                return;
            }
            instrumentation.trackValue(GRAPHQL_QUERY_ROOT_METRIC_NAME, row[1].length);
        }
        catch { }
    }
}
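
// Illustrative usage sketch (plugin wiring is hypothetical): NimbusSqliteStore
// adapts a Nimbus SQLite plugin to the durable-store contract, routing the
// DEFAULT segment to lds_data and all other segments to lds_internal.
//
//   const durable = new NimbusSqliteStore(__nimbus.plugins.LdsSqliteStore);
//   await durable.setEntries({ myKey: { data: { value: 1 } } }, DefaultDurableSegment);
//   const loaded = await durable.getEntries(['myKey'], DefaultDurableSegment);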

let reportObservers = [];
function instrumentAdapter(adapter, metadata) {
    let instrumentedMobileAdapter = adapter;
    return instrumentAdapter$1(instrumentedMobileAdapter, metadata, {
        trackL1Hits: true,
        trackL2Hits: true,
        trackCacheMisses: true,
        reportObserver: (report) => {
            for (const observer of reportObservers) {
                observer(report);
            }
        },
    });
}
function setupMobileInstrumentation(luvio, store) {
    setupInstrumentation(luvio, store);
    instrument({ instrumentAdapter });
}

/**
 * Copyright (c) 2022, Salesforce, Inc.,
 * All rights reserved.
 * For full license text, see the LICENSE.txt file
 */


const { keys, values, create, assign, freeze, entries } = Object;

function buildRecordFieldStoreKey(recordKey, fieldName) {
    return `${recordKey}${RECORD_FIELDS_KEY_JUNCTION}${fieldName}`;
}
function isStoreKeyRecordId(key) {
    return key.indexOf(RECORD_ID_PREFIX) > -1 && key.indexOf(RECORD_FIELDS_KEY_JUNCTION) === -1;
}
function createLink(key) {
    return { __ref: key };
}
function isStoreRecordError(storeRecord) {
    return storeRecord.__type === 'error';
}
function isEntryDurableRecordRepresentation(entry, key) {
    // Either a DurableRecordRepresentation or StoreRecordError can live at a record key
    return ((isStoreKeyRecordId(key) || isStoreKeyRecordViewEntity(key)) &&
        entry.data.__type === undefined);
}
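
// Illustrative sketch (assuming the junction token is '__fields__'; the actual
// value comes from force/ldsAdaptersUiapi): field entries live at keys derived
// from their record key, which is what isStoreKeyRecordId filters out.
//
//   buildRecordFieldStoreKey('UiApi::RecordRepresentation:001x0', 'Name')
//   // => 'UiApi::RecordRepresentation:001x0__fields__Name'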
/**
 * Records are stored in the durable store with scalar fields denormalized. This function takes that denormalized
 * durable store record representation and normalizes it back into the format that the Luvio store expects
 * @param key Record store key
 * @param entry Durable entry containing a denormalized record representation
 * @returns a set of entries containing the normalized record and its normalized fields
 */
function normalizeRecordFields(key, entry) {
    const { data: record } = entry;
    const { fields, links } = record;
    const missingFieldLinks = links === undefined ? [] : keys(links);
    const fieldNames = keys(fields);
    const normalizedFields = {};
    const returnEntries = {};
    // restore fields
    for (let i = 0, len = fieldNames.length; i < len; i++) {
        const fieldName = fieldNames[i];
        const field = fields[fieldName];
        if (field.__state !== undefined && field.__state.isMissing === true) {
            normalizedFields[fieldName] = { isMissing: true, __ref: undefined };
            continue;
        }
        const fieldKey = buildRecordFieldStoreKey(key, fieldName);
        returnEntries[fieldKey] = { data: field };
        normalizedFields[fieldName] = createLink(fieldKey);
    }
    // restore missing fields
    for (let i = 0, len = missingFieldLinks.length; i < len; i++) {
        const fieldName = missingFieldLinks[i];
        const link = links[fieldName];
        if (link.isMissing === true) {
            normalizedFields[fieldName] = { ...link, __ref: undefined };
        }
    }
    returnEntries[key] = {
        data: assign(record, { fields: normalizedFields }),
        metadata: entry.metadata,
    };
    return returnEntries;
}
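
// Illustrative sketch (shapes are hypothetical): given a durable entry at
// record key K whose fields are stored inline,
//
//   { data: { id: '001x0', fields: { Name: { value: 'Acme', displayValue: null } } } }
//
// normalizeRecordFields(K, entry) re-normalizes it into one entry per field
// plus the record itself, with each field replaced by a link:
//
//   {
//       [K + '__fields__Name']: { data: { value: 'Acme', displayValue: null } },
//       [K]: { data: { id: '001x0', fields: { Name: { __ref: K + '__fields__Name' } } }, metadata: entry.metadata },
//   }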
/**
 * Transforms a record for storage in the durable store. The transformation involves denormalizing
 * scalar fields and persisting link metadata so the record can be transformed back into a normalized representation.
 *
 * If the record contains pending fields this returns undefined, as pending records do not get persisted
 * to the durable store. There should be an outbound refresh operation that will bring in the updated record.
 *
 * @param normalizedRecord Record containing normalized field links
 * @param records a map of store records containing referenced record fields
 */
function buildDurableRecordRepresentation(normalizedRecord, records, pendingEntries, store) {
    const fields = normalizedRecord.fields;
    const filteredFields = {};
    const fieldNames = keys(fields);
    for (let i = 0, len = fieldNames.length; i < len; i++) {
        const fieldName = fieldNames[i];
        const field = fields[fieldName];
        // pending fields get filtered out of the durable store
        const { pending } = field;
        if (pending === true) {
            // do not write records with pending fields to the durable store;
            // there should be an outbound refresh operation that will bring in the updated record
            return undefined;
        }
        const { __ref } = field;
        if (__ref !== undefined) {
            let ref = records[__ref];
            if (pendingEntries !== undefined) {
                // If the ref was part of the pending write that takes precedence
                const pendingEntry = pendingEntries[__ref];
                if (pendingEntry !== undefined) {
                    ref = pendingEntry.data;
                }
            }
            // if the field reference exists then add it to our filteredFields
            if (ref !== undefined) {
                filteredFields[fieldName] = ref;
            }
            else {
                // if we have a store to read, try to find the field there too.
                // The durable ingest staging store may pass through to L1, and
                // not all fields are necessarily published every time, so it is
                // important to check L1 and not just the fields being published,
                // otherwise we risk truncating the fields on the record.
                if (store) {
                    ref = store.readEntry(__ref);
                    if (ref !== undefined) {
                        filteredFields[fieldName] = ref;
                    }
                }
            }
        }
        // we want to preserve fields that are missing nodes
        if (field.isMissing === true) {
            filteredFields[fieldName] = {
                value: undefined,
                displayValue: undefined,
                __state: { isMissing: true },
            };
        }
    }
    return {
        ...normalizedRecord,
        fields: filteredFields,
    };
}
function getDenormalizedKey(originalKey, recordId, luvio) {
    // this will likely need to be handled when moving to structured keys.
    // Note: record view entities don't have an associated key builder; they get ingested as records under a different key format.
    // See the override for how they are handled: packages/lds-adapters-uiapi/src/raml-artifacts/types/RecordRepresentation/keyBuilderFromType.ts
    if (originalKey.startsWith(RECORD_VIEW_ENTITY_ID_PREFIX)) {
        return RECORD_VIEW_ENTITY_ID_PREFIX + recordId;
    }
    return keyBuilderRecord(luvio, { recordId });
}
function makeRecordDenormalizingDurableStore(luvio, durableStore, getStoreRecords, getStoreMetadata, getStore, sqlStore) {
    const getEntries = function (entries, segment) {
        // this HOF only inspects records in the default segment
        if (segment !== DefaultDurableSegment) {
            return durableStore.getEntries(entries, segment);
        }
        const { length: entriesLength } = entries;
        if (entriesLength === 0) {
            return Promise.resolve({});
        }
        // filter out record field keys
        const filteredEntryIds = [];
        // map of records to avoid requesting duplicate record keys when requesting both records and fields
        const recordEntries = {};
        const recordViewEntries = {};
        for (let i = 0, len = entriesLength; i < len; i++) {
            const id = entries[i];
            const recordId = extractRecordIdFromStoreKey(id);
            if (recordId !== undefined) {
                if (id.startsWith(RECORD_VIEW_ENTITY_ID_PREFIX)) {
                    if (recordViewEntries[recordId] === undefined) {
                        const key = getDenormalizedKey(id, recordId, luvio);
                        recordViewEntries[recordId] = true;
                        filteredEntryIds.push(key);
                    }
                }
                else {
                    if (recordEntries[recordId] === undefined) {
                        const key = getDenormalizedKey(id, recordId, luvio);
                        recordEntries[recordId] = true;
                        filteredEntryIds.push(key);
                    }
                }
            }
            else {
                filteredEntryIds.push(id);
            }
        }
        // call base getEntries
        return durableStore.getEntries(filteredEntryIds, segment).then((durableEntries) => {
            if (durableEntries === undefined) {
                return undefined;
            }
            const returnEntries = create(null);
            const keys$1 = keys(durableEntries);
            for (let i = 0, len = keys$1.length; i < len; i++) {
                const key = keys$1[i];
                const value = durableEntries[key];
                if (value === undefined) {
                    continue;
                }
                if (isEntryDurableRecordRepresentation(value, key)) {
                    assign(returnEntries, normalizeRecordFields(key, value));
                }
                else {
                    returnEntries[key] = value;
                }
            }
            return returnEntries;
        });
    };
    const denormalizeEntries = function (entries) {
        let hasEntries = false;
        let hasMetadata = false;
        const putEntries = create(null);
        const putMetadata = create(null);
        const keys$1 = keys(entries);
        const putRecords = {};
        const putRecordViews = {};
        const storeRecords = getStoreRecords !== undefined ? getStoreRecords() : {};
        const storeMetadata = getStoreMetadata !== undefined ? getStoreMetadata() : {};
        const store = getStore();
        for (let i = 0, len = keys$1.length; i < len; i++) {
            const key = keys$1[i];
            let value = entries[key];
            const recordId = extractRecordIdFromStoreKey(key);
            // do not put normalized field values
            if (recordId !== undefined) {
                const isRecordView = key.startsWith(RECORD_VIEW_ENTITY_ID_PREFIX);
                if (isRecordView) {
                    if (putRecordViews[recordId] === true) {
                        continue;
                    }
                }
                else {
                    if (putRecords[recordId] === true) {
                        continue;
                    }
                }
                const recordKey = getDenormalizedKey(key, recordId, luvio);
                const recordEntries = entries;
                const entry = recordEntries[recordKey];
                let record = entry && entry.data;
                if (record === undefined) {
                    record = storeRecords[recordKey];
                    if (record === undefined) {
                        // fields are being published without a record for them existing;
                        // fields cannot exist standalone in the durable store
                        continue;
                    }
                }
                if (isRecordView) {
                    putRecordViews[recordId] = true;
                }
                else {
                    putRecords[recordId] = true;
                }
                if (isStoreRecordError(record)) {
                    hasEntries = true;
                    putEntries[recordKey] = value;
                    continue;
                }
                let metadata = entry && entry.metadata;
                if (metadata === undefined) {
                    metadata = {
                        ...storeMetadata[recordKey],
                        metadataVersion: DURABLE_METADATA_VERSION,
                    };
                }
                const denormalizedRecord = buildDurableRecordRepresentation(record, storeRecords, recordEntries, store);
                if (denormalizedRecord !== undefined) {
                    hasEntries = true;
                    putEntries[recordKey] = {
                        data: denormalizedRecord,
                        metadata,
                    };
                    // if undefined then it is pending;
                    // we should still update metadata on pending records
                }
                else {
                    hasMetadata = true;
                    metadata.expirationTimestamp = metadata.ingestionTimestamp;
                    putMetadata[recordKey] = {
                        metadata,
                    };
                }
            }
            else {
                hasEntries = true;
                putEntries[key] = value;
            }
        }
        return { putEntries, putMetadata, hasEntries, hasMetadata };
    };
    const setEntries = function (entries, segment) {
        if (segment !== DefaultDurableSegment) {
            return durableStore.setEntries(entries, segment);
        }
        const { putEntries, putMetadata, hasEntries, hasMetadata } = denormalizeEntries(entries);
        const promises = [
            hasEntries ? durableStore.setEntries(putEntries, segment) : undefined,
        ];
        if (sqlStore !== undefined) {
            promises.push(hasMetadata ? durableStore.setMetadata(putMetadata, segment) : undefined);
        }
        return Promise.all(promises).then(() => { });
    };
    const batchOperations = function (operations) {
        const operationsWithDenormedRecords = [];
        for (let i = 0, len = operations.length; i < len; i++) {
            const operation = operations[i];
            if (operation.type === 'setMetadata') {
                // if setMetadata also contains entry data then it needs to be denormalized.
                const keys$1 = keys(operation.entries);
                if (keys$1.length > 0) {
                    const firstKey = keys$1[0];
                    // cast to any to check if data exists
                    const firstEntry = operation.entries[firstKey];
                    // it is not possible for setMetadata to contain entries with both data and no data in the same operation;
                    // this is determined by the plugin supporting update batch calls before it gets to this HOF,
                    // so for performance we only need to check one entry to confirm it
                    if (firstEntry.data !== undefined) {
                        const { putEntries, putMetadata, hasMetadata } = denormalizeEntries(operation.entries);
                        operationsWithDenormedRecords.push({
                            ...operation,
                            entries: putEntries,
                        });
                        if (hasMetadata && sqlStore !== undefined) {
                            operationsWithDenormedRecords.push({
                                ...operation,
                                entries: putMetadata,
                                type: 'setMetadata',
                            });
                        }
                    }
                    else {
                        operationsWithDenormedRecords.push(operation);
                    }
                }
                continue;
            }
            if (operation.segment !== DefaultDurableSegment || operation.type === 'evictEntries') {
                operationsWithDenormedRecords.push(operation);
                continue;
            }
            const { putEntries, putMetadata, hasMetadata } = denormalizeEntries(operation.entries);
            operationsWithDenormedRecords.push({
                ...operation,
                entries: putEntries,
            });
            if (hasMetadata && sqlStore !== undefined) {
                operationsWithDenormedRecords.push({
                    ...operation,
                    entries: putMetadata,
                    type: 'setMetadata',
                });
            }
        }
        return durableStore.batchOperations(operationsWithDenormedRecords);
    };
    return create(durableStore, {
        getEntries: { value: getEntries, writable: true },
        setEntries: { value: setEntries, writable: true },
        batchOperations: { value: batchOperations, writable: true },
    });
}
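
// Illustrative usage sketch (arguments are hypothetical): wrapping a base
// durable store so DEFAULT-segment records round-trip through the denormalized
// representation while other segments pass through untouched.
//
//   const denorming = makeRecordDenormalizingDurableStore(
//       luvio,
//       baseDurableStore,
//       () => stagingRecords,   // L1/staging records keyed by store key
//       () => stagingMetadata,  // matching metadata map
//       () => stagingStore,     // store exposing readEntry(key)
//   );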

let luvio;
let getIngestRecords;
let getIngestMetadata;
let getIngestStore;
// LdsSqliteStore plugin helper
function getNimbusDurableStore() {
    const resolvedPlugin = __nimbus.plugins.LdsSqliteStore;
    return new NimbusSqliteStore(resolvedPlugin);
}
/**
 * Use the record denormalizing store to persist field values and references
 */
function createRecordDenormingStore(luvio, durableStore) {
    const recordDenormingStore = makeRecordDenormalizingDurableStore(luvio, durableStore, () => getIngestRecords(), () => getIngestMetadata(), () => getIngestStore());
    return recordDenormingStore;
}
function getRuntime() {
    // Create stores
    const store = new InMemoryStore();
    const durableStore = getNimbusDurableStore();
    const recordDenormingStore = createRecordDenormingStore(luvio, durableStore);
    // Create environment with store + aura network adapter
    const baseEnv = new Environment(store, networkAdapter);
    const durableEnv = makeDurable(baseEnv, {
        durableStore: recordDenormingStore,
    });
    // Set ingest records/metadata properties from durable environment
    getIngestRecords = durableEnv.getIngestStagingStoreRecords;
    getIngestMetadata = durableEnv.getIngestStagingStoreMetadata;
    getIngestStore = durableEnv.getIngestStagingStore;
    // Return new luvio instance
    luvio = new Luvio(durableEnv, {
        instrument: instrumentLuvio,
    });
    // Currently instruments store runtime perf
    setupMobileInstrumentation(luvio, store);
    // Initialize OneStore
    initializeOneStore(durableStore['plugin']);
    return luvio;
}
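
// Editor's note (illustrative): the layering assembled by getRuntime, bottom
// to top:
//
//   NimbusSqliteStore (L2, SQLite via Nimbus plugin)
//     -> record-denormalizing durable store wrapper
//       -> makeDurable(new Environment(new InMemoryStore() /* L1 */, networkAdapter))
//         -> new Luvio(durableEnv, { instrument: instrumentLuvio })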

// so eslint doesn't complain about nimbus
/* global __nimbus */
function ldsRuntimeBridge() {
    if (typeof __nimbus !== 'undefined' &&
        __nimbus.plugins !== undefined &&
        __nimbus.plugins.LdsSqliteStore !== undefined) {
        if (__nimbus.plugins.GaterPlugin !== undefined &&
            __nimbus.plugins.GaterPlugin.isUnifiedLdsCacheEnabled !== undefined) {
            // isUnifiedLdsCacheEnabled is only present when the feature flag is set,
            // so its existence can be checked synchronously here
            const { luvio } = getRuntime$1();
            setDefaultLuvio({ luvio });
            return { name: 'ldsRuntimeMobile' };
        }
        else {
            // Since the LdsSqliteStore plugin is present, use Luvio with the persistent store
            const luvio = getRuntime();
            setDefaultLuvio({ luvio });
            return { name: 'ldsRuntimeBridge' };
        }
    }
    else {
        // Plugin not available; fall back to lds-runtime-aura/ldsEngineCreator
        return ldsEngineCreator();
    }
}

export { ldsRuntimeBridge as default };
// version: 0.1.0-dev1-54c03dd38c
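
// Illustrative usage (comment only; module specifier per package.json):
//
//   import ldsRuntimeBridge from '@salesforce/lds-runtime-bridge';
//   const runtime = ldsRuntimeBridge();
//   // runtime.name is 'ldsRuntimeMobile' or 'ldsRuntimeBridge' when a Nimbus
//   // SQLite plugin is available; otherwise the aura engine creator's result
//   // is returned.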