@salesforce/lds-runtime-mobile 1.233.0 → 1.236.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/main.js +472 -156
- package/package.json +2 -2
- package/sfdc/main.js +472 -156
package/dist/main.js
CHANGED
@@ -13,7 +13,7 @@
 */
 import { withRegistration, register } from '@salesforce/lds-default-luvio';
 import { setupInstrumentation, instrumentAdapter as instrumentAdapter$1, instrumentLuvio, setLdsAdaptersUiapiInstrumentation, setLdsNetworkAdapterInstrumentation } from '@salesforce/lds-instrumentation';
-import { HttpStatusCode, StoreKeySet, serializeStructuredKey, Reader, deepFreeze, emitAdapterEvent, createCustomAdapterEventEmitter, StoreKeyMap, isFileReference, Environment, Luvio, InMemoryStore } from '@luvio/engine';
+import { HttpStatusCode, StoreKeySet, serializeStructuredKey, StringKeyInMemoryStore, Reader, deepFreeze, emitAdapterEvent, createCustomAdapterEventEmitter, StoreKeyMap, isFileReference, Environment, Luvio, InMemoryStore } from '@luvio/engine';
 import excludeStaleRecordsGate from '@salesforce/gate/lds.graphqlEvalExcludeStaleRecords';
 import { parseAndVisit, Kind, visit, execute, buildSchema, isObjectType, defaultFieldResolver } from '@luvio/graphql-parser';
 import { getRecordId18, keyBuilderQuickActionExecutionRepresentation, ingestQuickActionExecutionRepresentation, keyBuilderContentDocumentCompositeRepresentation, getResponseCacheKeysContentDocumentCompositeRepresentation, keyBuilderFromTypeContentDocumentCompositeRepresentation, ingestContentDocumentCompositeRepresentation, keyBuilderRecord, getTypeCacheKeysRecord, keyBuilderFromTypeRecordRepresentation, ingestRecord, RecordRepresentationRepresentationType, ObjectInfoRepresentationType, getRecordAdapterFactory, getObjectInfoAdapterFactory, getObjectInfosAdapterFactory, UiApiNamespace, RecordRepresentationType, RecordRepresentationTTL, RecordRepresentationVersion, getRecordsAdapterFactory } from '@salesforce/lds-adapters-uiapi';
@@ -33,6 +33,7 @@ import eagerEvalValidAt from '@salesforce/gate/lds.eagerEvalValidAt';
 import eagerEvalStaleWhileRevalidate from '@salesforce/gate/lds.eagerEvalStaleWhileRevalidate';
 import eagerEvalDefaultCachePolicy from '@salesforce/gate/lds.eagerEvalDefaultCachePolicy';
 import ldsPrimingGraphqlBatch from '@salesforce/gate/lds.primingGraphqlBatch';
+import ldsMetadataRefreshEnabled from '@salesforce/gate/lds.metadataRefreshEnabled';
 
 /**
 * Copyright (c) 2022, Salesforce, Inc.,
@@ -40,6 +41,7 @@ import ldsPrimingGraphqlBatch from '@salesforce/gate/lds.primingGraphqlBatch';
 * For full license text, see the LICENSE.txt file
 */
 
+
 const { parse: parse$6, stringify: stringify$6 } = JSON;
 const { join: join$2, push: push$2, unshift } = Array.prototype;
 const { isArray: isArray$5 } = Array;
@@ -615,7 +617,7 @@ function publishDurableStoreEntries(durableRecords, put, publishMetadata) {
 * will refresh the snapshot from network, and then run the results from network
 * through L2 ingestion, returning the subsequent revived snapshot.
 */
-function reviveSnapshot(baseEnvironment, durableStore, unavailableSnapshot, durableStoreErrorHandler, buildL1Snapshot, reviveMetrics = { l2Trips: [] }) {
+function reviveSnapshot(baseEnvironment, durableStore, unavailableSnapshot, durableStoreErrorHandler, buildL1Snapshot, revivingStore, reviveMetrics = { l2Trips: [] }) {
 const { recordId, select, missingLinks, seenRecords, state } = unavailableSnapshot;
 // L2 can only revive Unfulfilled snapshots that have a selector since they have the
 // info needed to revive (like missingLinks) and rebuild. Otherwise return L1 snapshot.
@@ -625,10 +627,21 @@ function reviveSnapshot(baseEnvironment, durableStore, unavailableSnapshot, dura
 metrics: reviveMetrics,
 });
 }
-
-
-
-
+const keysToReviveSet = new StoreKeySet();
+if (revivingStore) {
+// Any stale keys since the last l2 read should be cleared and fetched again
+for (const staleKey of revivingStore.staleEntries) {
+keysToReviveSet.add(staleKey);
+}
+revivingStore.clearStale();
+}
+else {
+// when not using a reviving store:
+// in case L1 store changes/deallocs a record while we are doing the async read
+// we attempt to read all keys from L2 - so combine recordId with any seenRecords
+keysToReviveSet.add(recordId);
+keysToReviveSet.merge(seenRecords);
+}
 keysToReviveSet.merge(missingLinks);
 const keysToRevive = keysToReviveSet.keysAsArray();
 const canonicalKeys = keysToRevive.map((x) => serializeStructuredKey(baseEnvironment.storeGetCanonicalKey(x)));
@@ -678,7 +691,7 @@ function reviveSnapshot(baseEnvironment, durableStore, unavailableSnapshot, dura
 for (let i = 0, len = newKeys.length; i < len; i++) {
 const newSnapshotSeenKey = newKeys[i];
 if (!alreadyRequestedOrRevivedSet.has(newSnapshotSeenKey)) {
-return reviveSnapshot(baseEnvironment, durableStore, snapshot, durableStoreErrorHandler, buildL1Snapshot, reviveMetrics);
+return reviveSnapshot(baseEnvironment, durableStore, snapshot, durableStoreErrorHandler, buildL1Snapshot, revivingStore, reviveMetrics);
 }
 }
 }
@@ -767,8 +780,9 @@ class DurableTTLStore {
 }
 }
 
-function flushInMemoryStoreValuesToDurableStore(store, durableStore, durableStoreErrorHandler, redirects, additionalDurableStoreOperations = []) {
+function flushInMemoryStoreValuesToDurableStore(store, durableStore, durableStoreErrorHandler, redirects, additionalDurableStoreOperations = [], enableDurableMetadataRefresh = false) {
 const durableRecords = create$6(null);
+const refreshedDurableRecords = create$6(null);
 const evictedRecords = create$6(null);
 const { records, metadata: storeMetadata, visitedIds, refreshedIds, } = store.fallbackStringKeyInMemoryStore;
 // TODO: W-8909393 Once metadata is stored in its own segment we need to
@@ -778,32 +792,36 @@ function flushInMemoryStoreValuesToDurableStore(store, durableStore, durableStor
 for (let i = 0, len = keys$1.length; i < len; i += 1) {
 const key = keys$1[i];
 const record = records[key];
+const wasVisited = visitedIds[key] !== undefined;
 // this record has been evicted, evict from DS
-if (record === undefined) {
+if (wasVisited && record === undefined) {
 evictedRecords[key] = true;
 continue;
 }
 const metadata = storeMetadata[key];
-
-
-
-
-durableRecords[key].metadata = {
-...metadata,
-metadataVersion: DURABLE_METADATA_VERSION,
-};
-}
+const entries = wasVisited === true || enableDurableMetadataRefresh === false
+? durableRecords
+: refreshedDurableRecords;
+setRecordTo(entries, key, record, metadata);
 }
 const durableStoreOperations = additionalDurableStoreOperations;
-// publishes
 const recordKeys = keys$7(durableRecords);
 if (recordKeys.length > 0) {
+// publishes with data
 durableStoreOperations.push({
 type: 'setEntries',
 entries: durableRecords,
 segment: DefaultDurableSegment,
 });
 }
+if (keys$7(refreshedDurableRecords).length > 0) {
+// publishes with only metadata updates
+durableStoreOperations.push({
+type: 'setMetadata',
+entries: refreshedDurableRecords,
+segment: DefaultDurableSegment,
+});
+}
 // redirects
 redirects.forEach((value, key) => {
 durableStoreOperations.push({
@@ -830,6 +848,17 @@ function flushInMemoryStoreValuesToDurableStore(store, durableStore, durableStor
 }
 return Promise.resolve();
 }
+function setRecordTo(entries, key, record, metadata) {
+entries[key] = {
+data: record,
+};
+if (metadata !== undefined) {
+entries[key].metadata = {
+...metadata,
+metadataVersion: DURABLE_METADATA_VERSION,
+};
+}
+}
 
 const DurableEnvironmentEventDiscriminator = 'durable';
 function emitDurableEnvironmentAdapterEvent(eventData, observers) {
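Note on the two hunks above: with enableDurableMetadataRefresh on, records that were visited during the ingest are flushed as full setEntries writes, while records whose metadata was merely refreshed (and whose data did not change) are flushed as a separate setMetadata operation. Below is a minimal sketch of the resulting operation shapes; the store keys, record fields, and the placeholder values for DefaultDurableSegment and DURABLE_METADATA_VERSION are illustrative assumptions, not values taken from the package.

```js
// Placeholders standing in for identifiers defined elsewhere in the bundle.
const DefaultDurableSegment = 'DEFAULT';  // assumed value
const DURABLE_METADATA_VERSION = '1';     // assumed value

// Rough shapes of the operations pushed by flushInMemoryStoreValuesToDurableStore.
const durableStoreOperations = [
    {
        type: 'setEntries',  // visited records: data plus metadata
        segment: DefaultDurableSegment,
        entries: {
            'UiApi::RecordRepresentation:001xx000003DGb0AAG': {
                data: { apiName: 'Account', id: '001xx000003DGb0AAG' },
                metadata: { metadataVersion: DURABLE_METADATA_VERSION },
            },
        },
    },
    {
        type: 'setMetadata', // refreshed-only records; setMetadata consumers shown later read only the metadata
        segment: DefaultDurableSegment,
        entries: {
            'UiApi::RecordRepresentation:001xx000003DGb1AAG': {
                data: { apiName: 'Account', id: '001xx000003DGb1AAG' },
                metadata: { metadataVersion: DURABLE_METADATA_VERSION },
            },
        },
    },
];
```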
@@ -874,6 +903,50 @@ async function reviveRedirects(durableStore, env) {
 }
 }
 
+function buildRevivingStagingStore(upstreamStore) {
+const localStore = new StringKeyInMemoryStore();
+const staleEntries = new Set();
+function readEntry(key) {
+if (typeof key !== 'string') {
+return upstreamStore.readEntry(key);
+}
+let storeEntry = localStore.readEntry(key);
+if (!storeEntry) {
+// read from upstream store...
+storeEntry = upstreamStore.readEntry(key);
+// put it in our store to avoid it getting evicted prior to the next durable store read
+localStore.put(key, storeEntry);
+}
+return storeEntry;
+}
+// Entries are marked stale by the durable store change listener. They are not
+// immediately evicted so as to not result in a cache miss during a rebuild.
+// The revive process will clear stale entries and read them from the durable store
+// on the next revive loop.
+function markStale(key) {
+staleEntries.add(key);
+}
+// The revive loop clears stale entries right before reading from the durable store.
+// Any stale entries will be revived to ensure they are present in L1 and match the
+// latest data.
+function clearStale() {
+for (const key of staleEntries) {
+localStore.dealloc(key);
+}
+staleEntries.clear();
+}
+// All functions other than `readEntry` pass through to the upstream store.
+// A reviving store is only "active" during a call to `environment.storeLookup`, and will
+// be used by the reader attempting to build an L1 snapshot. Immediately after the L1 rebuild
+// the reviving store becomes inactive other than receiving change notifications.
+return create$6(upstreamStore, {
+readEntry: { value: readEntry },
+markStale: { value: markStale },
+clearStale: { value: clearStale },
+staleEntries: { value: staleEntries },
+});
+}
+
 const AdapterContextSegment = 'ADAPTER-CONTEXT';
 const ADAPTER_CONTEXT_ID_SUFFIX = '__NAMED_CONTEXT';
 async function reviveOrCreateContext(adapterId, durableStore, durableStoreErrorHandler, contextStores, pendingContextStoreKeys, onContextLoaded) {
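The comments in buildRevivingStagingStore describe its contract; the sketch below walks through that flow with a stand-in upstream store. The upstream object and the key are hypothetical. In the package, the upstream is the base environment's store (or the active ingest staging store) and markStale is driven by the durable store change listener.

```js
// Hypothetical upstream store; only readEntry is exercised by this sketch.
const upstream = {
    readEntry: (key) => ({ data: { id: key } }),
};
const revivingStore = buildRevivingStagingStore(upstream);

// First read falls through to the upstream and is cached locally, so a later
// L1 eviction cannot cause a cache miss while the async L2 revive is in flight.
revivingStore.readEntry('UiApi::RecordRepresentation:001xx000003DGb0AAG');

// A durable store change notification marks the key stale but keeps the local copy.
revivingStore.markStale('UiApi::RecordRepresentation:001xx000003DGb0AAG');

// On the next revive pass, reviveSnapshot copies revivingStore.staleEntries into
// keysToReviveSet and then drops the local copies before reading from L2.
revivingStore.clearStale();
```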
@@ -929,14 +1002,16 @@ function isUnfulfilledSnapshot$1(cachedSnapshotResult) {
 * @param durableStore A DurableStore implementation
 * @param instrumentation An instrumentation function implementation
 */
-function makeDurable(environment, { durableStore, instrumentation }) {
-let
+function makeDurable(environment, { durableStore, instrumentation, useRevivingStore, enableDurableMetadataRefresh = false, }) {
+let stagingStore = null;
 const durableTTLStore = new DurableTTLStore(durableStore);
 const mergeKeysPromiseMap = new Map();
 // When a context store is mutated we write it to L2, which causes DS on change
 // event. If this instance of makeDurable caused that L2 write we can ignore that
 // on change event. This Set helps us do that.
 const pendingContextStoreKeys = new Set();
+// Reviving stores are tracked so that they can be notified of durable store change notifications.
+const revivingStores = new Set();
 // redirects that need to be flushed to the durable store
 const pendingStoreRedirects = new Map();
 const contextStores = create$6(null);
@@ -962,6 +1037,7 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 const defaultSegmentKeys = [];
 const adapterContextSegmentKeys = [];
 const redirectSegmentKeys = [];
+const metadataRefreshSegmentKeys = [];
 const messagingSegmentKeys = [];
 let shouldBroadcast = false;
 for (let i = 0, len = changes.length; i < len; i++) {
@@ -969,7 +1045,12 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 // we only care about changes to the data which is stored in the default
 // segment or the adapter context
 if (change.segment === DefaultDurableSegment) {
-
+if (change.type === 'setMetadata') {
+metadataRefreshSegmentKeys.push(...change.ids);
+}
+else {
+defaultSegmentKeys.push(...change.ids);
+}
 }
 else if (change.segment === AdapterContextSegment) {
 adapterContextSegmentKeys.push(...change.ids);
@@ -1033,9 +1114,26 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 // and go through an entire broadcast/revive cycle for unchanged data
 // call base environment storeEvict so this evict is not tracked for durable deletion
 environment.storeEvict(key);
+for (const revivingStore of revivingStores) {
+revivingStore.markStale(key);
+}
 }
 shouldBroadcast = true;
 }
+// process metadata only refreshes
+if (metadataRefreshSegmentKeys.length > 0) {
+const entries = await durableStore.getMetadata(metadataRefreshSegmentKeys, DefaultDurableSegment);
+if (entries !== undefined) {
+const entryKeys = keys$7(entries);
+for (let i = 0, len = entryKeys.length; i < len; i++) {
+const entryKey = entryKeys[i];
+const { metadata } = entries[entryKey];
+if (metadata !== undefined) {
+environment.putStoreMetadata(entryKey, metadata, false);
+}
+}
+}
+}
 if (shouldBroadcast) {
 await environment.storeBroadcast(rebuildSnapshot, environment.snapshotAvailable);
 }
@@ -1061,10 +1159,10 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 };
 const storePublish = function (key, data) {
 validateNotDisposed();
-if (
-
+if (stagingStore === null) {
+stagingStore = buildIngestStagingStore(environment);
 }
-
+stagingStore.publish(key, data);
 // remove record from main luvio L1 cache while we are on the synchronous path
 // because we do not want some other code attempting to use the
 // in-memory values before the durable store onChanged handler
@@ -1073,26 +1171,26 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 };
 const publishStoreMetadata = function (recordId, storeMetadata) {
 validateNotDisposed();
-if (
-
+if (stagingStore === null) {
+stagingStore = buildIngestStagingStore(environment);
 }
-
+stagingStore.publishMetadata(recordId, storeMetadata);
 };
 const storeIngest = function (key, ingest, response, luvio) {
 validateNotDisposed();
 // we don't ingest to the luvio L1 store from network directly, we ingest to
 // L2 and let DurableStore on change event revive keys into luvio L1 store
-if (
-
+if (stagingStore === null) {
+stagingStore = buildIngestStagingStore(environment);
 }
-environment.storeIngest(key, ingest, response, luvio,
+environment.storeIngest(key, ingest, response, luvio, stagingStore);
 };
 const storeIngestError = function (key, errorSnapshot, storeMetadataParams, _storeOverride) {
 validateNotDisposed();
-if (
-
+if (stagingStore === null) {
+stagingStore = buildIngestStagingStore(environment);
 }
-environment.storeIngestError(key, errorSnapshot, storeMetadataParams,
+environment.storeIngestError(key, errorSnapshot, storeMetadataParams, stagingStore);
 };
 const storeBroadcast = function (_rebuildSnapshot, _snapshotDataAvailable) {
 validateNotDisposed();
@@ -1103,19 +1201,19 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 };
 const publishChangesToDurableStore = function (additionalDurableStoreOperations) {
 validateNotDisposed();
-if (
+if (stagingStore === null) {
 return Promise.resolve();
 }
-const promise = flushInMemoryStoreValuesToDurableStore(
+const promise = flushInMemoryStoreValuesToDurableStore(stagingStore, durableStore, durableStoreErrorHandler, new Map(pendingStoreRedirects), additionalDurableStoreOperations, enableDurableMetadataRefresh);
 pendingStoreRedirects.clear();
-
+stagingStore = null;
 return promise;
 };
 const storeLookup = function (sel, createSnapshot, refresh, ttlStrategy) {
 validateNotDisposed();
-// if this lookup is right after an ingest there will be a staging store
-if (
-const reader = new Reader(
+// if this lookup is right after an ingest or during a revive there will be a staging store
+if (stagingStore !== null) {
+const reader = new Reader(stagingStore, sel.variables, refresh, undefined, ttlStrategy);
 return reader.read(sel);
 }
 // otherwise this is from buildCachedSnapshot and we should use the luvio
@@ -1124,24 +1222,24 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 };
 const storeEvict = function (key) {
 validateNotDisposed();
-if (
-
+if (stagingStore === null) {
+stagingStore = buildIngestStagingStore(environment);
 }
-
+stagingStore.evict(key);
 };
 const getNode = function (key) {
 validateNotDisposed();
-if (
-
+if (stagingStore === null) {
+stagingStore = buildIngestStagingStore(environment);
 }
-return environment.getNode(key,
+return environment.getNode(key, stagingStore);
 };
 const wrapNormalizedGraphNode = function (normalized) {
 validateNotDisposed();
-if (
-
+if (stagingStore === null) {
+stagingStore = buildIngestStagingStore(environment);
 }
-return environment.wrapNormalizedGraphNode(normalized,
+return environment.wrapNormalizedGraphNode(normalized, stagingStore);
 };
 const rebuildSnapshot = function (snapshot, onRebuild) {
 validateNotDisposed();
@@ -1153,7 +1251,7 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 return;
 }
 // Do an L2 revive and emit to subscriber using the callback.
-
+reviveSnapshotWrapper(rebuilt, () => {
 // reviveSnapshot will revive into L1, and since "records" is a reference
 // (and not a copy) to the L1 records we can use it for rebuild
 let rebuiltSnap;
@@ -1194,10 +1292,10 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 // the next publishChangesToDurableStore. NOTE: we don't need to call
 // redirect on the base environment store because staging store and base
 // L1 store share the same redirect and reverseRedirectKeys
-if (
-
+if (stagingStore === null) {
+stagingStore = buildIngestStagingStore(environment);
 }
-
+stagingStore.redirect(existingKey, canonicalKey);
 };
 const storeSetTTLOverride = function (namespace, representationName, ttl) {
 validateNotDisposed();
@@ -1238,7 +1336,7 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 if (isUnfulfilledSnapshot$1(snapshot)) {
 const start = Date.now();
 emitDurableEnvironmentAdapterEvent({ type: 'l2-revive-start' }, adapterRequestContext.eventObservers);
-const revivedSnapshot =
+const revivedSnapshot = reviveSnapshotWrapper(snapshot, () => injectedStoreLookup(snapshot.select, snapshot.refresh)).then((result) => {
 emitDurableEnvironmentAdapterEvent({
 type: 'l2-revive-end',
 snapshot: result.snapshot,
@@ -1263,15 +1361,15 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 };
 const getIngestStagingStoreRecords = function () {
 validateNotDisposed();
-if (
-return
+if (stagingStore !== null) {
+return stagingStore.fallbackStringKeyInMemoryStore.records;
 }
 return {};
 };
 const getIngestStagingStoreMetadata = function () {
 validateNotDisposed();
-if (
-return
+if (stagingStore !== null) {
+return stagingStore.fallbackStringKeyInMemoryStore.metadata;
 }
 return {};
 };
@@ -1310,22 +1408,20 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 }
 await Promise.all(pendingPromises);
 const entries = await durableStore.getEntries(keysToReviveAsArray, DefaultDurableSegment);
-
+stagingStore = buildIngestStagingStore(environment);
 publishDurableStoreEntries(entries, (key, record) => {
 if (typeof key === 'string') {
-
-record;
+stagingStore.fallbackStringKeyInMemoryStore.records[key] = record;
 }
 else {
-
+stagingStore.recordsMap.set(key, record);
 }
 }, (key, metadata) => {
 if (typeof key === 'string') {
-
-metadata;
+stagingStore.fallbackStringKeyInMemoryStore.metadata[key] = metadata;
 }
 else {
-
+stagingStore.metadataMap.set(key, metadata);
 }
 });
 snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
@@ -1354,7 +1450,7 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 // we aren't doing any merging so we don't have to synchronize, the
 // underlying DurableStore implementation takes care of R/W sync
 // so all we have to do is ingest then write to L2
-
+stagingStore = buildIngestStagingStore(environment);
 snapshotFromMemoryIngest = await ingestAndBroadcastFunc();
 }
 if (snapshotFromMemoryIngest === undefined) {
@@ -1365,12 +1461,12 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 }
 // if snapshot from staging store lookup is unfulfilled then do an L2 lookup
 const { select, refresh } = snapshotFromMemoryIngest;
-const result = await
+const result = await reviveSnapshotWrapper(snapshotFromMemoryIngest, () => environment.storeLookup(select, environment.createSnapshot, refresh));
 return result.snapshot;
 };
 const handleErrorResponse = async function (ingestAndBroadcastFunc) {
 validateNotDisposed();
-
+stagingStore = buildIngestStagingStore(environment);
 return ingestAndBroadcastFunc();
 };
 const getNotifyChangeStoreEntries = function (keys) {
@@ -1421,6 +1517,27 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 await durableStore.setEntries({ notifyStoreUpdateAvailable: { data: entryKeys } }, MessagingDurableSegment);
 return Promise.resolve(undefined);
 };
+const reviveSnapshotWrapper = function (unavailableSnapshot, buildL1Snapshot) {
+let revivingStore = undefined;
+if (useRevivingStore) {
+// NOTE: `store` is private, there doesn't seem to be a better,
+// cleaner way of accessing it from a derived environment.
+let baseStore = environment.store;
+// If we're rebuilding during an ingest, the existing staging store should be the base store.
+if (stagingStore) {
+baseStore = stagingStore;
+}
+let revivingStore = buildRevivingStagingStore(baseStore);
+revivingStores.add(revivingStore);
+}
+return reviveSnapshot(environment, durableStore, unavailableSnapshot, durableStoreErrorHandler, () => {
+const tempStore = stagingStore;
+const result = buildL1Snapshot();
+stagingStore = tempStore;
+return result;
+}, revivingStore).finally(() => {
+});
+};
 // set the default cache policy of the base environment
 environment.setDefaultCachePolicy({
 type: 'stale-while-revalidate',
@@ -1461,6 +1578,73 @@ function makeDurable(environment, { durableStore, instrumentation }) {
 * For full license text, see the LICENSE.txt file
 */
 
+const API_NAMESPACE = 'UiApi';
+const RECORD_REPRESENTATION_NAME = 'RecordRepresentation';
+const RECORD_VIEW_ENTITY_REPRESENTATION_NAME = 'RecordViewEntityRepresentation';
+const RECORD_ID_PREFIX = `${API_NAMESPACE}::${RECORD_REPRESENTATION_NAME}:`;
+const RECORD_VIEW_ENTITY_ID_PREFIX = `${API_NAMESPACE}::${RECORD_VIEW_ENTITY_REPRESENTATION_NAME}:Name:`;
+const RECORD_FIELDS_KEY_JUNCTION = '__fields__';
+function isStoreKeyRecordId(key) {
+return key.indexOf(RECORD_ID_PREFIX) > -1 && key.indexOf(RECORD_FIELDS_KEY_JUNCTION) === -1;
+}
+function isStoreKeyRecordViewEntity(key) {
+return (key.indexOf(RECORD_VIEW_ENTITY_ID_PREFIX) > -1 &&
+key.indexOf(RECORD_FIELDS_KEY_JUNCTION) === -1);
+}
+function isStoreKeyRecordField(key) {
+return key.indexOf(RECORD_ID_PREFIX) > -1 && key.indexOf(RECORD_FIELDS_KEY_JUNCTION) > -1;
+}
+function extractRecordIdFromStoreKey(key) {
+if (key === undefined ||
+(key.indexOf(RECORD_ID_PREFIX) === -1 && key.indexOf(RECORD_VIEW_ENTITY_ID_PREFIX) === -1)) {
+return undefined;
+}
+const parts = key.split(':');
+return parts[parts.length - 1].split('_')[0];
+}
+function buildRecordFieldStoreKey(recordKey, fieldName) {
+return `${recordKey}${RECORD_FIELDS_KEY_JUNCTION}${fieldName}`;
+}
+function objectsDeepEqual(lhs, rhs) {
+if (lhs === rhs)
+return true;
+if (typeof lhs !== 'object' || typeof rhs !== 'object' || lhs === null || rhs === null)
+return false;
+const lhsKeys = Object.keys(lhs);
+const rhsKeys = Object.keys(rhs);
+if (lhsKeys.length !== rhsKeys.length)
+return false;
+for (let key of lhsKeys) {
+if (!rhsKeys.includes(key))
+return false;
+if (typeof lhs[key] === 'function' || typeof rhs[key] === 'function') {
+if (lhs[key].toString() !== rhs[key].toString())
+return false;
+}
+else {
+if (!objectsDeepEqual(lhs[key], rhs[key]))
+return false;
+}
+}
+return true;
+}
+
+function isStoreRecordError(storeRecord) {
+return storeRecord.__type === 'error';
+}
+function isEntryDurableRecordRepresentation(entry, key) {
+// Either a DurableRecordRepresentation or StoreRecordError can live at a record key
+return ((isStoreKeyRecordId(key) || isStoreKeyRecordViewEntity(key)) &&
+entry.data.__type === undefined);
+}
+
+/**
+* Copyright (c) 2022, Salesforce, Inc.,
+* All rights reserved.
+* For full license text, see the LICENSE.txt file
+*/
+
+
 const GRAPHQL_ROOT_KEY$1 = `GraphQL::graphql`;
 function findIds(json) {
 const entries = Object.entries(json);
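objectsDeepEqual above is what the storeEval rebuild and the draft-aware GraphQL rebuild hunks further down use to hand back the original snapshot when a rebuild produced structurally identical data, so subscribers are not re-notified for no-op rebuilds. A few illustrative calls against that definition; the values are made up.

```js
// Assumes objectsDeepEqual from the hunk above is in scope.
objectsDeepEqual({ a: 1, b: { c: [1, 2] } }, { a: 1, b: { c: [1, 2] } }); // true
objectsDeepEqual({ a: 1 }, { a: 1, b: 2 });                               // false: key counts differ
objectsDeepEqual({ cb: () => 1 }, { cb: () => 1 });                       // true: functions compared by toString()
objectsDeepEqual({ cb: () => 1 }, { cb: () => 2 });                       // false
```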
@@ -1839,7 +2023,7 @@ function isFailure(result) {
 function errors(result) {
 return result.error;
 }
-function values$
+function values$4(result) {
 return result.value;
 }
 function flattenResults(results) {
@@ -1847,7 +2031,7 @@ function flattenResults(results) {
 if (fails.length > 0) {
 return failure(fails);
 }
-return success(results.filter(isSuccess).map(values$
+return success(results.filter(isSuccess).map(values$4));
 }
 
 function getFieldInfo(apiName, fieldName, infoMap) {
@@ -2733,7 +2917,7 @@ function fieldsToFilters(fieldValues, joinAlias, apiName, input, compoundOperato
 if (failures.length > 0) {
 return failure(failures);
 }
-const containers = results.filter(isSuccess).map(values$
+const containers = results.filter(isSuccess).map(values$4);
 const predicates = [];
 containers.forEach((c) => {
 if (c.predicate !== undefined) {
@@ -3025,7 +3209,7 @@ function dateFunctions(operatorNode, extract, dataType) {
 if (fails.length > 0) {
 return failure(fails);
 }
-const vals = results.filter(isSuccess).reduce(flatMap(values$
+const vals = results.filter(isSuccess).reduce(flatMap(values$4), []);
 return success(vals);
 }
 function isFilterFunction(name) {
@@ -3035,7 +3219,7 @@ function fieldOperators(operatorNode, dataType) {
 const results = Object.entries(operatorNode.fields)
 .filter(([key, _]) => isFilterFunction(key) === false)
 .map(([key, value]) => operatorWithValue(key, value, dataType));
-const _values = results.filter(isSuccess).map(values$
+const _values = results.filter(isSuccess).map(values$4);
 const fails = results.filter(isFailure).reduce(flatMap(errors), []);
 if (fails.length > 0) {
 return failure(fails);
@@ -3956,7 +4140,7 @@ function selectionToQueryField(node, names, parentApiName, parentAlias, input, j
 }
 function recordFields(luvioSelections, names, parentApiName, parentAlias, input, joins) {
 const results = luvioSelections.map((selection) => selectionToQueryField(selection, names, parentApiName, parentAlias, input, joins));
-const fields = results.filter(isSuccess).reduce(flatMap(values$
+const fields = results.filter(isSuccess).reduce(flatMap(values$4), []);
 const fails = results.filter(isFailure).reduce(flatMap(errors), []);
 if (fails.length > 0) {
 return failure(fails);
@@ -4202,7 +4386,7 @@ function rootRecordQuery(selection, input) {
 }
 function rootQuery(recordNodes, input) {
 const results = recordNodes.map((record) => rootRecordQuery(record, input));
-const connections = results.filter(isSuccess).map(values$
+const connections = results.filter(isSuccess).map(values$4);
 const fails = results.filter(isFailure).reduce(flatMap(errors), []);
 if (fails.length > 0) {
 return failure(fails);
@@ -4653,7 +4837,11 @@ function makeStoreEval(preconditioner, objectInfoService, userId, contextProvide
 try {
 const { data, seenRecords } = await queryEvaluator(rootQuery, context, eventEmitter);
 const rebuildWithStoreEval = ((originalSnapshot) => {
-return storeEval(config, originalSnapshot, observers, connectionKeyBuilder)
+return storeEval(config, originalSnapshot, observers, connectionKeyBuilder).then((rebuiltSnapshot) => {
+return objectsDeepEqual(originalSnapshot.data, rebuiltSnapshot.data)
+? originalSnapshot
+: rebuiltSnapshot;
+});
 });
 const recordId = generateUniqueRecordId$1();
 // if the non-eval'ed snapshot was an error then we return a synthetic
@@ -4687,6 +4875,7 @@ function makeStoreEval(preconditioner, objectInfoService, userId, contextProvide
 * For full license text, see the LICENSE.txt file
 */
 
+
 /* Ideally we would use AbortController but it does not exist in V8, this is a simplified version */
 class LdsAbortController {
 constructor() {
@@ -4781,6 +4970,7 @@ class AsyncWorkerPool {
 * For full license text, see the LICENSE.txt file
 */
 
+
 var DraftActionStatus;
 (function (DraftActionStatus) {
 DraftActionStatus["Pending"] = "pending";
@@ -4956,7 +5146,7 @@ function createDraftSynthesisErrorResponse(message = 'failed to synthesize draft
 return new DraftErrorFetchResponse(HttpStatusCode.BadRequest, error);
 }
 
-const { keys: keys$5, create: create$5, assign: assign$5, values: values$
+const { keys: keys$5, create: create$5, assign: assign$5, values: values$3 } = Object;
 const { stringify: stringify$5, parse: parse$5 } = JSON;
 const { isArray: isArray$3 } = Array;
 
@@ -5391,7 +5581,7 @@ class DurableDraftQueue {
 const queueOperations = handler.getQueueOperationsForCompletingDrafts(queue, action);
 // write the queue operations to the store prior to ingesting the result
 await this.draftStore.completeAction(queueOperations);
-await handler.handleActionCompleted(action, queueOperations, values$
+await handler.handleActionCompleted(action, queueOperations, values$3(this.handlers));
 this.retryIntervalMilliseconds = 0;
 this.uploadingActionId = undefined;
 await this.notifyChangedListeners({
@@ -6613,49 +6803,6 @@ function makeEnvironmentDraftAware(luvio, env, durableStore, handlers, draftQueu
 });
 }
 
-/**
-* Copyright (c) 2022, Salesforce, Inc.,
-* All rights reserved.
-* For full license text, see the LICENSE.txt file
-*/
-
-const API_NAMESPACE = 'UiApi';
-const RECORD_REPRESENTATION_NAME = 'RecordRepresentation';
-const RECORD_VIEW_ENTITY_REPRESENTATION_NAME = 'RecordViewEntityRepresentation';
-const RECORD_ID_PREFIX = `${API_NAMESPACE}::${RECORD_REPRESENTATION_NAME}:`;
-const RECORD_VIEW_ENTITY_ID_PREFIX = `${API_NAMESPACE}::${RECORD_VIEW_ENTITY_REPRESENTATION_NAME}:Name:`;
-const RECORD_FIELDS_KEY_JUNCTION = '__fields__';
-function isStoreKeyRecordId(key) {
-return key.indexOf(RECORD_ID_PREFIX) > -1 && key.indexOf(RECORD_FIELDS_KEY_JUNCTION) === -1;
-}
-function isStoreKeyRecordViewEntity(key) {
-return (key.indexOf(RECORD_VIEW_ENTITY_ID_PREFIX) > -1 &&
-key.indexOf(RECORD_FIELDS_KEY_JUNCTION) === -1);
-}
-function isStoreKeyRecordField(key) {
-return key.indexOf(RECORD_ID_PREFIX) > -1 && key.indexOf(RECORD_FIELDS_KEY_JUNCTION) > -1;
-}
-function extractRecordIdFromStoreKey(key) {
-if (key === undefined ||
-(key.indexOf(RECORD_ID_PREFIX) === -1 && key.indexOf(RECORD_VIEW_ENTITY_ID_PREFIX) === -1)) {
-return undefined;
-}
-const parts = key.split(':');
-return parts[parts.length - 1].split('_')[0];
-}
-function buildRecordFieldStoreKey(recordKey, fieldName) {
-return `${recordKey}${RECORD_FIELDS_KEY_JUNCTION}${fieldName}`;
-}
-
-function isStoreRecordError(storeRecord) {
-return storeRecord.__type === 'error';
-}
-function isEntryDurableRecordRepresentation(entry, key) {
-// Either a DurableRecordRepresentation or StoreRecordError can live at a record key
-return ((isStoreKeyRecordId(key) || isStoreKeyRecordViewEntity(key)) &&
-entry.data.__type === undefined);
-}
-
 function serializeFieldArguments(argumentNodes, variables) {
 const mutableArgumentNodes = Object.assign([], argumentNodes);
 return `args__(${mutableArgumentNodes
@@ -6760,6 +6907,7 @@ function buildQueryTypeStringKey(args) {
 * For full license text, see the LICENSE.txt file
 */
 
+
 class DataLoader {
 constructor(batchLoadFn) {
 this._batchLoadFn = batchLoadFn;
@@ -6887,7 +7035,7 @@ function isArrayLike(x) {
 (x.length === 0 || (x.length > 0 && Object.prototype.hasOwnProperty.call(x, x.length - 1))));
 }
 
-const { create: create$4, keys: keys$4, values: values$
+const { create: create$4, keys: keys$4, values: values$2, entries: entries$3, assign: assign$4 } = Object;
 const { stringify: stringify$4, parse: parse$4 } = JSON;
 const { isArray: isArray$2 } = Array;
 
@@ -7220,7 +7368,7 @@ function dateTimePredicate(input, operator, field, alias) {
 return predicate;
 }
 else if (literal !== undefined) {
-const isAvailableLiteral = values$
+const isAvailableLiteral = values$2(DateLiteral).includes(literal);
 // eslint-disable-next-line @salesforce/lds/no-error-in-production
 if (!isAvailableLiteral)
 throw new Error(`${literal} is not a valid DateLiteral`);
@@ -7894,7 +8042,7 @@ function dedupeJoins(joins) {
 for (const join of joins) {
 deduped[join.alias + join.to] = join;
 }
-return values$
+return values$2(deduped);
 }
 function buildJoins(config) {
 let sql = '';
@@ -8542,7 +8690,7 @@ function flatten(previous, current) {
 return previous.concat(current);
 }
 function findFieldInfo(objectInfo, fieldName) {
-return values$
+return values$2(objectInfo.fields).find((field) => field.apiName === fieldName ||
 (field.dataType === 'Reference' && field.relationshipName === fieldName));
 }
 
@@ -8562,10 +8710,10 @@ function orderByToPredicate(orderBy, recordType, alias, objectInfoMap, joins) {
 for (let i = 0, len = keys$1.length; i < len; i++) {
 const key = keys$1[i];
 const parentFields = objectInfoMap[recordType].fields;
-const fieldInfo = values$
+const fieldInfo = values$2(parentFields).find(findSpanningField(key));
 if (fieldInfo && fieldInfo.referenceToInfos.length > 0) {
 const { apiName } = fieldInfo.referenceToInfos[0];
-const parentFieldInfo = values$
+const parentFieldInfo = values$2(objectInfoMap[recordType].fields).find(findSpanningField(fieldInfo.apiName));
 if (parentFieldInfo !== undefined) {
 const path = {
 leftPath: `$.fields.${parentFieldInfo.apiName}.value`,
@@ -8707,7 +8855,7 @@ function addResolversToSchema(schema, polyFields) {
 let baseRecord = undefined;
 // Concrete types for Polymorphic field
 const polyTypes = [];
-for (const type of values$
+for (const type of values$2(schema.getTypeMap())) {
 if (type.name === 'Record') {
 recordInterface = type;
 }
@@ -8720,7 +8868,7 @@ function addResolversToSchema(schema, polyFields) {
 if (polyFields.find((fieldTypeName) => fieldTypeName === type.name) !== undefined) {
 polyTypes.push(type);
 }
-const fields = values$
+const fields = values$2(type.getFields());
 // initialize the fields of current type with default behavior
 for (const field of fields) {
 field.resolve = defaultFieldResolver;
@@ -9122,26 +9270,20 @@ function generateRecordQueries(objectInfos) {
 let recordConnections = ``;
 const polymorphicFieldTypeNames = new Set();
 let typedScalars = new Set();
-
+let parentRelationshipFields = new Set();
+for (const objectInfo of values$2(objectInfos)) {
 const { apiName, childRelationships } = objectInfo;
 let fields = ``;
 typedScalars.add(`${apiName}_Filter`);
 typedScalars.add(`${apiName}_OrderBy`);
-for (const
-const { childObjectApiName } = childRelationship;
-// Only add the relationship if there is relevant objectinfos for it,
-// otherwise we'd be defining types we cannot satisfy and aren't referenced in
-// the query.
-if (objectInfos[childObjectApiName] !== undefined) {
-fields += `${childRelationship.relationshipName}(first: Int, where: ${childObjectApiName}_Filter, orderBy: ${childObjectApiName}_OrderBy, scope: SupportedScopes): ${childObjectApiName}Connection \n`;
-typedScalars.add(`${childObjectApiName}_Filter`);
-typedScalars.add(`${childObjectApiName}_OrderBy`);
-}
-}
-for (const field of values$1(objectInfo.fields)) {
+for (const field of values$2(objectInfo.fields)) {
 if (!fieldsStaticallyAdded.includes(field.apiName)) {
 fields += `${field.apiName}: ${dataTypeToType(field.dataType, field.apiName)}\n`;
 }
+//handles parent relationship
+if (field.relationshipName === null) {
+continue;
+}
 // For spanning parent relationships with no union types
 if (field.referenceToInfos.length === 1) {
 const [relation] = field.referenceToInfos;
@@ -9149,11 +9291,13 @@ function generateRecordQueries(objectInfos) {
 // otherwise we'd be defining types we cannot satisfy and aren't referenced in
 // the query.
 if (objectInfos[relation.apiName] !== undefined) {
+parentRelationshipFields.add(field.relationshipName);
 fields += `${field.relationshipName}: ${relation.apiName}\n`;
 }
 // For polymorphic field, its type is 'Record' inteface. The concrete entity type name is saved for field resolving of next phase
 }
 else if (field.referenceToInfos.length > 1) {
+parentRelationshipFields.add(field.relationshipName);
 fields += `${field.relationshipName}: Record\n`;
 for (const relation of field.referenceToInfos) {
 if (objectInfos[relation.apiName] !== undefined) {
@@ -9162,6 +9306,20 @@ function generateRecordQueries(objectInfos) {
 }
 }
 }
+// handles child relationship
+for (const childRelationship of childRelationships) {
+const { childObjectApiName } = childRelationship;
+// Only add the relationship if there is relevant objectinfos for it,
+// otherwise we'd be defining types we cannot satisfy and aren't referenced in
+// the query.
+// If one field has both parent relationship and child relationship with the same name, the child relationship is ignored. This is how the server GQL has implemented as date of 08/07/2023
+if (objectInfos[childObjectApiName] !== undefined &&
+!parentRelationshipFields.has(childRelationship.relationshipName)) {
+fields += `${childRelationship.relationshipName}(first: Int, where: ${childObjectApiName}_Filter, orderBy: ${childObjectApiName}_OrderBy, scope: SupportedScopes): ${childObjectApiName}Connection \n`;
+typedScalars.add(`${childObjectApiName}_Filter`);
+typedScalars.add(`${childObjectApiName}_OrderBy`);
+}
+}
 recordQueries += `${apiName}(first: Int, where: ${apiName}_Filter, orderBy: ${apiName}_OrderBy, scope: SupportedScopes): ${apiName}Connection\n`;
 const isServiceAppointment = apiName === 'ServiceAppointment';
 recordConnections += /* GraphQL */ `
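The generator above now records parent relationship names and skips any child relationship that collides with one, matching the server GraphQL behavior described in the added comment. Below is a contrived object-info fragment illustrating the rule; the shape is trimmed to the properties the generator reads and the names are hypothetical.

```js
// Hypothetical objectInfos input. 'Parent' appears both as a spanning parent
// field and as a child relationship, so only the parent field is emitted and
// the colliding child relationship is skipped.
const objectInfos = {
    Account: {
        apiName: 'Account',
        fields: {
            ParentId: {
                apiName: 'ParentId',
                dataType: 'Reference',
                relationshipName: 'Parent',
                referenceToInfos: [{ apiName: 'Account' }],
            },
        },
        childRelationships: [
            { relationshipName: 'Parent', childObjectApiName: 'Account' },
            { relationshipName: 'Contacts', childObjectApiName: 'Contact' },
        ],
    },
};
// With this input, the 'Parent' child relationship is dropped because of the
// name collision, and 'Contacts' is dropped only because Contact has no entry
// in objectInfos.
```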
@@ -9641,7 +9799,7 @@ function isMineScopeAvailable(apiNamePath, objectInfoApiMap, objectInfos) {
 const objectInfo = objectInfos[apiName[0]];
 if (!objectInfo)
 return false;
-return values$
+return values$2(objectInfo.fields).some((fieldInfo) => {
 return (fieldInfo.apiName === 'OwnerId' &&
 fieldInfo.referenceToInfos.some((referenceToInfo) => {
 return referenceToInfo.apiName === 'User';
@@ -10892,7 +11050,8 @@ function referenceIdFieldForRelationship(relationshipName) {
 * For full license text, see the LICENSE.txt file
 */
 
-
+
+const { keys: keys$3, values: values$1, create: create$3, assign: assign$3, freeze } = Object;
 const { stringify: stringify$3, parse: parse$3 } = JSON;
 const { shift } = Array.prototype;
 const { isArray: isArray$1 } = Array;
@@ -11742,7 +11901,7 @@ class UiApiActionHandler extends AbstractResourceRequestActionHandler {
 return;
 }
 const objectInfo = objectInfoMap[apiName];
-const optionalFields = values(objectInfo.fields).map((field) => `${apiName}.${field.apiName}`);
+const optionalFields = values$1(objectInfo.fields).map((field) => `${apiName}.${field.apiName}`);
 await getAdapterData(this.getRecordAdapter, {
 recordId: referenceFieldInfo.id,
 optionalFields,
@@ -11761,7 +11920,7 @@ class UiApiActionHandler extends AbstractResourceRequestActionHandler {
 const referenceToInfos = fieldInfo.referenceToInfos;
 const apiNames = referenceToInfos.map((referenceToInfo) => referenceToInfo.apiName);
 const objectInfoMap = await this.objectInfoService.getObjectInfos(apiNames);
-for (const objectInfo of values(objectInfoMap)) {
+for (const objectInfo of values$1(objectInfoMap)) {
 const { apiName, keyPrefix } = objectInfo;
 if (keyPrefix !== null && id.startsWith(keyPrefix)) {
 return apiName;
@@ -12271,14 +12430,30 @@ function makeRecordDenormalizingDurableStore(luvio, durableStore, getStoreRecord
 const operationsWithDenormedRecords = [];
 for (let i = 0, len = operations.length; i < len; i++) {
 const operation = operations[i];
-if (
-
-
+if (durableStore.plugin !== undefined &&
+durableStore.plugin.supportsBatchUpdates !== undefined &&
+durableStore.plugin.supportsBatchUpdates() === true) {
+if (operation.segment !== DefaultDurableSegment ||
+operation.type !== 'setEntries') {
+operationsWithDenormedRecords.push(operation);
+continue;
+}
+operationsWithDenormedRecords.push({
+...operation,
+entries: denormalizeEntries(operation.entries),
+});
+}
+else {
+if (operation.segment !== DefaultDurableSegment ||
+operation.type === 'evictEntries') {
+operationsWithDenormedRecords.push(operation);
+continue;
+}
+operationsWithDenormedRecords.push({
+...operation,
+entries: denormalizeEntries(operation.entries),
+});
 }
-operationsWithDenormedRecords.push({
-...operation,
-entries: denormalizeEntries(operation.entries),
-});
 }
 return durableStore.batchOperations(operationsWithDenormedRecords);
 };
@@ -12826,6 +13001,9 @@ function draftAwareGraphQLAdapterFactory(userId, objectInfoService, store, luvio
 if (!rebuildResult.errors) {
 rebuildResult = removeSyntheticFields(rebuildResult, config.query);
 }
+if (objectsDeepEqual(rebuildResult, originalSnapshot.data)) {
+return originalSnapshot;
+}
 // 'originalSnapshot' is the local eval snapshot subscribed. It is always in 'Fulfilled' state. This behavior would change once W-1273462(rebuild non-evaluated snapshot when the graphql local eval rebuild is triggered) is resolved.
 return {
 ...originalSnapshot,
@@ -13546,6 +13724,7 @@ const recordIdGenerator = (id) => {
 * For full license text, see the LICENSE.txt file
 */
 
+
 const { keys: keys$2, create: create$2, assign: assign$2, entries: entries$2 } = Object;
 const { stringify: stringify$2, parse: parse$2 } = JSON;
 const { push: push$1, join: join$1, slice: slice$1 } = Array.prototype;
@@ -13671,6 +13850,7 @@ function buildLdsResponse(response) {
 }
 
 // so eslint doesn't complain about nimbus
+/* global __nimbus */
 const tasker = idleDetector.declareNotifierTaskMulti('NimbusNetworkAdapter');
 const NimbusNetworkAdapter = (request, resourceRequestContext) => {
 tasker.add();
@@ -14347,6 +14527,7 @@ const { stringify: stringify$1, parse: parse$1 } = JSON;
 const { push, join, slice } = Array.prototype;
 
 // so eslint doesn't complain about nimbus
+/* global __nimbus */
 /**
 * An implementation of the DraftQueue interface which serializes
 * requests and sends them across the Nimbus bridge and deserializes the result.
@@ -14696,6 +14877,7 @@ function instrumentDraftQueue(queue) {
 }
 
 // so eslint doesn't complain about nimbus
+/* global __nimbus */
 function buildLdsDraftQueue(durableStore) {
 if (typeof __nimbus !== 'undefined' &&
 __nimbus.plugins !== undefined &&
@@ -14990,7 +15172,7 @@ function registerReportObserver(reportObserver) {
 };
 }
 
-const { keys, create, assign, entries } = Object;
+const { keys, create, assign, entries, values } = Object;
 const { stringify, parse } = JSON;
 
 function selectColumnsFromTableWhereKeyIn(columnNames, table, keyColumnName, whereIn) {
@@ -15024,6 +15206,22 @@ class LdsDataTable {
 }, reject);
 });
 }
+getMetadataByKeys(keys) {
+const query = selectColumnsFromTableWhereKeyIn([COLUMN_NAME_KEY$2, COLUMN_NAME_METADATA$1], this.tableName, COLUMN_NAME_KEY$2, keys);
+return new Promise((resolve, reject) => {
+this.plugin.query(query, keys, (results) => {
+resolve(results.rows.reduce((entries, row) => {
+const [key, stringifiedMetadata] = row;
+if (stringifiedMetadata !== undefined) {
+entries[key] = {
+metadata: parse(stringifiedMetadata),
+};
+}
+return entries;
+}, {}));
+}, reject);
+});
+}
 getAll() {
 return new Promise((resolve, reject) => {
 this.plugin.query(this.getAllQuery, [], (x) => {
@@ -15050,6 +15248,24 @@ class LdsDataTable {
 }, []),
 };
 }
+metadataToUpdateOperations(entries, segment) {
+return {
+type: 'update',
+table: this.tableName,
+keyColumn: COLUMN_NAME_KEY$2,
+context: {
+segment,
+type: 'setMetadata',
+},
+columns: [COLUMN_NAME_METADATA$1],
+values: keys(entries).reduce((values, key) => {
+const { metadata } = entries[key];
+const row = [metadata ? stringify(metadata) : null];
+values[key] = row;
+return values;
+}, {}),
+};
+}
 mapToDurableEntries(sqliteResult) {
 return sqliteResult.rows.reduce((entries, row) => {
 const [key, stringifiedData, stringifiedMetadata] = row;
@@ -15096,6 +15312,25 @@ class LdsInternalDataTable {
 }, reject);
 });
 }
+getMetadataByKeys(keys, namespace) {
+if (namespace === undefined) {
+throw Error('LdsInternalDataTable requires namespace');
+}
+const query = selectColumnsFromTableWhereKeyInNamespaced([COLUMN_NAME_KEY$1, COLUMN_NAME_METADATA], this.tableName, COLUMN_NAME_KEY$1, keys, COLUMN_NAME_NAMESPACE);
+return new Promise((resolve, reject) => {
+this.plugin.query(query, [namespace].concat(keys), (results) => {
+resolve(results.rows.reduce((entries, row) => {
+const [key, stringifiedMetadata] = row;
+if (stringifiedMetadata !== undefined) {
+entries[key] = {
+metadata: parse(stringifiedMetadata),
+};
+}
+return entries;
+}, {}));
+}, reject);
+});
+}
 getAll(namespace) {
 return new Promise((resolve, reject) => {
 this.plugin.query(this.getAllQuery, [namespace], (x) => {
@@ -15129,6 +15364,42 @@ class LdsInternalDataTable {
 }, []),
 };
 }
+metadataToUpdateOperations(entries, segment) {
+return {
+type: 'update',
+table: this.tableName,
+keyColumn: COLUMN_NAME_KEY$1,
+context: {
+segment,
+type: 'setMetadata',
+},
+columns: [COLUMN_NAME_METADATA],
+values: keys(entries).reduce((values, key) => {
+const { metadata } = entries[key];
+const row = [metadata ? stringify(metadata) : null];
+values[key] = row;
+return values;
+}, {}),
+};
+}
+metadataToUpdateSQLQueries(entries, segment) {
+return keys(entries).reduce((accu, key) => {
+const { metadata } = entries[key];
+if (metadata !== undefined) {
+accu.push({
+sql: `UPDATE ${this.tableName} SET ${COLUMN_NAME_METADATA} = ? WHERE (${COLUMN_NAME_KEY$1} IS ? AND ${COLUMN_NAME_NAMESPACE} IS ?)`,
+params: [stringify(metadata), key, segment],
+change: {
+ids: [key],
+segment,
+type: 'setMetadata',
+isExternalChange: false,
+},
+});
+}
+return accu;
+}, []);
+}
 mapToDurableEntries(sqliteResult) {
 return sqliteResult.rows.reduce((entries, row) => {
 const [key, stringifiedData, stringifiedMetadata] = row;
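metadataToUpdateSQLQueries builds one parameterized UPDATE per entry, touching only the metadata column and scoping by key and namespace. A sketch of the SQL shape it produces; the table and column names below are placeholders, since the real COLUMN_NAME_* constants are defined elsewhere in this bundle:

// Placeholder names only; not the package's actual table/column constants.
function sketchMetadataUpdateSql(tableName, metadataColumn, keyColumn, namespaceColumn) {
    return `UPDATE ${tableName} SET ${metadataColumn} = ? WHERE (${keyColumn} IS ? AND ${namespaceColumn} IS ?)`;
}

// sketchMetadataUpdateSql('lds_internal', 'metadata', 'key', 'namespace')
// => 'UPDATE lds_internal SET metadata = ? WHERE (key IS ? AND namespace IS ?)'
// Parameters are bound as [stringify(metadata), key, segment], matching the diff above.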
@@ -15165,9 +15436,16 @@ class NimbusSqliteStore {
 });
 });
 }
+batchQuery(queries) {
+const promises = queries.map((q) => this.query(q.sql, q.params));
+return Promise.all(promises);
+}
 async getEntries(entryIds, segment) {
 return this.getTable(segment).getByKeys(entryIds, segment);
 }
+async getMetadata(entryIds, segment) {
+return this.getTable(segment).getMetadataByKeys(entryIds, segment);
+}
 getAllEntries(segment) {
 return this.getTable(segment).getAll(segment);
 }
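NimbusSqliteStore now exposes getMetadata alongside getEntries, so callers can revive durable metadata without reading the data column. A hedged sketch of how a consumer might use it; the store instance, key list, and segment value here are illustrative, not taken from the package:

// Illustrative consumer; the keys and the 'DEFAULT' segment name are made-up inputs.
async function readMetadataOnly(store) {
    const keys = ['UiApi::RecordRepresentation:001xx0', 'UiApi::RecordRepresentation:001xx1'];
    const entries = await store.getMetadata(keys, 'DEFAULT');
    // entries maps each found key to { metadata }, mirroring getMetadataByKeys above.
    return Object.keys(entries).map((key) => entries[key].metadata);
}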
@@ -15176,12 +15454,30 @@ class NimbusSqliteStore {
 const upsertOperation = table.entriesToUpsertOperations(entries, segment);
 return this.batchOperationAsPromise([upsertOperation]);
 }
+setMetadata(entries, segment) {
+const table = this.getTable(segment);
+const operation = this.plugin.supportsBatchUpdates === undefined ||
+this.plugin.supportsBatchUpdates() === false
+? table.entriesToUpsertOperations(entries, segment)
+: table.metadataToUpdateOperations(entries, segment);
+return this.batchOperationAsPromise([operation]);
+}
 batchOperations(operations) {
 const sqliteOperations = operations.reduce((acc, cur) => {
 if (cur.type === 'setEntries') {
 const table = this.getTable(cur.segment);
 acc.push(table.entriesToUpsertOperations(cur.entries, cur.segment));
 }
+else if (cur.type === 'setMetadata') {
+const table = this.getTable(cur.segment);
+if (this.plugin.supportsBatchUpdates === undefined ||
+this.plugin.supportsBatchUpdates() === false) {
+acc.push(table.entriesToUpsertOperations(cur.entries, cur.segment));
+}
+else {
+acc.push(table.metadataToUpdateOperations(cur.entries, cur.segment));
+}
+}
 else {
 acc.push(this.idsToDeleteOperation(cur.ids, cur.segment));
 }
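Both setMetadata and the new 'setMetadata' branch of batchOperations feature-detect the plugin: only when supportsBatchUpdates exists and returns true do they emit metadata-only update operations; otherwise they fall back to full-row upserts. A minimal sketch of that capability check, with an illustrative helper name:

// pickMetadataOperation is an illustrative helper, not package API.
function pickMetadataOperation(plugin, table, entries, segment) {
    const canBatchUpdate = plugin.supportsBatchUpdates !== undefined && plugin.supportsBatchUpdates() === true;
    return canBatchUpdate
        ? table.metadataToUpdateOperations(entries, segment) // metadata column only
        : table.entriesToUpsertOperations(entries, segment); // full-row upsert fallback
}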
@@ -15198,8 +15494,15 @@ class NimbusSqliteStore {
 this.plugin
 .registerOnChangedListener(async (changes) => {
 const durableChanges = changes.map((c) => {
+let type = c.type === 'upsert' ? 'setEntries' : 'evictEntries';
+// if our context contains a type then set that as our main level type
+// allows us in the future of updates to specify the segment change happening
+// example being update call on metadata only or updating data
+if (c.type === 'update' && c.context.type !== undefined) {
+type = c.context.type;
+}
 return {
-type
+type,
 ids: c.keys,
 isExternalChange: false,
 segment: c.context.segment,
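The change listener now derives the durable change type from the SQLite change: upserts map to 'setEntries', deletes to 'evictEntries', and 'update' changes may carry a more specific type (such as 'setMetadata') in their context. A compact sketch of that mapping; the helper name is illustrative:

// toDurableChangeType mirrors the logic added above; it is not a package export.
function toDurableChangeType(change) {
    if (change.type === 'update' && change.context.type !== undefined) {
        return change.context.type; // e.g. 'setMetadata' from metadataToUpdateOperations
    }
    return change.type === 'upsert' ? 'setEntries' : 'evictEntries';
}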
@@ -15266,6 +15569,10 @@ class AbstractKeyValueDataTable {
 }, reject);
 });
 }
+getMetadataByKeys(_keys) {
+// eslint-disable-next-line @salesforce/lds/no-error-in-production
+throw new Error(`There is no metadata in the ${this.tableName} table.`);
+}
 getAll() {
 const getAllQuery = `SELECT ${this.columnNames.join(',')} FROM ${this.tableName}`;
 return new Promise((resolve, reject) => {
@@ -15291,6 +15598,10 @@ class AbstractKeyValueDataTable {
 }, []),
 };
 }
+metadataToUpdateOperations(_entries, _segment) {
+// eslint-disable-next-line @salesforce/lds/no-error-in-production
+throw new Error(`There is no metadata in the ${this.tableName} table.`);
+}
 mapToDurableEntries(sqliteResult) {
 return sqliteResult.rows.reduce((entries, row) => {
 const [key, stringifiedData] = row;
@@ -15322,6 +15633,7 @@ class LdsDraftIdMapDataTable extends AbstractKeyValueDataTable {
 }

 // so eslint doesn't complain about nimbus
+/* global __nimbus */
 let baseDurableStore;
 function getNimbusDurableStore(plugin) {
 if (baseDurableStore === undefined) {
@@ -15527,6 +15839,7 @@ function setupInspection(luvio) {
 * For full license text, see the LICENSE.txt file
 */

+
 class EventEmitter {
 constructor() {
 // @ts-ignore typescript doesn't like us setting this to an empty object for some reason
@@ -16578,6 +16891,8 @@ function primingSessionFactory(config) {
 }

 // so eslint doesn't complain about nimbus
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
+/* global __nimbus */
 let lazyDraftQueue;
 let lazyDraftManager;
 let lazyLuvio;
@@ -16630,6 +16945,7 @@ function getRuntime() {
 const gqlEnv = makeEnvironmentGraphqlAware(baseEnv);
 const durableEnv = makeDurable(gqlEnv, {
 durableStore: recordDenormingStore,
+enableDurableMetadataRefresh: ldsMetadataRefreshEnabled.isOpen({ fallback: false }),
 });
 getIngestRecords = durableEnv.getIngestStagingStoreRecords;
 getIngestMetadata = durableEnv.getIngestStagingStoreMetadata;
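The durable-metadata refresh path is gated by lds.metadataRefreshEnabled with a fallback of false, so a closed or missing gate leaves the behavior off. A minimal sketch of how such a gate-driven option reaches makeDurable; the other makeDurable options (such as the durable store itself) are elided here:

import ldsMetadataRefreshEnabled from '@salesforce/gate/lds.metadataRefreshEnabled';

// Only the new option is shown; durableStore and friends are omitted in this sketch.
const durableOptions = {
    // A closed gate (or an unavailable value) keeps metadata refresh disabled.
    enableDurableMetadataRefresh: ldsMetadataRefreshEnabled.isOpen({ fallback: false }),
};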
@@ -16731,4 +17047,4 @@ register({
 });

 export { getRuntime, registerReportObserver, reportGraphqlQueryParseError };
-// version: 1.
+// version: 1.236.0-036823f57