@salesforce/lds-network-adapter 0.1.0-dev1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +82 -0
- package/dist/ldsNetwork.js +472 -0
- package/dist/types/dispatch/dedupe.d.ts +10 -0
- package/dist/types/dispatch/execute-aggregate-ui.d.ts +26 -0
- package/dist/types/dispatch/main.d.ts +9 -0
- package/dist/types/dispatch/records.d.ts +3 -0
- package/dist/types/dispatch/related-lists.d.ts +3 -0
- package/dist/types/instrumentation.d.ts +17 -0
- package/dist/types/language.d.ts +16 -0
- package/dist/types/main.d.ts +8 -0
- package/dist/types/token-bucket.d.ts +28 -0
- package/dist/types/uiapi-base.d.ts +1 -0
- package/package.json +32 -0
package/LICENSE.txt
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
Terms of Use
|
|
2
|
+
|
|
3
|
+
Copyright 2022 Salesforce, Inc. All rights reserved.
|
|
4
|
+
|
|
5
|
+
These Terms of Use govern the download, installation, and/or use of this
|
|
6
|
+
software provided by Salesforce, Inc. ("Salesforce") (the "Software"), were
|
|
7
|
+
last updated on April 15, 2022, and constitute a legally binding
|
|
8
|
+
agreement between you and Salesforce. If you do not agree to these Terms of
|
|
9
|
+
Use, do not install or use the Software.
|
|
10
|
+
|
|
11
|
+
Salesforce grants you a worldwide, non-exclusive, no-charge, royalty-free
|
|
12
|
+
copyright license to reproduce, prepare derivative works of, publicly
|
|
13
|
+
display, publicly perform, sublicense, and distribute the Software and
|
|
14
|
+
derivative works subject to these Terms. These Terms shall be included in
|
|
15
|
+
all copies or substantial portions of the Software.
|
|
16
|
+
|
|
17
|
+
Subject to the limited rights expressly granted hereunder, Salesforce
|
|
18
|
+
reserves all rights, title, and interest in and to all intellectual
|
|
19
|
+
property subsisting in the Software. No rights are granted to you hereunder
|
|
20
|
+
other than as expressly set forth herein. Users residing in countries on
|
|
21
|
+
the United States Office of Foreign Assets Control sanction list, or which
|
|
22
|
+
are otherwise subject to a US export embargo, may not use the Software.
|
|
23
|
+
|
|
24
|
+
Implementation of the Software may require development work, for which you
|
|
25
|
+
are responsible. The Software may contain bugs, errors and
|
|
26
|
+
incompatibilities and is made available on an AS IS basis without support,
|
|
27
|
+
updates, or service level commitments.
|
|
28
|
+
|
|
29
|
+
Salesforce reserves the right at any time to modify, suspend, or
|
|
30
|
+
discontinue, the Software (or any part thereof) with or without notice. You
|
|
31
|
+
agree that Salesforce shall not be liable to you or to any third party for
|
|
32
|
+
any modification, suspension, or discontinuance.
|
|
33
|
+
|
|
34
|
+
You agree to defend Salesforce against any claim, demand, suit or
|
|
35
|
+
proceeding made or brought against Salesforce by a third party arising out
|
|
36
|
+
of or accruing from (a) your use of the Software, and (b) any application
|
|
37
|
+
you develop with the Software that infringes any copyright, trademark,
|
|
38
|
+
trade secret, trade dress, patent, or other intellectual property right of
|
|
39
|
+
any person or defames any person or violates their rights of publicity or
|
|
40
|
+
privacy (each a "Claim Against Salesforce"), and will indemnify Salesforce
|
|
41
|
+
from any damages, attorney fees, and costs finally awarded against
|
|
42
|
+
Salesforce as a result of, or for any amounts paid by Salesforce under a
|
|
43
|
+
settlement approved by you in writing of, a Claim Against Salesforce,
|
|
44
|
+
provided Salesforce (x) promptly gives you written notice of the Claim
|
|
45
|
+
Against Salesforce, (y) gives you sole control of the defense and
|
|
46
|
+
settlement of the Claim Against Salesforce (except that you may not settle
|
|
47
|
+
any Claim Against Salesforce unless it unconditionally releases Salesforce
|
|
48
|
+
of all liability), and (z) gives you all reasonable assistance, at your
|
|
49
|
+
expense.
|
|
50
|
+
|
|
51
|
+
WITHOUT LIMITING THE GENERALITY OF THE FOREGOING, THE SOFTWARE IS NOT
|
|
52
|
+
SUPPORTED AND IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
53
|
+
IMPLIED. IN NO EVENT SHALL SALESFORCE HAVE ANY LIABILITY FOR ANY DAMAGES,
|
|
54
|
+
INCLUDING, BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL, INCIDENTAL,
|
|
55
|
+
PUNITIVE, OR CONSEQUENTIAL DAMAGES, OR DAMAGES BASED ON LOST PROFITS, DATA,
|
|
56
|
+
OR USE, IN CONNECTION WITH THE SOFTWARE, HOWEVER CAUSED AND WHETHER IN
|
|
57
|
+
CONTRACT, TORT, OR UNDER ANY OTHER THEORY OF LIABILITY, WHETHER OR NOT YOU
|
|
58
|
+
HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
|
|
59
|
+
|
|
60
|
+
These Terms of Use shall be governed exclusively by the internal laws of
|
|
61
|
+
the State of California, without regard to its conflicts of laws
|
|
62
|
+
rules. Each party hereby consents to the exclusive jurisdiction of the
|
|
63
|
+
state and federal courts located in San Francisco County, California to
|
|
64
|
+
adjudicate any dispute arising out of or relating to these Terms of Use and
|
|
65
|
+
the download, installation, and/or use of the Software. Except as expressly
|
|
66
|
+
stated herein, these Terms of Use constitute the entire agreement between
|
|
67
|
+
the parties, and supersede all prior and contemporaneous agreements,
|
|
68
|
+
proposals, or representations, written or oral, concerning their subject
|
|
69
|
+
matter. No modification, amendment, or waiver of any provision of these
|
|
70
|
+
Terms of Use shall be effective unless it is by an update to these Terms of
|
|
71
|
+
Use that Salesforce makes available, or is in writing and signed by the
|
|
72
|
+
party against whom the modification, amendment, or waiver is to be
|
|
73
|
+
asserted.
|
|
74
|
+
|
|
75
|
+
Data Privacy: Salesforce may collect, process, and store device,
|
|
76
|
+
system, and other information related to your use of the Software. This
|
|
77
|
+
information includes, but is not limited to, IP address, user metrics, and
|
|
78
|
+
other data ("Usage Data"). Salesforce may use Usage Data for analytics,
|
|
79
|
+
product development, and marketing purposes. You acknowledge that files
|
|
80
|
+
generated in conjunction with the Software may contain sensitive or
|
|
81
|
+
confidential data, and you are solely responsible for anonymizing and
|
|
82
|
+
protecting such data.
|
|
@@ -0,0 +1,472 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Copyright (c) 2022, Salesforce, Inc.,
|
|
3
|
+
* All rights reserved.
|
|
4
|
+
* For full license text, see the LICENSE.txt file
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { HttpStatusCode } from '@luvio/engine';
|
|
8
|
+
|
|
9
|
+
// Cached references to built-ins used throughout this module (guards against
// later patching of the globals and mirrors the package's language.d.ts).
const { parse, stringify } = JSON;
const { join, push, unshift } = Array.prototype;
const { isArray } = Array;
const { entries, keys } = Object;

// Base path of all Salesforce UI API REST endpoints targeted by this adapter.
const UI_API_BASE_URI = '/services/data/v66.0/ui-api';
|
|
16
|
+
// No-op instrumentation hooks. Callers can override any subset via instrument()
// to receive telemetry about chunking, deduping, aggregate-ui retries, and
// rate limiting. Hooks taking `_cb` receive a lazy callback so the metric
// payload is only computed when the hook actually wants it.
let instrumentation = {
    aggregateUiChunkCount: (_cb) => { },
    aggregateUiConnectError: () => { },
    duplicateRequest: (_cb) => { },
    getRecordAggregateInvoke: () => { },
    getRecordAggregateResolve: (_cb) => { },
    getRecordAggregateReject: (_cb) => { },
    getRecordAggregateRetry: () => { },
    getRecordNormalInvoke: () => { },
    networkRateLimitExceeded: () => { },
};
/**
 * Merges caller-provided hooks over the default no-op instrumentation.
 * Partial overrides are allowed; unspecified hooks keep their current value.
 */
function instrument(newInstrumentation) {
    instrumentation = Object.assign(instrumentation, newInstrumentation);
}
|
|
30
|
+
|
|
31
|
+
// referenceId prefix for composite sub-requests sent to aggregate-ui
const LDS_RECORDS_AGGREGATE_UI = 'LDS_Records_AggregateUi';
// Boundary which represents the limit that we start chunking at,
// determined by comma separated string length of fields
const MAX_STRING_LENGTH_PER_CHUNK = 10000;
// UIAPI limit: maximum number of sub-requests per aggregate-ui call
const MAX_AGGREGATE_UI_CHUNK_LIMIT = 50;
|
|
37
|
+
/**
 * Builds a synthetic FetchResponse-shaped object for a successful (HTTP 200)
 * result, used when stitching aggregate-ui sub-responses back together.
 * @param body payload to attach to the response
 */
function createOkResponse(body) {
    const okResponse = {
        status: HttpStatusCode.Ok,
        body,
        statusText: 'ok',
        headers: {},
        ok: true,
    };
    return okResponse;
}
|
|
46
|
+
/**
 * Maps an HTTP status code to the human-readable statusText used in
 * synthetic error responses. Unknown codes get a descriptive fallback.
 */
function getErrorResponseText(status) {
    if (status === HttpStatusCode.Ok) {
        return 'OK';
    }
    if (status === HttpStatusCode.NotModified) {
        return 'Not Modified';
    }
    if (status === HttpStatusCode.NotFound) {
        return 'Not Found';
    }
    if (status === HttpStatusCode.BadRequest) {
        return 'Bad Request';
    }
    if (status === HttpStatusCode.ServerError) {
        return 'Server Error';
    }
    return `Unexpected HTTP Status Code: ${status}`;
}
|
|
62
|
+
/**
 * Builds a synthetic FetchResponse-shaped error object for the given status.
 * @param status HTTP status code of the failure
 * @param body error payload to attach
 */
function createErrorResponse(status, body) {
    const errorResponse = {
        status,
        body,
        statusText: getErrorResponseText(status),
        headers: {},
        ok: false,
    };
    return errorResponse;
}
|
|
71
|
+
/**
 * True when a field value is a nested (spanning) record: a non-null,
 * non-array object. Scalars, null, and arrays are plain field values.
 */
function isSpanningRecord(fieldValue) {
    if (fieldValue === null || Array.isArray(fieldValue)) {
        return false;
    }
    return typeof fieldValue === 'object';
}
|
|
74
|
+
/**
 * Merges the fields of `second` into `first` (mutating `first`).
 * When both sides carry a spanning record for the same field name, the
 * nested records are merged recursively; otherwise the source field wins.
 * @returns `first`, for chaining (used as a reduce seed)
 */
function mergeRecordFields(first, second) {
    const targetFields = first.fields;
    const sourceFields = second.fields;
    for (const fieldName of Object.keys(sourceFields)) {
        const sourceField = sourceFields[fieldName];
        const targetField = targetFields[fieldName];
        if (isSpanningRecord(sourceField.value) && targetField !== undefined) {
            // Both sides hold a nested record: merge their field maps in place.
            mergeRecordFields(targetField.value, sourceField.value);
        }
        else {
            // Scalar field, or target has no entry yet: source overwrites.
            targetFields[fieldName] = sourceField;
        }
    }
    return first;
}
|
|
94
|
+
/**
 * Dispatches a chunked getRecord through the aggregate-ui endpoint and folds
 * the composite sub-responses back into a single getRecord-shaped response.
 * Only used for large getRecord requests that would otherwise trip a query
 * length exception on the server.
 *
 * Rejections (and non-200 sub-responses) surface as synthetic error
 * responses; every failure path also reports via instrumentation.
 */
function dispatchSplitRecordAggregateUiAction(recordId, networkAdapter, resourceRequest, resourceRequestContext) {
    instrumentation.getRecordAggregateInvoke();
    return networkAdapter(resourceRequest, resourceRequestContext).then((resp) => {
        const { body } = resp;
        // The payload is an aggregate-ui composite response, which callers do
        // not understand natively; massage it into a getRecord response.
        if (body === null ||
            body === undefined ||
            body.compositeResponse === undefined ||
            body.compositeResponse.length === 0) {
            // We shouldn't even get into this state - a 200 with no body?
            throw createErrorResponse(HttpStatusCode.ServerError, {
                error: 'No response body in executeAggregateUi found',
            });
        }
        let merged = null;
        const subResponses = body.compositeResponse;
        for (let i = 0, len = subResponses.length; i < len; i += 1) {
            const subResponse = subResponses[i];
            if (subResponse.httpStatusCode !== HttpStatusCode.Ok) {
                // Any failing chunk fails the whole logical getRecord.
                instrumentation.getRecordAggregateReject(() => recordId);
                throw createErrorResponse(HttpStatusCode.ServerError, {
                    error: subResponse.message,
                });
            }
            merged = merged === null ? subResponse.body : mergeRecordFields(merged, subResponse.body);
        }
        instrumentation.getRecordAggregateResolve(() => {
            return {
                recordId,
                apiName: merged.apiName,
            };
        });
        return createOkResponse(merged);
    }, (err) => {
        instrumentation.getRecordAggregateReject(() => recordId);
        // rethrow error
        throw err;
    });
}
|
|
137
|
+
/**
 * Decides whether a getRecord call must be chunked through aggregate-ui.
 *
 * The inputs are the comma-joined `fields` / `optionalFields` query strings
 * (see the call site passing fieldsString/optionalFieldsString), so `.length`
 * measures total string length, not field count. The previous `*Array`
 * parameter names misleadingly suggested arrays.
 */
function shouldUseAggregateUiForGetRecord(fieldsString, optionalFieldsString) {
    // Chunk once the combined field list is long enough to risk a SOQL
    // query-length error on the server.
    return fieldsString.length + optionalFieldsString.length >= MAX_STRING_LENGTH_PER_CHUNK;
}
|
|
140
|
+
/**
 * Builds the URL of one aggregate-ui sub-request: the original request's
 * base URI and path plus URL-encoded `fields`/`optionalFields` query params
 * for this chunk. Absent or empty field lists are omitted from the query.
 */
function buildAggregateUiUrl(params, resourceRequest) {
    const { fields, optionalFields } = params;
    const queryParts = [];
    if (fields !== undefined && fields.length > 0) {
        queryParts.push(`fields=${encodeURIComponent(fields.join(','))}`);
    }
    if (optionalFields !== undefined && optionalFields.length > 0) {
        queryParts.push(`optionalFields=${encodeURIComponent(optionalFields.join(','))}`);
    }
    return `${resourceRequest.baseUri}${resourceRequest.basePath}?${queryParts.join('&')}`;
}
|
|
153
|
+
/**
 * Splits a large getRecord field list into aggregate-ui sub-requests.
 *
 * All required `fields` go in one leading chunk; `optionalFields` are split
 * into chunks sized by the average field-name length, capped at the UIAPI
 * sub-request limit (MAX_AGGREGATE_UI_CHUNK_LIMIT, minus one slot reserved
 * for the fields chunk).
 *
 * @param resourceRequest the original getRecord ResourceRequest (supplies baseUri/basePath)
 * @param recordsCompositeRequest field arrays plus their comma-joined string lengths
 * @returns the CompositeRequest array ({url, referenceId} entries) for aggregate-ui
 */
function buildGetRecordByFieldsCompositeRequest(resourceRequest, recordsCompositeRequest) {
    const { fieldsArray, optionalFieldsArray, fieldsLength, optionalFieldsLength } = recordsCompositeRequest;
    // Formula: # of fields per chunk = floor(max length per chunk / avg field length)
    const averageFieldStringLength = Math.floor((fieldsLength + optionalFieldsLength) / (fieldsArray.length + optionalFieldsArray.length));
    // Clamp to >= 1: if the average field name were longer than
    // MAX_STRING_LENGTH_PER_CHUNK the division would yield 0 and the
    // slicing loop below (i += fieldsPerChunk) would never terminate.
    const fieldsPerChunk = Math.max(1, Math.floor(MAX_STRING_LENGTH_PER_CHUNK / averageFieldStringLength));
    const optionalFieldsChunks = [];
    // Slice optional tracked fields into uniformly sized chunks
    for (let i = 0, j = optionalFieldsArray.length; i < j; i += fieldsPerChunk) {
        const newChunk = optionalFieldsArray.slice(i, i + fieldsPerChunk);
        push.call(optionalFieldsChunks, newChunk);
    }
    const compositeRequest = [];
    // Add fields as one chunk at the beginning of the compositeRequest
    if (fieldsArray.length > 0) {
        const url = buildAggregateUiUrl({
            fields: fieldsArray,
        }, resourceRequest);
        push.call(compositeRequest, {
            url,
            referenceId: `${LDS_RECORDS_AGGREGATE_UI}_fields`,
        });
    }
    // Make sure we don't exceed the max subquery chunk limit for aggUi by capping the amount
    // of optionalFields subqueries at MAX_AGGREGATE_UI_CHUNK_LIMIT - 1 (first chunk is for fields)
    const maxNumberOfAllowableOptionalFieldsChunks = MAX_AGGREGATE_UI_CHUNK_LIMIT - 1;
    const optionalFieldsChunksLength = Math.min(optionalFieldsChunks.length, maxNumberOfAllowableOptionalFieldsChunks);
    for (let i = 0; i < optionalFieldsChunksLength; i += 1) {
        const fieldChunk = optionalFieldsChunks[i];
        const url = buildAggregateUiUrl({
            optionalFields: fieldChunk,
        }, resourceRequest);
        push.call(compositeRequest, {
            url,
            referenceId: `${LDS_RECORDS_AGGREGATE_UI}_optionalFields_${i}`,
        });
    }
    return compositeRequest;
}
|
|
191
|
+
|
|
192
|
+
// Path prefixes used to recognize single-record getRecord requests
// (batch record requests are explicitly excluded in matchRecordsHandlers).
const UIAPI_RECORDS_PATH = `${UI_API_BASE_URI}/records`;
const UIAPI_RECORDS_BATCH_PATH = `${UI_API_BASE_URI}/records/batch/`;
// Server errorCode signaling the SOQL query exceeded complexity limits;
// triggers a retry through aggregate-ui.
const QUERY_TOO_COMPLICATED_ERROR_CODE = 'QUERY_TOO_COMPLICATED';
|
|
195
|
+
/**
 * True when a rejected fetch represents a QUERY_TOO_COMPLICATED failure:
 * a 400 response whose body echoes the 400 status and carries the
 * QUERY_TOO_COMPLICATED errorCode. Such failures are retried via aggregate-ui.
 */
function fetchResponseIsQueryTooComplicated(error) {
    const { status, body } = error;
    if (status !== HttpStatusCode.BadRequest || body === undefined) {
        return false;
    }
    return (body.statusCode === HttpStatusCode.BadRequest &&
        body.errorCode === QUERY_TOO_COMPLICATED_ERROR_CODE);
}
|
|
203
|
+
/*
 * Takes the wrapped getRecord request, builds the aggregate-ui composite
 * payload, and dispatches it via dispatchSplitRecordAggregateUiAction.
 */
function buildAndDispatchGetRecordAggregateUi(recordId, req, params) {
    const { networkAdapter, resourceRequest, resourceRequestContext } = req;
    const compositeRequest = buildGetRecordByFieldsCompositeRequest(resourceRequest, params);
    // W-12245125: Emit chunk size metrics
    instrumentation.aggregateUiChunkCount(() => compositeRequest.length);
    // Synthesize a POST to the aggregate-ui endpoint carrying the chunked
    // sub-requests; priority is inherited from the original request.
    const aggregateUiResourceRequest = {
        baseUri: UI_API_BASE_URI,
        basePath: '/aggregate-ui',
        method: 'post',
        priority: resourceRequest.priority,
        urlParams: {},
        body: { compositeRequest },
        queryParams: {},
        headers: {},
    };
    return dispatchSplitRecordAggregateUiAction(recordId, networkAdapter, aggregateUiResourceRequest, resourceRequestContext);
}
|
|
226
|
+
/**
 * Dispatcher for single-record getRecord requests.
 *
 * Large field lists are routed through aggregate-ui up front; normal requests
 * go through the default dispatcher and fall back to aggregate-ui only when
 * the server rejects them with QUERY_TOO_COMPLICATED.
 *
 * Fixes in this revision: corrected "recieved" typo in the dev-mode error
 * message, deduplicated the aggregate-params object that was built twice,
 * and used the cached `isArray` consistently (one branch used Array.isArray).
 */
const getRecordDispatcher = (req) => {
    const { resourceRequest, networkAdapter, resourceRequestContext } = req;
    const { queryParams, urlParams } = resourceRequest;
    const { fields, optionalFields } = queryParams;
    if (process.env.NODE_ENV !== 'production') {
        if (typeof urlParams.recordId !== 'string') {
            throw new Error(`Invalid recordId: expected string, received "${typeof urlParams.recordId}"`);
        }
    }
    const recordId = urlParams.recordId;
    // isArray already rejects undefined; the extra check is kept for parity of intent.
    const fieldsArray = fields !== undefined && isArray(fields) ? fields : [];
    const optionalFieldsArray = optionalFields !== undefined && isArray(optionalFields)
        ? optionalFields
        : [];
    const fieldsString = fieldsArray.join(',');
    const optionalFieldsString = optionalFieldsArray.join(',');
    // Shared payload for both aggregate-ui code paths below.
    const aggregateParams = {
        fieldsArray,
        optionalFieldsArray,
        fieldsLength: fieldsString.length,
        optionalFieldsLength: optionalFieldsString.length,
    };
    const salesforceRequest = {
        networkAdapter,
        resourceRequest,
        resourceRequestContext,
    };
    // Don't submit a megarequest to UIAPI due to SOQL limit reasons.
    // Split and aggregate if needed
    const useAggregateUi = shouldUseAggregateUiForGetRecord(fieldsString, optionalFieldsString);
    if (useAggregateUi) {
        return buildAndDispatchGetRecordAggregateUi(recordId, salesforceRequest, aggregateParams);
    }
    return defaultDispatcher(req).catch((err) => {
        if (fetchResponseIsQueryTooComplicated(err)) {
            // Retry with aggregateUi to see if we can avoid Query Too Complicated
            return buildAndDispatchGetRecordAggregateUi(recordId, salesforceRequest, aggregateParams);
        }
        throw err;
    });
};
|
|
276
|
+
/**
 * Routes GET requests for a single record (/records, but not /records/batch/)
 * to the getRecord dispatcher. Returns null when this handler does not apply.
 */
function matchRecordsHandlers(path, resourceRequest) {
    if (resourceRequest.method.toLowerCase() !== 'get') {
        return null;
    }
    const isSingleRecordPath = path.startsWith(UIAPI_RECORDS_PATH) &&
        path.startsWith(UIAPI_RECORDS_BATCH_PATH) === false;
    return isSingleRecordPath ? getRecordDispatcher : null;
}
|
|
285
|
+
|
|
286
|
+
/**
 * Pass-through dispatcher: forwards the wrapped request straight to the
 * underlying network adapter with its context.
 */
const defaultDispatcher = (req) => req.networkAdapter(req.resourceRequest, req.resourceRequestContext);
|
|
290
|
+
/**
 * Selects the dispatcher for a request from its full path: the records
 * handler when it matches, otherwise the pass-through default dispatcher.
 */
function getDispatcher(resourceRequest) {
    const { basePath, baseUri } = resourceRequest;
    const recordsDispatcher = matchRecordsHandlers(`${baseUri}${basePath}`, resourceRequest);
    return recordsDispatcher !== null ? recordsDispatcher : defaultDispatcher;
}
|
|
299
|
+
|
|
300
|
+
// In-flight GET requests keyed by transaction key. Each value is the list of
// pending {resolve, reject, resourceRequest} handlers awaiting that response.
// Object.create(null) avoids prototype-key collisions for arbitrary keys.
const inflightRequests = Object.create(null);
const TRANSACTION_KEY_SEP = '::';
const EMPTY_STRING = '';
|
|
303
|
+
/**
 * True when the wrapped request may be deduped: its HTTP method is GET, or
 * its context marks it as GET-equivalent (luvioRequestMethod === 'get').
 */
function isResourceRequestDedupable(salesforceRequest) {
    const { resourceRequest, resourceRequestContext } = salesforceRequest;
    if (resourceRequest.method.toLowerCase() === 'get') {
        return true;
    }
    return resourceRequestContext && resourceRequestContext.luvioRequestMethod === 'get';
}
|
|
308
|
+
/**
 * Builds the dedupe key for a request from its path, headers, query params,
 * and (for GET-equivalent requests only) its body. Two requests with the
 * same key are treated as identical in-flight transactions.
 */
function getTransactionKey(req) {
    const { baseUri, basePath, queryParams, headers, body } = req.resourceRequest;
    const path = `${baseUri}${basePath}`;
    const headersString = stringify(headers);
    const queryParamsString = queryParams ? stringify(queryParams) : EMPTY_STRING;
    // The body only participates for dedupable (GET-like) requests, e.g. a
    // POST overridden to behave as a GET.
    const bodyString = body && isResourceRequestDedupable(req)
        ? stringify(body)
        : EMPTY_STRING;
    return `${path}${TRANSACTION_KEY_SEP}${headersString}${TRANSACTION_KEY_SEP}${queryParamsString}${bodyString}`;
}
|
|
319
|
+
/**
 * Finds an in-flight request that fulfills this one via the request's custom
 * `fulfill` predicate (a "similar but not identical" match). Returns the
 * matching transaction key, or null when no deduper is set or nothing matches.
 */
function getFulfillingRequest(inflightRequests, resourceRequest) {
    const { fulfill } = resourceRequest;
    if (fulfill === undefined) {
        return null;
    }
    for (const [transactionKey, handlers] of entries(inflightRequests)) {
        // check fulfillment against only the first handler ([0]) because it's equal or
        // fulfills all subsequent handlers in the array
        const inflightResourceRequest = handlers[0].resourceRequest;
        if (fulfill(inflightResourceRequest, resourceRequest) === true) {
            return transactionKey;
        }
    }
    return null;
}
|
|
336
|
+
/**
Dedupes network requests being made to Salesforce APIs
This function is only designed to dedupe GET requests.

If POST/PUT/PATCH/DELETE requests need to be deduped, that should be handled
on the server instead of here.
*/
const dedupeRequest = (req) => {
    const { resourceRequest } = req;
    // Dev-mode guard: callers are expected to pre-filter with isResourceRequestDedupable.
    if (process.env.NODE_ENV !== 'production') {
        if (!isResourceRequestDedupable(req)) {
            throw new Error('Invalid ResourceRequest that cannot be deduped. Only "get" Requests supported.');
        }
    }
    const transactionKey = getTransactionKey(req);
    // if an identical request is in-flight then queue for its response (do not re-issue the request)
    if (transactionKey in inflightRequests) {
        return new Promise((resolve, reject) => {
            push.call(inflightRequests[transactionKey], {
                resolve,
                reject,
                resourceRequest,
            });
        });
    }
    const dispatch = getDispatcher(resourceRequest);
    // fallback to checking a custom deduper to find a similar (but not identical) request
    const similarTransactionKey = getFulfillingRequest(inflightRequests, resourceRequest);
    if (similarTransactionKey !== null) {
        return new Promise((resolve) => {
            // custom dedupers find similar (not identical) requests. if the similar request fails
            // there's no guarantee the deduped request should fail. thus we re-issue the
            // original request in the case of a failure
            push.call(inflightRequests[similarTransactionKey], {
                resolve,
                reject: function reissueRequest() {
                    resolve(dispatch(req));
                },
                resourceRequest,
            });
        });
    }
    // No identical or similar request in flight: issue the request now and fan
    // the settled result out to every handler queued under this key.
    dispatch(req).then((response) => {
        const handlers = inflightRequests[transactionKey];
        delete inflightRequests[transactionKey];
        // handlers mutate responses so must clone the response for each.
        // the first handler is given the original version to avoid an
        // extra clone (particularly when there's only 1 handler).
        for (let i = 1, len = handlers.length; i < len; i++) {
            const handler = handlers[i];
            handler.resolve(parse(stringify(response)));
        }
        handlers[0].resolve(response);
    }, (error) => {
        const handlers = inflightRequests[transactionKey];
        delete inflightRequests[transactionKey];
        // errors are not cloned: every queued handler rejects with the same object
        for (let i = 0, len = handlers.length; i < len; i++) {
            const handler = handlers[i];
            handler.reject(error);
        }
    });
    // rely on sync behavior of Promise creation to create the list for handlers
    // (the executor runs synchronously, so the key is registered in
    // inflightRequests before the dispatch promise above can possibly settle)
    return new Promise((resolve, reject) => {
        inflightRequests[transactionKey] = [{ resolve, reject, resourceRequest }];
    });
};
|
|
402
|
+
|
|
403
|
+
// Rate-limit settings for the shared token bucket: bursts of up to 100
// requests, refilled at 100 tokens per second.
const RATE_LIMIT_CONFIG = {
    bucketCapacity: 100,
    fillsPerSecond: 100,
};
|
|
407
|
+
class TokenBucket {
    /**
     * Constructs a token-bucket rate limiter. The bucket starts full.
     *
     * @param config.bucketCapacity The token holding capacity of the bucket
     * @param config.fillsPerSecond The number of tokens replenished every second
     */
    constructor(config) {
        const { bucketCapacity, fillsPerSecond } = config;
        this.bucketCapacity = bucketCapacity;
        this.refillTokensPerMilliSecond = fillsPerSecond / 1000;
        this.tokens = bucketCapacity;
        this.lastRefillTime = Date.now();
    }
    /**
     * Refills the bucket based on elapsed time, then attempts to remove the
     * requested number of tokens (expected to be >= 0).
     *
     * @param removeTokens number of tokens to remove from the bucket
     * @returns {boolean} true if the bucket held enough tokens and they were removed
     */
    take(removeTokens) {
        // refill tokens before removing
        this.refill();
        const remainingTokens = this.tokens - removeTokens;
        if (remainingTokens < 0) {
            return false;
        }
        this.tokens = remainingTokens;
        return true;
    }
    // Tops the bucket up with tokens accrued since the last refill, capped at
    // capacity. Note: sub-token (fractional) accrual is discarded on every
    // refill because lastRefillTime always advances to `now`.
    refill() {
        const now = Date.now();
        const elapsedMs = now - this.lastRefillTime;
        // Token counts must be integers; double Bitwise NOT (~~) truncates the
        // product to an integer cheaply (equivalent to Math.floor for the
        // non-negative values seen here).
        const accrued = this.tokens + ~~(elapsedMs * this.refillTokensPerMilliSecond);
        this.tokens = accrued > this.bucketCapacity ? this.bucketCapacity : accrued;
        this.lastRefillTime = now;
    }
}
|
|
448
|
+
// Module-level singleton bucket shared by every adapter created via
// platformNetworkAdapter.
var tokenBucket = new TokenBucket(RATE_LIMIT_CONFIG);
|
|
449
|
+
|
|
450
|
+
/**
 * Wraps a base NetworkAdapter with Salesforce platform behavior:
 * rate-limit accounting, deduplication of GET-like requests, and
 * dispatcher-based routing (e.g. getRecord aggregate-ui splitting).
 *
 * @param baseNetworkAdapter the underlying adapter that performs the fetch
 * @returns a NetworkAdapter layering the platform behavior on top
 */
function platformNetworkAdapter(baseNetworkAdapter) {
    return (resourceRequest, resourceRequestContext) => {
        // Rate limiting is observe-only here: the request still proceeds,
        // we just record that the limit was exceeded.
        if (tokenBucket.take(1) === false) {
            instrumentation.networkRateLimitExceeded();
        }
        const salesforceRequest = {
            networkAdapter: baseNetworkAdapter,
            resourceRequest: resourceRequest,
            resourceRequestContext: resourceRequestContext,
        };
        // If GET, or overriden to be treated as a GET with resourceRequestContext.networkResourceOverride, then dedupe.
        if (isResourceRequestDedupable(salesforceRequest)) {
            return dedupeRequest(salesforceRequest);
        }
        const dispatch = getDispatcher(resourceRequest);
        return dispatch(salesforceRequest);
    };
}
|
|
471
|
+
|
|
472
|
+
export { platformNetworkAdapter as default, instrument };
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { Dispatcher, SalesforceResourceRequest } from './main';
/** True when the wrapped request is a GET, or is marked GET-equivalent by its context. */
export declare function isResourceRequestDedupable(resourceRequest: SalesforceResourceRequest): boolean;
/**
Dedupes network requests being made to Salesforce APIs
This function is only designed to dedupe GET requests.

If POST/PUT/PATCH/DELETE requests need to be deduped, that should be handled
on the server instead of here.
*/
export declare const dedupeRequest: Dispatcher;
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
// Declarations for the aggregate-ui chunking path used by large getRecord requests.
import type { FetchResponse, NetworkAdapter, ResourceRequest, ResourceRequestContext } from '@luvio/engine';
import type { RecordRepresentation } from '@salesforce/lds-adapters-uiapi';
// UIAPI limit on sub-requests per aggregate-ui call.
export declare const MAX_AGGREGATE_UI_CHUNK_LIMIT = 50;
// One sub-request of an aggregate-ui composite payload.
export interface CompositeRequest {
    url: string;
    referenceId: string;
}
/** Merges the fields of `second` into `first` (mutating `first`); nested spanning records merge recursively. */
export declare function mergeRecordFields(first: RecordRepresentation, second: RecordRepresentation): RecordRepresentation;
/** Invoke executeAggregateUi Aura controller. This is only to be used with large getRecord requests that
 * would otherwise cause a query length exception.
 */
export declare function dispatchSplitRecordAggregateUiAction(recordId: string, networkAdapter: NetworkAdapter, resourceRequest: ResourceRequest, resourceRequestContext: ResourceRequestContext): Promise<FetchResponse<unknown>>;
/** NOTE: despite the parameter names, the inputs are comma-joined field strings, not arrays. */
export declare function shouldUseAggregateUiForGetRecord(fieldsArray: string, optionalFieldsArray: string): boolean;
interface AggregateUiParams {
    fields?: string[];
    optionalFields?: string[];
}
/** Builds the URL of one aggregate-ui sub-request for the given field chunk. */
export declare function buildAggregateUiUrl(params: AggregateUiParams, resourceRequest: ResourceRequest): string;
// Field arrays plus the lengths of their comma-joined string forms.
export interface GetRecordCompositeRequestParams {
    fieldsArray: Array<string>;
    optionalFieldsArray: Array<string>;
    fieldsLength: number;
    optionalFieldsLength: number;
}
/** Splits a large getRecord field list into capped aggregate-ui sub-requests. */
export declare function buildGetRecordByFieldsCompositeRequest(resourceRequest: ResourceRequest, recordsCompositeRequest: GetRecordCompositeRequestParams): CompositeRequest[];
export {};
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import type { FetchResponse, NetworkAdapter, ResourceRequest, ResourceRequestContext } from '@luvio/engine';
// A ResourceRequest bundled with the adapter and context needed to execute it.
export interface SalesforceResourceRequest {
    networkAdapter: NetworkAdapter;
    resourceRequest: ResourceRequest;
    resourceRequestContext: ResourceRequestContext;
}
export type Dispatcher = (req: SalesforceResourceRequest) => Promise<FetchResponse<unknown>>;
/** Pass-through dispatcher: forwards the request straight to the network adapter. */
export declare const defaultDispatcher: Dispatcher;
/** Selects the dispatcher for a request from its path (records handler or default). */
export declare function getDispatcher(resourceRequest: ResourceRequest): Dispatcher;
|
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
import type { ResourceRequest } from '@luvio/engine';
/** True when the request is a POST to the related-list records endpoint. */
export declare function isRelatedListPostRecordsResourceRequest(resourceRequest: ResourceRequest): boolean;
/** Rewrites a related-list-records POST into an equivalent GET ResourceRequest. */
export declare function convertPostRelatedListRecordsToGet(resourceRequest: ResourceRequest): ResourceRequest;
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import type { ResourceRequest } from '@luvio/engine';
// Telemetry hooks for the network layer; defaults are no-ops. Hooks taking a
// `_cb` receive a lazy callback so the metric payload is computed on demand.
export declare let instrumentation: {
    aggregateUiChunkCount: (_cb: () => number) => void;
    aggregateUiConnectError: () => void;
    duplicateRequest: (_cb: () => ResourceRequest) => void;
    getRecordAggregateInvoke: () => void;
    getRecordAggregateResolve: (_cb: () => {
        recordId: string;
        apiName: string;
    }) => void;
    getRecordAggregateReject: (_cb: () => string) => void;
    getRecordAggregateRetry: () => void;
    getRecordNormalInvoke: () => void;
    networkRateLimitExceeded: () => void;
};
/** Merges caller-provided hooks over the current instrumentation (partial overrides allowed). */
export declare function instrument(newInstrumentation: Partial<typeof instrumentation>): void;
export type Instrument = typeof instrument;
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
declare const parse: (text: string, reviver?: ((this: any, key: string, value: any) => any) | undefined) => any, stringify: {
|
|
2
|
+
(value: any, replacer?: ((this: any, key: string, value: any) => any) | undefined, space?: string | number | undefined): string;
|
|
3
|
+
(value: any, replacer?: (string | number)[] | null | undefined, space?: string | number | undefined): string;
|
|
4
|
+
};
|
|
5
|
+
declare const join: (separator?: string | undefined) => string, push: (...items: any[]) => number, unshift: (...items: any[]) => number;
|
|
6
|
+
declare const isArray: (arg: any) => arg is any[];
|
|
7
|
+
declare const entries: {
|
|
8
|
+
<T>(o: {
|
|
9
|
+
[s: string]: T;
|
|
10
|
+
} | ArrayLike<T>): [string, T][];
|
|
11
|
+
(o: {}): [string, any][];
|
|
12
|
+
}, keys: {
|
|
13
|
+
(o: object): string[];
|
|
14
|
+
(o: {}): string[];
|
|
15
|
+
};
|
|
16
|
+
export { isArray as ArrayIsArray, entries as ObjectEntries, keys as ObjectKeys, parse as JSONParse, stringify as JSONStringify, join as ArrayPrototypeJoin, push as ArrayPrototypePush, unshift as ArrayPrototypeUnshift, };
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import type { NetworkAdapter } from '@luvio/engine';
|
|
2
|
+
import type { Instrument } from './instrumentation';
|
|
3
|
+
export { instrument, Instrument } from './instrumentation';
|
|
4
|
+
export default function platformNetworkAdapter(baseNetworkAdapter: NetworkAdapter): NetworkAdapter;
|
|
5
|
+
export type Registration = {
|
|
6
|
+
id: '@salesforce/lds-network-adapter';
|
|
7
|
+
instrument: Instrument;
|
|
8
|
+
};
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
interface RateLimitConfig {
|
|
2
|
+
bucketCapacity: number;
|
|
3
|
+
fillsPerSecond: number;
|
|
4
|
+
}
|
|
5
|
+
export declare const RATE_LIMIT_CONFIG: RateLimitConfig;
|
|
6
|
+
export declare class TokenBucket {
|
|
7
|
+
bucketCapacity: number;
|
|
8
|
+
readonly refillTokensPerMilliSecond: number;
|
|
9
|
+
tokens: number;
|
|
10
|
+
private lastRefillTime;
|
|
11
|
+
/**
|
|
12
|
+
* Constructs an instance of Token Bucket for rate limiting
|
|
13
|
+
*
|
|
14
|
+
* @param bucket The token holding capacity of the bucket
|
|
15
|
+
* @param refillTokensPerSecond The number of tokens replenished every second
|
|
16
|
+
*/
|
|
17
|
+
constructor(config: RateLimitConfig);
|
|
18
|
+
/**
|
|
19
|
+
* Refills the bucket and removes desired number of tokens
|
|
20
|
+
*
|
|
21
|
+
* @param removeTokens number of tokens to be removed from the bucket should be >= 0
|
|
22
|
+
* @returns {boolean} true if removing token was succesful
|
|
23
|
+
*/
|
|
24
|
+
take(removeTokens: number): boolean;
|
|
25
|
+
private refill;
|
|
26
|
+
}
|
|
27
|
+
declare const _default: TokenBucket;
|
|
28
|
+
export default _default;
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export declare const UI_API_BASE_URI = "/services/data/v66.0/ui-api";
|
package/package.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@salesforce/lds-network-adapter",
|
|
3
|
+
"version": "0.1.0-dev1",
|
|
4
|
+
"license": "SEE LICENSE IN LICENSE.txt",
|
|
5
|
+
"description": "LDS shared Network Adapter code for Lightning Platform",
|
|
6
|
+
"main": "dist/ldsNetwork.js",
|
|
7
|
+
"module": "dist/ldsNetwork.js",
|
|
8
|
+
"types": "dist/types/main.d.ts",
|
|
9
|
+
"files": [
|
|
10
|
+
"dist"
|
|
11
|
+
],
|
|
12
|
+
"exports": {
|
|
13
|
+
".": {
|
|
14
|
+
"types": "./dist/types/main.d.ts",
|
|
15
|
+
"import": "./dist/ldsNetwork.js",
|
|
16
|
+
"default": "./dist/ldsNetwork.js"
|
|
17
|
+
}
|
|
18
|
+
},
|
|
19
|
+
"scripts": {
|
|
20
|
+
"prepare": "yarn build",
|
|
21
|
+
"build": "rollup --bundleConfigAsCjs --config rollup.config.js",
|
|
22
|
+
"clean": "rm -rf dist",
|
|
23
|
+
"test:unit": "jest",
|
|
24
|
+
"test:debug": "node --inspect-brk ../../node_modules/.bin/jest --runInBand"
|
|
25
|
+
},
|
|
26
|
+
"dependencies": {
|
|
27
|
+
"@luvio/engine": "0.158.7"
|
|
28
|
+
},
|
|
29
|
+
"devDependencies": {
|
|
30
|
+
"@salesforce/lds-adapters-uiapi": "^0.1.0-dev1"
|
|
31
|
+
}
|
|
32
|
+
}
|