@salesforce/lds-network-fetch 1.332.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +82 -0
- package/dist/ldsNetworkFetch.js +810 -0
- package/dist/types/__mocks__/@salesforce/lds-environment-settings.d.ts +2 -0
- package/dist/types/__mocks__/@salesforce/lds-instrumentation.d.ts +7 -0
- package/dist/types/__mocks__/aura-storage.d.ts +19 -0
- package/dist/types/__mocks__/aura.d.ts +3 -0
- package/dist/types/__mocks__/instrumentation/service.d.ts +33 -0
- package/dist/types/instrumentation.d.ts +24 -0
- package/dist/types/main.d.ts +3 -0
- package/dist/types/networkAdapter.d.ts +27 -0
- package/dist/types/utils.d.ts +23 -0
- package/package.json +43 -0
package/LICENSE.txt
ADDED
@@ -0,0 +1,82 @@
Terms of Use

Copyright 2022 Salesforce, Inc. All rights reserved.

These Terms of Use govern the download, installation, and/or use of this
software provided by Salesforce, Inc. ("Salesforce") (the "Software"), were
last updated on April 15, 2022, and constitute a legally binding
agreement between you and Salesforce. If you do not agree to these Terms of
Use, do not install or use the Software.

Salesforce grants you a worldwide, non-exclusive, no-charge, royalty-free
copyright license to reproduce, prepare derivative works of, publicly
display, publicly perform, sublicense, and distribute the Software and
derivative works subject to these Terms. These Terms shall be included in
all copies or substantial portions of the Software.

Subject to the limited rights expressly granted hereunder, Salesforce
reserves all rights, title, and interest in and to all intellectual
property subsisting in the Software. No rights are granted to you hereunder
other than as expressly set forth herein. Users residing in countries on
the United States Office of Foreign Assets Control sanction list, or which
are otherwise subject to a US export embargo, may not use the Software.

Implementation of the Software may require development work, for which you
are responsible. The Software may contain bugs, errors and
incompatibilities and is made available on an AS IS basis without support,
updates, or service level commitments.

Salesforce reserves the right at any time to modify, suspend, or
discontinue, the Software (or any part thereof) with or without notice. You
agree that Salesforce shall not be liable to you or to any third party for
any modification, suspension, or discontinuance.

You agree to defend Salesforce against any claim, demand, suit or
proceeding made or brought against Salesforce by a third party arising out
of or accruing from (a) your use of the Software, and (b) any application
you develop with the Software that infringes any copyright, trademark,
trade secret, trade dress, patent, or other intellectual property right of
any person or defames any person or violates their rights of publicity or
privacy (each a "Claim Against Salesforce"), and will indemnify Salesforce
from any damages, attorney fees, and costs finally awarded against
Salesforce as a result of, or for any amounts paid by Salesforce under a
settlement approved by you in writing of, a Claim Against Salesforce,
provided Salesforce (x) promptly gives you written notice of the Claim
Against Salesforce, (y) gives you sole control of the defense and
settlement of the Claim Against Salesforce (except that you may not settle
any Claim Against Salesforce unless it unconditionally releases Salesforce
of all liability), and (z) gives you all reasonable assistance, at your
expense.

WITHOUT LIMITING THE GENERALITY OF THE FOREGOING, THE SOFTWARE IS NOT
SUPPORTED AND IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED. IN NO EVENT SHALL SALESFORCE HAVE ANY LIABILITY FOR ANY DAMAGES,
INCLUDING, BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL, INCIDENTAL,
PUNITIVE, OR CONSEQUENTIAL DAMAGES, OR DAMAGES BASED ON LOST PROFITS, DATA,
OR USE, IN CONNECTION WITH THE SOFTWARE, HOWEVER CAUSED AND WHETHER IN
CONTRACT, TORT, OR UNDER ANY OTHER THEORY OF LIABILITY, WHETHER OR NOT YOU
HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

These Terms of Use shall be governed exclusively by the internal laws of
the State of California, without regard to its conflicts of laws
rules. Each party hereby consents to the exclusive jurisdiction of the
state and federal courts located in San Francisco County, California to
adjudicate any dispute arising out of or relating to these Terms of Use and
the download, installation, and/or use of the Software. Except as expressly
stated herein, these Terms of Use constitute the entire agreement between
the parties, and supersede all prior and contemporaneous agreements,
proposals, or representations, written or oral, concerning their subject
matter. No modification, amendment, or waiver of any provision of these
Terms of Use shall be effective unless it is by an update to these Terms of
Use that Salesforce makes available, or is in writing and signed by the
party against whom the modification, amendment, or waiver is to be
asserted.

Data Privacy: Salesforce may collect, process, and store device,
system, and other information related to your use of the Software. This
information includes, but is not limited to, IP address, user metrics, and
other data ("Usage Data"). Salesforce may use Usage Data for analytics,
product development, and marketing purposes. You acknowledge that files
generated in conjunction with the Software may contain sensitive or
confidential data, and you are solely responsible for anonymizing and
protecting such data.
package/dist/ldsNetworkFetch.js
ADDED
@@ -0,0 +1,810 @@
/**
 * Copyright (c) 2022, Salesforce, Inc.,
 * All rights reserved.
 * For full license text, see the LICENSE.txt file
 */

/*
 * ATTENTION!
 * THIS IS A GENERATED FILE FROM https://github.com/salesforce-experience-platform-emu/lds-lightning-platform
 * If you would like to contribute to LDS, please follow the steps outlined in the git repo.
 * Any changes made to this file in p4 will be automatically overwritten.
 * *******************************************************************************************
 */
/* proxy-compat-disable */
import { UIAPI_GET_LAYOUT, UIAPI_GET_LAYOUT_USER_STATE, UIAPI_OBJECT_INFO_BATCH_PATH, UIAPI_OBJECT_INFO_PATH, shouldForceRefresh, createOkResponse as createOkResponse$1, layoutUserStateStorage, layoutUserStateStorageStatsLogger, layoutStorage, layoutStorageStatsLogger, objectInfoStorage, objectInfoStorageStatsLogger, getTransactionKey as getTransactionKey$1 } from 'force/ldsNetwork';
import { HttpStatusCode as HttpStatusCode$1 } from 'force/luvioEngine';

var SnapshotState;
(function (SnapshotState) {
    SnapshotState["Fulfilled"] = "Fulfilled";
    SnapshotState["Unfulfilled"] = "Unfulfilled";
    SnapshotState["Error"] = "Error";
    SnapshotState["Pending"] = "Pending";
    SnapshotState["Stale"] = "Stale";
})(SnapshotState || (SnapshotState = {}));
const { isArray: isArray$1 } = Array;

Promise.resolve();

var StoreErrorStatus;
(function (StoreErrorStatus) {
    StoreErrorStatus[StoreErrorStatus["RESOURCE_NOT_FOUND"] = 404] = "RESOURCE_NOT_FOUND";
})(StoreErrorStatus || (StoreErrorStatus = {}));
var StoreRecordType;
(function (StoreRecordType) {
    StoreRecordType["Error"] = "error";
})(StoreRecordType || (StoreRecordType = {}));
var StoreLinkStateValues$1;
(function (StoreLinkStateValues) {
    StoreLinkStateValues[StoreLinkStateValues["NotPresent"] = 0] = "NotPresent";
    StoreLinkStateValues[StoreLinkStateValues["RefNotPresent"] = 1] = "RefNotPresent";
    StoreLinkStateValues[StoreLinkStateValues["RefPresent"] = 2] = "RefPresent";
    StoreLinkStateValues[StoreLinkStateValues["Null"] = 3] = "Null";
    StoreLinkStateValues[StoreLinkStateValues["Missing"] = 4] = "Missing";
    StoreLinkStateValues[StoreLinkStateValues["Pending"] = 5] = "Pending";
})(StoreLinkStateValues$1 || (StoreLinkStateValues$1 = {}));
var StoreResolveResultState;
(function (StoreResolveResultState) {
    StoreResolveResultState[StoreResolveResultState["Found"] = 0] = "Found";
    StoreResolveResultState[StoreResolveResultState["Error"] = 1] = "Error";
    StoreResolveResultState[StoreResolveResultState["Null"] = 2] = "Null";
    StoreResolveResultState[StoreResolveResultState["NotPresent"] = 3] = "NotPresent";
    StoreResolveResultState[StoreResolveResultState["Stale"] = 4] = "Stale";
})(StoreResolveResultState || (StoreResolveResultState = {}));
var HttpStatusCode;
(function (HttpStatusCode) {
    HttpStatusCode[HttpStatusCode["Ok"] = 200] = "Ok";
    HttpStatusCode[HttpStatusCode["Created"] = 201] = "Created";
    HttpStatusCode[HttpStatusCode["NoContent"] = 204] = "NoContent";
    HttpStatusCode[HttpStatusCode["NotModified"] = 304] = "NotModified";
    HttpStatusCode[HttpStatusCode["BadRequest"] = 400] = "BadRequest";
    HttpStatusCode[HttpStatusCode["Unauthorized"] = 401] = "Unauthorized";
    HttpStatusCode[HttpStatusCode["Forbidden"] = 403] = "Forbidden";
    HttpStatusCode[HttpStatusCode["NotFound"] = 404] = "NotFound";
    HttpStatusCode[HttpStatusCode["ServerError"] = 500] = "ServerError";
    HttpStatusCode[HttpStatusCode["GatewayTimeout"] = 504] = "GatewayTimeout";
})(HttpStatusCode || (HttpStatusCode = {}));
/**
 * A type guard function for determining if an unknown object is a {@link FormData}
 */
function isFormData(obj) {
    return (typeof obj === 'object' &&
        obj !== null &&
        'namedEntries' in obj &&
        isArray$1(obj.namedEntries));
}
/**
 * A type guard function for determining if an unknown object is a {@link FileReference}
 */
function isFileReference(entryValue) {
    return (typeof entryValue === 'object' &&
        entryValue !== null &&
        'isFileReference' in entryValue &&
        entryValue.isFileReference === true);
}
var GraphNodeType;
(function (GraphNodeType) {
    GraphNodeType["Link"] = "Link";
    GraphNodeType["Node"] = "Node";
    GraphNodeType["Error"] = "Error";
    GraphNodeType["Locked"] = "Locked";
})(GraphNodeType || (GraphNodeType = {}));

var StoreLinkStateValues;
(function (StoreLinkStateValues) {
    StoreLinkStateValues[StoreLinkStateValues["NotPresent"] = 0] = "NotPresent";
    StoreLinkStateValues[StoreLinkStateValues["RefNotPresent"] = 1] = "RefNotPresent";
    StoreLinkStateValues[StoreLinkStateValues["RefPresent"] = 2] = "RefPresent";
    StoreLinkStateValues[StoreLinkStateValues["Null"] = 3] = "Null";
    StoreLinkStateValues[StoreLinkStateValues["Missing"] = 4] = "Missing";
    StoreLinkStateValues[StoreLinkStateValues["Pending"] = 5] = "Pending";
})(StoreLinkStateValues || (StoreLinkStateValues = {}));
var FragmentReadResultState;
(function (FragmentReadResultState) {
    FragmentReadResultState[FragmentReadResultState["Missing"] = 0] = "Missing";
    FragmentReadResultState[FragmentReadResultState["Success"] = 1] = "Success";
    FragmentReadResultState[FragmentReadResultState["Error"] = 2] = "Error";
})(FragmentReadResultState || (FragmentReadResultState = {}));
({
    state: FragmentReadResultState.Missing,
});

var ResourceParamType;
(function (ResourceParamType) {
    ResourceParamType[ResourceParamType["UrlParameter"] = 0] = "UrlParameter";
    ResourceParamType[ResourceParamType["QueryParameter"] = 1] = "QueryParameter";
    ResourceParamType[ResourceParamType["Body"] = 2] = "Body";
    ResourceParamType[ResourceParamType["Header"] = 3] = "Header";
})(ResourceParamType || (ResourceParamType = {}));
var TypeCheckShapes;
(function (TypeCheckShapes) {
    TypeCheckShapes[TypeCheckShapes["String"] = 0] = "String";
    TypeCheckShapes[TypeCheckShapes["Boolean"] = 1] = "Boolean";
    TypeCheckShapes[TypeCheckShapes["Number"] = 2] = "Number";
    TypeCheckShapes[TypeCheckShapes["Integer"] = 3] = "Integer";
    TypeCheckShapes[TypeCheckShapes["Unsupported"] = 4] = "Unsupported";
})(TypeCheckShapes || (TypeCheckShapes = {}));
// engine version: 0.156.5-f5fd8c7a

const { keys: keys$1 } = Object;

// we're going to intentionally bundle this small bit of luvio engine code into
// this module to keep it runtime dependency-free
const fetchNetworkAdapter = async (resourceRequest, _resourceRequestContext) => {
    const { baseUri, basePath, body: requestBody, queryParams, method, headers } = resourceRequest;
    const qs = generateQueryString(queryParams);
    const path = `${baseUri}${basePath}${qs}`;
    let body;
    // some endpoints use FormData for POST request body, check here
    // if we have a POST body that is FormData
    if (method === 'post' && isFormData(requestBody)) {
        // we will populate a DOM FormData and pass that to fetch
        const newForm = new FormData();
        for (const { name, value } of requestBody.namedEntries) {
            // if this is a string or real DOM File then we can
            // just add it to FormData
            if (typeof value === 'string' || value instanceof File) {
                newForm.append(name, value);
            }
            // this network adapter doesn't currently support FileReferences
            else if (isFileReference(value)) {
                throw Error(`Luvio fetchNetworkAdapter does not support FileReferences`);
            }
            // else we have a Luvio File that isn't a real DOM file,
            // so we need to turn it into a DOM file
            else {
                const buffer = await value.arrayBuffer();
                newForm.append(name, new File([new Uint8Array(buffer)], value.name, {
                    type: value.type,
                }));
            }
        }
        body = newForm;
    }
    else if (requestBody === null) {
        body = null;
    }
    else {
        body = JSON.stringify(requestBody);
        headers['Content-Type'] = 'application/json';
    }
    const response = await fetch(path, {
        method: method.toUpperCase(),
        headers: generateHeaders(headers),
        body,
    });
    const { status, ok, statusText } = response;
    // coerce headers
    const responseHeaders = {};
    response.headers.forEach((value, key) => {
        responseHeaders[key] = value;
    });
    // parse body
    let responseBody = null;
    if (status !== 204) {
        const contentType = responseHeaders['content-type'];
        responseBody =
            contentType && contentType.startsWith('application/json')
                ? await response.json()
                : await response.text();
    }
    return {
        body: responseBody,
        status,
        statusText,
        ok,
        headers: responseHeaders,
    };
};
function generateQueryString(params) {
    const queryStrings = [];
    for (const key of keys$1(params)) {
        queryStrings.push(`${key}=${params[key]}`);
    }
    if (queryStrings.length > 0) {
        return `?${queryStrings.join('&')}`;
    }
    return '';
}
function generateHeaders(headers) {
    const fetchHeaders = new Headers();
    for (const key of keys$1(headers)) {
        fetchHeaders.set(key, headers[key]);
    }
    return fetchHeaders;
}

/**
 * Copyright (c) 2022, Salesforce, Inc.,
 * All rights reserved.
 * For full license text, see the LICENSE.txt file
 */


const { parse, stringify } = JSON;
const { join, push, unshift } = Array.prototype;
const { isArray } = Array;
const { entries, keys } = Object;

const UI_API_BASE_URI = '/services/data/v63.0/ui-api';

const LDS_RECORDS_AGGREGATE_UI = 'LDS_Records_AggregateUi';
// Boundary which represents the limit that we start chunking at,
// determined by comma separated string length of fields
const MAX_STRING_LENGTH_PER_CHUNK = 10000;
// UIAPI limit
const MAX_AGGREGATE_UI_CHUNK_LIMIT = 50;
function createOkResponse(body) {
    return {
        status: HttpStatusCode$1.Ok,
        body,
        statusText: 'ok',
        headers: {},
        ok: true,
    };
}
function getErrorResponseText(status) {
    switch (status) {
        case HttpStatusCode$1.Ok:
            return 'OK';
        case HttpStatusCode$1.NotModified:
            return 'Not Modified';
        case HttpStatusCode$1.NotFound:
            return 'Not Found';
        case HttpStatusCode$1.BadRequest:
            return 'Bad Request';
        case HttpStatusCode$1.ServerError:
            return 'Server Error';
        default:
            return `Unexpected HTTP Status Code: ${status}`;
    }
}
function createErrorResponse(status, body) {
    return {
        status,
        body,
        statusText: getErrorResponseText(status),
        headers: {},
        ok: false,
    };
}
function isSpanningRecord(fieldValue) {
    return fieldValue !== null && typeof fieldValue === 'object';
}
function mergeRecordFields(first, second) {
    const { fields: targetFields } = first;
    const { fields: sourceFields } = second;
    const fieldNames = keys(sourceFields);
    for (let i = 0, len = fieldNames.length; i < len; i += 1) {
        const fieldName = fieldNames[i];
        const sourceField = sourceFields[fieldName];
        const targetField = targetFields[fieldName];
        if (isSpanningRecord(sourceField.value)) {
            if (targetField === undefined) {
                targetFields[fieldName] = sourceFields[fieldName];
                continue;
            }
            mergeRecordFields(targetField.value, sourceField.value);
            continue;
        }
        targetFields[fieldName] = sourceFields[fieldName];
    }
    return first;
}
/** Invoke executeAggregateUi Aura controller. This is only to be used with large getRecord requests that
 * would otherwise cause a query length exception.
 */
function dispatchSplitRecordAggregateUiAction(recordId, networkAdapter, resourceRequest, resourceRequestContext) {
    return networkAdapter(resourceRequest, resourceRequestContext).then((resp) => {
        const { body } = resp;
        // This response body could be an executeAggregateUi, which we don't natively support.
        // Massage it into looking like a getRecord response.
        if (body === null ||
            body === undefined ||
            body.compositeResponse === undefined ||
            body.compositeResponse.length === 0) {
            // We shouldn't even get into this state - a 200 with no body?
            throw createErrorResponse(HttpStatusCode$1.ServerError, {
                error: 'No response body in executeAggregateUi found',
            });
        }
        const merged = body.compositeResponse.reduce((seed, response) => {
            if (response.httpStatusCode !== HttpStatusCode$1.Ok) {
                throw createErrorResponse(HttpStatusCode$1.ServerError, {
                    error: response.message,
                });
            }
            if (seed === null) {
                return response.body;
            }
            return mergeRecordFields(seed, response.body);
        }, null);
        return createOkResponse(merged);
    }, (err) => {
        // rethrow error
        throw err;
    });
}
function shouldUseAggregateUiForGetRecord(fieldsArray, optionalFieldsArray) {
    return fieldsArray.length + optionalFieldsArray.length >= MAX_STRING_LENGTH_PER_CHUNK;
}
function buildAggregateUiUrl(params, resourceRequest) {
    const { fields, optionalFields } = params;
    const queryString = [];
    if (fields !== undefined && fields.length > 0) {
        const fieldString = join.call(fields, ',');
        push.call(queryString, `fields=${encodeURIComponent(fieldString)}`);
    }
    if (optionalFields !== undefined && optionalFields.length > 0) {
        const optionalFieldString = join.call(optionalFields, ',');
        push.call(queryString, `optionalFields=${encodeURIComponent(optionalFieldString)}`);
    }
    return `${resourceRequest.baseUri}${resourceRequest.basePath}?${join.call(queryString, '&')}`;
}
function buildGetRecordByFieldsCompositeRequest(resourceRequest, recordsCompositeRequest) {
    const { fieldsArray, optionalFieldsArray, fieldsLength, optionalFieldsLength } = recordsCompositeRequest;
    // Formula: # of fields per chunk = floor(max string length per chunk / avg field length)
    const averageFieldStringLength = Math.floor((fieldsLength + optionalFieldsLength) / (fieldsArray.length + optionalFieldsArray.length));
    const fieldsPerChunk = Math.floor(MAX_STRING_LENGTH_PER_CHUNK / averageFieldStringLength);
    const optionalFieldsChunks = [];
    // Do the same for optional tracked fields
    for (let i = 0, j = optionalFieldsArray.length; i < j; i += fieldsPerChunk) {
        const newChunk = optionalFieldsArray.slice(i, i + fieldsPerChunk);
        push.call(optionalFieldsChunks, newChunk);
    }
    const compositeRequest = [];
    // Add fields as one chunk at the beginning of the compositeRequest
    if (fieldsArray.length > 0) {
        const url = buildAggregateUiUrl({
            fields: fieldsArray,
        }, resourceRequest);
        push.call(compositeRequest, {
            url,
            referenceId: `${LDS_RECORDS_AGGREGATE_UI}_fields`,
        });
    }
    // Make sure we don't exceed the max subquery chunk limit for aggUi by capping the amount
    // of optionalFields subqueries at MAX_AGGREGATE_UI_CHUNK_LIMIT - 1 (first chunk is for fields)
    const maxNumberOfAllowableOptionalFieldsChunks = MAX_AGGREGATE_UI_CHUNK_LIMIT - 1;
    const optionalFieldsChunksLength = Math.min(optionalFieldsChunks.length, maxNumberOfAllowableOptionalFieldsChunks);
    for (let i = 0; i < optionalFieldsChunksLength; i += 1) {
        const fieldChunk = optionalFieldsChunks[i];
        const url = buildAggregateUiUrl({
            optionalFields: fieldChunk,
        }, resourceRequest);
        push.call(compositeRequest, {
            url,
            referenceId: `${LDS_RECORDS_AGGREGATE_UI}_optionalFields_${i}`,
        });
    }
    return compositeRequest;
}

const UIAPI_RECORDS_PATH = `${UI_API_BASE_URI}/records`;
const UIAPI_RECORDS_BATCH_PATH = `${UI_API_BASE_URI}/records/batch/`;
const QUERY_TOO_COMPLICATED_ERROR_CODE = 'QUERY_TOO_COMPLICATED';
function fetchResponseIsQueryTooComplicated(error) {
    const { body } = error;
    if (error.status === HttpStatusCode$1.BadRequest && body !== undefined) {
        return (body.statusCode === HttpStatusCode$1.BadRequest &&
            body.errorCode === QUERY_TOO_COMPLICATED_ERROR_CODE);
    }
    return false;
}
/*
 * Takes a ResourceRequest, builds the aggregateUi payload, and dispatches via aggregateUi action
 */
function buildAndDispatchGetRecordAggregateUi(recordId, req, params) {
    const { networkAdapter, resourceRequest, resourceRequestContext } = req;
    const compositeRequest = buildGetRecordByFieldsCompositeRequest(resourceRequest, params);
    const aggregateUiParams = {
        compositeRequest,
    };
    const aggregateUiResourceRequest = {
        baseUri: UI_API_BASE_URI,
        basePath: '/aggregate-ui',
        method: 'post',
        priority: resourceRequest.priority,
        urlParams: {},
        body: aggregateUiParams,
        queryParams: {},
        headers: {},
    };
    return dispatchSplitRecordAggregateUiAction(recordId, networkAdapter, aggregateUiResourceRequest, resourceRequestContext);
}
const getRecordDispatcher = (req) => {
    const { resourceRequest, networkAdapter, resourceRequestContext } = req;
    const { queryParams, urlParams } = resourceRequest;
    const { fields, optionalFields } = queryParams;
    if (process.env.NODE_ENV !== 'production') {
        if (typeof urlParams.recordId !== 'string') {
            throw new Error(`Invalid recordId: expected string, received "${typeof urlParams.recordId}"`);
        }
    }
    const recordId = urlParams.recordId;
    const fieldsArray = fields !== undefined && isArray(fields) ? fields : [];
    const optionalFieldsArray = optionalFields !== undefined && Array.isArray(optionalFields)
        ? optionalFields
        : [];
    const fieldsString = fieldsArray.join(',');
    const optionalFieldsString = optionalFieldsArray.join(',');
    // Don't submit a megarequest to UIAPI due to SOQL limit reasons.
    // Split and aggregate if needed
    const useAggregateUi = shouldUseAggregateUiForGetRecord(fieldsString, optionalFieldsString);
    if (useAggregateUi) {
        return buildAndDispatchGetRecordAggregateUi(recordId, {
            networkAdapter,
            resourceRequest,
            resourceRequestContext,
        }, {
            fieldsArray,
            optionalFieldsArray,
            fieldsLength: fieldsString.length,
            optionalFieldsLength: optionalFieldsString.length,
        });
    }
    return defaultDispatcher(req).catch((err) => {
        if (fetchResponseIsQueryTooComplicated(err)) {
            // Retry with aggregateUi to see if we can avoid Query Too Complicated
            return buildAndDispatchGetRecordAggregateUi(recordId, {
                networkAdapter,
                resourceRequest,
                resourceRequestContext,
            }, {
                fieldsArray,
                optionalFieldsArray,
                fieldsLength: fieldsString.length,
                optionalFieldsLength: optionalFieldsString.length,
            });
        }
        else {
            throw err;
        }
    });
};
function matchRecordsHandlers(path, resourceRequest) {
    const method = resourceRequest.method.toLowerCase();
    if (method === 'get' &&
        path.startsWith(UIAPI_RECORDS_PATH) &&
        path.startsWith(UIAPI_RECORDS_BATCH_PATH) === false) {
        return getRecordDispatcher;
    }
    return null;
}

const defaultDispatcher = (req) => {
    const { networkAdapter, resourceRequest, resourceRequestContext } = req;
    return networkAdapter(resourceRequest, resourceRequestContext);
};
function getDispatcher(resourceRequest) {
    const { basePath, baseUri } = resourceRequest;
    const path = `${baseUri}${basePath}`;
    const recordsMatch = matchRecordsHandlers(path, resourceRequest);
    if (recordsMatch !== null) {
        return recordsMatch;
    }
    return defaultDispatcher;
}

const inflightRequests = Object.create(null);
const TRANSACTION_KEY_SEP = '::';
const EMPTY_STRING = '';
function isResourceRequestDedupable(resourceRequest) {
    const resourceRequestContext = resourceRequest.resourceRequestContext;
    return (resourceRequest.resourceRequest.method.toLowerCase() === 'get' ||
        (resourceRequestContext && resourceRequestContext.luvioRequestMethod === 'get'));
}
function getTransactionKey(req) {
    const { resourceRequest } = req;
    const { baseUri, basePath, queryParams, headers } = resourceRequest;
    const path = `${baseUri}${basePath}`;
    const queryParamsString = queryParams ? stringify(queryParams) : EMPTY_STRING;
    const headersString = stringify(headers);
    const bodyString = resourceRequest.body && isResourceRequestDedupable(req)
        ? stringify(resourceRequest.body)
        : EMPTY_STRING;
    return `${path}${TRANSACTION_KEY_SEP}${headersString}${TRANSACTION_KEY_SEP}${queryParamsString}${bodyString}`;
}
function getFulfillingRequest(inflightRequests, resourceRequest) {
    const { fulfill } = resourceRequest;
    if (fulfill === undefined) {
        return null;
    }
    const handlersMap = entries(inflightRequests);
    for (let i = 0, len = handlersMap.length; i < len; i += 1) {
        const [transactionKey, handlers] = handlersMap[i];
        // check fulfillment against only the first handler ([0]) because it's equal or
        // fulfills all subsequent handlers in the array
        const existing = handlers[0].resourceRequest;
        if (fulfill(existing, resourceRequest) === true) {
            return transactionKey;
        }
    }
    return null;
}
/**
Dedupes network requests being made to Salesforce APIs.
This function is only designed to dedupe GET requests.

If POST/PUT/PATCH/DELETE requests need to be deduped, that should be handled
on the server instead of here.
*/
const dedupeRequest = (req) => {
    const { resourceRequest } = req;
    if (process.env.NODE_ENV !== 'production') {
        if (!isResourceRequestDedupable(req)) {
            throw new Error('Invalid ResourceRequest that cannot be deduped. Only "get" Requests supported.');
        }
    }
    const transactionKey = getTransactionKey(req);
    // if an identical request is in-flight then queue for its response (do not re-issue the request)
    if (transactionKey in inflightRequests) {
        return new Promise((resolve, reject) => {
            push.call(inflightRequests[transactionKey], {
                resolve,
                reject,
                resourceRequest,
            });
        });
    }
    const dispatch = getDispatcher(resourceRequest);
    // fallback to checking a custom deduper to find a similar (but not identical) request
    const similarTransactionKey = getFulfillingRequest(inflightRequests, resourceRequest);
    if (similarTransactionKey !== null) {
        return new Promise((resolve) => {
            // custom dedupers find similar (not identical) requests. if the similar request fails
            // there's no guarantee the deduped request should fail. thus we re-issue the
            // original request in the case of a failure
            push.call(inflightRequests[similarTransactionKey], {
                resolve,
                reject: function reissueRequest() {
                    resolve(dispatch(req));
                },
                resourceRequest,
            });
        });
    }
    dispatch(req).then((response) => {
        const handlers = inflightRequests[transactionKey];
        delete inflightRequests[transactionKey];
        // handlers mutate responses so must clone the response for each.
        // the first handler is given the original version to avoid an
        // extra clone (particularly when there's only 1 handler).
        for (let i = 1, len = handlers.length; i < len; i++) {
            const handler = handlers[i];
            handler.resolve(parse(stringify(response)));
        }
        handlers[0].resolve(response);
    }, (error) => {
        const handlers = inflightRequests[transactionKey];
        delete inflightRequests[transactionKey];
        for (let i = 0, len = handlers.length; i < len; i++) {
            const handler = handlers[i];
            handler.reject(error);
        }
    });
    // rely on sync behavior of Promise creation to create the list for handlers
    return new Promise((resolve, reject) => {
        inflightRequests[transactionKey] = [{ resolve, reject, resourceRequest }];
    });
};

const RATE_LIMIT_CONFIG = {
    bucketCapacity: 100,
    fillsPerSecond: 100,
};
class TokenBucket {
    /**
     * Constructs an instance of Token Bucket for rate limiting
     *
     * @param bucket The token holding capacity of the bucket
     * @param refillTokensPerSecond The number of tokens replenished every second
     */
    constructor(config) {
        this.bucketCapacity = config.bucketCapacity;
        this.refillTokensPerMilliSecond = config.fillsPerSecond / 1000;
        this.tokens = config.bucketCapacity;
        this.lastRefillTime = Date.now();
    }
    /**
     * Refills the bucket and removes desired number of tokens
     *
     * @param removeTokens number of tokens to be removed from the bucket should be >= 0
     * @returns {boolean} true if removing token was successful
     */
    take(removeTokens) {
        // refill tokens before removing
        this.refill();
        const { tokens } = this;
        const remainingTokens = tokens - removeTokens;
        if (remainingTokens >= 0) {
            this.tokens = remainingTokens;
            return true;
        }
        return false;
    }
    refill() {
        const { bucketCapacity, tokens, refillTokensPerMilliSecond, lastRefillTime } = this;
        const now = Date.now();
        const timePassed = now - lastRefillTime;
        // Number of tokens should be integer so something like Math.floor is desired
        // Using Bitwise NOT ~ twice will achieve the same result with performance benefits
        const calculatedTokens = tokens + ~~(timePassed * refillTokensPerMilliSecond);
        this.tokens = bucketCapacity < calculatedTokens ? bucketCapacity : calculatedTokens;
        this.lastRefillTime = now;
    }
}
var tokenBucket = new TokenBucket(RATE_LIMIT_CONFIG);

function platformNetworkAdapter(baseNetworkAdapter) {
    return (resourceRequest, resourceRequestContext) => {
        if (!tokenBucket.take(1)) ;
        const salesforceRequest = {
            networkAdapter: baseNetworkAdapter,
            resourceRequest: resourceRequest,
            resourceRequestContext: resourceRequestContext,
        };
        // If GET, or overridden to be treated as a GET with resourceRequestContext.networkResourceOverride, then dedupe.
        if (isResourceRequestDedupable(salesforceRequest)) {
            return dedupeRequest(salesforceRequest);
        }
        else {
            const dispatch = getDispatcher(resourceRequest);
            return dispatch(salesforceRequest);
        }
    };
}

const layoutUserStateStorageConfig = {
    storage: layoutUserStateStorage,
    statsLogger: layoutUserStateStorageStatsLogger,
};
const layoutStorageConfig = {
    storage: layoutStorage,
    statsLogger: layoutStorageStatsLogger,
};
const objectInfoStorageConfig = {
    storage: objectInfoStorage,
    statsLogger: objectInfoStorageStatsLogger,
};
/**
 * Given a path, return the aura storage/logger reference if we store the resource durably for network-based caching.
 * @param path - The base path for the request.
 * @returns LdsStorageConfig if we cache that resource, otherwise null.
 */
function getStorageAndLogger(path) {
    if (path.startsWith(UIAPI_GET_LAYOUT) && path.endsWith(UIAPI_GET_LAYOUT_USER_STATE)) {
        // getLayoutUserState
        return layoutUserStateStorageConfig;
    }
    else if (path.startsWith(UIAPI_GET_LAYOUT) &&
        path.endsWith(UIAPI_GET_LAYOUT_USER_STATE) === false) {
        // getLayout
        return layoutStorageConfig;
    }
    else if (path.startsWith(UIAPI_OBJECT_INFO_BATCH_PATH)) {
        // getObjectInfos
        return objectInfoStorageConfig;
    }
    else if (path.startsWith(UIAPI_OBJECT_INFO_PATH) &&
        /picklist-values\/[a-zA-Z\d]+\/[a-zA-Z\d]+/.test(path) === false &&
        /picklist-values\/[a-zA-Z\d]+/.test(path) === false) {
        // getObjectInfo
        return objectInfoStorageConfig;
    }
    return null;
}
const platformFetchAdapter = platformNetworkAdapter(fetchNetworkAdapter);
/**
 * Helper function to fetch data, if available in aura storage cache returns that instead of going to network.
 * @param resourceRequest - {@link ResourceRequest}
 * @param resourceRequestContext {@link ResourceRequestContext}
 * @param storageAndLogger - Object containing a reference to aura storage/logger
 * @param transactionKey - The transaction key, derived from the resource request params.
 * @returns Promise<FetchResponse<any>> - Promise that resolves to a fetch response.
 */
function checkLdsStorageCacheOrFetch(resourceRequest, resourceRequestContext, storageAndLogger, transactionKey) {
    if (!shouldForceRefresh(resourceRequest)) {
        if (storageAndLogger?.storage) {
            return storageAndLogger.storage.get(transactionKey).then((cacheResult) => {
                if (cacheResult !== undefined) {
                    storageAndLogger.statsLogger.logHits();
                    // This creates an AuraFetchResponse, should it just be FetchResponse?
                    return createOkResponse$1(cacheResult);
                }
                storageAndLogger.statsLogger.logMisses();
                return platformFetchAdapter(resourceRequest, resourceRequestContext);
            }, () => {
                return platformFetchAdapter(resourceRequest, resourceRequestContext);
            });
        }
    }
    return platformFetchAdapter(resourceRequest, resourceRequestContext);
}

// noop implementation for when one is not provided
let internalRequestTracker = {
    registerHandler: (_request, _name, _loadedCheck) => { },
    markFinished: (_request) => { },
};
// noop implementation for when one is not provided
let internalRequestLogger = {
    resolve: (_request, _response) => { },
    reject: (_request, _error) => { },
};
/**
 * Wrapper around fetch network adapter from luvio
 *
 * @returns fetchNetworkAdapter {@link NetworkAdapter}
 */
function setupFetchNetworkAdapter() {
    return async (resourceRequest, resourceRequestContext) => {
        return fetchNetworkAdapter(resourceRequest);
    };
}
/**
 * Helper function to make the fetch network call.
 * @param resourceRequest - {@link ResourceRequest}
 * @param resourceRequestContext {@link ResourceRequestContext}
 * @returns Promise<FetchResponse<any>>
 */
function handleSendFetchRequest(resourceRequest, resourceRequestContext) {
    const { baseUri, basePath } = resourceRequest;
    const path = `${baseUri}${basePath}`;
    const storageAndLogger = getStorageAndLogger(path);
    const transactionKey = getTransactionKey$1(resourceRequest);
    const response = checkLdsStorageCacheOrFetch(resourceRequest, resourceRequestContext, storageAndLogger, transactionKey);
    internalRequestTracker.registerHandler(response, 'lds-inflight-network-request', () => {
        // return false until we signal we are finished
        return false;
    });
    response
        .then((value) => {
        if (storageAndLogger !== null && storageAndLogger.storage !== null) {
            storageAndLogger.storage.set(transactionKey, value.body).catch((_error) => { });
        }
        internalRequestLogger.resolve(resourceRequest, value);
    })
        .catch((error) => {
        internalRequestLogger.reject(resourceRequest, error);
    });
    return response.finally(() => {
        internalRequestTracker.markFinished(response);
    });
}
/**
 * Wrapper around fetch network adapter from luvio
 *
 * @param requestTracker
 * @param requestLogger
 * @returns lexNetworkAdapter {@link NetworkAdapter}
 */
function setupLexNetworkAdapter(requestTracker, requestLogger) {
    internalRequestTracker = requestTracker || internalRequestTracker;
    internalRequestLogger = requestLogger || internalRequestLogger;
    return async (resourceRequest, resourceRequestContext) => {
        return handleSendFetchRequest(resourceRequest, resourceRequestContext);
    };
}

const NO_OP = () => { };
// For use by callers within this module to instrument interesting things.
const instrumentation = {
    log: NO_OP,
    error: NO_OP,
};
/**
 * Allows external modules (typically a runtime environment) to set
 * instrumentation hooks for this module. Note that the hooks are
 * incremental - hooks not supplied in newInstrumentation will retain
 * their previous values. The default instrumentation hooks are no-ops.
 *
 * @param newInstrumentation instrumentation hooks to be overridden
 */
function instrument(newInstrumentation) {
    Object.assign(instrumentation, newInstrumentation);
}

export { instrument, setupFetchNetworkAdapter, setupLexNetworkAdapter };
// version: 1.332.0-fe34ef5c6f
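Editor's note on the dedupe logic above: identical in-flight GET requests are coalesced on a transaction key built from the request path, headers, and query parameters. The sketch below (not part of the package) mirrors that key construction to show when two requests would share one network call; the record ID and field list are illustrative only.

// Minimal sketch mirroring getTransactionKey above; not exported by the package.
const SKETCH_KEY_SEP = '::';

function sketchTransactionKey({ baseUri, basePath, queryParams, headers }) {
    const path = `${baseUri}${basePath}`;
    const queryParamsString = queryParams ? JSON.stringify(queryParams) : '';
    const headersString = JSON.stringify(headers);
    // GET requests carry no body, so the body segment is empty here
    return `${path}${SKETCH_KEY_SEP}${headersString}${SKETCH_KEY_SEP}${queryParamsString}`;
}

// Two logically identical getRecord requests produce the same key, so the second
// caller would be queued on the first in-flight response instead of re-fetching.
const keyA = sketchTransactionKey({
    baseUri: '/services/data/v63.0/ui-api',
    basePath: '/records/001xx000003GYcFAAW', // hypothetical record id
    queryParams: { fields: ['Account.Name'] },
    headers: {},
});
const keyB = sketchTransactionKey({
    baseUri: '/services/data/v63.0/ui-api',
    basePath: '/records/001xx000003GYcFAAW',
    queryParams: { fields: ['Account.Name'] },
    headers: {},
});
console.log(keyA === keyB); // true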
package/dist/types/__mocks__/aura-storage.d.ts
ADDED
@@ -0,0 +1,19 @@
declare class Storage {
    _entries: {
        [key: string]: any;
    };
    get(key: string): Promise<any>;
    set(key: string, value: any): Promise<void>;
    clear(): Promise<void>;
    getSize(): Promise<number>;
    isPersistent(): boolean;
}
declare const _default: {
    initStorage({ name }: {
        name: string;
    }): Storage;
    getStorage(name: string): Storage;
    deleteStorage(name: string): void;
    __reset(): Promise<void>;
};
export default _default;
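Editor's note: the declarations above describe the test double for aura-storage. The sketch below is an assumed Jest-style usage based only on these signatures; the storage name and stored value are illustrative, and behavior beyond the declared return types is not shown in this diff.

// Minimal sketch exercising the mocked aura-storage surface declared above.
import auraStorage from 'aura-storage';

async function exerciseMockStorage() {
    const storage = auraStorage.initStorage({ name: 'lds-layout' }); // illustrative name
    await storage.set('some-transaction-key', { layoutType: 'Full' });
    const cached = await storage.get('some-transaction-key');
    console.log(cached, await storage.getSize(), storage.isPersistent());
    await auraStorage.__reset(); // presumably resets mock state between tests
}

exerciseMockStorage();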
package/dist/types/__mocks__/instrumentation/service.d.ts
ADDED
@@ -0,0 +1,33 @@
export declare function counter(): {
    increment(): void;
    decrement(): void;
    getValue(): void;
    reset(): void;
};
export declare function gauge(): {
    setValue(): void;
    getValue(): void;
    reset(): void;
};
export declare function mark(): void;
export declare function markStart(): void;
export declare function markEnd(): void;
export declare function perfStart(): void;
export declare function perfEnd(): void;
export declare function percentileHistogram(): {
    update(): void;
    getValue(): void;
    reset(): void;
};
export declare function time(): void;
export declare function timer(): {
    addDuration(): void;
    getValue(): void;
    time(): void;
};
export declare function registerCacheStats(): {
    logHits(): void;
    logMisses(): void;
};
export declare function registerPlugin(): void;
export declare function registerPeriodicLogger(): void;
package/dist/types/instrumentation.d.ts
ADDED
@@ -0,0 +1,24 @@
/**
 * Instrumentation hooks exposed by this module.
 */
export interface FetchNetworkInstrumentation {
    /**
     * Used to send logs to Splunk
     */
    log(message: string): void;
    /**
     * Used to record an error, which will also be sent to Splunk, but also could be used
     * for sending error counts to Argus
     */
    error(err: Error | string): void;
}
export declare const instrumentation: FetchNetworkInstrumentation;
/**
 * Allows external modules (typically a runtime environment) to set
 * instrumentation hooks for this module. Note that the hooks are
 * incremental - hooks not supplied in newInstrumentation will retain
 * their previous values. The default instrumentation hooks are no-ops.
 *
 * @param newInstrumentation instrumentation hooks to be overridden
 */
export declare function instrument(newInstrumentation: Partial<FetchNetworkInstrumentation>): void;
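Editor's note: since the hooks are merged incrementally, a host can supply either or both. A minimal sketch of wiring them from a host environment follows; the console-based hook bodies are illustrative placeholders, not part of the package.

import { instrument } from '@salesforce/lds-network-fetch';

instrument({
    log: (message) => console.log('[lds-network-fetch]', message),
    error: (err) => console.error('[lds-network-fetch]', err),
});
// Supplying only { error } here would leave `log` as its default no-op.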
package/dist/types/networkAdapter.d.ts
ADDED
@@ -0,0 +1,27 @@
import type { NetworkAdapter, ResourceRequest } from '@luvio/engine';
export type RequestTracker = {
    registerHandler: (request: any, name: string, loadedCheck: () => boolean) => void;
    markFinished: (request: any) => void;
};
export type RequestLogger = {
    resolve: (request: ResourceRequest, response: any) => void;
    reject: (request: ResourceRequest, error: Error) => void;
};
export declare function getTrackerAndLoggerForTest(): {
    tracker: RequestTracker;
    logger: RequestLogger;
};
/**
 * Wrapper around fetch network adapter from luvio
 *
 * @returns fetchNetworkAdapter {@link NetworkAdapter}
 */
export declare function setupFetchNetworkAdapter(): NetworkAdapter;
/**
 * Wrapper around fetch network adapter from luvio
 *
 * @param requestTracker
 * @param requestLogger
 * @returns lexNetworkAdapter {@link NetworkAdapter}
 */
export declare function setupLexNetworkAdapter(requestTracker?: RequestTracker, requestLogger?: RequestLogger): NetworkAdapter;
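Editor's note: a minimal sketch of constructing the LEX network adapter with the optional tracking and logging hooks typed above. The hook bodies are placeholders and the resulting adapter simply has the luvio NetworkAdapter shape, (resourceRequest, context) => Promise of FetchResponse.

import { setupLexNetworkAdapter } from '@salesforce/lds-network-fetch';

const requestTracker = {
    registerHandler: (request, name, loadedCheck) => {
        // e.g. register the pending promise with a host "page loaded" tracker (placeholder)
    },
    markFinished: (request) => {
        // e.g. release the handle registered above (placeholder)
    },
};
const requestLogger = {
    resolve: (request, response) => { /* e.g. record success metrics (placeholder) */ },
    reject: (request, error) => { /* e.g. record failure metrics (placeholder) */ },
};

const networkAdapter = setupLexNetworkAdapter(requestTracker, requestLogger);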
package/dist/types/utils.d.ts
ADDED
@@ -0,0 +1,23 @@
import type { CacheStatsLogger } from 'instrumentation/service';
import type { AuraStorage } from '@salesforce/lds-aura-storage';
import type { FetchResponse, ResourceRequest, ResourceRequestContext } from '@luvio/engine';
type LdsStorageConfig = {
    storage: AuraStorage | null;
    statsLogger: CacheStatsLogger;
};
/**
 * Given a path, return the aura storage/logger reference if we store the resource durably for network-based caching.
 * @param path - The base path for the request.
 * @returns LdsStorageConfig if we cache that resource, otherwise null.
 */
export declare function getStorageAndLogger(path: string): LdsStorageConfig | null;
/**
 * Helper function to fetch data, if available in aura storage cache returns that instead of going to network.
 * @param resourceRequest - {@link ResourceRequest}
 * @param resourceRequestContext {@link ResourceRequestContext}
 * @param storageAndLogger - Object containing a reference to aura storage/logger
 * @param transactionKey - The transaction key, derived from the resource request params.
 * @returns Promise<FetchResponse<any>> - Promise that resolves to a fetch response.
 */
export declare function checkLdsStorageCacheOrFetch(resourceRequest: ResourceRequest, resourceRequestContext: ResourceRequestContext, storageAndLogger: LdsStorageConfig | null, transactionKey: string): Promise<FetchResponse<any>>;
export {};
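Editor's note: these internal utilities implement a cache-first strategy for a small set of UI API resources (layouts, layout user state, object infos). The sketch below (not part of the package) restates that decision flow with a hypothetical storage object: cache hit returns a synthesized ok response, cache miss or storage error falls through to the network.

// Minimal sketch of the cache-or-fetch decision that checkLdsStorageCacheOrFetch implements.
async function cacheOrFetchSketch(storage, statsLogger, transactionKey, goToNetwork) {
    try {
        const cacheResult = await storage.get(transactionKey);
        if (cacheResult !== undefined) {
            statsLogger.logHits();
            // shape mirrors the ok FetchResponse built by the module
            return { status: 200, ok: true, statusText: 'ok', headers: {}, body: cacheResult };
        }
        statsLogger.logMisses();
    } catch (_error) {
        // storage errors fall through to the network
    }
    return goToNetwork();
}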
package/package.json
ADDED
@@ -0,0 +1,43 @@
{
  "name": "@salesforce/lds-network-fetch",
  "version": "1.332.0",
  "license": "SEE LICENSE IN LICENSE.txt",
  "description": "LDS Network Adapter using fetch",
  "main": "dist/ldsNetworkFetch.js",
  "module": "dist/ldsNetworkFetch.js",
  "types": "dist/types/main.d.ts",
  "files": [
    "dist"
  ],
  "exports": {
    ".": {
      "types": "./dist/types/main.d.ts",
      "import": "./dist/ldsNetworkFetch.js",
      "default": "./dist/ldsNetworkFetch.js"
    }
  },
  "sfdc": {
    "path": "forcelds/ldsNetworkFetch/",
    "publishedFileName": "ldsNetworkFetch.js",
    "overrides": {
      "artifactDirectory": "dist",
      "outputModuleName": "ldsNetworkFetch"
    }
  },
  "scripts": {
    "prepare": "yarn build",
    "build": "rollup --bundleConfigAsCjs --config rollup.config.js",
    "clean": "rm -rf dist",
    "test:perf": "best",
    "test:unit": "jest",
    "test:debug": "node --inspect-brk ../../node_modules/.bin/jest --runInBand",
    "release:corejar": "yarn build && ../core-build/scripts/core.js --name=lds-network-fetch"
  },
  "dependencies": {
    "@luvio/engine": "0.156.5",
    "@luvio/network-adapter-fetch": "0.156.5"
  },
  "devDependencies": {
    "@salesforce/lds-network-aura": "^1.332.0"
  }
}