@salesforce/lds-network-adapter 1.124.2 → 1.124.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ldsNetwork.js +444 -444
- package/dist/{dispatch → types/dispatch}/dedupe.d.ts +9 -9
- package/dist/{dispatch → types/dispatch}/execute-aggregate-ui.d.ts +26 -26
- package/dist/{dispatch → types/dispatch}/main.d.ts +9 -9
- package/dist/{dispatch → types/dispatch}/records.d.ts +3 -3
- package/dist/{dispatch → types/dispatch}/related-lists.d.ts +3 -3
- package/dist/{instrumentation.d.ts → types/instrumentation.d.ts} +17 -17
- package/dist/{language.d.ts → types/language.d.ts} +16 -16
- package/dist/{main.d.ts → types/main.d.ts} +8 -8
- package/dist/{token-bucket.d.ts → types/token-bucket.d.ts} +28 -28
- package/dist/{uiapi-base.d.ts → types/uiapi-base.d.ts} +1 -1
- package/package.json +3 -3
package/dist/ldsNetwork.js
CHANGED
|
@@ -6,467 +6,467 @@
|
|
|
6
6
|
|
|
7
7
|
import { HttpStatusCode } from '@luvio/engine';
|
|
8
8
|
|
|
9
|
-
const { parse, stringify } = JSON;
|
|
10
|
-
const { join, push, unshift } = Array.prototype;
|
|
11
|
-
const { isArray } = Array;
|
|
9
|
+
const { parse, stringify } = JSON;
|
|
10
|
+
const { join, push, unshift } = Array.prototype;
|
|
11
|
+
const { isArray } = Array;
|
|
12
12
|
const { entries, keys } = Object;
|
|
13
13
|
|
|
14
14
|
const UI_API_BASE_URI = '/services/data/v58.0/ui-api';
|
|
15
15
|
|
|
16
|
-
let instrumentation = {
|
|
17
|
-
aggregateUiChunkCount: (_cb) => { },
|
|
18
|
-
aggregateUiConnectError: () => { },
|
|
19
|
-
duplicateRequest: (_cb) => { },
|
|
20
|
-
getRecordAggregateInvoke: () => { },
|
|
21
|
-
getRecordAggregateResolve: (_cb) => { },
|
|
22
|
-
getRecordAggregateReject: (_cb) => { },
|
|
23
|
-
getRecordAggregateRetry: () => { },
|
|
24
|
-
getRecordNormalInvoke: () => { },
|
|
25
|
-
networkRateLimitExceeded: () => { },
|
|
26
|
-
};
|
|
27
|
-
function instrument(newInstrumentation) {
|
|
28
|
-
instrumentation = Object.assign(instrumentation, newInstrumentation);
|
|
16
|
+
let instrumentation = {
|
|
17
|
+
aggregateUiChunkCount: (_cb) => { },
|
|
18
|
+
aggregateUiConnectError: () => { },
|
|
19
|
+
duplicateRequest: (_cb) => { },
|
|
20
|
+
getRecordAggregateInvoke: () => { },
|
|
21
|
+
getRecordAggregateResolve: (_cb) => { },
|
|
22
|
+
getRecordAggregateReject: (_cb) => { },
|
|
23
|
+
getRecordAggregateRetry: () => { },
|
|
24
|
+
getRecordNormalInvoke: () => { },
|
|
25
|
+
networkRateLimitExceeded: () => { },
|
|
26
|
+
};
|
|
27
|
+
function instrument(newInstrumentation) {
|
|
28
|
+
instrumentation = Object.assign(instrumentation, newInstrumentation);
|
|
29
29
|
}
|
|
30
30
|
|
|
31
|
-
const LDS_RECORDS_AGGREGATE_UI = 'LDS_Records_AggregateUi';
|
|
32
|
-
// Boundary which represents the limit that we start chunking at,
|
|
33
|
-
// determined by comma separated string length of fields
|
|
34
|
-
const MAX_STRING_LENGTH_PER_CHUNK = 10000;
|
|
35
|
-
// UIAPI limit
|
|
36
|
-
const MAX_AGGREGATE_UI_CHUNK_LIMIT = 50;
|
|
37
|
-
function createOkResponse(body) {
|
|
38
|
-
return {
|
|
39
|
-
status: HttpStatusCode.Ok,
|
|
40
|
-
body,
|
|
41
|
-
statusText: 'ok',
|
|
42
|
-
headers: {},
|
|
43
|
-
ok: true,
|
|
44
|
-
};
|
|
45
|
-
}
|
|
46
|
-
function getErrorResponseText(status) {
|
|
47
|
-
switch (status) {
|
|
48
|
-
case HttpStatusCode.Ok:
|
|
49
|
-
return 'OK';
|
|
50
|
-
case HttpStatusCode.NotModified:
|
|
51
|
-
return 'Not Modified';
|
|
52
|
-
case HttpStatusCode.NotFound:
|
|
53
|
-
return 'Not Found';
|
|
54
|
-
case HttpStatusCode.BadRequest:
|
|
55
|
-
return 'Bad Request';
|
|
56
|
-
case HttpStatusCode.ServerError:
|
|
57
|
-
return 'Server Error';
|
|
58
|
-
default:
|
|
59
|
-
return `Unexpected HTTP Status Code: ${status}`;
|
|
60
|
-
}
|
|
61
|
-
}
|
|
62
|
-
function createErrorResponse(status, body) {
|
|
63
|
-
return {
|
|
64
|
-
status,
|
|
65
|
-
body,
|
|
66
|
-
statusText: getErrorResponseText(status),
|
|
67
|
-
headers: {},
|
|
68
|
-
ok: false,
|
|
69
|
-
};
|
|
70
|
-
}
|
|
71
|
-
function isSpanningRecord(fieldValue) {
|
|
72
|
-
return fieldValue !== null && typeof fieldValue === 'object';
|
|
73
|
-
}
|
|
74
|
-
function mergeRecordFields(first, second) {
|
|
75
|
-
const { fields: targetFields } = first;
|
|
76
|
-
const { fields: sourceFields } = second;
|
|
77
|
-
const fieldNames = keys(sourceFields);
|
|
78
|
-
for (let i = 0, len = fieldNames.length; i < len; i += 1) {
|
|
79
|
-
const fieldName = fieldNames[i];
|
|
80
|
-
const sourceField = sourceFields[fieldName];
|
|
81
|
-
const targetField = targetFields[fieldName];
|
|
82
|
-
if (isSpanningRecord(sourceField.value)) {
|
|
83
|
-
if (targetField === undefined) {
|
|
84
|
-
targetFields[fieldName] = sourceFields[fieldName];
|
|
85
|
-
continue;
|
|
86
|
-
}
|
|
87
|
-
mergeRecordFields(targetField.value, sourceField.value);
|
|
88
|
-
continue;
|
|
89
|
-
}
|
|
90
|
-
targetFields[fieldName] = sourceFields[fieldName];
|
|
91
|
-
}
|
|
92
|
-
return first;
|
|
93
|
-
}
|
|
94
|
-
/** Invoke executeAggregateUi Aura controller. This is only to be used with large getRecord requests that
|
|
95
|
-
* would otherwise cause a query length exception.
|
|
96
|
-
*/
|
|
97
|
-
function dispatchSplitRecordAggregateUiAction(recordId, networkAdapter, resourceRequest, resourceRequestContext) {
|
|
98
|
-
instrumentation.getRecordAggregateInvoke();
|
|
99
|
-
return networkAdapter(resourceRequest, resourceRequestContext).then((resp) => {
|
|
100
|
-
const { body } = resp;
|
|
101
|
-
// This response body could be an executeAggregateUi, which we don't natively support.
|
|
102
|
-
// Massage it into looking like a getRecord response.
|
|
103
|
-
if (body === null ||
|
|
104
|
-
body === undefined ||
|
|
105
|
-
body.compositeResponse === undefined ||
|
|
106
|
-
body.compositeResponse.length === 0) {
|
|
107
|
-
// We shouldn't even get into this state - a 200 with no body?
|
|
108
|
-
throw createErrorResponse(HttpStatusCode.ServerError, {
|
|
109
|
-
error: 'No response body in executeAggregateUi found',
|
|
110
|
-
});
|
|
111
|
-
}
|
|
112
|
-
const merged = body.compositeResponse.reduce((seed, response) => {
|
|
113
|
-
if (response.httpStatusCode !== HttpStatusCode.Ok) {
|
|
114
|
-
instrumentation.getRecordAggregateReject(() => recordId);
|
|
115
|
-
throw createErrorResponse(HttpStatusCode.ServerError, {
|
|
116
|
-
error: response.message,
|
|
117
|
-
});
|
|
118
|
-
}
|
|
119
|
-
if (seed === null) {
|
|
120
|
-
return response.body;
|
|
121
|
-
}
|
|
122
|
-
return mergeRecordFields(seed, response.body);
|
|
123
|
-
}, null);
|
|
124
|
-
instrumentation.getRecordAggregateResolve(() => {
|
|
125
|
-
return {
|
|
126
|
-
recordId,
|
|
127
|
-
apiName: merged.apiName,
|
|
128
|
-
};
|
|
129
|
-
});
|
|
130
|
-
return createOkResponse(merged);
|
|
131
|
-
}, (err) => {
|
|
132
|
-
instrumentation.getRecordAggregateReject(() => recordId);
|
|
133
|
-
// rethrow error
|
|
134
|
-
throw err;
|
|
135
|
-
});
|
|
136
|
-
}
|
|
137
|
-
function shouldUseAggregateUiForGetRecord(fieldsArray, optionalFieldsArray) {
|
|
138
|
-
return fieldsArray.length + optionalFieldsArray.length >= MAX_STRING_LENGTH_PER_CHUNK;
|
|
139
|
-
}
|
|
140
|
-
function buildAggregateUiUrl(params, resourceRequest) {
|
|
141
|
-
const { fields, optionalFields } = params;
|
|
142
|
-
const queryString = [];
|
|
143
|
-
if (fields !== undefined && fields.length > 0) {
|
|
144
|
-
const fieldString = join.call(fields, ',');
|
|
145
|
-
push.call(queryString, `fields=${encodeURIComponent(fieldString)}`);
|
|
146
|
-
}
|
|
147
|
-
if (optionalFields !== undefined && optionalFields.length > 0) {
|
|
148
|
-
const optionalFieldString = join.call(optionalFields, ',');
|
|
149
|
-
push.call(queryString, `optionalFields=${encodeURIComponent(optionalFieldString)}`);
|
|
150
|
-
}
|
|
151
|
-
return `${resourceRequest.baseUri}${resourceRequest.basePath}?${join.call(queryString, '&')}`;
|
|
152
|
-
}
|
|
153
|
-
function buildGetRecordByFieldsCompositeRequest(resourceRequest, recordsCompositeRequest) {
|
|
154
|
-
const { fieldsArray, optionalFieldsArray, fieldsLength, optionalFieldsLength } = recordsCompositeRequest;
|
|
155
|
-
// Formula: # of fields per chunk = floor(avg field length / max length per chunk)
|
|
156
|
-
const averageFieldStringLength = Math.floor((fieldsLength + optionalFieldsLength) / (fieldsArray.length + optionalFieldsArray.length));
|
|
157
|
-
const fieldsPerChunk = Math.floor(MAX_STRING_LENGTH_PER_CHUNK / averageFieldStringLength);
|
|
158
|
-
const optionalFieldsChunks = [];
|
|
159
|
-
// Do the same for optional tracked fields
|
|
160
|
-
for (let i = 0, j = optionalFieldsArray.length; i < j; i += fieldsPerChunk) {
|
|
161
|
-
const newChunk = optionalFieldsArray.slice(i, i + fieldsPerChunk);
|
|
162
|
-
push.call(optionalFieldsChunks, newChunk);
|
|
163
|
-
}
|
|
164
|
-
const compositeRequest = [];
|
|
165
|
-
// Add fields as one chunk at the beginning of the compositeRequest
|
|
166
|
-
if (fieldsArray.length > 0) {
|
|
167
|
-
const url = buildAggregateUiUrl({
|
|
168
|
-
fields: fieldsArray,
|
|
169
|
-
}, resourceRequest);
|
|
170
|
-
push.call(compositeRequest, {
|
|
171
|
-
url,
|
|
172
|
-
referenceId: `${LDS_RECORDS_AGGREGATE_UI}_fields`,
|
|
173
|
-
});
|
|
174
|
-
}
|
|
175
|
-
// Make sure we don't exceed the max subquery chunk limit for aggUi by capping the amount
|
|
176
|
-
// of optionalFields subqueries at MAX_AGGREGATE_UI_CHUNK_LIMIT - 1 (first chunk is for fields)
|
|
177
|
-
const maxNumberOfAllowableOptionalFieldsChunks = MAX_AGGREGATE_UI_CHUNK_LIMIT - 1;
|
|
178
|
-
const optionalFieldsChunksLength = Math.min(optionalFieldsChunks.length, maxNumberOfAllowableOptionalFieldsChunks);
|
|
179
|
-
for (let i = 0; i < optionalFieldsChunksLength; i += 1) {
|
|
180
|
-
const fieldChunk = optionalFieldsChunks[i];
|
|
181
|
-
const url = buildAggregateUiUrl({
|
|
182
|
-
optionalFields: fieldChunk,
|
|
183
|
-
}, resourceRequest);
|
|
184
|
-
push.call(compositeRequest, {
|
|
185
|
-
url,
|
|
186
|
-
referenceId: `${LDS_RECORDS_AGGREGATE_UI}_optionalFields_${i}`,
|
|
187
|
-
});
|
|
188
|
-
}
|
|
189
|
-
return compositeRequest;
|
|
31
|
+
const LDS_RECORDS_AGGREGATE_UI = 'LDS_Records_AggregateUi';
|
|
32
|
+
// Boundary which represents the limit that we start chunking at,
|
|
33
|
+
// determined by comma separated string length of fields
|
|
34
|
+
const MAX_STRING_LENGTH_PER_CHUNK = 10000;
|
|
35
|
+
// UIAPI limit
|
|
36
|
+
const MAX_AGGREGATE_UI_CHUNK_LIMIT = 50;
|
|
37
|
+
function createOkResponse(body) {
|
|
38
|
+
return {
|
|
39
|
+
status: HttpStatusCode.Ok,
|
|
40
|
+
body,
|
|
41
|
+
statusText: 'ok',
|
|
42
|
+
headers: {},
|
|
43
|
+
ok: true,
|
|
44
|
+
};
|
|
45
|
+
}
|
|
46
|
+
function getErrorResponseText(status) {
|
|
47
|
+
switch (status) {
|
|
48
|
+
case HttpStatusCode.Ok:
|
|
49
|
+
return 'OK';
|
|
50
|
+
case HttpStatusCode.NotModified:
|
|
51
|
+
return 'Not Modified';
|
|
52
|
+
case HttpStatusCode.NotFound:
|
|
53
|
+
return 'Not Found';
|
|
54
|
+
case HttpStatusCode.BadRequest:
|
|
55
|
+
return 'Bad Request';
|
|
56
|
+
case HttpStatusCode.ServerError:
|
|
57
|
+
return 'Server Error';
|
|
58
|
+
default:
|
|
59
|
+
return `Unexpected HTTP Status Code: ${status}`;
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
function createErrorResponse(status, body) {
|
|
63
|
+
return {
|
|
64
|
+
status,
|
|
65
|
+
body,
|
|
66
|
+
statusText: getErrorResponseText(status),
|
|
67
|
+
headers: {},
|
|
68
|
+
ok: false,
|
|
69
|
+
};
|
|
70
|
+
}
|
|
71
|
+
function isSpanningRecord(fieldValue) {
|
|
72
|
+
return fieldValue !== null && typeof fieldValue === 'object';
|
|
73
|
+
}
|
|
74
|
+
function mergeRecordFields(first, second) {
|
|
75
|
+
const { fields: targetFields } = first;
|
|
76
|
+
const { fields: sourceFields } = second;
|
|
77
|
+
const fieldNames = keys(sourceFields);
|
|
78
|
+
for (let i = 0, len = fieldNames.length; i < len; i += 1) {
|
|
79
|
+
const fieldName = fieldNames[i];
|
|
80
|
+
const sourceField = sourceFields[fieldName];
|
|
81
|
+
const targetField = targetFields[fieldName];
|
|
82
|
+
if (isSpanningRecord(sourceField.value)) {
|
|
83
|
+
if (targetField === undefined) {
|
|
84
|
+
targetFields[fieldName] = sourceFields[fieldName];
|
|
85
|
+
continue;
|
|
86
|
+
}
|
|
87
|
+
mergeRecordFields(targetField.value, sourceField.value);
|
|
88
|
+
continue;
|
|
89
|
+
}
|
|
90
|
+
targetFields[fieldName] = sourceFields[fieldName];
|
|
91
|
+
}
|
|
92
|
+
return first;
|
|
93
|
+
}
|
|
94
|
+
/** Invoke executeAggregateUi Aura controller. This is only to be used with large getRecord requests that
|
|
95
|
+
* would otherwise cause a query length exception.
|
|
96
|
+
*/
|
|
97
|
+
function dispatchSplitRecordAggregateUiAction(recordId, networkAdapter, resourceRequest, resourceRequestContext) {
|
|
98
|
+
instrumentation.getRecordAggregateInvoke();
|
|
99
|
+
return networkAdapter(resourceRequest, resourceRequestContext).then((resp) => {
|
|
100
|
+
const { body } = resp;
|
|
101
|
+
// This response body could be an executeAggregateUi, which we don't natively support.
|
|
102
|
+
// Massage it into looking like a getRecord response.
|
|
103
|
+
if (body === null ||
|
|
104
|
+
body === undefined ||
|
|
105
|
+
body.compositeResponse === undefined ||
|
|
106
|
+
body.compositeResponse.length === 0) {
|
|
107
|
+
// We shouldn't even get into this state - a 200 with no body?
|
|
108
|
+
throw createErrorResponse(HttpStatusCode.ServerError, {
|
|
109
|
+
error: 'No response body in executeAggregateUi found',
|
|
110
|
+
});
|
|
111
|
+
}
|
|
112
|
+
const merged = body.compositeResponse.reduce((seed, response) => {
|
|
113
|
+
if (response.httpStatusCode !== HttpStatusCode.Ok) {
|
|
114
|
+
instrumentation.getRecordAggregateReject(() => recordId);
|
|
115
|
+
throw createErrorResponse(HttpStatusCode.ServerError, {
|
|
116
|
+
error: response.message,
|
|
117
|
+
});
|
|
118
|
+
}
|
|
119
|
+
if (seed === null) {
|
|
120
|
+
return response.body;
|
|
121
|
+
}
|
|
122
|
+
return mergeRecordFields(seed, response.body);
|
|
123
|
+
}, null);
|
|
124
|
+
instrumentation.getRecordAggregateResolve(() => {
|
|
125
|
+
return {
|
|
126
|
+
recordId,
|
|
127
|
+
apiName: merged.apiName,
|
|
128
|
+
};
|
|
129
|
+
});
|
|
130
|
+
return createOkResponse(merged);
|
|
131
|
+
}, (err) => {
|
|
132
|
+
instrumentation.getRecordAggregateReject(() => recordId);
|
|
133
|
+
// rethrow error
|
|
134
|
+
throw err;
|
|
135
|
+
});
|
|
136
|
+
}
|
|
137
|
+
function shouldUseAggregateUiForGetRecord(fieldsArray, optionalFieldsArray) {
|
|
138
|
+
return fieldsArray.length + optionalFieldsArray.length >= MAX_STRING_LENGTH_PER_CHUNK;
|
|
139
|
+
}
|
|
140
|
+
function buildAggregateUiUrl(params, resourceRequest) {
|
|
141
|
+
const { fields, optionalFields } = params;
|
|
142
|
+
const queryString = [];
|
|
143
|
+
if (fields !== undefined && fields.length > 0) {
|
|
144
|
+
const fieldString = join.call(fields, ',');
|
|
145
|
+
push.call(queryString, `fields=${encodeURIComponent(fieldString)}`);
|
|
146
|
+
}
|
|
147
|
+
if (optionalFields !== undefined && optionalFields.length > 0) {
|
|
148
|
+
const optionalFieldString = join.call(optionalFields, ',');
|
|
149
|
+
push.call(queryString, `optionalFields=${encodeURIComponent(optionalFieldString)}`);
|
|
150
|
+
}
|
|
151
|
+
return `${resourceRequest.baseUri}${resourceRequest.basePath}?${join.call(queryString, '&')}`;
|
|
152
|
+
}
|
|
153
|
+
function buildGetRecordByFieldsCompositeRequest(resourceRequest, recordsCompositeRequest) {
|
|
154
|
+
const { fieldsArray, optionalFieldsArray, fieldsLength, optionalFieldsLength } = recordsCompositeRequest;
|
|
155
|
+
// Formula: # of fields per chunk = floor(avg field length / max length per chunk)
|
|
156
|
+
const averageFieldStringLength = Math.floor((fieldsLength + optionalFieldsLength) / (fieldsArray.length + optionalFieldsArray.length));
|
|
157
|
+
const fieldsPerChunk = Math.floor(MAX_STRING_LENGTH_PER_CHUNK / averageFieldStringLength);
|
|
158
|
+
const optionalFieldsChunks = [];
|
|
159
|
+
// Do the same for optional tracked fields
|
|
160
|
+
for (let i = 0, j = optionalFieldsArray.length; i < j; i += fieldsPerChunk) {
|
|
161
|
+
const newChunk = optionalFieldsArray.slice(i, i + fieldsPerChunk);
|
|
162
|
+
push.call(optionalFieldsChunks, newChunk);
|
|
163
|
+
}
|
|
164
|
+
const compositeRequest = [];
|
|
165
|
+
// Add fields as one chunk at the beginning of the compositeRequest
|
|
166
|
+
if (fieldsArray.length > 0) {
|
|
167
|
+
const url = buildAggregateUiUrl({
|
|
168
|
+
fields: fieldsArray,
|
|
169
|
+
}, resourceRequest);
|
|
170
|
+
push.call(compositeRequest, {
|
|
171
|
+
url,
|
|
172
|
+
referenceId: `${LDS_RECORDS_AGGREGATE_UI}_fields`,
|
|
173
|
+
});
|
|
174
|
+
}
|
|
175
|
+
// Make sure we don't exceed the max subquery chunk limit for aggUi by capping the amount
|
|
176
|
+
// of optionalFields subqueries at MAX_AGGREGATE_UI_CHUNK_LIMIT - 1 (first chunk is for fields)
|
|
177
|
+
const maxNumberOfAllowableOptionalFieldsChunks = MAX_AGGREGATE_UI_CHUNK_LIMIT - 1;
|
|
178
|
+
const optionalFieldsChunksLength = Math.min(optionalFieldsChunks.length, maxNumberOfAllowableOptionalFieldsChunks);
|
|
179
|
+
for (let i = 0; i < optionalFieldsChunksLength; i += 1) {
|
|
180
|
+
const fieldChunk = optionalFieldsChunks[i];
|
|
181
|
+
const url = buildAggregateUiUrl({
|
|
182
|
+
optionalFields: fieldChunk,
|
|
183
|
+
}, resourceRequest);
|
|
184
|
+
push.call(compositeRequest, {
|
|
185
|
+
url,
|
|
186
|
+
referenceId: `${LDS_RECORDS_AGGREGATE_UI}_optionalFields_${i}`,
|
|
187
|
+
});
|
|
188
|
+
}
|
|
189
|
+
return compositeRequest;
|
|
190
190
|
}
|
|
191
191
|
|
|
192
|
-
const UIAPI_RECORDS_PATH = `${UI_API_BASE_URI}/records`;
|
|
193
|
-
const UIAPI_RECORDS_BATCH_PATH = `${UI_API_BASE_URI}/records/batch/`;
|
|
194
|
-
const QUERY_TOO_COMPLICATED_ERROR_CODE = 'QUERY_TOO_COMPLICATED';
|
|
195
|
-
function fetchResponseIsQueryTooComplicated(error) {
|
|
196
|
-
const { body } = error;
|
|
197
|
-
if (error.status === HttpStatusCode.BadRequest && body !== undefined) {
|
|
198
|
-
return (body.statusCode === HttpStatusCode.BadRequest &&
|
|
199
|
-
body.errorCode === QUERY_TOO_COMPLICATED_ERROR_CODE);
|
|
200
|
-
}
|
|
201
|
-
return false;
|
|
202
|
-
}
|
|
203
|
-
/*
|
|
204
|
-
* Takes a ResourceRequest, builds the aggregateUi payload, and dispatches via aggregateUi action
|
|
205
|
-
*/
|
|
206
|
-
function buildAndDispatchGetRecordAggregateUi(recordId, req, params) {
|
|
207
|
-
const { networkAdapter, resourceRequest, resourceRequestContext } = req;
|
|
208
|
-
const compositeRequest = buildGetRecordByFieldsCompositeRequest(resourceRequest, params);
|
|
209
|
-
// W-12245125: Emit chunk size metrics
|
|
210
|
-
instrumentation.aggregateUiChunkCount(() => compositeRequest.length);
|
|
211
|
-
const aggregateUiParams = {
|
|
212
|
-
compositeRequest,
|
|
213
|
-
};
|
|
214
|
-
const aggregateUiResourceRequest = {
|
|
215
|
-
baseUri: UI_API_BASE_URI,
|
|
216
|
-
basePath: '/aggregate-ui',
|
|
217
|
-
method: 'post',
|
|
218
|
-
priority: resourceRequest.priority,
|
|
219
|
-
urlParams: {},
|
|
220
|
-
body: aggregateUiParams,
|
|
221
|
-
queryParams: {},
|
|
222
|
-
headers: {},
|
|
223
|
-
};
|
|
224
|
-
return dispatchSplitRecordAggregateUiAction(recordId, networkAdapter, aggregateUiResourceRequest, resourceRequestContext);
|
|
225
|
-
}
|
|
226
|
-
const getRecordDispatcher = (req) => {
|
|
227
|
-
const { resourceRequest, networkAdapter, resourceRequestContext } = req;
|
|
228
|
-
const { queryParams, urlParams } = resourceRequest;
|
|
229
|
-
const { fields, optionalFields } = queryParams;
|
|
230
|
-
if (process.env.NODE_ENV !== 'production') {
|
|
231
|
-
if (typeof urlParams.recordId !== 'string') {
|
|
232
|
-
throw new Error(`Invalid recordId: expected string, recieved "${typeof urlParams.recordId}"`);
|
|
233
|
-
}
|
|
234
|
-
}
|
|
235
|
-
const recordId = urlParams.recordId;
|
|
236
|
-
const fieldsArray = fields !== undefined && isArray(fields) ? fields : [];
|
|
237
|
-
const optionalFieldsArray = optionalFields !== undefined && Array.isArray(optionalFields)
|
|
238
|
-
? optionalFields
|
|
239
|
-
: [];
|
|
240
|
-
const fieldsString = fieldsArray.join(',');
|
|
241
|
-
const optionalFieldsString = optionalFieldsArray.join(',');
|
|
242
|
-
// Don't submit a megarequest to UIAPI due to SOQL limit reasons.
|
|
243
|
-
// Split and aggregate if needed
|
|
244
|
-
const useAggregateUi = shouldUseAggregateUiForGetRecord(fieldsString, optionalFieldsString);
|
|
245
|
-
if (useAggregateUi) {
|
|
246
|
-
return buildAndDispatchGetRecordAggregateUi(recordId, {
|
|
247
|
-
networkAdapter,
|
|
248
|
-
resourceRequest,
|
|
249
|
-
resourceRequestContext,
|
|
250
|
-
}, {
|
|
251
|
-
fieldsArray,
|
|
252
|
-
optionalFieldsArray,
|
|
253
|
-
fieldsLength: fieldsString.length,
|
|
254
|
-
optionalFieldsLength: optionalFieldsString.length,
|
|
255
|
-
});
|
|
256
|
-
}
|
|
257
|
-
return defaultDispatcher(req).catch((err) => {
|
|
258
|
-
if (fetchResponseIsQueryTooComplicated(err)) {
|
|
259
|
-
// Retry with aggregateUi to see if we can avoid Query Too Complicated
|
|
260
|
-
return buildAndDispatchGetRecordAggregateUi(recordId, {
|
|
261
|
-
networkAdapter,
|
|
262
|
-
resourceRequest,
|
|
263
|
-
resourceRequestContext,
|
|
264
|
-
}, {
|
|
265
|
-
fieldsArray,
|
|
266
|
-
optionalFieldsArray,
|
|
267
|
-
fieldsLength: fieldsString.length,
|
|
268
|
-
optionalFieldsLength: optionalFieldsString.length,
|
|
269
|
-
});
|
|
270
|
-
}
|
|
271
|
-
else {
|
|
272
|
-
throw err;
|
|
273
|
-
}
|
|
274
|
-
});
|
|
275
|
-
};
|
|
276
|
-
function matchRecordsHandlers(path, resourceRequest) {
|
|
277
|
-
const method = resourceRequest.method.toLowerCase();
|
|
278
|
-
if (method === 'get' &&
|
|
279
|
-
path.startsWith(UIAPI_RECORDS_PATH) &&
|
|
280
|
-
path.startsWith(UIAPI_RECORDS_BATCH_PATH) === false) {
|
|
281
|
-
return getRecordDispatcher;
|
|
282
|
-
}
|
|
283
|
-
return null;
|
|
192
|
+
const UIAPI_RECORDS_PATH = `${UI_API_BASE_URI}/records`;
|
|
193
|
+
const UIAPI_RECORDS_BATCH_PATH = `${UI_API_BASE_URI}/records/batch/`;
|
|
194
|
+
const QUERY_TOO_COMPLICATED_ERROR_CODE = 'QUERY_TOO_COMPLICATED';
|
|
195
|
+
function fetchResponseIsQueryTooComplicated(error) {
|
|
196
|
+
const { body } = error;
|
|
197
|
+
if (error.status === HttpStatusCode.BadRequest && body !== undefined) {
|
|
198
|
+
return (body.statusCode === HttpStatusCode.BadRequest &&
|
|
199
|
+
body.errorCode === QUERY_TOO_COMPLICATED_ERROR_CODE);
|
|
200
|
+
}
|
|
201
|
+
return false;
|
|
202
|
+
}
|
|
203
|
+
/*
|
|
204
|
+
* Takes a ResourceRequest, builds the aggregateUi payload, and dispatches via aggregateUi action
|
|
205
|
+
*/
|
|
206
|
+
function buildAndDispatchGetRecordAggregateUi(recordId, req, params) {
|
|
207
|
+
const { networkAdapter, resourceRequest, resourceRequestContext } = req;
|
|
208
|
+
const compositeRequest = buildGetRecordByFieldsCompositeRequest(resourceRequest, params);
|
|
209
|
+
// W-12245125: Emit chunk size metrics
|
|
210
|
+
instrumentation.aggregateUiChunkCount(() => compositeRequest.length);
|
|
211
|
+
const aggregateUiParams = {
|
|
212
|
+
compositeRequest,
|
|
213
|
+
};
|
|
214
|
+
const aggregateUiResourceRequest = {
|
|
215
|
+
baseUri: UI_API_BASE_URI,
|
|
216
|
+
basePath: '/aggregate-ui',
|
|
217
|
+
method: 'post',
|
|
218
|
+
priority: resourceRequest.priority,
|
|
219
|
+
urlParams: {},
|
|
220
|
+
body: aggregateUiParams,
|
|
221
|
+
queryParams: {},
|
|
222
|
+
headers: {},
|
|
223
|
+
};
|
|
224
|
+
return dispatchSplitRecordAggregateUiAction(recordId, networkAdapter, aggregateUiResourceRequest, resourceRequestContext);
|
|
225
|
+
}
|
|
226
|
+
const getRecordDispatcher = (req) => {
|
|
227
|
+
const { resourceRequest, networkAdapter, resourceRequestContext } = req;
|
|
228
|
+
const { queryParams, urlParams } = resourceRequest;
|
|
229
|
+
const { fields, optionalFields } = queryParams;
|
|
230
|
+
if (process.env.NODE_ENV !== 'production') {
|
|
231
|
+
if (typeof urlParams.recordId !== 'string') {
|
|
232
|
+
throw new Error(`Invalid recordId: expected string, recieved "${typeof urlParams.recordId}"`);
|
|
233
|
+
}
|
|
234
|
+
}
|
|
235
|
+
const recordId = urlParams.recordId;
|
|
236
|
+
const fieldsArray = fields !== undefined && isArray(fields) ? fields : [];
|
|
237
|
+
const optionalFieldsArray = optionalFields !== undefined && Array.isArray(optionalFields)
|
|
238
|
+
? optionalFields
|
|
239
|
+
: [];
|
|
240
|
+
const fieldsString = fieldsArray.join(',');
|
|
241
|
+
const optionalFieldsString = optionalFieldsArray.join(',');
|
|
242
|
+
// Don't submit a megarequest to UIAPI due to SOQL limit reasons.
|
|
243
|
+
// Split and aggregate if needed
|
|
244
|
+
const useAggregateUi = shouldUseAggregateUiForGetRecord(fieldsString, optionalFieldsString);
|
|
245
|
+
if (useAggregateUi) {
|
|
246
|
+
return buildAndDispatchGetRecordAggregateUi(recordId, {
|
|
247
|
+
networkAdapter,
|
|
248
|
+
resourceRequest,
|
|
249
|
+
resourceRequestContext,
|
|
250
|
+
}, {
|
|
251
|
+
fieldsArray,
|
|
252
|
+
optionalFieldsArray,
|
|
253
|
+
fieldsLength: fieldsString.length,
|
|
254
|
+
optionalFieldsLength: optionalFieldsString.length,
|
|
255
|
+
});
|
|
256
|
+
}
|
|
257
|
+
return defaultDispatcher(req).catch((err) => {
|
|
258
|
+
if (fetchResponseIsQueryTooComplicated(err)) {
|
|
259
|
+
// Retry with aggregateUi to see if we can avoid Query Too Complicated
|
|
260
|
+
return buildAndDispatchGetRecordAggregateUi(recordId, {
|
|
261
|
+
networkAdapter,
|
|
262
|
+
resourceRequest,
|
|
263
|
+
resourceRequestContext,
|
|
264
|
+
}, {
|
|
265
|
+
fieldsArray,
|
|
266
|
+
optionalFieldsArray,
|
|
267
|
+
fieldsLength: fieldsString.length,
|
|
268
|
+
optionalFieldsLength: optionalFieldsString.length,
|
|
269
|
+
});
|
|
270
|
+
}
|
|
271
|
+
else {
|
|
272
|
+
throw err;
|
|
273
|
+
}
|
|
274
|
+
});
|
|
275
|
+
};
|
|
276
|
+
function matchRecordsHandlers(path, resourceRequest) {
|
|
277
|
+
const method = resourceRequest.method.toLowerCase();
|
|
278
|
+
if (method === 'get' &&
|
|
279
|
+
path.startsWith(UIAPI_RECORDS_PATH) &&
|
|
280
|
+
path.startsWith(UIAPI_RECORDS_BATCH_PATH) === false) {
|
|
281
|
+
return getRecordDispatcher;
|
|
282
|
+
}
|
|
283
|
+
return null;
|
|
284
284
|
}
|
|
285
285
|
|
|
286
|
-
const defaultDispatcher = (req) => {
|
|
287
|
-
const { networkAdapter, resourceRequest, resourceRequestContext } = req;
|
|
288
|
-
return networkAdapter(resourceRequest, resourceRequestContext);
|
|
289
|
-
};
|
|
290
|
-
function getDispatcher(resourceRequest) {
|
|
291
|
-
const { basePath, baseUri } = resourceRequest;
|
|
292
|
-
const path = `${baseUri}${basePath}`;
|
|
293
|
-
const recordsMatch = matchRecordsHandlers(path, resourceRequest);
|
|
294
|
-
if (recordsMatch !== null) {
|
|
295
|
-
return recordsMatch;
|
|
296
|
-
}
|
|
297
|
-
return defaultDispatcher;
|
|
286
|
+
const defaultDispatcher = (req) => {
|
|
287
|
+
const { networkAdapter, resourceRequest, resourceRequestContext } = req;
|
|
288
|
+
return networkAdapter(resourceRequest, resourceRequestContext);
|
|
289
|
+
};
|
|
290
|
+
function getDispatcher(resourceRequest) {
|
|
291
|
+
const { basePath, baseUri } = resourceRequest;
|
|
292
|
+
const path = `${baseUri}${basePath}`;
|
|
293
|
+
const recordsMatch = matchRecordsHandlers(path, resourceRequest);
|
|
294
|
+
if (recordsMatch !== null) {
|
|
295
|
+
return recordsMatch;
|
|
296
|
+
}
|
|
297
|
+
return defaultDispatcher;
|
|
298
298
|
}
|
|
299
299
|
|
|
300
|
-
const inflightRequests = Object.create(null);
|
|
301
|
-
const TRANSACTION_KEY_SEP = '::';
|
|
302
|
-
const EMPTY_STRING = '';
|
|
303
|
-
function isResourceRequestDedupable(resourceRequest) {
|
|
304
|
-
const resourceRequestContext = resourceRequest.resourceRequestContext;
|
|
305
|
-
return (resourceRequest.resourceRequest.method.toLowerCase() === 'get' ||
|
|
306
|
-
(resourceRequestContext && resourceRequestContext.luvioRequestMethod === 'get'));
|
|
307
|
-
}
|
|
308
|
-
function getTransactionKey(req) {
|
|
309
|
-
const { resourceRequest } = req;
|
|
310
|
-
const { baseUri, basePath, queryParams, headers } = resourceRequest;
|
|
311
|
-
const path = `${baseUri}${basePath}`;
|
|
312
|
-
const queryParamsString = queryParams ? stringify(queryParams) : EMPTY_STRING;
|
|
313
|
-
const headersString = stringify(headers);
|
|
314
|
-
const bodyString = resourceRequest.body && isResourceRequestDedupable(req)
|
|
315
|
-
? stringify(resourceRequest.body)
|
|
316
|
-
: EMPTY_STRING;
|
|
317
|
-
return `${path}${TRANSACTION_KEY_SEP}${headersString}${TRANSACTION_KEY_SEP}${queryParamsString}${bodyString}`;
|
|
318
|
-
}
|
|
319
|
-
function getFulfillingRequest(inflightRequests, resourceRequest) {
|
|
320
|
-
const { fulfill } = resourceRequest;
|
|
321
|
-
if (fulfill === undefined) {
|
|
322
|
-
return null;
|
|
323
|
-
}
|
|
324
|
-
const handlersMap = entries(inflightRequests);
|
|
325
|
-
for (let i = 0, len = handlersMap.length; i < len; i += 1) {
|
|
326
|
-
const [transactionKey, handlers] = handlersMap[i];
|
|
327
|
-
// check fulfillment against only the first handler ([0]) because it's equal or
|
|
328
|
-
// fulfills all subsequent handlers in the array
|
|
329
|
-
const existing = handlers[0].resourceRequest;
|
|
330
|
-
if (fulfill(existing, resourceRequest) === true) {
|
|
331
|
-
return transactionKey;
|
|
332
|
-
}
|
|
333
|
-
}
|
|
334
|
-
return null;
|
|
335
|
-
}
|
|
336
|
-
/**
|
|
337
|
-
Dedupes network requests being made to Salesforce APIs
|
|
338
|
-
This function is only designed to dedupe GET requests.
|
|
300
|
+
const inflightRequests = Object.create(null);
|
|
301
|
+
const TRANSACTION_KEY_SEP = '::';
|
|
302
|
+
const EMPTY_STRING = '';
|
|
303
|
+
function isResourceRequestDedupable(resourceRequest) {
|
|
304
|
+
const resourceRequestContext = resourceRequest.resourceRequestContext;
|
|
305
|
+
return (resourceRequest.resourceRequest.method.toLowerCase() === 'get' ||
|
|
306
|
+
(resourceRequestContext && resourceRequestContext.luvioRequestMethod === 'get'));
|
|
307
|
+
}
|
|
308
|
+
function getTransactionKey(req) {
|
|
309
|
+
const { resourceRequest } = req;
|
|
310
|
+
const { baseUri, basePath, queryParams, headers } = resourceRequest;
|
|
311
|
+
const path = `${baseUri}${basePath}`;
|
|
312
|
+
const queryParamsString = queryParams ? stringify(queryParams) : EMPTY_STRING;
|
|
313
|
+
const headersString = stringify(headers);
|
|
314
|
+
const bodyString = resourceRequest.body && isResourceRequestDedupable(req)
|
|
315
|
+
? stringify(resourceRequest.body)
|
|
316
|
+
: EMPTY_STRING;
|
|
317
|
+
return `${path}${TRANSACTION_KEY_SEP}${headersString}${TRANSACTION_KEY_SEP}${queryParamsString}${bodyString}`;
|
|
318
|
+
}
|
|
319
|
+
function getFulfillingRequest(inflightRequests, resourceRequest) {
|
|
320
|
+
const { fulfill } = resourceRequest;
|
|
321
|
+
if (fulfill === undefined) {
|
|
322
|
+
return null;
|
|
323
|
+
}
|
|
324
|
+
const handlersMap = entries(inflightRequests);
|
|
325
|
+
for (let i = 0, len = handlersMap.length; i < len; i += 1) {
|
|
326
|
+
const [transactionKey, handlers] = handlersMap[i];
|
|
327
|
+
// check fulfillment against only the first handler ([0]) because it's equal or
|
|
328
|
+
// fulfills all subsequent handlers in the array
|
|
329
|
+
const existing = handlers[0].resourceRequest;
|
|
330
|
+
if (fulfill(existing, resourceRequest) === true) {
|
|
331
|
+
return transactionKey;
|
|
332
|
+
}
|
|
333
|
+
}
|
|
334
|
+
return null;
|
|
335
|
+
}
|
|
336
|
+
/**
 Dedupes network requests being made to Salesforce APIs
 This function is only designed to dedupe GET requests.

 If POST/PUT/PATCH/DELETE requests need to be deduped, that should be handled
 on the server instead of here.
 */
const dedupeRequest = (req) => {
    const { resourceRequest } = req;
    // Dev-only sanity check: callers are expected to route only "get"
    // (or get-equivalent) requests here; see isResourceRequestDedupable.
    if (process.env.NODE_ENV !== 'production') {
        if (!isResourceRequestDedupable(req)) {
            throw new Error('Invalid ResourceRequest that cannot be deduped. Only "get" Requests supported.');
        }
    }
    const transactionKey = getTransactionKey(req);
    // if an identical request is in-flight then queue for its response (do not re-issue the request)
    if (transactionKey in inflightRequests) {
        return new Promise((resolve, reject) => {
            push.call(inflightRequests[transactionKey], {
                resolve,
                reject,
                resourceRequest,
            });
        });
    }
    const dispatch = getDispatcher(resourceRequest);
    // fallback to checking a custom deduper to find a similar (but not identical) request
    const similarTransactionKey = getFulfillingRequest(inflightRequests, resourceRequest);
    if (similarTransactionKey !== null) {
        return new Promise((resolve) => {
            // custom dedupers find similar (not identical) requests. if the similar request fails
            // there's no guarantee the deduped request should fail. thus we re-issue the
            // original request in the case of a failure
            push.call(inflightRequests[similarTransactionKey], {
                resolve,
                // Rejection of the similar request triggers a fresh dispatch of
                // this request; that dispatch's outcome settles the promise
                // returned to the caller.
                reject: function reissueRequest() {
                    resolve(dispatch(req));
                },
                resourceRequest,
            });
        });
    }
    // No match: issue the request now and fan its settlement out to every
    // handler that queues under this key while it is in flight.
    dispatch(req).then((response) => {
        const handlers = inflightRequests[transactionKey];
        delete inflightRequests[transactionKey];
        // handlers mutate responses so must clone the response for each.
        // the first handler is given the original version to avoid an
        // extra clone (particularly when there's only 1 handler).
        for (let i = 1, len = handlers.length; i < len; i++) {
            const handler = handlers[i];
            handler.resolve(parse(stringify(response)));
        }
        handlers[0].resolve(response);
    }, (error) => {
        // On failure every queued handler is rejected with the same (shared)
        // error object — errors are not cloned per handler.
        const handlers = inflightRequests[transactionKey];
        delete inflightRequests[transactionKey];
        for (let i = 0, len = handlers.length; i < len; i++) {
            const handler = handlers[i];
            handler.reject(error);
        }
    });
    // rely on sync behavior of Promise creation to create the list for handlers
    // (the executor runs before the dispatch promise above can settle, so the
    // then/catch callbacks always find the handler list populated).
    return new Promise((resolve, reject) => {
        inflightRequests[transactionKey] = [{ resolve, reject, resourceRequest }];
    });
};
|
|
402
402
|
|
|
403
|
-
// Default limiter configuration: bursts of up to 100 requests, refilled at
// 100 tokens per second.
const RATE_LIMIT_CONFIG = {
    bucketCapacity: 100,
    fillsPerSecond: 100,
};
class TokenBucket {
    /**
     * Constructs an instance of Token Bucket for rate limiting. The capacity
     * caps burst size; the refill rate caps sustained throughput.
     *
     * @param config.bucketCapacity The token holding capacity of the bucket
     * @param config.fillsPerSecond The number of tokens replenished every second
     */
    constructor(config) {
        this.bucketCapacity = config.bucketCapacity;
        this.refillTokensPerMilliSecond = config.fillsPerSecond / 1000;
        // Start full so an initial burst up to capacity is permitted.
        this.tokens = config.bucketCapacity;
        this.lastRefillTime = Date.now();
    }
    /**
     * Refills the bucket and removes the desired number of tokens.
     *
     * @param removeTokens number of tokens to be removed from the bucket; should be >= 0
     * @returns {boolean} true if removing the tokens was successful
     */
    take(removeTokens) {
        // refill tokens before removing
        this.refill();
        const leftover = this.tokens - removeTokens;
        if (leftover < 0) {
            return false;
        }
        this.tokens = leftover;
        return true;
    }
    // Credits tokens accrued since the last refill, clamped to capacity.
    refill() {
        const now = Date.now();
        const elapsedMs = now - this.lastRefillTime;
        // Truncate so the token count stays an integer.
        const replenished = this.tokens + Math.trunc(elapsedMs * this.refillTokensPerMilliSecond);
        this.tokens = Math.min(this.bucketCapacity, replenished);
        this.lastRefillTime = now;
    }
}
|
|
448
448
|
// Module-level singleton bucket shared by every adapter instance created via
// platformNetworkAdapter; one token is consumed per dispatched request.
var tokenBucket = new TokenBucket(RATE_LIMIT_CONFIG);
|
|
449
449
|
|
|
450
|
-
/**
 * Wraps a base NetworkAdapter with platform behavior: rate-limit
 * instrumentation plus deduplication of GET (and GET-equivalent) requests.
 */
function platformNetworkAdapter(baseNetworkAdapter) {
    return (resourceRequest, resourceRequestContext) => {
        // Observe-only rate limiting: when the bucket is exhausted we emit a
        // metric but still let the request proceed.
        if (!tokenBucket.take(1)) {
            instrumentation.networkRateLimitExceeded();
        }
        const salesforceRequest = {
            networkAdapter: baseNetworkAdapter,
            resourceRequest,
            resourceRequestContext,
        };
        // If GET, or overriden to be treated as a GET with
        // resourceRequestContext.networkResourceOverride, then dedupe;
        // everything else is dispatched directly.
        if (isResourceRequestDedupable(salesforceRequest)) {
            return dedupeRequest(salesforceRequest);
        }
        return getDispatcher(resourceRequest)(salesforceRequest);
    };
}
|
|
471
471
|
|
|
472
472
|
export { platformNetworkAdapter as default, instrument };
|
|
@@ -1,10 +1,10 @@
|
|
|
1
|
-
import type { Dispatcher, SalesforceResourceRequest } from './main';
|
|
2
|
-
export declare function isResourceRequestDedupable(resourceRequest: SalesforceResourceRequest): boolean;
|
|
3
|
-
/**
|
|
4
|
-
Dedupes network requests being made to Salesforce APIs
|
|
5
|
-
This function is only designed to dedupe GET requests.
|
|
1
|
+
import type { Dispatcher, SalesforceResourceRequest } from './main';
|
|
2
|
+
export declare function isResourceRequestDedupable(resourceRequest: SalesforceResourceRequest): boolean;
|
|
3
|
+
/**
|
|
4
|
+
Dedupes network requests being made to Salesforce APIs
|
|
5
|
+
This function is only designed to dedupe GET requests.
|
|
6
6
|
|
|
7
|
-
If POST/PUT/PATCH/DELETE requests need to be deduped, that should be handled
|
|
8
|
-
on the server instead of here.
|
|
9
|
-
*/
|
|
10
|
-
export declare const dedupeRequest: Dispatcher;
|
|
7
|
+
If POST/PUT/PATCH/DELETE requests need to be deduped, that should be handled
|
|
8
|
+
on the server instead of here.
|
|
9
|
+
*/
|
|
10
|
+
export declare const dedupeRequest: Dispatcher;
|
|
@@ -1,26 +1,26 @@
|
|
|
1
|
-
import type { FetchResponse, NetworkAdapter, ResourceRequest, ResourceRequestContext } from '@luvio/engine';
|
|
2
|
-
import type { RecordRepresentation } from '@salesforce/lds-adapters-uiapi';
|
|
3
|
-
export declare const MAX_AGGREGATE_UI_CHUNK_LIMIT = 50;
|
|
4
|
-
export interface CompositeRequest {
|
|
5
|
-
url: string;
|
|
6
|
-
referenceId: string;
|
|
7
|
-
}
|
|
8
|
-
export declare function mergeRecordFields(first: RecordRepresentation, second: RecordRepresentation): RecordRepresentation;
|
|
9
|
-
/** Invoke executeAggregateUi Aura controller. This is only to be used with large getRecord requests that
|
|
10
|
-
* would otherwise cause a query length exception.
|
|
11
|
-
*/
|
|
12
|
-
export declare function dispatchSplitRecordAggregateUiAction(recordId: string, networkAdapter: NetworkAdapter, resourceRequest: ResourceRequest, resourceRequestContext: ResourceRequestContext): Promise<FetchResponse<unknown>>;
|
|
13
|
-
export declare function shouldUseAggregateUiForGetRecord(fieldsArray: string, optionalFieldsArray: string): boolean;
|
|
14
|
-
interface AggregateUiParams {
|
|
15
|
-
fields?: string[];
|
|
16
|
-
optionalFields?: string[];
|
|
17
|
-
}
|
|
18
|
-
export declare function buildAggregateUiUrl(params: AggregateUiParams, resourceRequest: ResourceRequest): string;
|
|
19
|
-
export interface GetRecordCompositeRequestParams {
|
|
20
|
-
fieldsArray: Array<string>;
|
|
21
|
-
optionalFieldsArray: Array<string>;
|
|
22
|
-
fieldsLength: number;
|
|
23
|
-
optionalFieldsLength: number;
|
|
24
|
-
}
|
|
25
|
-
export declare function buildGetRecordByFieldsCompositeRequest(resourceRequest: ResourceRequest, recordsCompositeRequest: GetRecordCompositeRequestParams): CompositeRequest[];
|
|
26
|
-
export {};
|
|
1
|
+
import type { FetchResponse, NetworkAdapter, ResourceRequest, ResourceRequestContext } from '@luvio/engine';
|
|
2
|
+
import type { RecordRepresentation } from '@salesforce/lds-adapters-uiapi';
|
|
3
|
+
export declare const MAX_AGGREGATE_UI_CHUNK_LIMIT = 50;
|
|
4
|
+
export interface CompositeRequest {
|
|
5
|
+
url: string;
|
|
6
|
+
referenceId: string;
|
|
7
|
+
}
|
|
8
|
+
export declare function mergeRecordFields(first: RecordRepresentation, second: RecordRepresentation): RecordRepresentation;
|
|
9
|
+
/** Invoke executeAggregateUi Aura controller. This is only to be used with large getRecord requests that
|
|
10
|
+
* would otherwise cause a query length exception.
|
|
11
|
+
*/
|
|
12
|
+
export declare function dispatchSplitRecordAggregateUiAction(recordId: string, networkAdapter: NetworkAdapter, resourceRequest: ResourceRequest, resourceRequestContext: ResourceRequestContext): Promise<FetchResponse<unknown>>;
|
|
13
|
+
export declare function shouldUseAggregateUiForGetRecord(fieldsArray: string, optionalFieldsArray: string): boolean;
|
|
14
|
+
interface AggregateUiParams {
|
|
15
|
+
fields?: string[];
|
|
16
|
+
optionalFields?: string[];
|
|
17
|
+
}
|
|
18
|
+
export declare function buildAggregateUiUrl(params: AggregateUiParams, resourceRequest: ResourceRequest): string;
|
|
19
|
+
export interface GetRecordCompositeRequestParams {
|
|
20
|
+
fieldsArray: Array<string>;
|
|
21
|
+
optionalFieldsArray: Array<string>;
|
|
22
|
+
fieldsLength: number;
|
|
23
|
+
optionalFieldsLength: number;
|
|
24
|
+
}
|
|
25
|
+
export declare function buildGetRecordByFieldsCompositeRequest(resourceRequest: ResourceRequest, recordsCompositeRequest: GetRecordCompositeRequestParams): CompositeRequest[];
|
|
26
|
+
export {};
|
|
@@ -1,9 +1,9 @@
|
|
|
1
|
-
import type { FetchResponse, NetworkAdapter, ResourceRequest, ResourceRequestContext } from '@luvio/engine';
|
|
2
|
-
export interface SalesforceResourceRequest {
|
|
3
|
-
networkAdapter: NetworkAdapter;
|
|
4
|
-
resourceRequest: ResourceRequest;
|
|
5
|
-
resourceRequestContext: ResourceRequestContext;
|
|
6
|
-
}
|
|
7
|
-
export type Dispatcher = (req: SalesforceResourceRequest) => Promise<FetchResponse<unknown>>;
|
|
8
|
-
export declare const defaultDispatcher: Dispatcher;
|
|
9
|
-
export declare function getDispatcher(resourceRequest: ResourceRequest): Dispatcher;
|
|
1
|
+
import type { FetchResponse, NetworkAdapter, ResourceRequest, ResourceRequestContext } from '@luvio/engine';
|
|
2
|
+
export interface SalesforceResourceRequest {
|
|
3
|
+
networkAdapter: NetworkAdapter;
|
|
4
|
+
resourceRequest: ResourceRequest;
|
|
5
|
+
resourceRequestContext: ResourceRequestContext;
|
|
6
|
+
}
|
|
7
|
+
export type Dispatcher = (req: SalesforceResourceRequest) => Promise<FetchResponse<unknown>>;
|
|
8
|
+
export declare const defaultDispatcher: Dispatcher;
|
|
9
|
+
export declare function getDispatcher(resourceRequest: ResourceRequest): Dispatcher;
|
|
@@ -1,3 +1,3 @@
|
|
|
1
|
-
import type { ResourceRequest } from '@luvio/engine';
|
|
2
|
-
import type { Dispatcher } from './main';
|
|
3
|
-
export declare function matchRecordsHandlers(path: string, resourceRequest: ResourceRequest): Dispatcher | null;
|
|
1
|
+
import type { ResourceRequest } from '@luvio/engine';
|
|
2
|
+
import type { Dispatcher } from './main';
|
|
3
|
+
export declare function matchRecordsHandlers(path: string, resourceRequest: ResourceRequest): Dispatcher | null;
|
|
@@ -1,3 +1,3 @@
|
|
|
1
|
-
import type { ResourceRequest } from '@luvio/engine';
|
|
2
|
-
export declare function isRelatedListPostRecordsResourceRequest(resourceRequest: ResourceRequest): boolean;
|
|
3
|
-
export declare function convertPostRelatedListRecordsToGet(resourceRequest: ResourceRequest): ResourceRequest;
|
|
1
|
+
import type { ResourceRequest } from '@luvio/engine';
|
|
2
|
+
export declare function isRelatedListPostRecordsResourceRequest(resourceRequest: ResourceRequest): boolean;
|
|
3
|
+
export declare function convertPostRelatedListRecordsToGet(resourceRequest: ResourceRequest): ResourceRequest;
|
|
@@ -1,17 +1,17 @@
|
|
|
1
|
-
import type { ResourceRequest } from '@luvio/engine';
|
|
2
|
-
export declare let instrumentation: {
|
|
3
|
-
aggregateUiChunkCount: (_cb: () => number) => void;
|
|
4
|
-
aggregateUiConnectError: () => void;
|
|
5
|
-
duplicateRequest: (_cb: () => ResourceRequest) => void;
|
|
6
|
-
getRecordAggregateInvoke: () => void;
|
|
7
|
-
getRecordAggregateResolve: (_cb: () => {
|
|
8
|
-
recordId: string;
|
|
9
|
-
apiName: string;
|
|
10
|
-
}) => void;
|
|
11
|
-
getRecordAggregateReject: (_cb: () => string) => void;
|
|
12
|
-
getRecordAggregateRetry: () => void;
|
|
13
|
-
getRecordNormalInvoke: () => void;
|
|
14
|
-
networkRateLimitExceeded: () => void;
|
|
15
|
-
};
|
|
16
|
-
export declare function instrument(newInstrumentation: Partial<typeof instrumentation>): void;
|
|
17
|
-
export type Instrument = typeof instrument;
|
|
1
|
+
import type { ResourceRequest } from '@luvio/engine';
|
|
2
|
+
export declare let instrumentation: {
|
|
3
|
+
aggregateUiChunkCount: (_cb: () => number) => void;
|
|
4
|
+
aggregateUiConnectError: () => void;
|
|
5
|
+
duplicateRequest: (_cb: () => ResourceRequest) => void;
|
|
6
|
+
getRecordAggregateInvoke: () => void;
|
|
7
|
+
getRecordAggregateResolve: (_cb: () => {
|
|
8
|
+
recordId: string;
|
|
9
|
+
apiName: string;
|
|
10
|
+
}) => void;
|
|
11
|
+
getRecordAggregateReject: (_cb: () => string) => void;
|
|
12
|
+
getRecordAggregateRetry: () => void;
|
|
13
|
+
getRecordNormalInvoke: () => void;
|
|
14
|
+
networkRateLimitExceeded: () => void;
|
|
15
|
+
};
|
|
16
|
+
export declare function instrument(newInstrumentation: Partial<typeof instrumentation>): void;
|
|
17
|
+
export type Instrument = typeof instrument;
|
|
@@ -1,16 +1,16 @@
|
|
|
1
|
-
declare const parse: (text: string, reviver?: ((this: any, key: string, value: any) => any) | undefined) => any, stringify: {
|
|
2
|
-
(value: any, replacer?: ((this: any, key: string, value: any) => any) | undefined, space?: string | number | undefined): string;
|
|
3
|
-
(value: any, replacer?: (string | number)[] | null | undefined, space?: string | number | undefined): string;
|
|
4
|
-
};
|
|
5
|
-
declare const join: (separator?: string | undefined) => string, push: (...items: any[]) => number, unshift: (...items: any[]) => number;
|
|
6
|
-
declare const isArray: (arg: any) => arg is any[];
|
|
7
|
-
declare const entries: {
|
|
8
|
-
<T>(o: {
|
|
9
|
-
[s: string]: T;
|
|
10
|
-
} | ArrayLike<T>): [string, T][];
|
|
11
|
-
(o: {}): [string, any][];
|
|
12
|
-
}, keys: {
|
|
13
|
-
(o: object): string[];
|
|
14
|
-
(o: {}): string[];
|
|
15
|
-
};
|
|
16
|
-
export { isArray as ArrayIsArray, entries as ObjectEntries, keys as ObjectKeys, parse as JSONParse, stringify as JSONStringify, join as ArrayPrototypeJoin, push as ArrayPrototypePush, unshift as ArrayPrototypeUnshift, };
|
|
1
|
+
declare const parse: (text: string, reviver?: ((this: any, key: string, value: any) => any) | undefined) => any, stringify: {
|
|
2
|
+
(value: any, replacer?: ((this: any, key: string, value: any) => any) | undefined, space?: string | number | undefined): string;
|
|
3
|
+
(value: any, replacer?: (string | number)[] | null | undefined, space?: string | number | undefined): string;
|
|
4
|
+
};
|
|
5
|
+
declare const join: (separator?: string | undefined) => string, push: (...items: any[]) => number, unshift: (...items: any[]) => number;
|
|
6
|
+
declare const isArray: (arg: any) => arg is any[];
|
|
7
|
+
declare const entries: {
|
|
8
|
+
<T>(o: {
|
|
9
|
+
[s: string]: T;
|
|
10
|
+
} | ArrayLike<T>): [string, T][];
|
|
11
|
+
(o: {}): [string, any][];
|
|
12
|
+
}, keys: {
|
|
13
|
+
(o: object): string[];
|
|
14
|
+
(o: {}): string[];
|
|
15
|
+
};
|
|
16
|
+
export { isArray as ArrayIsArray, entries as ObjectEntries, keys as ObjectKeys, parse as JSONParse, stringify as JSONStringify, join as ArrayPrototypeJoin, push as ArrayPrototypePush, unshift as ArrayPrototypeUnshift, };
|
|
@@ -1,8 +1,8 @@
|
|
|
1
|
-
import type { NetworkAdapter } from '@luvio/engine';
|
|
2
|
-
import type { Instrument } from './instrumentation';
|
|
3
|
-
export { instrument, Instrument } from './instrumentation';
|
|
4
|
-
export default function platformNetworkAdapter(baseNetworkAdapter: NetworkAdapter): NetworkAdapter;
|
|
5
|
-
export type Registration = {
|
|
6
|
-
id: '@salesforce/lds-network-adapter';
|
|
7
|
-
instrument: Instrument;
|
|
8
|
-
};
|
|
1
|
+
import type { NetworkAdapter } from '@luvio/engine';
|
|
2
|
+
import type { Instrument } from './instrumentation';
|
|
3
|
+
export { instrument, Instrument } from './instrumentation';
|
|
4
|
+
export default function platformNetworkAdapter(baseNetworkAdapter: NetworkAdapter): NetworkAdapter;
|
|
5
|
+
export type Registration = {
|
|
6
|
+
id: '@salesforce/lds-network-adapter';
|
|
7
|
+
instrument: Instrument;
|
|
8
|
+
};
|
|
@@ -1,28 +1,28 @@
|
|
|
1
|
-
interface RateLimitConfig {
|
|
2
|
-
bucketCapacity: number;
|
|
3
|
-
fillsPerSecond: number;
|
|
4
|
-
}
|
|
5
|
-
export declare const RATE_LIMIT_CONFIG: RateLimitConfig;
|
|
6
|
-
export declare class TokenBucket {
|
|
7
|
-
bucketCapacity: number;
|
|
8
|
-
readonly refillTokensPerMilliSecond: number;
|
|
9
|
-
tokens: number;
|
|
10
|
-
private lastRefillTime;
|
|
11
|
-
/**
|
|
12
|
-
* Constructs an instance of Token Bucket for rate limiting
|
|
13
|
-
*
|
|
14
|
-
* @param bucket The token holding capacity of the bucket
|
|
15
|
-
* @param refillTokensPerSecond The number of tokens replenished every second
|
|
16
|
-
*/
|
|
17
|
-
constructor(config: RateLimitConfig);
|
|
18
|
-
/**
|
|
19
|
-
* Refills the bucket and removes desired number of tokens
|
|
20
|
-
*
|
|
21
|
-
* @param removeTokens number of tokens to be removed from the bucket should be >= 0
|
|
22
|
-
* @returns {boolean} true if removing token was succesful
|
|
23
|
-
*/
|
|
24
|
-
take(removeTokens: number): boolean;
|
|
25
|
-
private refill;
|
|
26
|
-
}
|
|
27
|
-
declare const _default: TokenBucket;
|
|
28
|
-
export default _default;
|
|
1
|
+
interface RateLimitConfig {
|
|
2
|
+
bucketCapacity: number;
|
|
3
|
+
fillsPerSecond: number;
|
|
4
|
+
}
|
|
5
|
+
export declare const RATE_LIMIT_CONFIG: RateLimitConfig;
|
|
6
|
+
export declare class TokenBucket {
|
|
7
|
+
bucketCapacity: number;
|
|
8
|
+
readonly refillTokensPerMilliSecond: number;
|
|
9
|
+
tokens: number;
|
|
10
|
+
private lastRefillTime;
|
|
11
|
+
/**
|
|
12
|
+
* Constructs an instance of Token Bucket for rate limiting
|
|
13
|
+
*
|
|
14
|
+
* @param bucket The token holding capacity of the bucket
|
|
15
|
+
* @param refillTokensPerSecond The number of tokens replenished every second
|
|
16
|
+
*/
|
|
17
|
+
constructor(config: RateLimitConfig);
|
|
18
|
+
/**
|
|
19
|
+
* Refills the bucket and removes desired number of tokens
|
|
20
|
+
*
|
|
21
|
+
* @param removeTokens number of tokens to be removed from the bucket should be >= 0
|
|
22
|
+
* @returns {boolean} true if removing token was succesful
|
|
23
|
+
*/
|
|
24
|
+
take(removeTokens: number): boolean;
|
|
25
|
+
private refill;
|
|
26
|
+
}
|
|
27
|
+
declare const _default: TokenBucket;
|
|
28
|
+
export default _default;
|
|
@@ -1 +1 @@
|
|
|
1
|
-
export declare const UI_API_BASE_URI = "/services/data/v58.0/ui-api";
|
|
1
|
+
export declare const UI_API_BASE_URI = "/services/data/v58.0/ui-api";
|
package/package.json
CHANGED
|
@@ -1,18 +1,18 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@salesforce/lds-network-adapter",
|
|
3
|
-
"version": "1.124.
|
|
3
|
+
"version": "1.124.4",
|
|
4
4
|
"license": "SEE LICENSE IN LICENSE.txt",
|
|
5
5
|
"description": "LDS shared Network Adapter code for Lightning Platform",
|
|
6
6
|
"main": "dist/ldsNetwork.js",
|
|
7
7
|
"module": "dist/ldsNetwork.js",
|
|
8
|
-
"types": "dist/main.d.ts",
|
|
8
|
+
"types": "dist/types/main.d.ts",
|
|
9
9
|
"files": [
|
|
10
10
|
"dist"
|
|
11
11
|
],
|
|
12
12
|
"exports": {
|
|
13
13
|
".": {
|
|
14
14
|
"import": "./dist/ldsNetwork.js",
|
|
15
|
-
"types": "./dist/main.d.ts",
|
|
15
|
+
"types": "./dist/types/main.d.ts",
|
|
16
16
|
"default": "./dist/ldsNetwork.js"
|
|
17
17
|
}
|
|
18
18
|
},
|