@rabbitio/ui-kit 1.0.0-beta.37 → 1.0.0-beta.39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +2042 -4
- package/dist/index.cjs.map +1 -1
- package/dist/index.modern.js +1193 -1
- package/dist/index.modern.js.map +1 -1
- package/dist/index.module.js +2029 -5
- package/dist/index.module.js.map +1 -1
- package/dist/index.umd.js +2044 -8
- package/dist/index.umd.js.map +1 -1
- package/package.json +4 -2
- package/src/common/adapters/axiosAdapter.js +35 -0
- package/src/common/errorUtils.js +15 -0
- package/src/common/utils/postponeExecution.js +11 -0
- package/src/components/utils/uiUtils.js +14 -0
- package/src/components/utils/urlQueryUtils.js +87 -0
- package/src/index.js +16 -0
- package/src/robustExteranlApiCallerService/cacheAndConcurrentRequestsResolver.js +559 -0
- package/src/robustExteranlApiCallerService/cachedRobustExternalApiCallerService.js +188 -0
- package/src/robustExteranlApiCallerService/cancelProcessing.js +29 -0
- package/src/robustExteranlApiCallerService/concurrentCalculationsMetadataHolder.js +103 -0
- package/src/robustExteranlApiCallerService/externalApiProvider.js +156 -0
- package/src/robustExteranlApiCallerService/externalServicesStatsCollector.js +82 -0
- package/src/robustExteranlApiCallerService/robustExternalAPICallerService.js +386 -0
|
@@ -0,0 +1,559 @@
|
|
|
1
|
+
import { v4 } from "uuid";
|
|
2
|
+
|
|
3
|
+
import { improveAndRethrow } from "../common/errorUtils.js";
|
|
4
|
+
import { Logger } from "../common/utils/logging/logger.js";
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* This util helps to avoid duplicated calls to a shared resource.
|
|
8
|
+
* It tracks is there currently active calculation for the specific cache id and make all other requests
|
|
9
|
+
* with the same cache id waiting for this active calculation to be finished. When the calculation ends
|
|
10
|
+
* the resolver allows all the waiting requesters to get the data from cache and start their own calculations.
|
|
11
|
+
*
|
|
12
|
+
* This class should be instantiated inside some other service where you need to request some resource concurrently.
|
|
13
|
+
* Rules:
|
|
14
|
+
* 1. When you need to make a request inside your main service call 'getCachedOrWaitForCachedOrAcquireLock'
|
|
15
|
+
* on the instance of this class and await for the result. If the flag allowing to start calculation is true
|
|
16
|
+
* then you can request data inside your main service. Otherwise you should use the cached data as an another
|
|
17
|
+
 * requester just finished the most recent requesting and there is actual data in the cache that
|
|
18
|
+
* is returned to you here.
|
|
19
|
+
* 1.1 Also you can acquire a lock directly if you don't want to get cached data. Use the corresponding method 'acquireLock'.
|
|
20
|
+
*
|
|
21
|
+
* 2. If you start requesting (when you successfully acquired the lock) then after receiving the result of your
|
|
22
|
+
* requesting you should call the 'saveCachedData' so the retrieved data will appear in the cache.
|
|
23
|
+
*
|
|
24
|
+
* 3. If you successfully acquired the lock then you should after calling the 'saveCachedData' call
|
|
25
|
+
* the 'releaseLock' - this is mandatory to release the lock and allow other requesters to perform their requests.
|
|
26
|
+
* WARNING: If for any reason you forget to call this method then this class instance will wait perpetually for
|
|
27
|
+
* the lock releasing and all your attempts to request the data will constantly fail. So usually call it
|
|
28
|
+
* inside the 'finally' block.
|
|
29
|
+
*
|
|
30
|
+
* TODO: [tests, critical++] add unit tests - massively used logic and can produce sophisticated concurrency bugs
|
|
31
|
+
*/
|
|
32
|
+
export class CacheAndConcurrentRequestsResolver {
|
|
33
|
+
/**
|
|
34
|
+
* @param bio {string} unique identifier for the exact service
|
|
35
|
+
* @param cache {Cache} cache
|
|
36
|
+
* @param cacheTtl {number|null} time to live for cache ms. 0 or null means the cache cannot expire
|
|
37
|
+
* @param [maxCallAttemptsToWaitForAlreadyRunningRequest=100] {number} number of request allowed to do waiting for
|
|
38
|
+
* result before we fail the original request. Use custom value only if you need to make the attempts count
|
|
39
|
+
* and polling interval changes.
|
|
40
|
+
* @param [timeoutBetweenAttemptsToCheckWhetherAlreadyRunningRequestFinished=1000] {number}
|
|
41
|
+
* timeout ms for polling for a result. if you change maxCallAttemptsToWaitForAlreadyRunningRequest
|
|
42
|
+
* then this parameter maybe also require the custom value.
|
|
43
|
+
* @param [removeExpiredCacheAutomatically=true] {boolean}
|
|
44
|
+
*/
|
|
45
|
+
constructor(
|
|
46
|
+
bio,
|
|
47
|
+
cache,
|
|
48
|
+
cacheTtl,
|
|
49
|
+
removeExpiredCacheAutomatically = true,
|
|
50
|
+
maxCallAttemptsToWaitForAlreadyRunningRequest = 100,
|
|
51
|
+
timeoutBetweenAttemptsToCheckWhetherAlreadyRunningRequestFinished = 1000
|
|
52
|
+
) {
|
|
53
|
+
if (
|
|
54
|
+
cacheTtl != null &&
|
|
55
|
+
cacheTtl <
|
|
56
|
+
timeoutBetweenAttemptsToCheckWhetherAlreadyRunningRequestFinished *
|
|
57
|
+
2
|
|
58
|
+
) {
|
|
59
|
+
/*
|
|
60
|
+
* During the lifetime of this service e.g. if the data is being retrieved slowly we can get
|
|
61
|
+
* RACE CONDITION when we constantly retrieve data and during retrieval it is expired, so we are trying
|
|
62
|
+
* to retrieve it again and again.
|
|
63
|
+
* We have a protection mechanism that we will wait no more than
|
|
64
|
+
* maxCallAttemptsToWaitForAlreadyRunningRequest * timeoutBetweenAttemptsToCheckWhetherAlreadyRunningRequestFinished
|
|
65
|
+
* but this additional check is aimed to reduce potential loading time for some requests.
|
|
66
|
+
*/
|
|
67
|
+
throw new Error(
|
|
68
|
+
`DEV: Wrong parameters passed to construct ${bio} - TTL ${cacheTtl} should be 2 times greater than ${timeoutBetweenAttemptsToCheckWhetherAlreadyRunningRequestFinished}`
|
|
69
|
+
);
|
|
70
|
+
}
|
|
71
|
+
this._bio = bio;
|
|
72
|
+
this._cache = cache;
|
|
73
|
+
this._cacheTtlMs = cacheTtl != null ? cacheTtl : null;
|
|
74
|
+
this._maxExecutionTimeMs =
|
|
75
|
+
maxCallAttemptsToWaitForAlreadyRunningRequest *
|
|
76
|
+
timeoutBetweenAttemptsToCheckWhetherAlreadyRunningRequestFinished;
|
|
77
|
+
this._removeExpiredCacheAutomatically = removeExpiredCacheAutomatically;
|
|
78
|
+
this._requestsManager = new ManagerOfRequestsToTheSameResource(
|
|
79
|
+
bio,
|
|
80
|
+
maxCallAttemptsToWaitForAlreadyRunningRequest,
|
|
81
|
+
timeoutBetweenAttemptsToCheckWhetherAlreadyRunningRequestFinished
|
|
82
|
+
);
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
/**
|
|
86
|
+
* When using this service this is the major method you should call to get data by cache id.
|
|
87
|
+
* This method checks is there cached data and ether
|
|
88
|
+
* - returns you flag that you can start requesting data from the shared resource
|
|
89
|
+
* - or if there is already started calculation waits until it is finished (removed from this service)
|
|
90
|
+
* and returns you the retrieved data
|
|
91
|
+
* - or just returns you the cached data
|
|
92
|
+
*
|
|
93
|
+
* 'canStartDataRetrieval' equal true means that the lock was acquired, and you should manually call 'saveCachedData'
|
|
94
|
+
* if needed and then 'releaseLock' to mark this calculation as finished so other
|
|
95
|
+
* requesters can take their share of the resource.
|
|
96
|
+
*
|
|
97
|
+
* @param cacheId {string}
|
|
98
|
+
* @return {Promise<({
|
|
99
|
+
* canStartDataRetrieval: true,
|
|
100
|
+
* cachedData: any,
|
|
101
|
+
* lockId: string
|
|
102
|
+
* }|{
|
|
103
|
+
* canStartDataRetrieval: false,
|
|
104
|
+
* cachedData: any
|
|
105
|
+
* })>}
|
|
106
|
+
*/
|
|
107
|
+
async getCachedOrWaitForCachedOrAcquireLock(cacheId) {
|
|
108
|
+
try {
|
|
109
|
+
const startedAtTimestamp = Date.now();
|
|
110
|
+
let cached = this._cache.get(cacheId);
|
|
111
|
+
let cachedDataBackupIsPresentButExpired = null;
|
|
112
|
+
if (cached != null && !this._removeExpiredCacheAutomatically) {
|
|
113
|
+
const lastUpdateTimestamp =
|
|
114
|
+
this._cache.getLastUpdateTimestamp(cacheId);
|
|
115
|
+
if (
|
|
116
|
+
(lastUpdateTimestamp ?? 0) + this._cacheTtlMs <
|
|
117
|
+
Date.now()
|
|
118
|
+
) {
|
|
119
|
+
/*
|
|
120
|
+
* Here we are manually clearing 'cached' value retrieved from cache to force the data loading.
|
|
121
|
+
* But we save its value first to the backup variable to be able to return this value if ongoing
|
|
122
|
+
* requesting fails.
|
|
123
|
+
*/
|
|
124
|
+
cachedDataBackupIsPresentButExpired = cached;
|
|
125
|
+
cached = null;
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
let calculationId = null;
|
|
129
|
+
let isRetrievedCacheExpired = true;
|
|
130
|
+
let isWaitingForActiveCalculationSucceeded;
|
|
131
|
+
let weStillHaveSomeTimeToProceedExecution = true;
|
|
132
|
+
while (
|
|
133
|
+
calculationId == null &&
|
|
134
|
+
cached == null &&
|
|
135
|
+
isRetrievedCacheExpired &&
|
|
136
|
+
weStillHaveSomeTimeToProceedExecution
|
|
137
|
+
) {
|
|
138
|
+
const result =
|
|
139
|
+
await this._requestsManager.startCalculationOrWaitForActiveToFinish(
|
|
140
|
+
cacheId
|
|
141
|
+
);
|
|
142
|
+
calculationId = typeof result === "string" ? result : null;
|
|
143
|
+
isWaitingForActiveCalculationSucceeded =
|
|
144
|
+
typeof result === "boolean" ? result : null;
|
|
145
|
+
cached = this._cache.get(cacheId);
|
|
146
|
+
isRetrievedCacheExpired =
|
|
147
|
+
isWaitingForActiveCalculationSucceeded && cached == null;
|
|
148
|
+
weStillHaveSomeTimeToProceedExecution =
|
|
149
|
+
Date.now() - startedAtTimestamp < this._maxExecutionTimeMs;
|
|
150
|
+
}
|
|
151
|
+
if (calculationId) {
|
|
152
|
+
return {
|
|
153
|
+
canStartDataRetrieval: true,
|
|
154
|
+
cachedData: cached ?? cachedDataBackupIsPresentButExpired,
|
|
155
|
+
lockId: calculationId,
|
|
156
|
+
};
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
return {
|
|
160
|
+
canStartDataRetrieval: false,
|
|
161
|
+
cachedData: cached ?? cachedDataBackupIsPresentButExpired,
|
|
162
|
+
};
|
|
163
|
+
} catch (e) {
|
|
164
|
+
improveAndRethrow(
|
|
165
|
+
e,
|
|
166
|
+
`${this._bio}.getCachedOrWaitForCachedOrAcquireLock`
|
|
167
|
+
);
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
/**
|
|
172
|
+
* Returns just the current cache value for the given id.
|
|
173
|
+
* Doesn't wait for the active calculation, doesn't acquire lock, just retrieves the current cache as it is.
|
|
174
|
+
*
|
|
175
|
+
* @param cacheId {string}
|
|
176
|
+
* @return {any}
|
|
177
|
+
*/
|
|
178
|
+
getCached(cacheId) {
|
|
179
|
+
try {
|
|
180
|
+
return this._cache.get(cacheId);
|
|
181
|
+
} catch (e) {
|
|
182
|
+
improveAndRethrow(e, "getCached");
|
|
183
|
+
}
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
_getTtl() {
|
|
187
|
+
return this._removeExpiredCacheAutomatically ? this._cacheTtlMs : null;
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
/**
|
|
191
|
+
* Directly acquires the lock despite on cached data availability.
|
|
192
|
+
* So if this method returns result === true you can start the data retrieval.
|
|
193
|
+
*
|
|
194
|
+
* @param cacheId {string}
|
|
195
|
+
* @return {Promise<{ result: true, lockId: string }|{ result: false }>}
|
|
196
|
+
*/
|
|
197
|
+
async acquireLock(cacheId) {
|
|
198
|
+
try {
|
|
199
|
+
return await this._requestsManager.acquireLock(cacheId);
|
|
200
|
+
} catch (e) {
|
|
201
|
+
improveAndRethrow(e, "acquireLock");
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
/**
|
|
206
|
+
* This method should be called only if you acquired a lock successfully.
|
|
207
|
+
*
|
|
208
|
+
* If the current lock id is not equal to the passed one the passed data will be ignored.
|
|
209
|
+
* Or you can do the synchronous data merging on your side and pass the
|
|
210
|
+
* wasDataMergedSynchronouslyWithMostRecentCacheState=true so your data will be stored
|
|
211
|
+
* despite on the lockId.
|
|
212
|
+
* WARNING: you should do this only if you are sure you perform the synchronous update.
|
|
213
|
+
*
|
|
214
|
+
* @param cacheId {string}
|
|
215
|
+
* @param lockId {string}
|
|
216
|
+
* @param data {any}
|
|
217
|
+
* @param [sessionDependentData=true] {boolean}
|
|
218
|
+
* @param [wasDataMergedSynchronouslyWithMostRecentCacheState=false]
|
|
219
|
+
*/
|
|
220
|
+
saveCachedData(
|
|
221
|
+
cacheId,
|
|
222
|
+
lockId,
|
|
223
|
+
data,
|
|
224
|
+
sessionDependentData = true,
|
|
225
|
+
wasDataMergedSynchronouslyWithMostRecentCacheState = false
|
|
226
|
+
) {
|
|
227
|
+
try {
|
|
228
|
+
if (
|
|
229
|
+
wasDataMergedSynchronouslyWithMostRecentCacheState ||
|
|
230
|
+
this._requestsManager.isTheLockActiveOne(cacheId, lockId)
|
|
231
|
+
) {
|
|
232
|
+
/* We save passed data only if the <caller> has the currently acquired lockId.
|
|
233
|
+
* If the passed lockId is not the active one it means that other code cleared/stopped the lock
|
|
234
|
+
* acquired by the <caller> recently due to some urgent/more prior changes.
|
|
235
|
+
*
|
|
236
|
+
* But we allow user to pass the 'wasDataMergedSynchronouslyWithMostRecentCacheState' flag
|
|
237
|
+
* that tells us that the user had taken the most recent cache value and merged his new data
|
|
238
|
+
* with that cached value (AFTER possibly performing async data retrieval). This means that we
|
|
239
|
+
* can ignore the fact that his lockId is no more relevant and save the passed data
|
|
240
|
+
* as it is synchronously merged with the most recent cached data. (Synchronously merged means that
|
|
241
|
+
* the lost update cannot occur during the merge time as JS execute the synchronous functions\
|
|
242
|
+
* till the end).
|
|
243
|
+
*/
|
|
244
|
+
if (sessionDependentData) {
|
|
245
|
+
this._cache.putSessionDependentData(
|
|
246
|
+
cacheId,
|
|
247
|
+
data,
|
|
248
|
+
this._getTtl()
|
|
249
|
+
);
|
|
250
|
+
} else {
|
|
251
|
+
this._cache.put(cacheId, data, this._getTtl());
|
|
252
|
+
}
|
|
253
|
+
}
|
|
254
|
+
} catch (e) {
|
|
255
|
+
improveAndRethrow(e, `${this._bio}.saveCachedData`);
|
|
256
|
+
}
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
/**
|
|
260
|
+
* Should be called then and only then if you successfully acquired a lock with the lock id.
|
|
261
|
+
*
|
|
262
|
+
* @param cacheId {string}
|
|
263
|
+
* @param lockId {string}
|
|
264
|
+
*/
|
|
265
|
+
releaseLock(cacheId, lockId) {
|
|
266
|
+
try {
|
|
267
|
+
if (this._requestsManager.isTheLockActiveOne(cacheId, lockId)) {
|
|
268
|
+
this._requestsManager.finishActiveCalculation(cacheId);
|
|
269
|
+
}
|
|
270
|
+
} catch (e) {
|
|
271
|
+
improveAndRethrow(e, `${this._bio}.releaseLock`);
|
|
272
|
+
}
|
|
273
|
+
}
|
|
274
|
+
|
|
275
|
+
/**
|
|
276
|
+
* Actualized currently present cached data by key. Applies the provided function to the cached data.
|
|
277
|
+
*
|
|
278
|
+
* @param cacheId {string} id of cache entry
|
|
279
|
+
* @param synchronousCurrentCacheProcessor (function|null} synchronous function accepting cache entry. Should return
|
|
280
|
+
* an object in following format:
|
|
281
|
+
* {
|
|
282
|
+
* isModified: boolean,
|
|
283
|
+
* data: any
|
|
284
|
+
* }
|
|
285
|
+
* the flag signals whether data was changed during the processing or not
|
|
286
|
+
* @param [sessionDependent=true] {boolean} whether to mark the cache entry as session-dependent
|
|
287
|
+
*/
|
|
288
|
+
actualizeCachedData(
|
|
289
|
+
cacheId,
|
|
290
|
+
synchronousCurrentCacheProcessor,
|
|
291
|
+
sessionDependent = true
|
|
292
|
+
) {
|
|
293
|
+
try {
|
|
294
|
+
const cached = this._cache.get(cacheId);
|
|
295
|
+
const result = synchronousCurrentCacheProcessor(cached);
|
|
296
|
+
if (result?.isModified && result?.data != null) {
|
|
297
|
+
if (sessionDependent) {
|
|
298
|
+
this._cache.putSessionDependentData(
|
|
299
|
+
cacheId,
|
|
300
|
+
result?.data,
|
|
301
|
+
this._getTtl()
|
|
302
|
+
);
|
|
303
|
+
} else {
|
|
304
|
+
this._cache.put(cacheId, result?.data, this._getTtl());
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
/* Here we call the lock releasing to ensure the currently active calculation will be ignored.
|
|
308
|
+
* This is needed to ensure no 'lost update'.
|
|
309
|
+
* Lost update can occur if we change data in this method and after that some calculation finishes
|
|
310
|
+
* having the earlier data as its base to calculate its data set result. And the earlier data
|
|
311
|
+
* has no changes applied inside this method, so we will lose them.
|
|
312
|
+
*
|
|
313
|
+
* This is not so good solution: ideally, we should acquire lock before performing any data updating.
|
|
314
|
+
* But the goal of this method is to provide an instant ability to update the cached data.
|
|
315
|
+
* And if we start acquiring the lock here the data update can be postponed significantly.
|
|
316
|
+
* And this kills the desired nature of this method.
|
|
317
|
+
* So we better lose some data retrieval (means abusing the resource a bit) than lose
|
|
318
|
+
* the instant update expected after this method execution.
|
|
319
|
+
*/
|
|
320
|
+
this._requestsManager.finishActiveCalculation(cacheId);
|
|
321
|
+
}
|
|
322
|
+
} catch (e) {
|
|
323
|
+
improveAndRethrow(e, `${this._bio}.actualizeCachedData`);
|
|
324
|
+
}
|
|
325
|
+
}
|
|
326
|
+
|
|
327
|
+
invalidate(key) {
|
|
328
|
+
this._cache.invalidate(key);
|
|
329
|
+
this._requestsManager.finishActiveCalculation(key);
|
|
330
|
+
}
|
|
331
|
+
|
|
332
|
+
invalidateContaining(keyPart) {
|
|
333
|
+
this._cache.invalidateContaining(keyPart);
|
|
334
|
+
this._requestsManager.finishAllActiveCalculations(keyPart);
|
|
335
|
+
}
|
|
336
|
+
|
|
337
|
+
markAsExpiredButDontRemove(key) {
|
|
338
|
+
if (this._removeExpiredCacheAutomatically) {
|
|
339
|
+
this._cache.markCacheItemAsExpiredButDontRemove(
|
|
340
|
+
key,
|
|
341
|
+
this._cacheTtlMs
|
|
342
|
+
);
|
|
343
|
+
} else {
|
|
344
|
+
this._cache.setLastUpdateTimestamp(
|
|
345
|
+
key,
|
|
346
|
+
Date.now() - this._cacheTtlMs - 1
|
|
347
|
+
);
|
|
348
|
+
}
|
|
349
|
+
this._requestsManager.finishAllActiveCalculations(key);
|
|
350
|
+
}
|
|
351
|
+
}
|
|
352
|
+
|
|
353
|
+
/**
|
|
354
|
+
* Util class to control access to a resource when it can be called in parallel for the same result.
|
|
355
|
+
* (E.g. getting today coins-fiat rates from some API).
|
|
356
|
+
*/
|
|
357
|
+
class ManagerOfRequestsToTheSameResource {
|
|
358
|
+
/**
|
|
359
|
+
* @param bio {string} resource-related identifier for logging
|
|
360
|
+
* @param [maxPollsCount=100] {number} max number of attempts to wait when waiting for a lock acquisition
|
|
361
|
+
* @param [timeoutDuration=1000] {number} timeout between the polls for a lock acquisition
|
|
362
|
+
*/
|
|
363
|
+
constructor(bio, maxPollsCount = 100, timeoutDuration = 1000) {
|
|
364
|
+
this.bio = bio;
|
|
365
|
+
this.maxPollsCount = maxPollsCount;
|
|
366
|
+
this.timeoutDuration = timeoutDuration;
|
|
367
|
+
this._activeCalculationsIds = new Map();
|
|
368
|
+
this._nextCalculationIds = new Map();
|
|
369
|
+
}
|
|
370
|
+
|
|
371
|
+
/**
|
|
372
|
+
* If there is no active calculation just creates uuid and returns it.
|
|
373
|
+
* If there is active calculation waits until it removed from the active calculation uuid variable.
|
|
374
|
+
*
|
|
375
|
+
* @param requestHash {string}
|
|
376
|
+
* @return {Promise<string|boolean>} returns uuid of new active calculation or true if waiting for active
|
|
377
|
+
* calculation succeed or false if max attempts count exceeded
|
|
378
|
+
*/
|
|
379
|
+
async startCalculationOrWaitForActiveToFinish(requestHash) {
|
|
380
|
+
try {
|
|
381
|
+
const activeCalculationIdForHash =
|
|
382
|
+
this._activeCalculationsIds.get(requestHash);
|
|
383
|
+
if (activeCalculationIdForHash == null) {
|
|
384
|
+
const id = v4();
|
|
385
|
+
this._activeCalculationsIds.set(requestHash, id);
|
|
386
|
+
return id;
|
|
387
|
+
}
|
|
388
|
+
|
|
389
|
+
return await this._waitForCalculationIdToFinish(
|
|
390
|
+
requestHash,
|
|
391
|
+
activeCalculationIdForHash,
|
|
392
|
+
0
|
|
393
|
+
);
|
|
394
|
+
} catch (e) {
|
|
395
|
+
Logger.logError(
|
|
396
|
+
e,
|
|
397
|
+
`startCalculationOrWaitForActiveToFinish_${this.bio}`
|
|
398
|
+
);
|
|
399
|
+
}
|
|
400
|
+
|
|
401
|
+
return null;
|
|
402
|
+
}
|
|
403
|
+
|
|
404
|
+
/**
|
|
405
|
+
* Acquires lock to the resource by the provided hash.
|
|
406
|
+
*
|
|
407
|
+
* @param requestHash {string}
|
|
408
|
+
* @return {Promise<{ result: true, lockId: string }|{ result: false }>} result is true if the lock is successfully
|
|
409
|
+
* acquired, false if the max allowed time to wait for acquisition expired or any unexpected error occurs
|
|
410
|
+
* during the waiting.
|
|
411
|
+
*/
|
|
412
|
+
async acquireLock(requestHash) {
|
|
413
|
+
try {
|
|
414
|
+
const activeId = this._activeCalculationsIds.get(requestHash);
|
|
415
|
+
const nextId = v4();
|
|
416
|
+
if (activeId == null) {
|
|
417
|
+
this._activeCalculationsIds.set(requestHash, nextId);
|
|
418
|
+
return { result: true, lockId: nextId };
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
const currentNext = this._nextCalculationIds.get(requestHash) ?? [];
|
|
422
|
+
currentNext.push(nextId);
|
|
423
|
+
this._nextCalculationIds.set(requestHash, currentNext);
|
|
424
|
+
|
|
425
|
+
const waitingResult = await this._waitForCalculationIdToFinish(
|
|
426
|
+
requestHash,
|
|
427
|
+
activeId,
|
|
428
|
+
0,
|
|
429
|
+
nextId
|
|
430
|
+
);
|
|
431
|
+
return {
|
|
432
|
+
result: waitingResult,
|
|
433
|
+
lockId: waitingResult ? nextId : undefined,
|
|
434
|
+
};
|
|
435
|
+
} catch (e) {
|
|
436
|
+
improveAndRethrow(e, "acquireLock");
|
|
437
|
+
}
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
/**
|
|
441
|
+
* Clears active calculation id.
|
|
442
|
+
* WARNING: if you forget to call this method the start* one will perform maxPollsCount attempts before finishing
|
|
443
|
+
* @param requestHash {string} hash of request. Helps to distinct the request for the same resource but
|
|
444
|
+
* having different request parameters and hold a dedicated calculation id per this hash
|
|
445
|
+
*/
|
|
446
|
+
finishActiveCalculation(requestHash = "default") {
|
|
447
|
+
try {
|
|
448
|
+
this._activeCalculationsIds.delete(requestHash);
|
|
449
|
+
const next = this._nextCalculationIds.get(requestHash) ?? [];
|
|
450
|
+
if (next.length) {
|
|
451
|
+
this._activeCalculationsIds.set(requestHash, next[0]);
|
|
452
|
+
this._nextCalculationIds.set(requestHash, next.slice(1));
|
|
453
|
+
}
|
|
454
|
+
} catch (e) {
|
|
455
|
+
improveAndRethrow(e, "finishActiveCalculation");
|
|
456
|
+
}
|
|
457
|
+
}
|
|
458
|
+
|
|
459
|
+
finishAllActiveCalculations(keyPart = "") {
|
|
460
|
+
try {
|
|
461
|
+
Array.from(this._activeCalculationsIds.keys()).forEach((hash) => {
|
|
462
|
+
if (
|
|
463
|
+
typeof hash === "string" &&
|
|
464
|
+
new RegExp(keyPart).test(hash)
|
|
465
|
+
) {
|
|
466
|
+
this.finishActiveCalculation(hash);
|
|
467
|
+
}
|
|
468
|
+
});
|
|
469
|
+
} catch (e) {
|
|
470
|
+
improveAndRethrow(e, "finishAllActiveCalculations");
|
|
471
|
+
}
|
|
472
|
+
}
|
|
473
|
+
|
|
474
|
+
/**
|
|
475
|
+
* @param requestHash {string}
|
|
476
|
+
* @param lockId {string}
|
|
477
|
+
* @return {boolean}
|
|
478
|
+
*/
|
|
479
|
+
isTheLockActiveOne(requestHash, lockId) {
|
|
480
|
+
try {
|
|
481
|
+
return this._activeCalculationsIds.get(requestHash) === lockId;
|
|
482
|
+
} catch (e) {
|
|
483
|
+
improveAndRethrow(e, "isTheLockActiveOne");
|
|
484
|
+
}
|
|
485
|
+
}
|
|
486
|
+
|
|
487
|
+
/**
|
|
488
|
+
* @param requestHash {string}
|
|
489
|
+
* @param activeCalculationId {string|null}
|
|
490
|
+
* @param [attemptIndex=0] {number}
|
|
491
|
+
* @param waitForCalculationId {string|null} if you want to wait for an exact id to appear as active then pass this parameter
|
|
492
|
+
* @return {Promise<boolean>} true
|
|
493
|
+
* - if the given calculation id is no more an active one
|
|
494
|
+
* - or it is equal to waitForCalculationId
|
|
495
|
+
* false
|
|
496
|
+
* - if waiting period exceeds the max allowed waiting time or unexpected error occurs
|
|
497
|
+
* @private
|
|
498
|
+
*/
|
|
499
|
+
async _waitForCalculationIdToFinish(
|
|
500
|
+
requestHash,
|
|
501
|
+
activeCalculationId,
|
|
502
|
+
attemptIndex = 0,
|
|
503
|
+
waitForCalculationId = null
|
|
504
|
+
) {
|
|
505
|
+
try {
|
|
506
|
+
if (attemptIndex + 1 > this.maxPollsCount) {
|
|
507
|
+
// Max number of polls for active calculation id change is achieved. So we return false.
|
|
508
|
+
return false;
|
|
509
|
+
}
|
|
510
|
+
|
|
511
|
+
const currentId = this._activeCalculationsIds.get(requestHash);
|
|
512
|
+
if (
|
|
513
|
+
waitForCalculationId == null
|
|
514
|
+
? currentId !== activeCalculationId
|
|
515
|
+
: currentId === waitForCalculationId
|
|
516
|
+
) {
|
|
517
|
+
/* We return true depending on the usage of this function:
|
|
518
|
+
* 1. if there is calculation id that we should wait for to become an active then we return true only
|
|
519
|
+
* if this id becomes the active one.
|
|
520
|
+
*
|
|
521
|
+
* Theoretically we can fail to wait for the desired calculation id. This can be caused by wrong use of
|
|
522
|
+
* this service or by any other mistakes/errors. But this waiting function will return false anyway if
|
|
523
|
+
* the number of polls done exceeds the max allowed.
|
|
524
|
+
*
|
|
525
|
+
* 2. if we just wait for the currently active calculation id to be finished then we return true
|
|
526
|
+
* when we notice that the current active id differs from the original passed into this function.
|
|
527
|
+
*/
|
|
528
|
+
return true;
|
|
529
|
+
} else {
|
|
530
|
+
/* The original calculation id is still the active one, so we are scheduling a new attempt to check
|
|
531
|
+
* whether the active calculation id changed or not in timeoutDuration milliseconds.
|
|
532
|
+
*/
|
|
533
|
+
const it = this;
|
|
534
|
+
return new Promise((resolve, reject) => {
|
|
535
|
+
setTimeout(function () {
|
|
536
|
+
try {
|
|
537
|
+
resolve(
|
|
538
|
+
it._waitForCalculationIdToFinish(
|
|
539
|
+
requestHash,
|
|
540
|
+
activeCalculationId,
|
|
541
|
+
attemptIndex + 1
|
|
542
|
+
)
|
|
543
|
+
);
|
|
544
|
+
} catch (e) {
|
|
545
|
+
reject(e);
|
|
546
|
+
}
|
|
547
|
+
}, this.timeoutDuration);
|
|
548
|
+
});
|
|
549
|
+
}
|
|
550
|
+
} catch (e) {
|
|
551
|
+
Logger.logError(
|
|
552
|
+
e,
|
|
553
|
+
"_waitForCalculationIdToFinish",
|
|
554
|
+
"Failed to wait for active calculation id change."
|
|
555
|
+
);
|
|
556
|
+
return false;
|
|
557
|
+
}
|
|
558
|
+
}
|
|
559
|
+
}
|