@splitsoftware/splitio-commons 1.8.3 → 1.8.4-rc.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cjs/storages/inMemory/TelemetryCacheInMemory.js +17 -0
- package/cjs/sync/polling/updaters/splitChangesUpdater.js +4 -2
- package/cjs/sync/streaming/UpdateWorkers/MySegmentsUpdateWorker.js +6 -2
- package/cjs/sync/streaming/UpdateWorkers/SplitsUpdateWorker.js +16 -7
- package/cjs/sync/streaming/pushManager.js +18 -3
- package/cjs/trackers/telemetryTracker.js +5 -1
- package/esm/storages/inMemory/TelemetryCacheInMemory.js +17 -0
- package/esm/sync/polling/updaters/splitChangesUpdater.js +4 -2
- package/esm/sync/streaming/UpdateWorkers/MySegmentsUpdateWorker.js +6 -2
- package/esm/sync/streaming/UpdateWorkers/SplitsUpdateWorker.js +13 -4
- package/esm/sync/streaming/pushManager.js +19 -4
- package/esm/trackers/telemetryTracker.js +5 -1
- package/package.json +1 -1
- package/src/storages/inMemory/TelemetryCacheInMemory.ts +21 -1
- package/src/storages/types.ts +2 -1
- package/src/sync/polling/types.ts +2 -1
- package/src/sync/polling/updaters/splitChangesUpdater.ts +6 -4
- package/src/sync/streaming/UpdateWorkers/MySegmentsUpdateWorker.ts +6 -2
- package/src/sync/streaming/UpdateWorkers/SplitsUpdateWorker.ts +15 -6
- package/src/sync/streaming/pushManager.ts +18 -4
- package/src/sync/submitters/types.ts +8 -0
- package/src/trackers/telemetryTracker.ts +6 -1
- package/src/trackers/types.ts +5 -1
- package/types/sdkClient/types.d.ts +18 -0
- package/types/storages/inMemory/CountsCacheInMemory.d.ts +20 -0
- package/types/storages/inMemory/LatenciesCacheInMemory.d.ts +20 -0
- package/types/storages/inMemory/TelemetryCacheInMemory.d.ts +7 -1
- package/types/storages/inRedis/CountsCacheInRedis.d.ts +9 -0
- package/types/storages/inRedis/LatenciesCacheInRedis.d.ts +9 -0
- package/types/storages/metadataBuilder.d.ts +3 -0
- package/types/storages/types.d.ts +2 -1
- package/types/sync/offline/LocalhostFromFile.d.ts +2 -0
- package/types/sync/offline/splitsParser/splitsParserFromFile.d.ts +2 -0
- package/types/sync/polling/types.d.ts +5 -1
- package/types/sync/polling/updaters/splitChangesUpdater.d.ts +4 -1
- package/types/sync/streaming/UpdateWorkers/MySegmentsUpdateWorker.d.ts +2 -1
- package/types/sync/streaming/UpdateWorkers/SplitsUpdateWorker.d.ts +2 -1
- package/types/sync/submitters/eventsSyncTask.d.ts +8 -0
- package/types/sync/submitters/impressionCountsSubmitterInRedis.d.ts +5 -0
- package/types/sync/submitters/impressionCountsSyncTask.d.ts +13 -0
- package/types/sync/submitters/impressionsSyncTask.d.ts +14 -0
- package/types/sync/submitters/metricsSyncTask.d.ts +12 -0
- package/types/sync/submitters/submitterSyncTask.d.ts +10 -0
- package/types/sync/submitters/types.d.ts +6 -0
- package/types/sync/submitters/uniqueKeysSubmitterInRedis.d.ts +5 -0
- package/types/sync/syncTaskComposite.d.ts +5 -0
- package/types/trackers/filter/bloomFilter.d.ts +10 -0
- package/types/trackers/filter/dictionaryFilter.d.ts +8 -0
- package/types/trackers/filter/types.d.ts +5 -0
- package/types/trackers/types.d.ts +5 -1
- package/types/utils/timeTracker/index.d.ts +70 -0
- package/types/utils/inputValidation/sdkKey.d.ts +0 -7
- /package/types/storages/inMemory/{UniqueKeysCacheInMemory.d.ts → uniqueKeysCacheInMemory.d.ts} +0 -0
- /package/types/storages/inMemory/{UniqueKeysCacheInMemoryCS.d.ts → uniqueKeysCacheInMemoryCS.d.ts} +0 -0
- /package/types/storages/inRedis/{UniqueKeysCacheInRedis.d.ts → uniqueKeysCacheInRedis.d.ts} +0 -0
package/cjs/storages/inMemory/TelemetryCacheInMemory.js
CHANGED

```diff
@@ -41,6 +41,10 @@ var TelemetryCacheInMemory = /** @class */ (function () {
         this.tags = [];
         this.exceptions = {};
         this.latencies = {};
+        this.updatesFromSSE = {
+            sp: 0,
+            ms: 0
+        };
     }
     TelemetryCacheInMemory.prototype.isEmpty = function () { return this.e; };
     TelemetryCacheInMemory.prototype.clear = function () { };
@@ -65,6 +69,7 @@ var TelemetryCacheInMemory = /** @class */ (function () {
             eD: this.getEventStats(constants_1.DROPPED),
             sE: this.popStreamingEvents(),
             t: this.popTags(),
+            ufs: this.popUpdatesFromSSE(),
         };
     };
     TelemetryCacheInMemory.prototype.getTimeUntilReady = function () {
@@ -188,6 +193,18 @@ var TelemetryCacheInMemory = /** @class */ (function () {
         latencyBuckets[(0, findLatencyIndex_1.findLatencyIndex)(latencyMs)]++;
         this.e = false;
     };
+    TelemetryCacheInMemory.prototype.popUpdatesFromSSE = function () {
+        var result = this.updatesFromSSE;
+        this.updatesFromSSE = {
+            sp: 0,
+            ms: 0,
+        };
+        return result;
+    };
+    TelemetryCacheInMemory.prototype.recordUpdatesFromSSE = function (type) {
+        this.updatesFromSSE[type]++;
+        this.e = false;
+    };
     return TelemetryCacheInMemory;
 }());
 exports.TelemetryCacheInMemory = TelemetryCacheInMemory;
```

package/cjs/sync/polling/updaters/splitChangesUpdater.js
CHANGED

```diff
@@ -86,7 +86,7 @@ function splitChangesUpdaterFactory(log, splitChangesFetcher, splits, segments,
      * @param {boolean | undefined} noCache true to revalidate data to fetch
      * @param {boolean | undefined} till query param to bypass CDN requests
      */
-    return function splitChangesUpdater(noCache, till) {
+    return function splitChangesUpdater(noCache, till, splitUpdateNotification) {
         /**
          * @param {number} since current changeNumber at splitsCache
          * @param {number} retry current number of retry attempts
@@ -94,7 +94,9 @@ function splitChangesUpdaterFactory(log, splitChangesFetcher, splits, segments,
         function _splitChangesUpdater(since, retry) {
             if (retry === void 0) { retry = 0; }
             log.debug(constants_2.SYNC_SPLITS_FETCH, [since]);
-            var fetcherPromise = splitChangesFetcher(since, noCache, till, _promiseDecorator)
+            var fetcherPromise = Promise.resolve(splitUpdateNotification ?
+                { splits: [splitUpdateNotification.payload], till: splitUpdateNotification.changeNumber } :
+                splitChangesFetcher(since, noCache, till, _promiseDecorator))
                 .then(function (splitChanges) {
                 startingUp = false;
                 var mutation = computeSplitsMutation(splitChanges.splits);
```

package/cjs/sync/streaming/UpdateWorkers/MySegmentsUpdateWorker.js
CHANGED

```diff
@@ -2,10 +2,11 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.MySegmentsUpdateWorker = void 0;
 var Backoff_1 = require("../../../utils/Backoff");
+var constants_1 = require("../../../utils/constants");
 /**
  * MySegmentsUpdateWorker factory
  */
-function MySegmentsUpdateWorker(mySegmentsSyncTask) {
+function MySegmentsUpdateWorker(mySegmentsSyncTask, telemetryTracker) {
     var maxChangeNumber = 0; // keeps the maximum changeNumber among queued events
     var currentChangeNumber = -1;
     var handleNewEvent = false;
@@ -21,8 +22,11 @@ function MySegmentsUpdateWorker(mySegmentsSyncTask) {
         mySegmentsSyncTask.execute(_segmentsData, true).then(function (result) {
             if (!isHandlingEvent)
                 return; // halt if `stop` has been called
-            if (result !== false) // Unlike `Splits|SegmentsUpdateWorker`, we cannot use `mySegmentsCache.getChangeNumber` since `/mySegments` endpoint doesn't provide this value.
+            if (result !== false) { // Unlike `Splits|SegmentsUpdateWorker`, we cannot use `mySegmentsCache.getChangeNumber` since `/mySegments` endpoint doesn't provide this value.
+                if (_segmentsData)
+                    telemetryTracker.trackUpdatesFromSSE(constants_1.MY_SEGMENT);
                 currentChangeNumber = Math.max(currentChangeNumber, currentMaxChangeNumber_1); // use `currentMaxChangeNumber`, in case that `maxChangeNumber` was updated during fetch.
+            }
             if (handleNewEvent) {
                 __handleMySegmentsUpdateCall();
             }
```

package/cjs/sync/streaming/UpdateWorkers/SplitsUpdateWorker.js
CHANGED

```diff
@@ -3,28 +3,33 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.SplitsUpdateWorker = void 0;
 var constants_1 = require("../../../readiness/constants");
 var Backoff_1 = require("../../../utils/Backoff");
-var constants_2 = require("./constants");
+var constants_2 = require("../../../utils/constants");
+var constants_3 = require("./constants");
 /**
  * SplitsUpdateWorker factory
  */
-function SplitsUpdateWorker(log, splitsCache, splitsSyncTask, splitsEventEmitter, segmentsSyncTask) {
+function SplitsUpdateWorker(log, splitsCache, splitsSyncTask, splitsEventEmitter, telemetryTracker, segmentsSyncTask) {
     var maxChangeNumber = 0;
     var handleNewEvent = false;
     var isHandlingEvent;
     var cdnBypass;
-    var backoff = new Backoff_1.Backoff(__handleSplitUpdateCall, constants_2.FETCH_BACKOFF_BASE, constants_2.FETCH_BACKOFF_MAX_WAIT);
+    var payload;
+    var backoff = new Backoff_1.Backoff(__handleSplitUpdateCall, constants_3.FETCH_BACKOFF_BASE, constants_3.FETCH_BACKOFF_MAX_WAIT);
     function __handleSplitUpdateCall() {
         isHandlingEvent = true;
         if (maxChangeNumber > splitsCache.getChangeNumber()) {
             handleNewEvent = false;
+            var splitUpdateNotification_1 = payload ? { payload: payload, changeNumber: maxChangeNumber } : undefined;
             // fetch splits revalidating data if cached
-            splitsSyncTask.execute(true, cdnBypass ? maxChangeNumber : undefined).then(function () {
+            splitsSyncTask.execute(true, cdnBypass ? maxChangeNumber : undefined, splitUpdateNotification_1).then(function () {
                 if (!isHandlingEvent)
                     return; // halt if `stop` has been called
                 if (handleNewEvent) {
                     __handleSplitUpdateCall();
                 }
                 else {
+                    if (splitUpdateNotification_1)
+                        telemetryTracker.trackUpdatesFromSSE(constants_2.SPLITS);
                     // fetch new registered segments for server-side API. Not retrying on error
                     if (segmentsSyncTask)
                         segmentsSyncTask.execute(true);
@@ -34,7 +39,7 @@ function SplitsUpdateWorker(log, splitsCache, splitsSyncTask, splitsEventEmitter
                     isHandlingEvent = false;
                     return;
                 }
-                if (attempts < constants_2.FETCH_BACKOFF_MAX_RETRIES) {
+                if (attempts < constants_3.FETCH_BACKOFF_MAX_RETRIES) {
                     backoff.scheduleCall();
                     return;
                 }
@@ -59,14 +64,18 @@ function SplitsUpdateWorker(log, splitsCache, splitsSyncTask, splitsEventEmitter
     *
     * @param {number} changeNumber change number of the SPLIT_UPDATE notification
     */
-    function put(_a) {
-        var changeNumber = _a.changeNumber;
+    function put(_a, _payload) {
+        var changeNumber = _a.changeNumber, pcn = _a.pcn;
         var currentChangeNumber = splitsCache.getChangeNumber();
         if (changeNumber <= currentChangeNumber || changeNumber <= maxChangeNumber)
             return;
         maxChangeNumber = changeNumber;
         handleNewEvent = true;
         cdnBypass = false;
+        payload = undefined;
+        if (_payload && currentChangeNumber === pcn) {
+            payload = _payload;
+        }
         if (backoff.timeoutID || !isHandlingEvent)
             __handleSplitUpdateCall();
         backoff.reset();
```

package/cjs/sync/streaming/pushManager.js
CHANGED

```diff
@@ -47,7 +47,7 @@ function pushManagerFactory(params, pollingManager) {
     // MySegmentsUpdateWorker (client-side) are initiated in `add` method
     var segmentsUpdateWorker = userKey ? undefined : (0, SegmentsUpdateWorker_1.SegmentsUpdateWorker)(log, pollingManager.segmentsSyncTask, storage.segments);
     // For server-side we pass the segmentsSyncTask, used by SplitsUpdateWorker to fetch new segments
-    var splitsUpdateWorker = (0, SplitsUpdateWorker_1.SplitsUpdateWorker)(log, storage.splits, pollingManager.splitsSyncTask, readiness.splits, userKey ? undefined : pollingManager.segmentsSyncTask);
+    var splitsUpdateWorker = (0, SplitsUpdateWorker_1.SplitsUpdateWorker)(log, storage.splits, pollingManager.splitsSyncTask, readiness.splits, telemetryTracker, userKey ? undefined : pollingManager.segmentsSyncTask);
     // [Only for client-side] map of hashes to user keys, to dispatch MY_SEGMENTS_UPDATE events to the corresponding MySegmentsUpdateWorker
     var userKeyHashes = {};
     // [Only for client-side] map of user keys to their corresponding hash64 and MySegmentsUpdateWorkers.
@@ -183,7 +183,22 @@ function pushManagerFactory(params, pollingManager) {
     });
     /** Functions related to synchronization (Queues and Workers in the spec) */
     pushEmitter.on(constants_1.SPLIT_KILL, splitsUpdateWorker.killSplit);
-    pushEmitter.on(constants_1.SPLIT_UPDATE, splitsUpdateWorker.put);
+    pushEmitter.on(constants_1.SPLIT_UPDATE, function (parsedData) {
+        if (parsedData.d && parsedData.c !== undefined) {
+            try {
+                var payload = (0, parseUtils_1.parseFFUpdatePayload)(parsedData.c, parsedData.d);
+                if (payload) {
+                    splitsUpdateWorker.put(parsedData, payload);
+                    return;
+                }
+            }
+            catch (e) {
+                // @TODO define a error code for feature flags parsing
+                log.debug(e);
+            }
+        }
+        splitsUpdateWorker.put(parsedData);
+    });
     if (userKey) {
         pushEmitter.on(constants_1.MY_SEGMENTS_UPDATE, function handleMySegmentsUpdate(parsedData, channel) {
             var userKeyHash = channel.split('_')[2];
@@ -289,7 +304,7 @@ function pushManagerFactory(params, pollingManager) {
         var hash = (0, AuthClient_1.hashUserKey)(userKey);
         if (!userKeyHashes[hash]) {
             userKeyHashes[hash] = userKey;
-            clients[userKey] = { hash64: (0, murmur3_64_1.hash64)(userKey), worker: (0, MySegmentsUpdateWorker_1.MySegmentsUpdateWorker)(mySegmentsSyncTask) };
+            clients[userKey] = { hash64: (0, murmur3_64_1.hash64)(userKey), worker: (0, MySegmentsUpdateWorker_1.MySegmentsUpdateWorker)(mySegmentsSyncTask, telemetryTracker) };
             connectForNewClient = true; // we must reconnect on start, to listen the channel for the new user key
             // Reconnects in case of a new client.
             // Run in next event-loop cycle to save authentication calls
```

package/cjs/trackers/telemetryTracker.js
CHANGED

```diff
@@ -54,6 +54,9 @@ function telemetryTrackerFactory(telemetryCache, now) {
                 // @ts-ignore
                 if (telemetryCache.addTag)
                     telemetryCache.addTag(tag);
+            },
+            trackUpdatesFromSSE: function (type) {
+                telemetryCache.recordUpdatesFromSSE(type);
             }
         };
     }
@@ -64,7 +67,8 @@ function telemetryTrackerFactory(telemetryCache, now) {
             trackHttp: noopTrack,
             sessionLength: function () { },
             streamingEvent: function () { },
-            addTag: function () { }
+            addTag: function () { },
+            trackUpdatesFromSSE: function () { },
         };
     }
 }
```

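In both builds, the tracker facade gains a `trackUpdatesFromSSE` method that forwards to the cache's `recordUpdatesFromSSE`, plus a no-op counterpart for the variant built without a telemetry cache. A minimal sketch of that delegation pattern (illustrative names only, not the package's `telemetryTrackerFactory`; the `'sp' | 'ms'` union is assumed from the counter keys seen in the cache):

```ts
// Illustrative sketch of the delegation added by this diff (not the actual factory).
type UpdatesFromSSEType = 'sp' | 'ms'; // assumed to match the cache's counter keys

interface TelemetryCacheLike { recordUpdatesFromSSE(type: UpdatesFromSSEType): void }
interface TrackerLike { trackUpdatesFromSSE(type: UpdatesFromSSEType): void }

function createTracker(telemetryCache?: TelemetryCacheLike): TrackerLike {
  if (telemetryCache) {
    // Forward the SSE update type to the cache, which accumulates the counters.
    return { trackUpdatesFromSSE(type) { telemetryCache.recordUpdatesFromSSE(type); } };
  }
  // No telemetry cache configured: keep the same interface with no-op methods.
  return { trackUpdatesFromSSE() { } };
}
```
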
package/esm/storages/inMemory/TelemetryCacheInMemory.js
CHANGED

```diff
@@ -36,6 +36,10 @@ var TelemetryCacheInMemory = /** @class */ (function () {
         this.tags = [];
         this.exceptions = {};
         this.latencies = {};
+        this.updatesFromSSE = {
+            sp: 0,
+            ms: 0
+        };
     }
     TelemetryCacheInMemory.prototype.isEmpty = function () { return this.e; };
     TelemetryCacheInMemory.prototype.clear = function () { };
@@ -60,6 +64,7 @@ var TelemetryCacheInMemory = /** @class */ (function () {
             eD: this.getEventStats(DROPPED),
             sE: this.popStreamingEvents(),
             t: this.popTags(),
+            ufs: this.popUpdatesFromSSE(),
         };
     };
     TelemetryCacheInMemory.prototype.getTimeUntilReady = function () {
@@ -183,6 +188,18 @@ var TelemetryCacheInMemory = /** @class */ (function () {
         latencyBuckets[findLatencyIndex(latencyMs)]++;
         this.e = false;
     };
+    TelemetryCacheInMemory.prototype.popUpdatesFromSSE = function () {
+        var result = this.updatesFromSSE;
+        this.updatesFromSSE = {
+            sp: 0,
+            ms: 0,
+        };
+        return result;
+    };
+    TelemetryCacheInMemory.prototype.recordUpdatesFromSSE = function (type) {
+        this.updatesFromSSE[type]++;
+        this.e = false;
+    };
     return TelemetryCacheInMemory;
 }());
 export { TelemetryCacheInMemory };
```

package/esm/sync/polling/updaters/splitChangesUpdater.js
CHANGED

```diff
@@ -81,7 +81,7 @@ export function splitChangesUpdaterFactory(log, splitChangesFetcher, splits, seg
      * @param {boolean | undefined} noCache true to revalidate data to fetch
      * @param {boolean | undefined} till query param to bypass CDN requests
      */
-    return function splitChangesUpdater(noCache, till) {
+    return function splitChangesUpdater(noCache, till, splitUpdateNotification) {
         /**
          * @param {number} since current changeNumber at splitsCache
          * @param {number} retry current number of retry attempts
@@ -89,7 +89,9 @@ export function splitChangesUpdaterFactory(log, splitChangesFetcher, splits, seg
         function _splitChangesUpdater(since, retry) {
             if (retry === void 0) { retry = 0; }
             log.debug(SYNC_SPLITS_FETCH, [since]);
-            var fetcherPromise = splitChangesFetcher(since, noCache, till, _promiseDecorator)
+            var fetcherPromise = Promise.resolve(splitUpdateNotification ?
+                { splits: [splitUpdateNotification.payload], till: splitUpdateNotification.changeNumber } :
+                splitChangesFetcher(since, noCache, till, _promiseDecorator))
                 .then(function (splitChanges) {
                 startingUp = false;
                 var mutation = computeSplitsMutation(splitChanges.splits);
```

package/esm/sync/streaming/UpdateWorkers/MySegmentsUpdateWorker.js
CHANGED

```diff
@@ -1,8 +1,9 @@
 import { Backoff } from '../../../utils/Backoff';
+import { MY_SEGMENT } from '../../../utils/constants';
 /**
  * MySegmentsUpdateWorker factory
  */
-export function MySegmentsUpdateWorker(mySegmentsSyncTask) {
+export function MySegmentsUpdateWorker(mySegmentsSyncTask, telemetryTracker) {
     var maxChangeNumber = 0; // keeps the maximum changeNumber among queued events
     var currentChangeNumber = -1;
     var handleNewEvent = false;
@@ -18,8 +19,11 @@ export function MySegmentsUpdateWorker(mySegmentsSyncTask) {
         mySegmentsSyncTask.execute(_segmentsData, true).then(function (result) {
             if (!isHandlingEvent)
                 return; // halt if `stop` has been called
-            if (result !== false) // Unlike `Splits|SegmentsUpdateWorker`, we cannot use `mySegmentsCache.getChangeNumber` since `/mySegments` endpoint doesn't provide this value.
+            if (result !== false) { // Unlike `Splits|SegmentsUpdateWorker`, we cannot use `mySegmentsCache.getChangeNumber` since `/mySegments` endpoint doesn't provide this value.
+                if (_segmentsData)
+                    telemetryTracker.trackUpdatesFromSSE(MY_SEGMENT);
                 currentChangeNumber = Math.max(currentChangeNumber, currentMaxChangeNumber_1); // use `currentMaxChangeNumber`, in case that `maxChangeNumber` was updated during fetch.
+            }
             if (handleNewEvent) {
                 __handleMySegmentsUpdateCall();
             }
```

package/esm/sync/streaming/UpdateWorkers/SplitsUpdateWorker.js
CHANGED

```diff
@@ -1,27 +1,32 @@
 import { SDK_SPLITS_ARRIVED } from '../../../readiness/constants';
 import { Backoff } from '../../../utils/Backoff';
+import { SPLITS } from '../../../utils/constants';
 import { FETCH_BACKOFF_BASE, FETCH_BACKOFF_MAX_WAIT, FETCH_BACKOFF_MAX_RETRIES } from './constants';
 /**
  * SplitsUpdateWorker factory
  */
-export function SplitsUpdateWorker(log, splitsCache, splitsSyncTask, splitsEventEmitter, segmentsSyncTask) {
+export function SplitsUpdateWorker(log, splitsCache, splitsSyncTask, splitsEventEmitter, telemetryTracker, segmentsSyncTask) {
     var maxChangeNumber = 0;
     var handleNewEvent = false;
     var isHandlingEvent;
     var cdnBypass;
+    var payload;
     var backoff = new Backoff(__handleSplitUpdateCall, FETCH_BACKOFF_BASE, FETCH_BACKOFF_MAX_WAIT);
     function __handleSplitUpdateCall() {
         isHandlingEvent = true;
         if (maxChangeNumber > splitsCache.getChangeNumber()) {
             handleNewEvent = false;
+            var splitUpdateNotification_1 = payload ? { payload: payload, changeNumber: maxChangeNumber } : undefined;
             // fetch splits revalidating data if cached
-            splitsSyncTask.execute(true, cdnBypass ? maxChangeNumber : undefined).then(function () {
+            splitsSyncTask.execute(true, cdnBypass ? maxChangeNumber : undefined, splitUpdateNotification_1).then(function () {
                 if (!isHandlingEvent)
                     return; // halt if `stop` has been called
                 if (handleNewEvent) {
                     __handleSplitUpdateCall();
                 }
                 else {
+                    if (splitUpdateNotification_1)
+                        telemetryTracker.trackUpdatesFromSSE(SPLITS);
                     // fetch new registered segments for server-side API. Not retrying on error
                     if (segmentsSyncTask)
                         segmentsSyncTask.execute(true);
@@ -56,14 +61,18 @@ export function SplitsUpdateWorker(log, splitsCache, splitsSyncTask, splitsEvent
     *
     * @param {number} changeNumber change number of the SPLIT_UPDATE notification
     */
-    function put(_a) {
-        var changeNumber = _a.changeNumber;
+    function put(_a, _payload) {
+        var changeNumber = _a.changeNumber, pcn = _a.pcn;
         var currentChangeNumber = splitsCache.getChangeNumber();
         if (changeNumber <= currentChangeNumber || changeNumber <= maxChangeNumber)
             return;
         maxChangeNumber = changeNumber;
         handleNewEvent = true;
         cdnBypass = false;
+        payload = undefined;
+        if (_payload && currentChangeNumber === pcn) {
+            payload = _payload;
+        }
         if (backoff.timeoutID || !isHandlingEvent)
             __handleSplitUpdateCall();
         backoff.reset();
```

package/esm/sync/streaming/pushManager.js
CHANGED

```diff
@@ -11,7 +11,7 @@ import { getMatching } from '../../utils/key';
 import { MY_SEGMENTS_UPDATE, MY_SEGMENTS_UPDATE_V2, PUSH_NONRETRYABLE_ERROR, PUSH_SUBSYSTEM_DOWN, SECONDS_BEFORE_EXPIRATION, SEGMENT_UPDATE, SPLIT_KILL, SPLIT_UPDATE, PUSH_RETRYABLE_ERROR, PUSH_SUBSYSTEM_UP, ControlType } from './constants';
 import { STREAMING_FALLBACK, STREAMING_REFRESH_TOKEN, STREAMING_CONNECTING, STREAMING_DISABLED, ERROR_STREAMING_AUTH, STREAMING_DISCONNECTING, STREAMING_RECONNECT, STREAMING_PARSING_MY_SEGMENTS_UPDATE_V2 } from '../../logger/constants';
 import { UpdateStrategy } from './SSEHandler/types';
-import { isInBitmap, parseBitmap, parseKeyList } from './parseUtils';
+import { isInBitmap, parseBitmap, parseFFUpdatePayload, parseKeyList } from './parseUtils';
 import { _Set } from '../../utils/lang/sets';
 import { hash64 } from '../../utils/murmur3/murmur3_64';
 import { TOKEN_REFRESH, AUTH_REJECTION } from '../../utils/constants';
@@ -44,7 +44,7 @@ export function pushManagerFactory(params, pollingManager) {
     // MySegmentsUpdateWorker (client-side) are initiated in `add` method
     var segmentsUpdateWorker = userKey ? undefined : SegmentsUpdateWorker(log, pollingManager.segmentsSyncTask, storage.segments);
     // For server-side we pass the segmentsSyncTask, used by SplitsUpdateWorker to fetch new segments
-    var splitsUpdateWorker = SplitsUpdateWorker(log, storage.splits, pollingManager.splitsSyncTask, readiness.splits, userKey ? undefined : pollingManager.segmentsSyncTask);
+    var splitsUpdateWorker = SplitsUpdateWorker(log, storage.splits, pollingManager.splitsSyncTask, readiness.splits, telemetryTracker, userKey ? undefined : pollingManager.segmentsSyncTask);
     // [Only for client-side] map of hashes to user keys, to dispatch MY_SEGMENTS_UPDATE events to the corresponding MySegmentsUpdateWorker
     var userKeyHashes = {};
     // [Only for client-side] map of user keys to their corresponding hash64 and MySegmentsUpdateWorkers.
@@ -180,7 +180,22 @@ export function pushManagerFactory(params, pollingManager) {
     });
     /** Functions related to synchronization (Queues and Workers in the spec) */
     pushEmitter.on(SPLIT_KILL, splitsUpdateWorker.killSplit);
-    pushEmitter.on(SPLIT_UPDATE, splitsUpdateWorker.put);
+    pushEmitter.on(SPLIT_UPDATE, function (parsedData) {
+        if (parsedData.d && parsedData.c !== undefined) {
+            try {
+                var payload = parseFFUpdatePayload(parsedData.c, parsedData.d);
+                if (payload) {
+                    splitsUpdateWorker.put(parsedData, payload);
+                    return;
+                }
+            }
+            catch (e) {
+                // @TODO define a error code for feature flags parsing
+                log.debug(e);
+            }
+        }
+        splitsUpdateWorker.put(parsedData);
+    });
     if (userKey) {
         pushEmitter.on(MY_SEGMENTS_UPDATE, function handleMySegmentsUpdate(parsedData, channel) {
             var userKeyHash = channel.split('_')[2];
@@ -286,7 +301,7 @@ export function pushManagerFactory(params, pollingManager) {
         var hash = hashUserKey(userKey);
         if (!userKeyHashes[hash]) {
             userKeyHashes[hash] = userKey;
-            clients[userKey] = { hash64: hash64(userKey), worker: MySegmentsUpdateWorker(mySegmentsSyncTask) };
+            clients[userKey] = { hash64: hash64(userKey), worker: MySegmentsUpdateWorker(mySegmentsSyncTask, telemetryTracker) };
             connectForNewClient = true; // we must reconnect on start, to listen the channel for the new user key
             // Reconnects in case of a new client.
             // Run in next event-loop cycle to save authentication calls
```

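In both pushManager builds, the direct `splitsUpdateWorker.put` subscription for SPLIT_UPDATE is replaced by a handler that first tries to decode a feature-flag definition embedded in the notification (`c` compression code, `d` data) and falls back to a plain `put` when no payload is present or decoding fails. A condensed sketch of that decision flow (the `decodePayload`, `worker`, and `log` declarations below are stand-ins, not the package's APIs):

```ts
// Illustrative sketch of the new SPLIT_UPDATE handling (stand-in declarations).
interface SplitUpdateData { changeNumber: number; pcn?: number; c?: number; d?: string }

declare function decodePayload(compression: number, data: string): object | undefined; // stand-in for parseFFUpdatePayload
declare const worker: { put(data: SplitUpdateData, payload?: object): void };
declare const log: { debug(msg: unknown): void };

function onSplitUpdate(parsedData: SplitUpdateData) {
  if (parsedData.d && parsedData.c !== undefined) {
    try {
      const payload = decodePayload(parsedData.c, parsedData.d);
      if (payload) {
        // Instant update: hand the decoded definition to the worker together with the notification.
        worker.put(parsedData, payload);
        return;
      }
    } catch (e) {
      log.debug(e); // decoding failed; fall back to fetching
    }
  }
  // Default path: queue a regular split-changes fetch for this change number.
  worker.put(parsedData);
}
```
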
package/esm/trackers/telemetryTracker.js
CHANGED

```diff
@@ -51,6 +51,9 @@ export function telemetryTrackerFactory(telemetryCache, now) {
                 // @ts-ignore
                 if (telemetryCache.addTag)
                     telemetryCache.addTag(tag);
+            },
+            trackUpdatesFromSSE: function (type) {
+                telemetryCache.recordUpdatesFromSSE(type);
             }
         };
     }
@@ -61,7 +64,8 @@ export function telemetryTrackerFactory(telemetryCache, now) {
             trackHttp: noopTrack,
             sessionLength: function () { },
             streamingEvent: function () { },
-            addTag: function () { }
+            addTag: function () { },
+            trackUpdatesFromSSE: function () { },
         };
     }
 }
```

package/package.json
CHANGED

package/src/storages/inMemory/TelemetryCacheInMemory.ts
CHANGED

```diff
@@ -1,4 +1,4 @@
-import { ImpressionDataType, EventDataType, LastSync, HttpErrors, HttpLatencies, StreamingEvent, Method, OperationType, MethodExceptions, MethodLatencies, TelemetryUsageStatsPayload } from '../../sync/submitters/types';
+import { ImpressionDataType, EventDataType, LastSync, HttpErrors, HttpLatencies, StreamingEvent, Method, OperationType, MethodExceptions, MethodLatencies, TelemetryUsageStatsPayload, UpdatesFromSSEEnum } from '../../sync/submitters/types';
 import { DEDUPED, DROPPED, LOCALHOST_MODE, QUEUED } from '../../utils/constants';
 import { findLatencyIndex } from '../findLatencyIndex';
 import { ISegmentsCacheSync, ISplitsCacheSync, IStorageFactoryParams, ITelemetryCacheSync } from '../types';
@@ -56,6 +56,7 @@ export class TelemetryCacheInMemory implements ITelemetryCacheSync {
       eD: this.getEventStats(DROPPED),
       sE: this.popStreamingEvents(),
       t: this.popTags(),
+      ufs: this.popUpdatesFromSSE(),
     };
   }
 
@@ -244,4 +245,23 @@ export class TelemetryCacheInMemory implements ITelemetryCacheSync {
     this.e = false;
   }
 
+  private updatesFromSSE = {
+    sp: 0,
+    ms: 0
+  };
+
+  popUpdatesFromSSE() {
+    const result = this.updatesFromSSE;
+    this.updatesFromSSE = {
+      sp: 0,
+      ms: 0,
+    };
+    return result;
+  }
+
+  recordUpdatesFromSSE(type: UpdatesFromSSEEnum) {
+    this.updatesFromSSE[type]++;
+    this.e = false;
+  }
+
 }
```

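The cache-side change adds a pair of counters that `recordUpdatesFromSSE` increments and `popUpdatesFromSSE` drains into the usage-stats payload under `ufs` (presumably `sp` counts feature-flag updates and `ms` counts mySegments updates applied straight from streaming notifications). A standalone sketch of that record/pop pattern (illustrative class, not the package's `TelemetryCacheInMemory`):

```ts
// Illustrative record/pop counter pattern mirroring the diff (not the actual cache class).
type UpdatesFromSSE = { sp: number, ms: number };

class UpdatesFromSSECounters {
  private updatesFromSSE: UpdatesFromSSE = { sp: 0, ms: 0 };

  // Increment the counter for the given update type.
  record(type: keyof UpdatesFromSSE) {
    this.updatesFromSSE[type]++;
  }

  // Return the accumulated counters and reset them, so each telemetry flush
  // only reports updates received since the previous one.
  pop(): UpdatesFromSSE {
    const result = this.updatesFromSSE;
    this.updatesFromSSE = { sp: 0, ms: 0 };
    return result;
  }
}

// Usage: the popped value is what would be attached to the stats payload as `ufs`.
const counters = new UpdatesFromSSECounters();
counters.record('sp');
console.log({ ufs: counters.pop() }); // { ufs: { sp: 1, ms: 0 } }
```
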
package/src/storages/types.ts
CHANGED

```diff
@@ -1,5 +1,5 @@
 import { MaybeThenable, ISplit } from '../dtos/types';
-import { EventDataType, HttpErrors, HttpLatencies, ImpressionDataType, LastSync, Method, MethodExceptions, MethodLatencies, MultiMethodExceptions, MultiMethodLatencies, MultiConfigs, OperationType, StoredEventWithMetadata, StoredImpressionWithMetadata, StreamingEvent, UniqueKeysPayloadCs, UniqueKeysPayloadSs, TelemetryUsageStatsPayload } from '../sync/submitters/types';
+import { EventDataType, HttpErrors, HttpLatencies, ImpressionDataType, LastSync, Method, MethodExceptions, MethodLatencies, MultiMethodExceptions, MultiMethodLatencies, MultiConfigs, OperationType, StoredEventWithMetadata, StoredImpressionWithMetadata, StreamingEvent, UniqueKeysPayloadCs, UniqueKeysPayloadSs, TelemetryUsageStatsPayload, UpdatesFromSSEEnum } from '../sync/submitters/types';
 import { SplitIO, ImpressionDTO, ISettings } from '../types';
 
 /**
@@ -409,6 +409,7 @@ export interface ITelemetryRuntimeProducerSync {
   recordTokenRefreshes(): void;
   recordStreamingEvents(streamingEvent: StreamingEvent): void;
   recordSessionLength(ms: number): void;
+  recordUpdatesFromSSE(type: UpdatesFromSSEEnum): void
 }
 
 export interface ITelemetryEvaluationProducerSync {
```

package/src/sync/polling/types.ts
CHANGED

```diff
@@ -1,8 +1,9 @@
+import { ISplit } from '../../dtos/types';
 import { IReadinessManager } from '../../readiness/types';
 import { IStorageSync } from '../../storages/types';
 import { ITask, ISyncTask } from '../types';
 
-export interface ISplitsSyncTask extends ISyncTask<[noCache?: boolean, till?: number], boolean> { }
+export interface ISplitsSyncTask extends ISyncTask<[noCache?: boolean, till?: number, splitUpdateNotification?: { payload: ISplit, changeNumber: number }], boolean> { }
 
 export interface ISegmentsSyncTask extends ISyncTask<[fetchOnlyNew?: boolean, segmentName?: string, noCache?: boolean, till?: number], boolean> { }
 
```

package/src/sync/polling/updaters/splitChangesUpdater.ts
CHANGED

```diff
@@ -8,7 +8,7 @@ import { SDK_SPLITS_ARRIVED, SDK_SPLITS_CACHE_LOADED } from '../../../readiness/
 import { ILogger } from '../../../logger/types';
 import { SYNC_SPLITS_FETCH, SYNC_SPLITS_NEW, SYNC_SPLITS_REMOVED, SYNC_SPLITS_SEGMENTS, SYNC_SPLITS_FETCH_FAILS, SYNC_SPLITS_FETCH_RETRY } from '../../../logger/constants';
 
-type ISplitChangesUpdater = (noCache?: boolean, till?: number) => Promise<boolean>
+type ISplitChangesUpdater = (noCache?: boolean, till?: number, splitUpdateNotification?: { payload: ISplit, changeNumber: number }) => Promise<boolean>
 
 // Checks that all registered segments have been fetched (changeNumber !== -1 for every segment).
 // Returns a promise that could be rejected.
@@ -111,7 +111,7 @@ export function splitChangesUpdaterFactory(
    * @param {boolean | undefined} noCache true to revalidate data to fetch
    * @param {boolean | undefined} till query param to bypass CDN requests
    */
-  return function splitChangesUpdater(noCache?: boolean, till?: number) {
+  return function splitChangesUpdater(noCache?: boolean, till?: number, splitUpdateNotification?: { payload: ISplit, changeNumber: number }) {
 
     /**
      * @param {number} since current changeNumber at splitsCache
@@ -119,8 +119,10 @@ export function splitChangesUpdaterFactory(
      */
     function _splitChangesUpdater(since: number, retry = 0): Promise<boolean> {
       log.debug(SYNC_SPLITS_FETCH, [since]);
-
-      const fetcherPromise = splitChangesFetcher(since, noCache, till, _promiseDecorator)
+      const fetcherPromise = Promise.resolve(splitUpdateNotification ?
+        { splits: [splitUpdateNotification.payload], till: splitUpdateNotification.changeNumber } :
+        splitChangesFetcher(since, noCache, till, _promiseDecorator)
+      )
         .then((splitChanges: ISplitChangesResponse) => {
           startingUp = false;
 
```

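The updater now takes an optional `splitUpdateNotification`; when it is provided, the network fetch is skipped and the notification's payload is wrapped so it looks like a split-changes response with `till` set to the notification's change number. A simplified sketch of that short-circuit (the `fetchSplitChanges` declaration and the local types are hypothetical, not the package's fetcher):

```ts
// Illustrative sketch: resolve split changes either from an SSE notification payload
// or from the regular fetcher, so the downstream processing stays identical.
interface SplitDefinition { name: string; status: string; changeNumber: number }
interface SplitChangesResponse { splits: SplitDefinition[]; till: number }

declare function fetchSplitChanges(since: number): Promise<SplitChangesResponse>; // hypothetical fetcher

function resolveSplitChanges(
  since: number,
  splitUpdateNotification?: { payload: SplitDefinition, changeNumber: number }
): Promise<SplitChangesResponse> {
  return Promise.resolve(
    splitUpdateNotification ?
      // Instant update: build the "response" from the notification itself.
      { splits: [splitUpdateNotification.payload], till: splitUpdateNotification.changeNumber } :
      // Regular path: hit the split-changes endpoint.
      fetchSplitChanges(since)
  );
}
```
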
package/src/sync/streaming/UpdateWorkers/MySegmentsUpdateWorker.ts
CHANGED

```diff
@@ -1,11 +1,13 @@
 import { IMySegmentsSyncTask, MySegmentsData } from '../../polling/types';
 import { Backoff } from '../../../utils/Backoff';
 import { IUpdateWorker } from './types';
+import { MY_SEGMENT } from '../../../utils/constants';
+import { ITelemetryTracker } from '../../../trackers/types';
 
 /**
  * MySegmentsUpdateWorker factory
  */
-export function MySegmentsUpdateWorker(mySegmentsSyncTask: IMySegmentsSyncTask): IUpdateWorker {
+export function MySegmentsUpdateWorker(mySegmentsSyncTask: IMySegmentsSyncTask, telemetryTracker: ITelemetryTracker): IUpdateWorker {
 
   let maxChangeNumber = 0; // keeps the maximum changeNumber among queued events
   let currentChangeNumber = -1;
@@ -23,8 +25,10 @@ export function MySegmentsUpdateWorker(mySegmentsSyncTask: IMySegmentsSyncTask):
       // fetch mySegments revalidating data if cached
       mySegmentsSyncTask.execute(_segmentsData, true).then((result) => {
         if (!isHandlingEvent) return; // halt if `stop` has been called
-        if (result !== false) // Unlike `Splits|SegmentsUpdateWorker`, we cannot use `mySegmentsCache.getChangeNumber` since `/mySegments` endpoint doesn't provide this value.
+        if (result !== false) {// Unlike `Splits|SegmentsUpdateWorker`, we cannot use `mySegmentsCache.getChangeNumber` since `/mySegments` endpoint doesn't provide this value.
+          if (_segmentsData) telemetryTracker.trackUpdatesFromSSE(MY_SEGMENT);
           currentChangeNumber = Math.max(currentChangeNumber, currentMaxChangeNumber); // use `currentMaxChangeNumber`, in case that `maxChangeNumber` was updated during fetch.
+        }
         if (handleNewEvent) {
           __handleMySegmentsUpdateCall();
         } else {
```

package/src/sync/streaming/UpdateWorkers/SplitsUpdateWorker.ts
CHANGED

```diff
@@ -1,8 +1,11 @@
+import { ISplit } from '../../../dtos/types';
 import { ILogger } from '../../../logger/types';
 import { SDK_SPLITS_ARRIVED } from '../../../readiness/constants';
 import { ISplitsEventEmitter } from '../../../readiness/types';
 import { ISplitsCacheSync } from '../../../storages/types';
+import { ITelemetryTracker } from '../../../trackers/types';
 import { Backoff } from '../../../utils/Backoff';
+import { SPLITS } from '../../../utils/constants';
 import { ISegmentsSyncTask, ISplitsSyncTask } from '../../polling/types';
 import { ISplitKillData, ISplitUpdateData } from '../SSEHandler/types';
 import { FETCH_BACKOFF_BASE, FETCH_BACKOFF_MAX_WAIT, FETCH_BACKOFF_MAX_RETRIES } from './constants';
@@ -11,25 +14,27 @@ import { IUpdateWorker } from './types';
 /**
  * SplitsUpdateWorker factory
  */
-export function SplitsUpdateWorker(log: ILogger, splitsCache: ISplitsCacheSync, splitsSyncTask: ISplitsSyncTask, splitsEventEmitter: ISplitsEventEmitter, segmentsSyncTask?: ISegmentsSyncTask): IUpdateWorker & { killSplit(event: ISplitKillData): void } {
+export function SplitsUpdateWorker(log: ILogger, splitsCache: ISplitsCacheSync, splitsSyncTask: ISplitsSyncTask, splitsEventEmitter: ISplitsEventEmitter, telemetryTracker: ITelemetryTracker, segmentsSyncTask?: ISegmentsSyncTask): IUpdateWorker & { killSplit(event: ISplitKillData): void } {
 
   let maxChangeNumber = 0;
   let handleNewEvent = false;
   let isHandlingEvent: boolean;
   let cdnBypass: boolean;
+  let payload: ISplit | undefined;
   const backoff = new Backoff(__handleSplitUpdateCall, FETCH_BACKOFF_BASE, FETCH_BACKOFF_MAX_WAIT);
 
   function __handleSplitUpdateCall() {
     isHandlingEvent = true;
     if (maxChangeNumber > splitsCache.getChangeNumber()) {
       handleNewEvent = false;
-
+      const splitUpdateNotification = payload ? { payload, changeNumber: maxChangeNumber } : undefined;
       // fetch splits revalidating data if cached
-      splitsSyncTask.execute(true, cdnBypass ? maxChangeNumber : undefined).then(() => {
+      splitsSyncTask.execute(true, cdnBypass ? maxChangeNumber : undefined, splitUpdateNotification).then(() => {
         if (!isHandlingEvent) return; // halt if `stop` has been called
         if (handleNewEvent) {
           __handleSplitUpdateCall();
         } else {
+          if (splitUpdateNotification) telemetryTracker.trackUpdatesFromSSE(SPLITS);
           // fetch new registered segments for server-side API. Not retrying on error
           if (segmentsSyncTask) segmentsSyncTask.execute(true);
 
@@ -66,7 +71,7 @@ export function SplitsUpdateWorker(log: ILogger, splitsCache: ISplitsCacheSync,
    *
    * @param {number} changeNumber change number of the SPLIT_UPDATE notification
    */
-  function put({ changeNumber }:
+  function put({ changeNumber, pcn }: ISplitUpdateData, _payload?: ISplit) {
     const currentChangeNumber = splitsCache.getChangeNumber();
 
     if (changeNumber <= currentChangeNumber || changeNumber <= maxChangeNumber) return;
@@ -74,6 +79,11 @@ export function SplitsUpdateWorker(log: ILogger, splitsCache: ISplitsCacheSync,
     maxChangeNumber = changeNumber;
     handleNewEvent = true;
     cdnBypass = false;
+    payload = undefined;
+
+    if (_payload && currentChangeNumber === pcn) {
+      payload = _payload;
+    }
 
     if (backoff.timeoutID || !isHandlingEvent) __handleSplitUpdateCall();
     backoff.reset();
@@ -81,7 +91,6 @@ export function SplitsUpdateWorker(log: ILogger, splitsCache: ISplitsCacheSync,
 
   return {
     put,
-
     /**
      * Invoked by NotificationProcessor on SPLIT_KILL event
      *
@@ -95,7 +104,7 @@ export function SplitsUpdateWorker(log: ILogger, splitsCache: ISplitsCacheSync,
        splitsEventEmitter.emit(SDK_SPLITS_ARRIVED, true);
      }
      // queues the SplitChanges fetch (only if changeNumber is newer)
-      put({ changeNumber });
+      put({ changeNumber } as ISplitUpdateData);
    },
 
    stop() {
```