@microsoft/1ds-post-js 3.2.1 → 3.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/bundle/{ms.post-3.2.1.gbl.js → ms.post-3.2.2.gbl.js} +296 -266
- package/bundle/ms.post-3.2.2.gbl.js.map +1 -0
- package/bundle/ms.post-3.2.2.gbl.min.js +7 -0
- package/bundle/ms.post-3.2.2.gbl.min.js.map +1 -0
- package/bundle/ms.post-3.2.2.integrity.json +46 -0
- package/bundle/{ms.post-3.2.1.js → ms.post-3.2.2.js} +296 -266
- package/bundle/ms.post-3.2.2.js.map +1 -0
- package/bundle/ms.post-3.2.2.min.js +7 -0
- package/bundle/ms.post-3.2.2.min.js.map +1 -0
- package/bundle/ms.post.gbl.js +295 -265
- package/bundle/ms.post.gbl.js.map +1 -1
- package/bundle/ms.post.gbl.min.js +2 -2
- package/bundle/ms.post.gbl.min.js.map +1 -1
- package/bundle/ms.post.integrity.json +17 -17
- package/bundle/ms.post.js +295 -265
- package/bundle/ms.post.js.map +1 -1
- package/bundle/ms.post.min.js +2 -2
- package/bundle/ms.post.min.js.map +1 -1
- package/dist/ms.post.js +40 -17
- package/dist/ms.post.js.map +1 -1
- package/dist/ms.post.min.js +2 -2
- package/dist/ms.post.min.js.map +1 -1
- package/dist-esm/src/BatchNotificationActions.js +1 -1
- package/dist-esm/src/ClockSkewManager.js +1 -1
- package/dist-esm/src/Constants.js +1 -1
- package/dist-esm/src/DataModels.js +1 -1
- package/dist-esm/src/EventBatch.js +1 -1
- package/dist-esm/src/HttpManager.d.ts +2 -1
- package/dist-esm/src/HttpManager.js +5 -5
- package/dist-esm/src/HttpManager.js.map +1 -1
- package/dist-esm/src/Index.js +1 -1
- package/dist-esm/src/KillSwitch.js +1 -1
- package/dist-esm/src/PostChannel.d.ts +2 -5
- package/dist-esm/src/PostChannel.js +21 -11
- package/dist-esm/src/PostChannel.js.map +1 -1
- package/dist-esm/src/RetryPolicy.js +1 -1
- package/dist-esm/src/Serializer.js +1 -1
- package/dist-esm/src/TimeoutOverrideWrapper.d.ts +18 -0
- package/dist-esm/src/TimeoutOverrideWrapper.js +28 -0
- package/dist-esm/src/TimeoutOverrideWrapper.js.map +1 -0
- package/dist-esm/src/typings/XDomainRequest.js +1 -1
- package/package.json +3 -3
- package/src/HttpManager.ts +5 -4
- package/src/PostChannel.ts +30 -11
- package/src/TimeoutOverrideWrapper.ts +29 -0
- package/bundle/ms.post-3.2.1.gbl.js.map +0 -1
- package/bundle/ms.post-3.2.1.gbl.min.js +0 -7
- package/bundle/ms.post-3.2.1.gbl.min.js.map +0 -1
- package/bundle/ms.post-3.2.1.integrity.json +0 -46
- package/bundle/ms.post-3.2.1.js.map +0 -1
- package/bundle/ms.post-3.2.1.min.js +0 -7
- package/bundle/ms.post-3.2.1.min.js.map +0 -1
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"PostChannel.js.map","sources":["PostChannel.js"],"sourcesContent":["import { __extends } from \"tslib\";\r\n/**\r\n* PostManager.ts\r\n* @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)\r\n* @copyright Microsoft 2018-2020\r\n*/\r\nimport { BaseTelemetryPlugin, EventsDiscardedReason, isValueAssigned, setProcessTelemetryTimings, isWindowObjectAvailable, arrForEach, doPerf, objForEachKey, optimizeObject, isChromium, getWindow, isNumber, mergeEvtNamespace, createUniqueNamespace, addPageUnloadEventListener, addPageHideEventListener, addPageShowEventListener, removePageUnloadEventListener, removePageHideEventListener, removePageShowEventListener, _throwInternal } from \"@microsoft/1ds-core-js\";\r\nimport { RT_PROFILE, NRT_PROFILE, BE_PROFILE, } from \"./DataModels\";\r\nimport { EventBatch } from \"./EventBatch\";\r\nimport { HttpManager } from \"./HttpManager\";\r\nimport { retryPolicyGetMillisToBackoffForRetry } from \"./RetryPolicy\";\r\nimport dynamicProto from \"@microsoft/dynamicproto-js\";\r\nimport { strMsaDeviceTicket } from \"./Constants\";\r\nvar FlushCheckTimer = 0.250; // This needs to be in seconds, so this is 250ms\r\nvar MaxNumberEventPerBatch = 500;\r\nvar EventsDroppedAtOneTime = 20;\r\nvar MaxSendAttempts = 6;\r\nvar MaxSyncUnloadSendAttempts = 2; // Assuming 2 based on beforeunload and unload\r\nvar MaxBackoffCount = 4;\r\nvar globalContext = isWindowObjectAvailable ? 
window : this;\r\nvar MaxConnections = 2;\r\nvar MaxRequestRetriesBeforeBackoff = 1;\r\nvar strEventsDiscarded = \"eventsDiscarded\";\r\nvar strOverrideInstrumentationKey = \"overrideInstrumentationKey\";\r\nvar strMaxEventRetryAttempts = \"maxEventRetryAttempts\";\r\nvar strMaxUnloadEventRetryAttempts = \"maxUnloadEventRetryAttempts\";\r\nvar strAddUnloadCb = \"addUnloadCb\";\r\n/**\r\n * Class that manages adding events to inbound queues and batching of events\r\n * into requests.\r\n */\r\nvar PostChannel = /** @class */ (function (_super) {\r\n __extends(PostChannel, _super);\r\n function PostChannel() {\r\n var _this = _super.call(this) || this;\r\n _this.identifier = \"PostChannel\";\r\n _this.priority = 1011;\r\n _this.version = '3.2.1';\r\n var _config;\r\n var _isTeardownCalled = false;\r\n var _flushCallbackQueue = [];\r\n var _flushCallbackTimerId = null;\r\n var _paused = false;\r\n var _immediateQueueSize = 0;\r\n var _immediateQueueSizeLimit = 500;\r\n var _queueSize = 0;\r\n var _queueSizeLimit = 10000;\r\n var _profiles = {};\r\n var _currentProfile = RT_PROFILE;\r\n var _scheduledTimerId = null;\r\n var _immediateTimerId = null;\r\n var _currentBackoffCount = 0;\r\n var _timerCount = 0;\r\n var _xhrOverride;\r\n var _httpManager;\r\n var _batchQueues = {};\r\n var _autoFlushEventsLimit;\r\n // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])\r\n var _autoFlushBatchLimit;\r\n var _delayedBatchSendLatency = -1;\r\n var _delayedBatchReason;\r\n var _optimizeObject = true;\r\n var _isPageUnloadTriggered = false;\r\n var _disableXhrSync = false;\r\n var _maxEventSendAttempts = MaxSendAttempts;\r\n var _maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;\r\n var _evtNamespace;\r\n dynamicProto(PostChannel, _this, function (_self, _base) {\r\n _initDefaults();\r\n // Special internal method to allow the DebugPlugin to hook embedded 
objects\r\n _self[\"_getDbgPlgTargets\"] = function () {\r\n return [_httpManager];\r\n };\r\n _self.initialize = function (coreConfig, core, extensions) {\r\n doPerf(core, function () { return \"PostChannel:initialize\"; }, function () {\r\n var extendedCore = core;\r\n _base.initialize(coreConfig, core, extensions);\r\n try {\r\n var hasAddUnloadCb = !!core[strAddUnloadCb];\r\n _evtNamespace = mergeEvtNamespace(createUniqueNamespace(_self.identifier), core.evtNamespace && core.evtNamespace());\r\n var ctx = _self._getTelCtx();\r\n coreConfig.extensionConfig[_self.identifier] = coreConfig.extensionConfig[_self.identifier] || {};\r\n _config = ctx.getExtCfg(_self.identifier);\r\n _self._setTimeoutOverride = _config.setTimeoutOverride ? _config.setTimeoutOverride : setTimeout.bind(globalContext);\r\n _self._clearTimeoutOverride = _config.clearTimeoutOverride ? _config.clearTimeoutOverride : clearTimeout.bind(globalContext);\r\n // Only try and use the optimizeObject() if this appears to be a chromium based browser and it has not been explicitly disabled\r\n _optimizeObject = !_config.disableOptimizeObj && isChromium();\r\n _hookWParam(extendedCore);\r\n if (_config.eventsLimitInMem > 0) {\r\n _queueSizeLimit = _config.eventsLimitInMem;\r\n }\r\n if (_config.immediateEventLimit > 0) {\r\n _immediateQueueSizeLimit = _config.immediateEventLimit;\r\n }\r\n if (_config.autoFlushEventsLimit > 0) {\r\n _autoFlushEventsLimit = _config.autoFlushEventsLimit;\r\n }\r\n _disableXhrSync = _config.disableXhrSync;\r\n if (isNumber(_config[strMaxEventRetryAttempts])) {\r\n _maxEventSendAttempts = _config[strMaxEventRetryAttempts];\r\n }\r\n if (isNumber(_config[strMaxUnloadEventRetryAttempts])) {\r\n _maxUnloadEventSendAttempts = _config[strMaxUnloadEventRetryAttempts];\r\n }\r\n _setAutoLimits();\r\n if (_config.httpXHROverride && _config.httpXHROverride.sendPOST) {\r\n _xhrOverride = _config.httpXHROverride;\r\n }\r\n if (isValueAssigned(coreConfig.anonCookieName)) {\r\n 
_httpManager.addQueryStringParameter(\"anoncknm\", coreConfig.anonCookieName);\r\n }\r\n _httpManager.sendHook = _config.payloadPreprocessor;\r\n _httpManager.sendListener = _config.payloadListener;\r\n // Override endpointUrl if provided in Post config\r\n var endpointUrl = _config.overrideEndpointUrl ? _config.overrideEndpointUrl : coreConfig.endpointUrl;\r\n _self._notificationManager = coreConfig.extensionConfig.NotificationManager;\r\n _httpManager.initialize(endpointUrl, _self.core, _self, _xhrOverride, _config);\r\n var excludePageUnloadEvents = coreConfig.disablePageUnloadEvents || [];\r\n // When running in Web browsers try to send all telemetry if page is unloaded\r\n addPageUnloadEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);\r\n addPageHideEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);\r\n addPageShowEventListener(_handleShowEvents, coreConfig.disablePageShowEvents, _evtNamespace);\r\n }\r\n catch (e) {\r\n // resetting the initialized state because of failure\r\n _self.setInitialized(false);\r\n throw e;\r\n }\r\n }, function () { return ({ coreConfig: coreConfig, core: core, extensions: extensions }); });\r\n };\r\n _self.processTelemetry = function (ev, itemCtx) {\r\n setProcessTelemetryTimings(ev, _self.identifier);\r\n itemCtx = _self._getTelCtx(itemCtx);\r\n // Get the channel instance from the current request/instance\r\n var channelConfig = itemCtx.getExtCfg(_self.identifier);\r\n // DisableTelemetry was defined in the config provided during initialization\r\n var disableTelemetry = !!_config.disableTelemetry;\r\n if (channelConfig) {\r\n // DisableTelemetry is defined in the config for this request/instance\r\n disableTelemetry = disableTelemetry || !!channelConfig.disableTelemetry;\r\n }\r\n var event = ev;\r\n if (!disableTelemetry && !_isTeardownCalled) {\r\n // Override iKey if provided in Post config if provided for during initialization\r\n if 
(_config[strOverrideInstrumentationKey]) {\r\n event.iKey = _config[strOverrideInstrumentationKey];\r\n }\r\n // Override iKey if provided in Post config if provided for this instance\r\n if (channelConfig && channelConfig[strOverrideInstrumentationKey]) {\r\n event.iKey = channelConfig[strOverrideInstrumentationKey];\r\n }\r\n _addEventToQueues(event, true);\r\n if (_isPageUnloadTriggered) {\r\n // Unload event has been received so we need to try and flush new events\r\n _releaseAllQueues(2 /* SendBeacon */, 2 /* Unload */);\r\n }\r\n else {\r\n _scheduleTimer();\r\n }\r\n }\r\n _self.processNext(event, itemCtx);\r\n };\r\n _self._doTeardown = function (unloadCtx, unloadState) {\r\n _releaseAllQueues(2 /* SendBeacon */, 2 /* Unload */);\r\n _isTeardownCalled = true;\r\n _httpManager.teardown();\r\n removePageUnloadEventListener(null, _evtNamespace);\r\n removePageHideEventListener(null, _evtNamespace);\r\n removePageShowEventListener(null, _evtNamespace);\r\n // Just register to remove all events associated with this namespace\r\n _initDefaults();\r\n };\r\n function _hookWParam(extendedCore) {\r\n var existingGetWParamMethod = extendedCore.getWParam;\r\n extendedCore.getWParam = function () {\r\n var wparam = 0;\r\n if (_config.ignoreMc1Ms0CookieProcessing) {\r\n wparam = wparam | 2;\r\n }\r\n return wparam | existingGetWParamMethod();\r\n };\r\n }\r\n // Moving event handlers out from the initialize closure so that any local variables can be garbage collected\r\n function _handleUnloadEvents(evt) {\r\n var theEvt = evt || getWindow().event; // IE 8 does not pass the event\r\n if (theEvt.type !== \"beforeunload\") {\r\n // Only set the unload trigger if not beforeunload event as beforeunload can be cancelled while the other events can't\r\n _isPageUnloadTriggered = true;\r\n _httpManager.setUnloading(_isPageUnloadTriggered);\r\n }\r\n _releaseAllQueues(2 /* SendBeacon */, 2 /* Unload */);\r\n }\r\n function _handleShowEvents(evt) {\r\n // Handle the page becoming 
visible again\r\n _isPageUnloadTriggered = false;\r\n _httpManager.setUnloading(_isPageUnloadTriggered);\r\n }\r\n function _addEventToQueues(event, append) {\r\n // If send attempt field is undefined we should set it to 0.\r\n if (!event.sendAttempt) {\r\n event.sendAttempt = 0;\r\n }\r\n // Add default latency\r\n if (!event.latency) {\r\n event.latency = 1 /* Normal */;\r\n }\r\n // Remove extra AI properties if present\r\n if (event.ext && event.ext[\"trace\"]) {\r\n delete (event.ext[\"trace\"]);\r\n }\r\n if (event.ext && event.ext[\"user\"] && event.ext[\"user\"][\"id\"]) {\r\n delete (event.ext[\"user\"][\"id\"]);\r\n }\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event.ext = optimizeObject(event.ext);\r\n if (event.baseData) {\r\n event.baseData = optimizeObject(event.baseData);\r\n }\r\n if (event.data) {\r\n event.data = optimizeObject(event.data);\r\n }\r\n }\r\n if (event.sync) {\r\n // If the transmission is backed off then do not send synchronous events.\r\n // We will convert these events to Real time latency instead.\r\n if (_currentBackoffCount || _paused) {\r\n event.latency = 3 /* RealTime */;\r\n event.sync = false;\r\n }\r\n else {\r\n // Log the event synchronously\r\n if (_httpManager) {\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event = optimizeObject(event);\r\n }\r\n _httpManager.sendSynchronousBatch(EventBatch.create(event.iKey, [event]), event.sync === true ? 
1 /* Synchronous */ : event.sync, 3 /* SyncEvent */);\r\n return;\r\n }\r\n }\r\n }\r\n var evtLatency = event.latency;\r\n var queueSize = _queueSize;\r\n var queueLimit = _queueSizeLimit;\r\n if (evtLatency === 4 /* Immediate */) {\r\n queueSize = _immediateQueueSize;\r\n queueLimit = _immediateQueueSizeLimit;\r\n }\r\n var eventDropped = false;\r\n // Only add the event if the queue isn't full or it's a direct event (which don't add to the queue sizes)\r\n if (queueSize < queueLimit) {\r\n eventDropped = !_addEventToProperQueue(event, append);\r\n }\r\n else {\r\n var dropLatency = 1 /* Normal */;\r\n var dropNumber = EventsDroppedAtOneTime;\r\n if (evtLatency === 4 /* Immediate */) {\r\n // Only drop other immediate events as they are not technically sharing the general queue\r\n dropLatency = 4 /* Immediate */;\r\n dropNumber = 1;\r\n }\r\n // Drop old event from lower or equal latency\r\n eventDropped = true;\r\n if (_dropEventWithLatencyOrLess(event.iKey, event.latency, dropLatency, dropNumber)) {\r\n eventDropped = !_addEventToProperQueue(event, append);\r\n }\r\n }\r\n if (eventDropped) {\r\n // Can't drop events from current queues because the all the slots are taken by queues that are being flushed.\r\n _notifyEvents(strEventsDiscarded, [event], EventsDiscardedReason.QueueFull);\r\n }\r\n }\r\n _self.setEventQueueLimits = function (eventLimit, autoFlushLimit) {\r\n _queueSizeLimit = eventLimit > 0 ? eventLimit : 10000;\r\n _autoFlushEventsLimit = autoFlushLimit > 0 ? 
autoFlushLimit : 0;\r\n _setAutoLimits();\r\n // We only do this check here as during normal event addition if the queue is > then events start getting dropped\r\n var doFlush = _queueSize > eventLimit;\r\n if (!doFlush && _autoFlushBatchLimit > 0) {\r\n // Check the auto flush max batch size\r\n for (var latency = 1 /* Normal */; !doFlush && latency <= 3 /* RealTime */; latency++) {\r\n var batchQueue = _batchQueues[latency];\r\n if (batchQueue && batchQueue.batches) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n if (theBatch && theBatch.count() >= _autoFlushBatchLimit) {\r\n // If any 1 batch is > than the limit then trigger an auto flush\r\n doFlush = true;\r\n }\r\n });\r\n }\r\n }\r\n }\r\n _performAutoFlush(true, doFlush);\r\n };\r\n _self.pause = function () {\r\n _clearScheduledTimer();\r\n _paused = true;\r\n _httpManager.pause();\r\n };\r\n _self.resume = function () {\r\n _paused = false;\r\n _httpManager.resume();\r\n _scheduleTimer();\r\n };\r\n _self.addResponseHandler = function (responseHandler) {\r\n _httpManager._responseHandlers.push(responseHandler);\r\n };\r\n _self._loadTransmitProfiles = function (profiles) {\r\n _resetTransmitProfiles();\r\n objForEachKey(profiles, function (profileName, profileValue) {\r\n var profLen = profileValue.length;\r\n if (profLen >= 2) {\r\n var directValue = (profLen > 2 ? profileValue[2] : 0);\r\n profileValue.splice(0, profLen - 2);\r\n // Make sure if a higher latency is set to not send then don't send lower latency\r\n if (profileValue[1] < 0) {\r\n profileValue[0] = -1;\r\n }\r\n // Make sure each latency is multiple of the latency higher then it. 
If not a multiple\r\n // we round up so that it becomes a multiple.\r\n if (profileValue[1] > 0 && profileValue[0] > 0) {\r\n var timerMultiplier = profileValue[0] / profileValue[1];\r\n profileValue[0] = Math.ceil(timerMultiplier) * profileValue[1];\r\n }\r\n // Add back the direct profile timeout\r\n if (directValue >= 0 && profileValue[1] >= 0 && directValue > profileValue[1]) {\r\n // Make sure if it's not disabled (< 0) then make sure it's not larger than RealTime\r\n directValue = profileValue[1];\r\n }\r\n profileValue.push(directValue);\r\n _profiles[profileName] = profileValue;\r\n }\r\n });\r\n };\r\n _self.flush = function (async, callback, sendReason) {\r\n if (async === void 0) { async = true; }\r\n if (!_paused) {\r\n // Clear the normal schedule timer as we are going to try and flush ASAP\r\n _clearScheduledTimer();\r\n sendReason = sendReason || 1 /* ManualFlush */;\r\n if (async) {\r\n // Move all queued events to the HttpManager\r\n _queueBatches(1 /* Normal */, 0 /* Batched */, sendReason);\r\n // All events (should) have been queue -- lets just make sure the queue counts are correct to avoid queue exhaustion (previous bug #9685112)\r\n _resetQueueCounts();\r\n if (_flushCallbackTimerId == null) {\r\n _flushCallbackTimerId = _createTimer(function () {\r\n _flushImpl(callback, sendReason);\r\n }, 0);\r\n }\r\n else {\r\n // Even if null (no callback) this will ensure after the flushImpl finishes waiting\r\n // for a completely idle connection it will attempt to re-flush any queued events on the next cycle\r\n _flushCallbackQueue.push(callback);\r\n }\r\n }\r\n else {\r\n // Now cause all queued events to be sent synchronously\r\n _sendEventsForLatencyAndAbove(1 /* Normal */, 1 /* Synchronous */, sendReason);\r\n if (callback !== null && callback !== undefined) {\r\n callback();\r\n }\r\n }\r\n }\r\n };\r\n _self.setMsaAuthTicket = function (ticket) {\r\n _httpManager.addHeader(strMsaDeviceTicket, ticket);\r\n };\r\n _self.hasEvents = 
_hasEvents;\r\n _self._setTransmitProfile = function (profileName) {\r\n if (_currentProfile !== profileName && _profiles[profileName] !== undefined) {\r\n _clearScheduledTimer();\r\n _currentProfile = profileName;\r\n _scheduleTimer();\r\n }\r\n };\r\n /**\r\n * Batch and send events currently in the queue for the given latency.\r\n * @param latency - Latency for which to send events.\r\n */\r\n function _sendEventsForLatencyAndAbove(latency, sendType, sendReason) {\r\n var queued = _queueBatches(latency, sendType, sendReason);\r\n // Always trigger the request as while the post channel may not have queued additional events, the httpManager may already have waiting events\r\n _httpManager.sendQueuedRequests(sendType, sendReason);\r\n return queued;\r\n }\r\n function _hasEvents() {\r\n return _queueSize > 0;\r\n }\r\n /**\r\n * Try to schedule the timer after which events will be sent. If there are\r\n * no events to be sent, or there is already a timer scheduled, or the\r\n * http manager doesn't have any idle connections this method is no-op.\r\n */\r\n function _scheduleTimer() {\r\n // If we had previously attempted to send requests, but the http manager didn't have any idle connections then the requests where delayed\r\n // so try and requeue then again now\r\n if (_delayedBatchSendLatency >= 0 && _queueBatches(_delayedBatchSendLatency, 0 /* Batched */, _delayedBatchReason)) {\r\n _httpManager.sendQueuedRequests(0 /* Batched */, _delayedBatchReason);\r\n }\r\n if (_immediateQueueSize > 0 && !_immediateTimerId && !_paused) {\r\n // During initialization _profiles enforce that the direct [2] is less than real time [1] timer value\r\n // If the immediateTimeout is disabled the immediate events will be sent with Real Time events\r\n var immediateTimeOut = _profiles[_currentProfile][2];\r\n if (immediateTimeOut >= 0) {\r\n _immediateTimerId = _createTimer(function () {\r\n _immediateTimerId = null;\r\n // Only try to send direct events\r\n 
_sendEventsForLatencyAndAbove(4 /* Immediate */, 0 /* Batched */, 1 /* NormalSchedule */);\r\n _scheduleTimer();\r\n }, immediateTimeOut);\r\n }\r\n }\r\n // During initialization the _profiles enforce that the normal [0] is a multiple of the real time [1] timer value\r\n var timeOut = _profiles[_currentProfile][1];\r\n if (!_scheduledTimerId && !_flushCallbackTimerId && timeOut >= 0 && !_paused) {\r\n if (_hasEvents()) {\r\n _scheduledTimerId = _createTimer(function () {\r\n _scheduledTimerId = null;\r\n _sendEventsForLatencyAndAbove(_timerCount === 0 ? 3 /* RealTime */ : 1 /* Normal */, 0 /* Batched */, 1 /* NormalSchedule */);\r\n // Increment the count for next cycle\r\n _timerCount++;\r\n _timerCount %= 2;\r\n _scheduleTimer();\r\n }, timeOut);\r\n }\r\n else {\r\n _timerCount = 0;\r\n }\r\n }\r\n }\r\n _self._backOffTransmission = function () {\r\n if (_currentBackoffCount < MaxBackoffCount) {\r\n _currentBackoffCount++;\r\n _clearScheduledTimer();\r\n _scheduleTimer();\r\n }\r\n };\r\n _self._clearBackOff = function () {\r\n if (_currentBackoffCount) {\r\n _currentBackoffCount = 0;\r\n _clearScheduledTimer();\r\n _scheduleTimer();\r\n }\r\n };\r\n function _initDefaults() {\r\n _config = null;\r\n _isTeardownCalled = false;\r\n _flushCallbackQueue = [];\r\n _flushCallbackTimerId = null;\r\n _paused = false;\r\n _immediateQueueSize = 0;\r\n _immediateQueueSizeLimit = 500;\r\n _queueSize = 0;\r\n _queueSizeLimit = 10000;\r\n _profiles = {};\r\n _currentProfile = RT_PROFILE;\r\n _scheduledTimerId = null;\r\n _immediateTimerId = null;\r\n _currentBackoffCount = 0;\r\n _timerCount = 0;\r\n _xhrOverride = null;\r\n _batchQueues = {};\r\n _autoFlushEventsLimit = undefined;\r\n // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])\r\n _autoFlushBatchLimit = 0;\r\n _delayedBatchSendLatency = -1;\r\n _delayedBatchReason = null;\r\n _optimizeObject = true;\r\n 
_isPageUnloadTriggered = false;\r\n _disableXhrSync = false;\r\n _maxEventSendAttempts = MaxSendAttempts;\r\n _maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;\r\n _evtNamespace = null;\r\n _httpManager = new HttpManager(MaxNumberEventPerBatch, MaxConnections, MaxRequestRetriesBeforeBackoff, {\r\n requeue: _requeueEvents,\r\n send: _sendingEvent,\r\n sent: _eventsSentEvent,\r\n drop: _eventsDropped,\r\n rspFail: _eventsResponseFail,\r\n oth: _otherEvent\r\n });\r\n _initializeProfiles();\r\n _clearQueues();\r\n _setAutoLimits();\r\n }\r\n function _createTimer(theTimerFunc, timeOut) {\r\n // If the transmission is backed off make the timer at least 1 sec to allow for back off.\r\n if (timeOut === 0 && _currentBackoffCount) {\r\n timeOut = 1;\r\n }\r\n var timerMultiplier = 1000;\r\n if (_currentBackoffCount) {\r\n timerMultiplier = retryPolicyGetMillisToBackoffForRetry(_currentBackoffCount - 1);\r\n }\r\n return _self._setTimeoutOverride(theTimerFunc, timeOut * timerMultiplier);\r\n }\r\n function _clearScheduledTimer() {\r\n if (_scheduledTimerId !== null) {\r\n _self._clearTimeoutOverride(_scheduledTimerId);\r\n _scheduledTimerId = null;\r\n _timerCount = 0;\r\n }\r\n }\r\n // Try to send all queued events using beacons if available\r\n function _releaseAllQueues(sendType, sendReason) {\r\n _clearScheduledTimer();\r\n // Cancel all flush callbacks\r\n if (_flushCallbackTimerId) {\r\n _self._clearTimeoutOverride(_flushCallbackTimerId);\r\n _flushCallbackTimerId = null;\r\n }\r\n if (!_paused) {\r\n // Queue all the remaining requests to be sent. The requests will be sent using HTML5 Beacons if they are available.\r\n _sendEventsForLatencyAndAbove(1 /* Normal */, sendType, sendReason);\r\n }\r\n }\r\n /**\r\n * Add empty queues for all latencies in the inbound queues map. This is called\r\n * when Transmission Manager is being flushed. 
This ensures that new events added\r\n * after flush are stored separately till we flush the current events.\r\n */\r\n function _clearQueues() {\r\n _batchQueues[4 /* Immediate */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[3 /* RealTime */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[2 /* CostDeferred */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[1 /* Normal */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n }\r\n function _getEventBatch(iKey, latency, create) {\r\n var batchQueue = _batchQueues[latency];\r\n if (!batchQueue) {\r\n latency = 1 /* Normal */;\r\n batchQueue = _batchQueues[latency];\r\n }\r\n var eventBatch = batchQueue.iKeyMap[iKey];\r\n if (!eventBatch && create) {\r\n eventBatch = EventBatch.create(iKey);\r\n batchQueue.batches.push(eventBatch);\r\n batchQueue.iKeyMap[iKey] = eventBatch;\r\n }\r\n return eventBatch;\r\n }\r\n function _performAutoFlush(isAsync, doFlush) {\r\n // Only perform the auto flush check if the httpManager has an idle connection and we are not in a backoff situation\r\n if (_httpManager.canSendRequest() && !_currentBackoffCount) {\r\n if (_autoFlushEventsLimit > 0 && _queueSize > _autoFlushEventsLimit) {\r\n // Force flushing\r\n doFlush = true;\r\n }\r\n if (doFlush && _flushCallbackTimerId == null) {\r\n // Auto flush the queue\r\n _self.flush(isAsync, null, 20 /* MaxQueuedEvents */);\r\n }\r\n }\r\n }\r\n function _addEventToProperQueue(event, append) {\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event = optimizeObject(event);\r\n }\r\n var latency = event.latency;\r\n var eventBatch = _getEventBatch(event.iKey, latency, true);\r\n if (eventBatch.addEvent(event)) {\r\n if (latency !== 4 /* Immediate */) {\r\n _queueSize++;\r\n // Check for auto flushing based on total events in the queue, but not for requeued or retry events\r\n if (append && event.sendAttempt === 0) {\r\n // Force the flushing of the batch if the 
batch (specific iKey / latency combination) reaches it's auto flush limit\r\n _performAutoFlush(!event.sync, _autoFlushBatchLimit > 0 && eventBatch.count() >= _autoFlushBatchLimit);\r\n }\r\n }\r\n else {\r\n // Direct events don't need auto flushing as they are scheduled (by default) for immediate delivery\r\n _immediateQueueSize++;\r\n }\r\n return true;\r\n }\r\n return false;\r\n }\r\n function _dropEventWithLatencyOrLess(iKey, latency, currentLatency, dropNumber) {\r\n while (currentLatency <= latency) {\r\n var eventBatch = _getEventBatch(iKey, latency, true);\r\n if (eventBatch && eventBatch.count() > 0) {\r\n // Dropped oldest events from lowest possible latency\r\n var droppedEvents = eventBatch.split(0, dropNumber);\r\n var droppedCount = droppedEvents.count();\r\n if (droppedCount > 0) {\r\n if (currentLatency === 4 /* Immediate */) {\r\n _immediateQueueSize -= droppedCount;\r\n }\r\n else {\r\n _queueSize -= droppedCount;\r\n }\r\n _notifyBatchEvents(strEventsDiscarded, [droppedEvents], EventsDiscardedReason.QueueFull);\r\n return true;\r\n }\r\n }\r\n currentLatency++;\r\n }\r\n // Unable to drop any events -- lets just make sure the queue counts are correct to avoid exhaustion\r\n _resetQueueCounts();\r\n return false;\r\n }\r\n /**\r\n * Internal helper to reset the queue counts, used as a backstop to avoid future queue exhaustion errors\r\n * that might occur because of counting issues.\r\n */\r\n function _resetQueueCounts() {\r\n var immediateQueue = 0;\r\n var normalQueue = 0;\r\n var _loop_1 = function (latency) {\r\n var batchQueue = _batchQueues[latency];\r\n if (batchQueue && batchQueue.batches) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n if (latency === 4 /* Immediate */) {\r\n immediateQueue += theBatch.count();\r\n }\r\n else {\r\n normalQueue += theBatch.count();\r\n }\r\n });\r\n }\r\n };\r\n for (var latency = 1 /* Normal */; latency <= 4 /* Immediate */; latency++) {\r\n _loop_1(latency);\r\n }\r\n _queueSize = 
normalQueue;\r\n _immediateQueueSize = immediateQueue;\r\n }\r\n function _queueBatches(latency, sendType, sendReason) {\r\n var eventsQueued = false;\r\n var isAsync = sendType === 0 /* Batched */;\r\n // Only queue batches (to the HttpManager) if this is a sync request or the httpManager has an idle connection\r\n // Thus keeping the events within the PostChannel until the HttpManager has a connection available\r\n // This is so we can drop \"old\" events if the queue is getting full because we can't successfully send events\r\n if (!isAsync || _httpManager.canSendRequest()) {\r\n doPerf(_self.core, function () { return \"PostChannel._queueBatches\"; }, function () {\r\n var droppedEvents = [];\r\n var latencyToProcess = 4 /* Immediate */;\r\n while (latencyToProcess >= latency) {\r\n var batchQueue = _batchQueues[latencyToProcess];\r\n if (batchQueue && batchQueue.batches && batchQueue.batches.length > 0) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n // Add the batch to the http manager to send the requests\r\n if (!_httpManager.addBatch(theBatch)) {\r\n // The events from this iKey are being dropped (killed)\r\n droppedEvents = droppedEvents.concat(theBatch.events());\r\n }\r\n else {\r\n eventsQueued = eventsQueued || (theBatch && theBatch.count() > 0);\r\n }\r\n if (latencyToProcess === 4 /* Immediate */) {\r\n _immediateQueueSize -= theBatch.count();\r\n }\r\n else {\r\n _queueSize -= theBatch.count();\r\n }\r\n });\r\n // Remove all batches from this Queue\r\n batchQueue.batches = [];\r\n batchQueue.iKeyMap = {};\r\n }\r\n latencyToProcess--;\r\n }\r\n if (droppedEvents.length > 0) {\r\n _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.KillSwitch);\r\n }\r\n if (eventsQueued && _delayedBatchSendLatency >= latency) {\r\n // We have queued events at the same level as the delayed values so clear the setting\r\n _delayedBatchSendLatency = -1;\r\n _delayedBatchReason = 0 /* Undefined */;\r\n }\r\n }, function () { return ({ 
latency: latency, sendType: sendType, sendReason: sendReason }); }, !isAsync);\r\n }\r\n else {\r\n // remember the min latency so that we can re-trigger later\r\n _delayedBatchSendLatency = _delayedBatchSendLatency >= 0 ? Math.min(_delayedBatchSendLatency, latency) : latency;\r\n _delayedBatchReason = Math.max(_delayedBatchReason, sendReason);\r\n }\r\n return eventsQueued;\r\n }\r\n /**\r\n * This is the callback method is called as part of the manual flushing process.\r\n * @param callback\r\n * @param sendReason\r\n */\r\n function _flushImpl(callback, sendReason) {\r\n // Add any additional queued events and cause all queued events to be sent asynchronously\r\n _sendEventsForLatencyAndAbove(1 /* Normal */, 0 /* Batched */, sendReason);\r\n _waitForIdleManager(function () {\r\n // Only called AFTER the httpManager does not have any outstanding requests\r\n if (callback) {\r\n callback();\r\n }\r\n if (_flushCallbackQueue.length > 0) {\r\n _flushCallbackTimerId = _createTimer(function () { return _flushImpl(_flushCallbackQueue.shift(), sendReason); }, 0);\r\n }\r\n else {\r\n // No more flush requests\r\n _flushCallbackTimerId = null;\r\n if (_hasEvents()) {\r\n // We still have events, so restart the normal timer schedule\r\n _scheduleTimer();\r\n }\r\n }\r\n });\r\n }\r\n function _waitForIdleManager(callback) {\r\n if (_httpManager.isCompletelyIdle()) {\r\n callback();\r\n }\r\n else {\r\n _flushCallbackTimerId = _createTimer(function () {\r\n _waitForIdleManager(callback);\r\n }, FlushCheckTimer);\r\n }\r\n }\r\n /**\r\n * Resets the transmit profiles to the default profiles of Real Time, Near Real Time\r\n * and Best Effort. 
This removes all the custom profiles that were loaded.\r\n */\r\n function _resetTransmitProfiles() {\r\n _clearScheduledTimer();\r\n _initializeProfiles();\r\n _currentProfile = RT_PROFILE;\r\n _scheduleTimer();\r\n }\r\n function _initializeProfiles() {\r\n _profiles = {};\r\n _profiles[RT_PROFILE] = [2, 1, 0];\r\n _profiles[NRT_PROFILE] = [6, 3, 0];\r\n _profiles[BE_PROFILE] = [18, 9, 0];\r\n }\r\n /**\r\n * The notification handler for requeue events\r\n * @ignore\r\n */\r\n function _requeueEvents(batches, reason) {\r\n var droppedEvents = [];\r\n var maxSendAttempts = _maxEventSendAttempts;\r\n if (_isPageUnloadTriggered) {\r\n // If a page unlaod has been triggered reduce the number of times we try to \"retry\"\r\n maxSendAttempts = _maxUnloadEventSendAttempts;\r\n }\r\n arrForEach(batches, function (theBatch) {\r\n if (theBatch && theBatch.count() > 0) {\r\n arrForEach(theBatch.events(), function (theEvent) {\r\n if (theEvent) {\r\n // Check if the request being added back is for a sync event in which case mark it no longer a sync event\r\n if (theEvent.sync) {\r\n theEvent.latency = 4 /* Immediate */;\r\n theEvent.sync = false;\r\n }\r\n if (theEvent.sendAttempt < maxSendAttempts) {\r\n // Reset the event timings\r\n setProcessTelemetryTimings(theEvent, _self.identifier);\r\n _addEventToQueues(theEvent, false);\r\n }\r\n else {\r\n droppedEvents.push(theEvent);\r\n }\r\n }\r\n });\r\n }\r\n });\r\n if (droppedEvents.length > 0) {\r\n _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.NonRetryableStatus);\r\n }\r\n if (_isPageUnloadTriggered) {\r\n // Unload event has been received so we need to try and flush new events\r\n _releaseAllQueues(2 /* SendBeacon */, 2 /* Unload */);\r\n }\r\n }\r\n function _callNotification(evtName, theArgs) {\r\n var manager = (_self._notificationManager || {});\r\n var notifyFunc = manager[evtName];\r\n if (notifyFunc) {\r\n try {\r\n notifyFunc.apply(manager, theArgs);\r\n }\r\n catch (e) {\r\n 
_throwInternal(_self.diagLog(), 1 /* CRITICAL */, 74 /* NotificationException */, evtName + \" notification failed: \" + e);\r\n }\r\n }\r\n }\r\n function _notifyEvents(evtName, theEvents) {\r\n var extraArgs = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n extraArgs[_i - 2] = arguments[_i];\r\n }\r\n if (theEvents && theEvents.length > 0) {\r\n _callNotification(evtName, [theEvents].concat(extraArgs));\r\n }\r\n }\r\n function _notifyBatchEvents(evtName, batches) {\r\n var extraArgs = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n extraArgs[_i - 2] = arguments[_i];\r\n }\r\n if (batches && batches.length > 0) {\r\n arrForEach(batches, function (theBatch) {\r\n if (theBatch && theBatch.count() > 0) {\r\n _callNotification(evtName, [theBatch.events()].concat(extraArgs));\r\n }\r\n });\r\n }\r\n }\r\n /**\r\n * The notification handler for when batches are about to be sent\r\n * @ignore\r\n */\r\n function _sendingEvent(batches, reason, isSyncRequest) {\r\n if (batches && batches.length > 0) {\r\n _callNotification(\"eventsSendRequest\", [(reason >= 1000 /* SendingUndefined */ && reason <= 1999 /* SendingEventMax */ ?\r\n reason - 1000 /* SendingUndefined */ :\r\n 0 /* Undefined */), isSyncRequest !== true]);\r\n }\r\n }\r\n /**\r\n * This event represents that a batch of events have been successfully sent and a response received\r\n * @param batches The notification handler for when the batches have been successfully sent\r\n * @param reason For this event the reason will always be EventBatchNotificationReason.Complete\r\n */\r\n function _eventsSentEvent(batches, reason) {\r\n _notifyBatchEvents(\"eventsSent\", batches, reason);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _eventsDropped(batches, reason) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, (reason >= 8000 /* EventsDropped */ && reason <= 8999 /* EventsDroppedMax */ ?\r\n reason - 8000 /* EventsDropped */ :\r\n 
EventsDiscardedReason.Unknown));\r\n }\r\n function _eventsResponseFail(batches) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.NonRetryableStatus);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _otherEvent(batches, reason) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.Unknown);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _setAutoLimits() {\r\n if (!_config || !_config.disableAutoBatchFlushLimit) {\r\n _autoFlushBatchLimit = Math.max(MaxNumberEventPerBatch * (MaxConnections + 1), _queueSizeLimit / 6);\r\n }\r\n else {\r\n _autoFlushBatchLimit = 0;\r\n }\r\n }\r\n });\r\n return _this;\r\n }\r\n /**\r\n * Start the queue manager to batch and send events via post.\r\n * @param config - The core configuration.\r\n */\r\n PostChannel.prototype.initialize = function (coreConfig, core, extensions) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Add an event to the appropriate inbound queue based on its latency.\r\n * @param ev - The event to be added to the queue.\r\n * @param itemCtx - This is the context for the current request, ITelemetryPlugin instances\r\n * can optionally use this to access the current core instance or define / pass additional information\r\n * to later plugins (vs appending items to the telemetry item)\r\n */\r\n PostChannel.prototype.processTelemetry = function (ev, itemCtx) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Sets the event queue limits at runtime (after initialization), if the number of queued events is greater than the\r\n * eventLimit or autoFlushLimit then a flush() operation will be scheduled.\r\n * @param eventLimit The number of events that can be kept in memory before the SDK starts to drop events. 
If the value passed is less than or\r\n * equal to zero the value will be reset to the default (10,000).\r\n * @param autoFlushLimit When defined, once this number of events has been queued the system perform a flush() to send the queued events\r\n * without waiting for the normal schedule timers. Passing undefined, null or a value less than or equal to zero will disable the auto flush.\r\n */\r\n PostChannel.prototype.setEventQueueLimits = function (eventLimit, autoFlushLimit) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Pause the transmission of any requests\r\n */\r\n PostChannel.prototype.pause = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Resumes transmission of events.\r\n */\r\n PostChannel.prototype.resume = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Add handler to be executed with request response text.\r\n */\r\n PostChannel.prototype.addResponseHandler = function (responseHanlder) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Flush to send data immediately; channel should default to sending data asynchronously\r\n * @param async - send data asynchronously when true\r\n * @param callback - if specified, notify caller when send is complete\r\n */\r\n PostChannel.prototype.flush = function (async, callback, sendReason) {\r\n if (async === void 0) { async = true; }\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Set AuthMsaDeviceTicket header\r\n * @param ticket - Ticket value.\r\n */\r\n PostChannel.prototype.setMsaAuthTicket = function (ticket) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Check if there are any events waiting to be scheduled for 
sending.\r\n * @returns True if there are events, false otherwise.\r\n */\r\n PostChannel.prototype.hasEvents = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return false;\r\n };\r\n /**\r\n * Load custom transmission profiles. Each profile should have timers for real time, and normal and can\r\n * optionally specify the immediate latency time in ms (defaults to 0 when not defined). Each profile should\r\n * make sure that a each normal latency timer is a multiple of the real-time latency and the immediate\r\n * is smaller than the real-time.\r\n * Setting the timer value to -1 means that the events for that latency will not be scheduled to be sent.\r\n * Note that once a latency has been set to not send, all latencies below it will also not be sent. The\r\n * timers should be in the form of [normal, high, [immediate]].\r\n * e.g Custom:\r\n * [10,5] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms\r\n * [10,5,1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 1ms\r\n * [10,5,0] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms\r\n * [10,5,-1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate events will not be\r\n * scheduled on their own and but they will be included with real-time or normal events as the first events in a batch.\r\n * This also removes any previously loaded custom profiles.\r\n * @param profiles - A dictionary containing the transmit profiles.\r\n */\r\n PostChannel.prototype._loadTransmitProfiles = function (profiles) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Set the transmit profile to be used. 
This will change the transmission timers\r\n * based on the transmit profile.\r\n * @param profileName - The name of the transmit profile to be used.\r\n */\r\n PostChannel.prototype._setTransmitProfile = function (profileName) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Backs off transmission. This exponentially increases all the timers.\r\n */\r\n PostChannel.prototype._backOffTransmission = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Clears backoff for transmission.\r\n */\r\n PostChannel.prototype._clearBackOff = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n return PostChannel;\r\n}(BaseTelemetryPlugin));\r\nexport default PostChannel;\r\n//# sourceMappingURL=PostChannel.js.map"],"names":[],"mappings":";;;;;AAAA,gFAAkC;AAClC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAC
A;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAC
A;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;;;;;;;;;;wDA6GM;AACN;AACA;AACA;AACA"}
|
|
1
|
+
{"version":3,"file":"PostChannel.js.map","sources":["PostChannel.js"],"sourcesContent":["import { __extends } from \"tslib\";\r\n/**\r\n* PostManager.ts\r\n* @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)\r\n* @copyright Microsoft 2018-2020\r\n*/\r\nimport dynamicProto from \"@microsoft/dynamicproto-js\";\r\nimport { BaseTelemetryPlugin, EventsDiscardedReason, isValueAssigned, setProcessTelemetryTimings, arrForEach, doPerf, objForEachKey, optimizeObject, isChromium, getWindow, isNumber, mergeEvtNamespace, createUniqueNamespace, addPageUnloadEventListener, addPageHideEventListener, addPageShowEventListener, removePageUnloadEventListener, removePageHideEventListener, removePageShowEventListener, _throwInternal, objDefineAccessors } from \"@microsoft/1ds-core-js\";\r\nimport { RT_PROFILE, NRT_PROFILE, BE_PROFILE, } from \"./DataModels\";\r\nimport { EventBatch } from \"./EventBatch\";\r\nimport { HttpManager } from \"./HttpManager\";\r\nimport { retryPolicyGetMillisToBackoffForRetry } from \"./RetryPolicy\";\r\nimport { strMsaDeviceTicket } from \"./Constants\";\r\nimport { createTimeoutWrapper } from \"./TimeoutOverrideWrapper\";\r\nvar FlushCheckTimer = 0.250; // This needs to be in seconds, so this is 250ms\r\nvar MaxNumberEventPerBatch = 500;\r\nvar EventsDroppedAtOneTime = 20;\r\nvar MaxSendAttempts = 6;\r\nvar MaxSyncUnloadSendAttempts = 2; // Assuming 2 based on beforeunload and unload\r\nvar MaxBackoffCount = 4;\r\nvar MaxConnections = 2;\r\nvar MaxRequestRetriesBeforeBackoff = 1;\r\nvar strEventsDiscarded = \"eventsDiscarded\";\r\nvar strOverrideInstrumentationKey = \"overrideInstrumentationKey\";\r\nvar strMaxEventRetryAttempts = \"maxEventRetryAttempts\";\r\nvar strMaxUnloadEventRetryAttempts = \"maxUnloadEventRetryAttempts\";\r\nvar strAddUnloadCb = \"addUnloadCb\";\r\n/**\r\n * Class that manages adding events to inbound queues and batching of events\r\n * into requests.\r\n */\r\nvar PostChannel = /** @class */ 
(function (_super) {\r\n __extends(PostChannel, _super);\r\n function PostChannel() {\r\n var _this = _super.call(this) || this;\r\n _this.identifier = \"PostChannel\";\r\n _this.priority = 1011;\r\n _this.version = '3.2.2';\r\n var _config;\r\n var _isTeardownCalled = false;\r\n var _flushCallbackQueue = [];\r\n var _flushCallbackTimerId = null;\r\n var _paused = false;\r\n var _immediateQueueSize = 0;\r\n var _immediateQueueSizeLimit = 500;\r\n var _queueSize = 0;\r\n var _queueSizeLimit = 10000;\r\n var _profiles = {};\r\n var _currentProfile = RT_PROFILE;\r\n var _scheduledTimerId = null;\r\n var _immediateTimerId = null;\r\n var _currentBackoffCount = 0;\r\n var _timerCount = 0;\r\n var _xhrOverride;\r\n var _httpManager;\r\n var _batchQueues = {};\r\n var _autoFlushEventsLimit;\r\n // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])\r\n var _autoFlushBatchLimit;\r\n var _delayedBatchSendLatency = -1;\r\n var _delayedBatchReason;\r\n var _optimizeObject = true;\r\n var _isPageUnloadTriggered = false;\r\n var _disableXhrSync = false;\r\n var _maxEventSendAttempts = MaxSendAttempts;\r\n var _maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;\r\n var _evtNamespace;\r\n var _timeoutWrapper;\r\n dynamicProto(PostChannel, _this, function (_self, _base) {\r\n _initDefaults();\r\n // Special internal method to allow the DebugPlugin to hook embedded objects\r\n _self[\"_getDbgPlgTargets\"] = function () {\r\n return [_httpManager];\r\n };\r\n _self.initialize = function (coreConfig, core, extensions) {\r\n doPerf(core, function () { return \"PostChannel:initialize\"; }, function () {\r\n var extendedCore = core;\r\n _base.initialize(coreConfig, core, extensions);\r\n try {\r\n var hasAddUnloadCb = !!core[strAddUnloadCb];\r\n _evtNamespace = mergeEvtNamespace(createUniqueNamespace(_self.identifier), core.evtNamespace && core.evtNamespace());\r\n var ctx 
= _self._getTelCtx();\r\n coreConfig.extensionConfig[_self.identifier] = coreConfig.extensionConfig[_self.identifier] || {};\r\n _config = ctx.getExtCfg(_self.identifier);\r\n _timeoutWrapper = createTimeoutWrapper(_config.setTimeoutOverride, _config.clearTimeoutOverride);\r\n // Only try and use the optimizeObject() if this appears to be a chromium based browser and it has not been explicitly disabled\r\n _optimizeObject = !_config.disableOptimizeObj && isChromium();\r\n _hookWParam(extendedCore);\r\n if (_config.eventsLimitInMem > 0) {\r\n _queueSizeLimit = _config.eventsLimitInMem;\r\n }\r\n if (_config.immediateEventLimit > 0) {\r\n _immediateQueueSizeLimit = _config.immediateEventLimit;\r\n }\r\n if (_config.autoFlushEventsLimit > 0) {\r\n _autoFlushEventsLimit = _config.autoFlushEventsLimit;\r\n }\r\n _disableXhrSync = _config.disableXhrSync;\r\n if (isNumber(_config[strMaxEventRetryAttempts])) {\r\n _maxEventSendAttempts = _config[strMaxEventRetryAttempts];\r\n }\r\n if (isNumber(_config[strMaxUnloadEventRetryAttempts])) {\r\n _maxUnloadEventSendAttempts = _config[strMaxUnloadEventRetryAttempts];\r\n }\r\n _setAutoLimits();\r\n if (_config.httpXHROverride && _config.httpXHROverride.sendPOST) {\r\n _xhrOverride = _config.httpXHROverride;\r\n }\r\n if (isValueAssigned(coreConfig.anonCookieName)) {\r\n _httpManager.addQueryStringParameter(\"anoncknm\", coreConfig.anonCookieName);\r\n }\r\n _httpManager.sendHook = _config.payloadPreprocessor;\r\n _httpManager.sendListener = _config.payloadListener;\r\n // Override endpointUrl if provided in Post config\r\n var endpointUrl = _config.overrideEndpointUrl ? 
_config.overrideEndpointUrl : coreConfig.endpointUrl;\r\n _self._notificationManager = coreConfig.extensionConfig.NotificationManager;\r\n _httpManager.initialize(endpointUrl, _self.core, _self, _xhrOverride, _config);\r\n var excludePageUnloadEvents = coreConfig.disablePageUnloadEvents || [];\r\n // When running in Web browsers try to send all telemetry if page is unloaded\r\n addPageUnloadEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);\r\n addPageHideEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);\r\n addPageShowEventListener(_handleShowEvents, coreConfig.disablePageShowEvents, _evtNamespace);\r\n }\r\n catch (e) {\r\n // resetting the initialized state because of failure\r\n _self.setInitialized(false);\r\n throw e;\r\n }\r\n }, function () { return ({ coreConfig: coreConfig, core: core, extensions: extensions }); });\r\n };\r\n _self.processTelemetry = function (ev, itemCtx) {\r\n setProcessTelemetryTimings(ev, _self.identifier);\r\n itemCtx = _self._getTelCtx(itemCtx);\r\n // Get the channel instance from the current request/instance\r\n var channelConfig = itemCtx.getExtCfg(_self.identifier);\r\n // DisableTelemetry was defined in the config provided during initialization\r\n var disableTelemetry = !!_config.disableTelemetry;\r\n if (channelConfig) {\r\n // DisableTelemetry is defined in the config for this request/instance\r\n disableTelemetry = disableTelemetry || !!channelConfig.disableTelemetry;\r\n }\r\n var event = ev;\r\n if (!disableTelemetry && !_isTeardownCalled) {\r\n // Override iKey if provided in Post config if provided for during initialization\r\n if (_config[strOverrideInstrumentationKey]) {\r\n event.iKey = _config[strOverrideInstrumentationKey];\r\n }\r\n // Override iKey if provided in Post config if provided for this instance\r\n if (channelConfig && channelConfig[strOverrideInstrumentationKey]) {\r\n event.iKey = channelConfig[strOverrideInstrumentationKey];\r\n }\r\n 
_addEventToQueues(event, true);\r\n if (_isPageUnloadTriggered) {\r\n // Unload event has been received so we need to try and flush new events\r\n _releaseAllQueues(2 /* SendBeacon */, 2 /* Unload */);\r\n }\r\n else {\r\n _scheduleTimer();\r\n }\r\n }\r\n _self.processNext(event, itemCtx);\r\n };\r\n _self._doTeardown = function (unloadCtx, unloadState) {\r\n _releaseAllQueues(2 /* SendBeacon */, 2 /* Unload */);\r\n _isTeardownCalled = true;\r\n _httpManager.teardown();\r\n removePageUnloadEventListener(null, _evtNamespace);\r\n removePageHideEventListener(null, _evtNamespace);\r\n removePageShowEventListener(null, _evtNamespace);\r\n // Just register to remove all events associated with this namespace\r\n _initDefaults();\r\n };\r\n function _hookWParam(extendedCore) {\r\n var existingGetWParamMethod = extendedCore.getWParam;\r\n extendedCore.getWParam = function () {\r\n var wparam = 0;\r\n if (_config.ignoreMc1Ms0CookieProcessing) {\r\n wparam = wparam | 2;\r\n }\r\n return wparam | existingGetWParamMethod();\r\n };\r\n }\r\n // Moving event handlers out from the initialize closure so that any local variables can be garbage collected\r\n function _handleUnloadEvents(evt) {\r\n var theEvt = evt || getWindow().event; // IE 8 does not pass the event\r\n if (theEvt.type !== \"beforeunload\") {\r\n // Only set the unload trigger if not beforeunload event as beforeunload can be cancelled while the other events can't\r\n _isPageUnloadTriggered = true;\r\n _httpManager.setUnloading(_isPageUnloadTriggered);\r\n }\r\n _releaseAllQueues(2 /* SendBeacon */, 2 /* Unload */);\r\n }\r\n function _handleShowEvents(evt) {\r\n // Handle the page becoming visible again\r\n _isPageUnloadTriggered = false;\r\n _httpManager.setUnloading(_isPageUnloadTriggered);\r\n }\r\n function _addEventToQueues(event, append) {\r\n // If send attempt field is undefined we should set it to 0.\r\n if (!event.sendAttempt) {\r\n event.sendAttempt = 0;\r\n }\r\n // Add default latency\r\n if 
(!event.latency) {\r\n event.latency = 1 /* Normal */;\r\n }\r\n // Remove extra AI properties if present\r\n if (event.ext && event.ext[\"trace\"]) {\r\n delete (event.ext[\"trace\"]);\r\n }\r\n if (event.ext && event.ext[\"user\"] && event.ext[\"user\"][\"id\"]) {\r\n delete (event.ext[\"user\"][\"id\"]);\r\n }\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event.ext = optimizeObject(event.ext);\r\n if (event.baseData) {\r\n event.baseData = optimizeObject(event.baseData);\r\n }\r\n if (event.data) {\r\n event.data = optimizeObject(event.data);\r\n }\r\n }\r\n if (event.sync) {\r\n // If the transmission is backed off then do not send synchronous events.\r\n // We will convert these events to Real time latency instead.\r\n if (_currentBackoffCount || _paused) {\r\n event.latency = 3 /* RealTime */;\r\n event.sync = false;\r\n }\r\n else {\r\n // Log the event synchronously\r\n if (_httpManager) {\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event = optimizeObject(event);\r\n }\r\n _httpManager.sendSynchronousBatch(EventBatch.create(event.iKey, [event]), event.sync === true ? 
1 /* Synchronous */ : event.sync, 3 /* SyncEvent */);\r\n return;\r\n }\r\n }\r\n }\r\n var evtLatency = event.latency;\r\n var queueSize = _queueSize;\r\n var queueLimit = _queueSizeLimit;\r\n if (evtLatency === 4 /* Immediate */) {\r\n queueSize = _immediateQueueSize;\r\n queueLimit = _immediateQueueSizeLimit;\r\n }\r\n var eventDropped = false;\r\n // Only add the event if the queue isn't full or it's a direct event (which don't add to the queue sizes)\r\n if (queueSize < queueLimit) {\r\n eventDropped = !_addEventToProperQueue(event, append);\r\n }\r\n else {\r\n var dropLatency = 1 /* Normal */;\r\n var dropNumber = EventsDroppedAtOneTime;\r\n if (evtLatency === 4 /* Immediate */) {\r\n // Only drop other immediate events as they are not technically sharing the general queue\r\n dropLatency = 4 /* Immediate */;\r\n dropNumber = 1;\r\n }\r\n // Drop old event from lower or equal latency\r\n eventDropped = true;\r\n if (_dropEventWithLatencyOrLess(event.iKey, event.latency, dropLatency, dropNumber)) {\r\n eventDropped = !_addEventToProperQueue(event, append);\r\n }\r\n }\r\n if (eventDropped) {\r\n // Can't drop events from current queues because the all the slots are taken by queues that are being flushed.\r\n _notifyEvents(strEventsDiscarded, [event], EventsDiscardedReason.QueueFull);\r\n }\r\n }\r\n _self.setEventQueueLimits = function (eventLimit, autoFlushLimit) {\r\n _queueSizeLimit = eventLimit > 0 ? eventLimit : 10000;\r\n _autoFlushEventsLimit = autoFlushLimit > 0 ? 
autoFlushLimit : 0;\r\n _setAutoLimits();\r\n // We only do this check here as during normal event addition if the queue is > then events start getting dropped\r\n var doFlush = _queueSize > eventLimit;\r\n if (!doFlush && _autoFlushBatchLimit > 0) {\r\n // Check the auto flush max batch size\r\n for (var latency = 1 /* Normal */; !doFlush && latency <= 3 /* RealTime */; latency++) {\r\n var batchQueue = _batchQueues[latency];\r\n if (batchQueue && batchQueue.batches) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n if (theBatch && theBatch.count() >= _autoFlushBatchLimit) {\r\n // If any 1 batch is > than the limit then trigger an auto flush\r\n doFlush = true;\r\n }\r\n });\r\n }\r\n }\r\n }\r\n _performAutoFlush(true, doFlush);\r\n };\r\n _self.pause = function () {\r\n _clearScheduledTimer();\r\n _paused = true;\r\n _httpManager.pause();\r\n };\r\n _self.resume = function () {\r\n _paused = false;\r\n _httpManager.resume();\r\n _scheduleTimer();\r\n };\r\n _self.addResponseHandler = function (responseHandler) {\r\n _httpManager._responseHandlers.push(responseHandler);\r\n };\r\n _self._loadTransmitProfiles = function (profiles) {\r\n _resetTransmitProfiles();\r\n objForEachKey(profiles, function (profileName, profileValue) {\r\n var profLen = profileValue.length;\r\n if (profLen >= 2) {\r\n var directValue = (profLen > 2 ? profileValue[2] : 0);\r\n profileValue.splice(0, profLen - 2);\r\n // Make sure if a higher latency is set to not send then don't send lower latency\r\n if (profileValue[1] < 0) {\r\n profileValue[0] = -1;\r\n }\r\n // Make sure each latency is multiple of the latency higher then it. 
If not a multiple\r\n // we round up so that it becomes a multiple.\r\n if (profileValue[1] > 0 && profileValue[0] > 0) {\r\n var timerMultiplier = profileValue[0] / profileValue[1];\r\n profileValue[0] = Math.ceil(timerMultiplier) * profileValue[1];\r\n }\r\n // Add back the direct profile timeout\r\n if (directValue >= 0 && profileValue[1] >= 0 && directValue > profileValue[1]) {\r\n // Make sure if it's not disabled (< 0) then make sure it's not larger than RealTime\r\n directValue = profileValue[1];\r\n }\r\n profileValue.push(directValue);\r\n _profiles[profileName] = profileValue;\r\n }\r\n });\r\n };\r\n _self.flush = function (async, callback, sendReason) {\r\n if (async === void 0) { async = true; }\r\n if (!_paused) {\r\n // Clear the normal schedule timer as we are going to try and flush ASAP\r\n _clearScheduledTimer();\r\n sendReason = sendReason || 1 /* ManualFlush */;\r\n if (async) {\r\n // Move all queued events to the HttpManager\r\n _queueBatches(1 /* Normal */, 0 /* Batched */, sendReason);\r\n // All events (should) have been queue -- lets just make sure the queue counts are correct to avoid queue exhaustion (previous bug #9685112)\r\n _resetQueueCounts();\r\n if (_flushCallbackTimerId == null) {\r\n _flushCallbackTimerId = _createTimer(function () {\r\n _flushImpl(callback, sendReason);\r\n }, 0);\r\n }\r\n else {\r\n // Even if null (no callback) this will ensure after the flushImpl finishes waiting\r\n // for a completely idle connection it will attempt to re-flush any queued events on the next cycle\r\n _flushCallbackQueue.push(callback);\r\n }\r\n }\r\n else {\r\n // Now cause all queued events to be sent synchronously\r\n _sendEventsForLatencyAndAbove(1 /* Normal */, 1 /* Synchronous */, sendReason);\r\n if (callback !== null && callback !== undefined) {\r\n callback();\r\n }\r\n }\r\n }\r\n };\r\n _self.setMsaAuthTicket = function (ticket) {\r\n _httpManager.addHeader(strMsaDeviceTicket, ticket);\r\n };\r\n _self.hasEvents = 
_hasEvents;\r\n _self._setTransmitProfile = function (profileName) {\r\n if (_currentProfile !== profileName && _profiles[profileName] !== undefined) {\r\n _clearScheduledTimer();\r\n _currentProfile = profileName;\r\n _scheduleTimer();\r\n }\r\n };\r\n /**\r\n * Batch and send events currently in the queue for the given latency.\r\n * @param latency - Latency for which to send events.\r\n */\r\n function _sendEventsForLatencyAndAbove(latency, sendType, sendReason) {\r\n var queued = _queueBatches(latency, sendType, sendReason);\r\n // Always trigger the request as while the post channel may not have queued additional events, the httpManager may already have waiting events\r\n _httpManager.sendQueuedRequests(sendType, sendReason);\r\n return queued;\r\n }\r\n function _hasEvents() {\r\n return _queueSize > 0;\r\n }\r\n /**\r\n * Try to schedule the timer after which events will be sent. If there are\r\n * no events to be sent, or there is already a timer scheduled, or the\r\n * http manager doesn't have any idle connections this method is no-op.\r\n */\r\n function _scheduleTimer() {\r\n // If we had previously attempted to send requests, but the http manager didn't have any idle connections then the requests where delayed\r\n // so try and requeue then again now\r\n if (_delayedBatchSendLatency >= 0 && _queueBatches(_delayedBatchSendLatency, 0 /* Batched */, _delayedBatchReason)) {\r\n _httpManager.sendQueuedRequests(0 /* Batched */, _delayedBatchReason);\r\n }\r\n if (_immediateQueueSize > 0 && !_immediateTimerId && !_paused) {\r\n // During initialization _profiles enforce that the direct [2] is less than real time [1] timer value\r\n // If the immediateTimeout is disabled the immediate events will be sent with Real Time events\r\n var immediateTimeOut = _profiles[_currentProfile][2];\r\n if (immediateTimeOut >= 0) {\r\n _immediateTimerId = _createTimer(function () {\r\n _immediateTimerId = null;\r\n // Only try to send direct events\r\n 
_sendEventsForLatencyAndAbove(4 /* Immediate */, 0 /* Batched */, 1 /* NormalSchedule */);\r\n _scheduleTimer();\r\n }, immediateTimeOut);\r\n }\r\n }\r\n // During initialization the _profiles enforce that the normal [0] is a multiple of the real time [1] timer value\r\n var timeOut = _profiles[_currentProfile][1];\r\n if (!_scheduledTimerId && !_flushCallbackTimerId && timeOut >= 0 && !_paused) {\r\n if (_hasEvents()) {\r\n _scheduledTimerId = _createTimer(function () {\r\n _scheduledTimerId = null;\r\n _sendEventsForLatencyAndAbove(_timerCount === 0 ? 3 /* RealTime */ : 1 /* Normal */, 0 /* Batched */, 1 /* NormalSchedule */);\r\n // Increment the count for next cycle\r\n _timerCount++;\r\n _timerCount %= 2;\r\n _scheduleTimer();\r\n }, timeOut);\r\n }\r\n else {\r\n _timerCount = 0;\r\n }\r\n }\r\n }\r\n _self._backOffTransmission = function () {\r\n if (_currentBackoffCount < MaxBackoffCount) {\r\n _currentBackoffCount++;\r\n _clearScheduledTimer();\r\n _scheduleTimer();\r\n }\r\n };\r\n _self._clearBackOff = function () {\r\n if (_currentBackoffCount) {\r\n _currentBackoffCount = 0;\r\n _clearScheduledTimer();\r\n _scheduleTimer();\r\n }\r\n };\r\n function _initDefaults() {\r\n _config = null;\r\n _isTeardownCalled = false;\r\n _flushCallbackQueue = [];\r\n _flushCallbackTimerId = null;\r\n _paused = false;\r\n _immediateQueueSize = 0;\r\n _immediateQueueSizeLimit = 500;\r\n _queueSize = 0;\r\n _queueSizeLimit = 10000;\r\n _profiles = {};\r\n _currentProfile = RT_PROFILE;\r\n _scheduledTimerId = null;\r\n _immediateTimerId = null;\r\n _currentBackoffCount = 0;\r\n _timerCount = 0;\r\n _xhrOverride = null;\r\n _batchQueues = {};\r\n _autoFlushEventsLimit = undefined;\r\n // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])\r\n _autoFlushBatchLimit = 0;\r\n _delayedBatchSendLatency = -1;\r\n _delayedBatchReason = null;\r\n _optimizeObject = true;\r\n 
_isPageUnloadTriggered = false;\r\n _disableXhrSync = false;\r\n _maxEventSendAttempts = MaxSendAttempts;\r\n _maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;\r\n _evtNamespace = null;\r\n _timeoutWrapper = createTimeoutWrapper();\r\n _httpManager = new HttpManager(MaxNumberEventPerBatch, MaxConnections, MaxRequestRetriesBeforeBackoff, {\r\n requeue: _requeueEvents,\r\n send: _sendingEvent,\r\n sent: _eventsSentEvent,\r\n drop: _eventsDropped,\r\n rspFail: _eventsResponseFail,\r\n oth: _otherEvent\r\n }, _timeoutWrapper);\r\n _initializeProfiles();\r\n _clearQueues();\r\n _setAutoLimits();\r\n }\r\n function _createTimer(theTimerFunc, timeOut) {\r\n // If the transmission is backed off make the timer at least 1 sec to allow for back off.\r\n if (timeOut === 0 && _currentBackoffCount) {\r\n timeOut = 1;\r\n }\r\n var timerMultiplier = 1000;\r\n if (_currentBackoffCount) {\r\n timerMultiplier = retryPolicyGetMillisToBackoffForRetry(_currentBackoffCount - 1);\r\n }\r\n return _timeoutWrapper.set(theTimerFunc, timeOut * timerMultiplier);\r\n }\r\n function _clearScheduledTimer() {\r\n if (_scheduledTimerId !== null) {\r\n _timeoutWrapper.clear(_scheduledTimerId);\r\n _scheduledTimerId = null;\r\n _timerCount = 0;\r\n }\r\n }\r\n // Try to send all queued events using beacons if available\r\n function _releaseAllQueues(sendType, sendReason) {\r\n _clearScheduledTimer();\r\n // Cancel all flush callbacks\r\n if (_flushCallbackTimerId) {\r\n _timeoutWrapper.clear(_flushCallbackTimerId);\r\n _flushCallbackTimerId = null;\r\n }\r\n if (!_paused) {\r\n // Queue all the remaining requests to be sent. The requests will be sent using HTML5 Beacons if they are available.\r\n _sendEventsForLatencyAndAbove(1 /* Normal */, sendType, sendReason);\r\n }\r\n }\r\n /**\r\n * Add empty queues for all latencies in the inbound queues map. This is called\r\n * when Transmission Manager is being flushed. 
This ensures that new events added\r\n * after flush are stored separately till we flush the current events.\r\n */\r\n function _clearQueues() {\r\n _batchQueues[4 /* Immediate */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[3 /* RealTime */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[2 /* CostDeferred */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[1 /* Normal */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n }\r\n function _getEventBatch(iKey, latency, create) {\r\n var batchQueue = _batchQueues[latency];\r\n if (!batchQueue) {\r\n latency = 1 /* Normal */;\r\n batchQueue = _batchQueues[latency];\r\n }\r\n var eventBatch = batchQueue.iKeyMap[iKey];\r\n if (!eventBatch && create) {\r\n eventBatch = EventBatch.create(iKey);\r\n batchQueue.batches.push(eventBatch);\r\n batchQueue.iKeyMap[iKey] = eventBatch;\r\n }\r\n return eventBatch;\r\n }\r\n function _performAutoFlush(isAsync, doFlush) {\r\n // Only perform the auto flush check if the httpManager has an idle connection and we are not in a backoff situation\r\n if (_httpManager.canSendRequest() && !_currentBackoffCount) {\r\n if (_autoFlushEventsLimit > 0 && _queueSize > _autoFlushEventsLimit) {\r\n // Force flushing\r\n doFlush = true;\r\n }\r\n if (doFlush && _flushCallbackTimerId == null) {\r\n // Auto flush the queue\r\n _self.flush(isAsync, null, 20 /* MaxQueuedEvents */);\r\n }\r\n }\r\n }\r\n function _addEventToProperQueue(event, append) {\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event = optimizeObject(event);\r\n }\r\n var latency = event.latency;\r\n var eventBatch = _getEventBatch(event.iKey, latency, true);\r\n if (eventBatch.addEvent(event)) {\r\n if (latency !== 4 /* Immediate */) {\r\n _queueSize++;\r\n // Check for auto flushing based on total events in the queue, but not for requeued or retry events\r\n if (append && event.sendAttempt === 0) {\r\n // Force the flushing of the batch if the 
batch (specific iKey / latency combination) reaches it's auto flush limit\r\n _performAutoFlush(!event.sync, _autoFlushBatchLimit > 0 && eventBatch.count() >= _autoFlushBatchLimit);\r\n }\r\n }\r\n else {\r\n // Direct events don't need auto flushing as they are scheduled (by default) for immediate delivery\r\n _immediateQueueSize++;\r\n }\r\n return true;\r\n }\r\n return false;\r\n }\r\n function _dropEventWithLatencyOrLess(iKey, latency, currentLatency, dropNumber) {\r\n while (currentLatency <= latency) {\r\n var eventBatch = _getEventBatch(iKey, latency, true);\r\n if (eventBatch && eventBatch.count() > 0) {\r\n // Dropped oldest events from lowest possible latency\r\n var droppedEvents = eventBatch.split(0, dropNumber);\r\n var droppedCount = droppedEvents.count();\r\n if (droppedCount > 0) {\r\n if (currentLatency === 4 /* Immediate */) {\r\n _immediateQueueSize -= droppedCount;\r\n }\r\n else {\r\n _queueSize -= droppedCount;\r\n }\r\n _notifyBatchEvents(strEventsDiscarded, [droppedEvents], EventsDiscardedReason.QueueFull);\r\n return true;\r\n }\r\n }\r\n currentLatency++;\r\n }\r\n // Unable to drop any events -- lets just make sure the queue counts are correct to avoid exhaustion\r\n _resetQueueCounts();\r\n return false;\r\n }\r\n /**\r\n * Internal helper to reset the queue counts, used as a backstop to avoid future queue exhaustion errors\r\n * that might occur because of counting issues.\r\n */\r\n function _resetQueueCounts() {\r\n var immediateQueue = 0;\r\n var normalQueue = 0;\r\n var _loop_1 = function (latency) {\r\n var batchQueue = _batchQueues[latency];\r\n if (batchQueue && batchQueue.batches) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n if (latency === 4 /* Immediate */) {\r\n immediateQueue += theBatch.count();\r\n }\r\n else {\r\n normalQueue += theBatch.count();\r\n }\r\n });\r\n }\r\n };\r\n for (var latency = 1 /* Normal */; latency <= 4 /* Immediate */; latency++) {\r\n _loop_1(latency);\r\n }\r\n _queueSize = 
normalQueue;\r\n _immediateQueueSize = immediateQueue;\r\n }\r\n function _queueBatches(latency, sendType, sendReason) {\r\n var eventsQueued = false;\r\n var isAsync = sendType === 0 /* Batched */;\r\n // Only queue batches (to the HttpManager) if this is a sync request or the httpManager has an idle connection\r\n // Thus keeping the events within the PostChannel until the HttpManager has a connection available\r\n // This is so we can drop \"old\" events if the queue is getting full because we can't successfully send events\r\n if (!isAsync || _httpManager.canSendRequest()) {\r\n doPerf(_self.core, function () { return \"PostChannel._queueBatches\"; }, function () {\r\n var droppedEvents = [];\r\n var latencyToProcess = 4 /* Immediate */;\r\n while (latencyToProcess >= latency) {\r\n var batchQueue = _batchQueues[latencyToProcess];\r\n if (batchQueue && batchQueue.batches && batchQueue.batches.length > 0) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n // Add the batch to the http manager to send the requests\r\n if (!_httpManager.addBatch(theBatch)) {\r\n // The events from this iKey are being dropped (killed)\r\n droppedEvents = droppedEvents.concat(theBatch.events());\r\n }\r\n else {\r\n eventsQueued = eventsQueued || (theBatch && theBatch.count() > 0);\r\n }\r\n if (latencyToProcess === 4 /* Immediate */) {\r\n _immediateQueueSize -= theBatch.count();\r\n }\r\n else {\r\n _queueSize -= theBatch.count();\r\n }\r\n });\r\n // Remove all batches from this Queue\r\n batchQueue.batches = [];\r\n batchQueue.iKeyMap = {};\r\n }\r\n latencyToProcess--;\r\n }\r\n if (droppedEvents.length > 0) {\r\n _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.KillSwitch);\r\n }\r\n if (eventsQueued && _delayedBatchSendLatency >= latency) {\r\n // We have queued events at the same level as the delayed values so clear the setting\r\n _delayedBatchSendLatency = -1;\r\n _delayedBatchReason = 0 /* Undefined */;\r\n }\r\n }, function () { return ({ 
latency: latency, sendType: sendType, sendReason: sendReason }); }, !isAsync);\r\n }\r\n else {\r\n // remember the min latency so that we can re-trigger later\r\n _delayedBatchSendLatency = _delayedBatchSendLatency >= 0 ? Math.min(_delayedBatchSendLatency, latency) : latency;\r\n _delayedBatchReason = Math.max(_delayedBatchReason, sendReason);\r\n }\r\n return eventsQueued;\r\n }\r\n /**\r\n * This is the callback method is called as part of the manual flushing process.\r\n * @param callback\r\n * @param sendReason\r\n */\r\n function _flushImpl(callback, sendReason) {\r\n // Add any additional queued events and cause all queued events to be sent asynchronously\r\n _sendEventsForLatencyAndAbove(1 /* Normal */, 0 /* Batched */, sendReason);\r\n _waitForIdleManager(function () {\r\n // Only called AFTER the httpManager does not have any outstanding requests\r\n if (callback) {\r\n callback();\r\n }\r\n if (_flushCallbackQueue.length > 0) {\r\n _flushCallbackTimerId = _createTimer(function () { return _flushImpl(_flushCallbackQueue.shift(), sendReason); }, 0);\r\n }\r\n else {\r\n // No more flush requests\r\n _flushCallbackTimerId = null;\r\n if (_hasEvents()) {\r\n // We still have events, so restart the normal timer schedule\r\n _scheduleTimer();\r\n }\r\n }\r\n });\r\n }\r\n function _waitForIdleManager(callback) {\r\n if (_httpManager.isCompletelyIdle()) {\r\n callback();\r\n }\r\n else {\r\n _flushCallbackTimerId = _createTimer(function () {\r\n _waitForIdleManager(callback);\r\n }, FlushCheckTimer);\r\n }\r\n }\r\n /**\r\n * Resets the transmit profiles to the default profiles of Real Time, Near Real Time\r\n * and Best Effort. 
This removes all the custom profiles that were loaded.\r\n */\r\n function _resetTransmitProfiles() {\r\n _clearScheduledTimer();\r\n _initializeProfiles();\r\n _currentProfile = RT_PROFILE;\r\n _scheduleTimer();\r\n }\r\n function _initializeProfiles() {\r\n _profiles = {};\r\n _profiles[RT_PROFILE] = [2, 1, 0];\r\n _profiles[NRT_PROFILE] = [6, 3, 0];\r\n _profiles[BE_PROFILE] = [18, 9, 0];\r\n }\r\n /**\r\n * The notification handler for requeue events\r\n * @ignore\r\n */\r\n function _requeueEvents(batches, reason) {\r\n var droppedEvents = [];\r\n var maxSendAttempts = _maxEventSendAttempts;\r\n if (_isPageUnloadTriggered) {\r\n // If a page unlaod has been triggered reduce the number of times we try to \"retry\"\r\n maxSendAttempts = _maxUnloadEventSendAttempts;\r\n }\r\n arrForEach(batches, function (theBatch) {\r\n if (theBatch && theBatch.count() > 0) {\r\n arrForEach(theBatch.events(), function (theEvent) {\r\n if (theEvent) {\r\n // Check if the request being added back is for a sync event in which case mark it no longer a sync event\r\n if (theEvent.sync) {\r\n theEvent.latency = 4 /* Immediate */;\r\n theEvent.sync = false;\r\n }\r\n if (theEvent.sendAttempt < maxSendAttempts) {\r\n // Reset the event timings\r\n setProcessTelemetryTimings(theEvent, _self.identifier);\r\n _addEventToQueues(theEvent, false);\r\n }\r\n else {\r\n droppedEvents.push(theEvent);\r\n }\r\n }\r\n });\r\n }\r\n });\r\n if (droppedEvents.length > 0) {\r\n _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.NonRetryableStatus);\r\n }\r\n if (_isPageUnloadTriggered) {\r\n // Unload event has been received so we need to try and flush new events\r\n _releaseAllQueues(2 /* SendBeacon */, 2 /* Unload */);\r\n }\r\n }\r\n function _callNotification(evtName, theArgs) {\r\n var manager = (_self._notificationManager || {});\r\n var notifyFunc = manager[evtName];\r\n if (notifyFunc) {\r\n try {\r\n notifyFunc.apply(manager, theArgs);\r\n }\r\n catch (e) {\r\n 
_throwInternal(_self.diagLog(), 1 /* CRITICAL */, 74 /* NotificationException */, evtName + \" notification failed: \" + e);\r\n }\r\n }\r\n }\r\n function _notifyEvents(evtName, theEvents) {\r\n var extraArgs = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n extraArgs[_i - 2] = arguments[_i];\r\n }\r\n if (theEvents && theEvents.length > 0) {\r\n _callNotification(evtName, [theEvents].concat(extraArgs));\r\n }\r\n }\r\n function _notifyBatchEvents(evtName, batches) {\r\n var extraArgs = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n extraArgs[_i - 2] = arguments[_i];\r\n }\r\n if (batches && batches.length > 0) {\r\n arrForEach(batches, function (theBatch) {\r\n if (theBatch && theBatch.count() > 0) {\r\n _callNotification(evtName, [theBatch.events()].concat(extraArgs));\r\n }\r\n });\r\n }\r\n }\r\n /**\r\n * The notification handler for when batches are about to be sent\r\n * @ignore\r\n */\r\n function _sendingEvent(batches, reason, isSyncRequest) {\r\n if (batches && batches.length > 0) {\r\n _callNotification(\"eventsSendRequest\", [(reason >= 1000 /* SendingUndefined */ && reason <= 1999 /* SendingEventMax */ ?\r\n reason - 1000 /* SendingUndefined */ :\r\n 0 /* Undefined */), isSyncRequest !== true]);\r\n }\r\n }\r\n /**\r\n * This event represents that a batch of events have been successfully sent and a response received\r\n * @param batches The notification handler for when the batches have been successfully sent\r\n * @param reason For this event the reason will always be EventBatchNotificationReason.Complete\r\n */\r\n function _eventsSentEvent(batches, reason) {\r\n _notifyBatchEvents(\"eventsSent\", batches, reason);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _eventsDropped(batches, reason) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, (reason >= 8000 /* EventsDropped */ && reason <= 8999 /* EventsDroppedMax */ ?\r\n reason - 8000 /* EventsDropped */ :\r\n 
EventsDiscardedReason.Unknown));\r\n }\r\n function _eventsResponseFail(batches) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.NonRetryableStatus);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _otherEvent(batches, reason) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.Unknown);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _setAutoLimits() {\r\n if (!_config || !_config.disableAutoBatchFlushLimit) {\r\n _autoFlushBatchLimit = Math.max(MaxNumberEventPerBatch * (MaxConnections + 1), _queueSizeLimit / 6);\r\n }\r\n else {\r\n _autoFlushBatchLimit = 0;\r\n }\r\n }\r\n // Provided for backward compatibility they are not \"expected\" to be in current use but they are public\r\n objDefineAccessors(_self, \"_setTimeoutOverride\", function () { return _timeoutWrapper.set; }, function (value) {\r\n // Recreate the timeout wrapper\r\n _timeoutWrapper = createTimeoutWrapper(value, _timeoutWrapper.clear);\r\n });\r\n objDefineAccessors(_self, \"_clearTimeoutOverride\", function () { return _timeoutWrapper.clear; }, function (value) {\r\n // Recreate the timeout wrapper\r\n _timeoutWrapper = createTimeoutWrapper(_timeoutWrapper.set, value);\r\n });\r\n });\r\n return _this;\r\n }\r\n /**\r\n * Start the queue manager to batch and send events via post.\r\n * @param config - The core configuration.\r\n */\r\n PostChannel.prototype.initialize = function (coreConfig, core, extensions) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Add an event to the appropriate inbound queue based on its latency.\r\n * @param ev - The event to be added to the queue.\r\n * @param itemCtx - This is the context for the current request, ITelemetryPlugin instances\r\n * can optionally use this to access the current core instance or define / pass additional 
information\r\n * to later plugins (vs appending items to the telemetry item)\r\n */\r\n PostChannel.prototype.processTelemetry = function (ev, itemCtx) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Sets the event queue limits at runtime (after initialization), if the number of queued events is greater than the\r\n * eventLimit or autoFlushLimit then a flush() operation will be scheduled.\r\n * @param eventLimit The number of events that can be kept in memory before the SDK starts to drop events. If the value passed is less than or\r\n * equal to zero the value will be reset to the default (10,000).\r\n * @param autoFlushLimit When defined, once this number of events has been queued the system perform a flush() to send the queued events\r\n * without waiting for the normal schedule timers. Passing undefined, null or a value less than or equal to zero will disable the auto flush.\r\n */\r\n PostChannel.prototype.setEventQueueLimits = function (eventLimit, autoFlushLimit) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Pause the transmission of any requests\r\n */\r\n PostChannel.prototype.pause = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Resumes transmission of events.\r\n */\r\n PostChannel.prototype.resume = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Add handler to be executed with request response text.\r\n */\r\n PostChannel.prototype.addResponseHandler = function (responseHanlder) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Flush to send data immediately; channel should default to sending data asynchronously\r\n * @param async - send data asynchronously when true\r\n * @param callback - if specified, notify 
caller when send is complete\r\n */\r\n PostChannel.prototype.flush = function (async, callback, sendReason) {\r\n if (async === void 0) { async = true; }\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Set AuthMsaDeviceTicket header\r\n * @param ticket - Ticket value.\r\n */\r\n PostChannel.prototype.setMsaAuthTicket = function (ticket) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Check if there are any events waiting to be scheduled for sending.\r\n * @returns True if there are events, false otherwise.\r\n */\r\n PostChannel.prototype.hasEvents = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return false;\r\n };\r\n /**\r\n * Load custom transmission profiles. Each profile should have timers for real time, and normal and can\r\n * optionally specify the immediate latency time in ms (defaults to 0 when not defined). Each profile should\r\n * make sure that a each normal latency timer is a multiple of the real-time latency and the immediate\r\n * is smaller than the real-time.\r\n * Setting the timer value to -1 means that the events for that latency will not be scheduled to be sent.\r\n * Note that once a latency has been set to not send, all latencies below it will also not be sent. 
The\r\n * timers should be in the form of [normal, high, [immediate]].\r\n * e.g Custom:\r\n * [10,5] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms\r\n * [10,5,1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 1ms\r\n * [10,5,0] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms\r\n * [10,5,-1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate events will not be\r\n * scheduled on their own and but they will be included with real-time or normal events as the first events in a batch.\r\n * This also removes any previously loaded custom profiles.\r\n * @param profiles - A dictionary containing the transmit profiles.\r\n */\r\n PostChannel.prototype._loadTransmitProfiles = function (profiles) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Set the transmit profile to be used. This will change the transmission timers\r\n * based on the transmit profile.\r\n * @param profileName - The name of the transmit profile to be used.\r\n */\r\n PostChannel.prototype._setTransmitProfile = function (profileName) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Backs off transmission. 
This exponentially increases all the timers.\r\n */\r\n PostChannel.prototype._backOffTransmission = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Clears backoff for transmission.\r\n */\r\n PostChannel.prototype._clearBackOff = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n return PostChannel;\r\n}(BaseTelemetryPlugin));\r\nexport default PostChannel;\r\n//# sourceMappingURL=PostChannel.js.map"],"names":[],"mappings":";;;;;AAAA,gFAAkC;AAClC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;A
ACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;A
ACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;;;;;;;;;;wDA6GM;AACN;AACA;AACA;AACA"}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TimeoutOverrideWrapper.ts
|
|
3
|
+
* @author Nev Wylie (newylie)
|
|
4
|
+
* @copyright Microsoft 2022
|
|
5
|
+
* Simple internal timeout wrapper
|
|
6
|
+
*/
|
|
7
|
+
export declare type TimeoutSetFunc<T = any> = (callback: (...args: any[]) => void, ms: number, ...args: any[]) => T;
|
|
8
|
+
export declare type TimeoutClearFunc<T = any> = (timeoutId?: T) => void;
|
|
9
|
+
export interface ITimeoutOverrideWrapper<T = any> {
|
|
10
|
+
set: TimeoutSetFunc<T>;
|
|
11
|
+
clear: TimeoutClearFunc<T>;
|
|
12
|
+
}
|
|
13
|
+
export declare function defaultSetTimeout<T = any>(callback: (...args: any[]) => void, ms: number, ...args: any[]): T;
|
|
14
|
+
export declare function defaultClearTimeout<T = any>(timeoutId?: T): void;
|
|
15
|
+
export declare function createTimeoutWrapper<T = any>(argSetTimeout?: TimeoutSetFunc<T>, argClearTimeout?: TimeoutClearFunc<T>): {
|
|
16
|
+
set: TimeoutSetFunc<T>;
|
|
17
|
+
clear: TimeoutClearFunc<T>;
|
|
18
|
+
};
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* 1DS JS SDK POST plugin, 3.2.2
|
|
3
|
+
* Copyright (c) Microsoft and contributors. All rights reserved.
|
|
4
|
+
* (Microsoft Internal Only)
|
|
5
|
+
*/
|
|
6
|
+
/**
|
|
7
|
+
* TimeoutOverrideWrapper.ts
|
|
8
|
+
* @author Nev Wylie (newylie)
|
|
9
|
+
* @copyright Microsoft 2022
|
|
10
|
+
* Simple internal timeout wrapper
|
|
11
|
+
*/
|
|
12
|
+
export function defaultSetTimeout(callback, ms) {
|
|
13
|
+
var args = [];
|
|
14
|
+
for (var _i = 2; _i < arguments.length; _i++) {
|
|
15
|
+
args[_i - 2] = arguments[_i];
|
|
16
|
+
}
|
|
17
|
+
return setTimeout(callback, ms, args);
|
|
18
|
+
}
|
|
19
|
+
export function defaultClearTimeout(timeoutId) {
|
|
20
|
+
clearTimeout(timeoutId);
|
|
21
|
+
}
|
|
22
|
+
export function createTimeoutWrapper(argSetTimeout, argClearTimeout) {
|
|
23
|
+
return {
|
|
24
|
+
set: argSetTimeout || defaultSetTimeout,
|
|
25
|
+
clear: argClearTimeout || defaultClearTimeout
|
|
26
|
+
};
|
|
27
|
+
}
|
|
28
|
+
//# sourceMappingURL=TimeoutOverrideWrapper.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"TimeoutOverrideWrapper.js.map","sources":["TimeoutOverrideWrapper.js"],"sourcesContent":["/**\r\n * TimeoutOverrideWrapper.ts\r\n * @author Nev Wylie (newylie)\r\n * @copyright Microsoft 2022\r\n * Simple internal timeout wrapper\r\n */\r\nexport function defaultSetTimeout(callback, ms) {\r\n var args = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n args[_i - 2] = arguments[_i];\r\n }\r\n return setTimeout(callback, ms, args);\r\n}\r\nexport function defaultClearTimeout(timeoutId) {\r\n clearTimeout(timeoutId);\r\n}\r\nexport function createTimeoutWrapper(argSetTimeout, argClearTimeout) {\r\n return {\r\n set: argSetTimeout || defaultSetTimeout,\r\n clear: argClearTimeout || defaultClearTimeout\r\n };\r\n}\r\n//# sourceMappingURL=TimeoutOverrideWrapper.js.map"],"names":[],"mappings":";;;;;AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA"}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@microsoft/1ds-post-js",
|
|
3
|
-
"version": "3.2.
|
|
3
|
+
"version": "3.2.2",
|
|
4
4
|
"description": "Microsoft Application Insights JavaScript SDK - 1ds-post-js extensions",
|
|
5
5
|
"author": "Microsoft Application Insights Team",
|
|
6
6
|
"homepage": "https://github.com/microsoft/ApplicationInsights-JS#readme",
|
|
@@ -15,8 +15,8 @@
|
|
|
15
15
|
},
|
|
16
16
|
"dependencies": {
|
|
17
17
|
"@microsoft/applicationinsights-shims": "^2.0.1",
|
|
18
|
-
"@microsoft/dynamicproto-js": "^1.1.
|
|
19
|
-
"@microsoft/1ds-core-js": "3.2.
|
|
18
|
+
"@microsoft/dynamicproto-js": "^1.1.6",
|
|
19
|
+
"@microsoft/1ds-core-js": "3.2.2"
|
|
20
20
|
},
|
|
21
21
|
"devDependencies": {
|
|
22
22
|
"grunt": "^1.4.1",
|
package/src/HttpManager.ts
CHANGED
|
@@ -30,6 +30,7 @@ import {
|
|
|
30
30
|
strKillTokensHeader, strMsaDeviceTicket, strMsfpc, strNoResponseBody, strOther, strRequeue, strResponseFail, strSending, strTimeDeltaHeader,
|
|
31
31
|
strTimeDeltaToApply, strUploadTime
|
|
32
32
|
} from "./Constants";
|
|
33
|
+
import { ITimeoutOverrideWrapper } from "./TimeoutOverrideWrapper";
|
|
33
34
|
|
|
34
35
|
const strSendAttempt = "sendAttempt";
|
|
35
36
|
|
|
@@ -141,7 +142,7 @@ export class HttpManager {
|
|
|
141
142
|
* @constructor
|
|
142
143
|
* @param requestQueue - The queue that contains the requests to be sent.
|
|
143
144
|
*/
|
|
144
|
-
constructor(maxEventsPerBatch: number, maxConnections: number, maxRequestRetriesBeforeBackoff: number, actions: BatchNotificationActions) {
|
|
145
|
+
constructor(maxEventsPerBatch: number, maxConnections: number, maxRequestRetriesBeforeBackoff: number, actions: BatchNotificationActions, timeoutOverride: ITimeoutOverrideWrapper) {
|
|
145
146
|
let _urlString: string = "?cors=true&" + strContentTypeHeader.toLowerCase() + "=" + defaultContentType;
|
|
146
147
|
let _killSwitch: EVTKillSwitch = new EVTKillSwitch();
|
|
147
148
|
let _paused = false;
|
|
@@ -313,7 +314,7 @@ export class HttpManager {
|
|
|
313
314
|
if (sync) {
|
|
314
315
|
xdr.send(payload.data);
|
|
315
316
|
} else {
|
|
316
|
-
|
|
317
|
+
timeoutOverride.set(() => {
|
|
317
318
|
xdr.send(payload.data);
|
|
318
319
|
}, 0);
|
|
319
320
|
}
|
|
@@ -385,7 +386,7 @@ export class HttpManager {
|
|
|
385
386
|
|
|
386
387
|
if (!responseHandled && payload.timeout > 0) {
|
|
387
388
|
// Simulate timeout
|
|
388
|
-
|
|
389
|
+
timeoutOverride.set(() => {
|
|
389
390
|
if (!responseHandled) {
|
|
390
391
|
// Assume a 500 response (which will cause a retry)
|
|
391
392
|
responseHandled = true;
|
|
@@ -1135,7 +1136,7 @@ export class HttpManager {
|
|
|
1135
1136
|
if (isSync) {
|
|
1136
1137
|
cb();
|
|
1137
1138
|
} else {
|
|
1138
|
-
|
|
1139
|
+
timeoutOverride.set(cb, interval);
|
|
1139
1140
|
}
|
|
1140
1141
|
}
|
|
1141
1142
|
|
package/src/PostChannel.ts
CHANGED
|
@@ -3,17 +3,17 @@
|
|
|
3
3
|
* @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)
|
|
4
4
|
* @copyright Microsoft 2018-2020
|
|
5
5
|
*/
|
|
6
|
+
import dynamicProto from "@microsoft/dynamicproto-js";
|
|
6
7
|
import {
|
|
7
8
|
BaseTelemetryPlugin, IChannelControls, IExtendedConfiguration, IExtendedAppInsightsCore,
|
|
8
9
|
EventLatencyValue, NotificationManager, EventsDiscardedReason, IPlugin, ITelemetryItem,
|
|
9
|
-
IAppInsightsCore, isValueAssigned, setProcessTelemetryTimings,
|
|
10
|
+
IAppInsightsCore, isValueAssigned, setProcessTelemetryTimings,
|
|
10
11
|
IProcessTelemetryContext, SendRequestReason, arrForEach, eLoggingSeverity, _eExtendedInternalMessageId,
|
|
11
12
|
doPerf, objForEachKey, optimizeObject, isChromium, getWindow, EventSendType, isNumber, mergeEvtNamespace,
|
|
12
13
|
createUniqueNamespace, IProcessTelemetryUnloadContext, ITelemetryUnloadState,
|
|
13
14
|
addPageUnloadEventListener, addPageHideEventListener, addPageShowEventListener,
|
|
14
15
|
removePageUnloadEventListener, removePageHideEventListener, removePageShowEventListener,
|
|
15
|
-
_throwInternal,
|
|
16
|
-
_eInternalMessageId
|
|
16
|
+
_throwInternal, _eInternalMessageId, objDefineAccessors
|
|
17
17
|
} from "@microsoft/1ds-core-js";
|
|
18
18
|
import {
|
|
19
19
|
IChannelConfiguration, RT_PROFILE, NRT_PROFILE, IPostChannel,
|
|
@@ -23,8 +23,8 @@ import {
|
|
|
23
23
|
import { EventBatch } from "./EventBatch";
|
|
24
24
|
import { HttpManager } from "./HttpManager";
|
|
25
25
|
import { retryPolicyGetMillisToBackoffForRetry } from "./RetryPolicy";
|
|
26
|
-
import dynamicProto from "@microsoft/dynamicproto-js";
|
|
27
26
|
import { strMsaDeviceTicket } from "./Constants";
|
|
27
|
+
import { createTimeoutWrapper, ITimeoutOverrideWrapper, TimeoutClearFunc, TimeoutSetFunc } from "./TimeoutOverrideWrapper";
|
|
28
28
|
|
|
29
29
|
const FlushCheckTimer = 0.250; // This needs to be in seconds, so this is 250ms
|
|
30
30
|
const MaxNumberEventPerBatch = 500;
|
|
@@ -32,7 +32,6 @@ const EventsDroppedAtOneTime = 20;
|
|
|
32
32
|
const MaxSendAttempts = 6;
|
|
33
33
|
const MaxSyncUnloadSendAttempts = 2; // Assuming 2 based on beforeunload and unload
|
|
34
34
|
const MaxBackoffCount = 4;
|
|
35
|
-
const globalContext = isWindowObjectAvailable ? window : this;
|
|
36
35
|
const MaxConnections = 2;
|
|
37
36
|
const MaxRequestRetriesBeforeBackoff = 1;
|
|
38
37
|
|
|
@@ -66,7 +65,11 @@ export default class PostChannel extends BaseTelemetryPlugin implements IChannel
|
|
|
66
65
|
public priority = 1011;
|
|
67
66
|
public version = "#version#";
|
|
68
67
|
public _notificationManager: NotificationManager | undefined;
|
|
68
|
+
|
|
69
|
+
/** @deprecated This property is not intended to be used directly please let us know if you have taken a dependency on this property as it may be removed in a future release */
|
|
69
70
|
public _setTimeoutOverride: typeof setTimeout;
|
|
71
|
+
|
|
72
|
+
/** @deprecated This property is not intended to be used directly please let us know if you have taken a dependency on this property as it may be removed in a future release */
|
|
70
73
|
public _clearTimeoutOverride: typeof clearTimeout;
|
|
71
74
|
|
|
72
75
|
constructor() {
|
|
@@ -101,6 +104,7 @@ export default class PostChannel extends BaseTelemetryPlugin implements IChannel
|
|
|
101
104
|
let _maxEventSendAttempts: number = MaxSendAttempts;
|
|
102
105
|
let _maxUnloadEventSendAttempts: number = MaxSyncUnloadSendAttempts;
|
|
103
106
|
let _evtNamespace: string | string[];
|
|
107
|
+
let _timeoutWrapper: ITimeoutOverrideWrapper;
|
|
104
108
|
|
|
105
109
|
dynamicProto(PostChannel, this, (_self, _base) => {
|
|
106
110
|
_initDefaults();
|
|
@@ -121,8 +125,7 @@ export default class PostChannel extends BaseTelemetryPlugin implements IChannel
|
|
|
121
125
|
let ctx = _self._getTelCtx();
|
|
122
126
|
coreConfig.extensionConfig[_self.identifier] = coreConfig.extensionConfig[_self.identifier] || {};
|
|
123
127
|
_config = ctx.getExtCfg(_self.identifier);
|
|
124
|
-
|
|
125
|
-
_self._clearTimeoutOverride = _config.clearTimeoutOverride ? _config.clearTimeoutOverride : clearTimeout.bind(globalContext);
|
|
128
|
+
_timeoutWrapper = createTimeoutWrapper(_config.setTimeoutOverride, _config.clearTimeoutOverride);
|
|
126
129
|
|
|
127
130
|
// Only try and use the optimizeObject() if this appears to be a chromium based browser and it has not been explicitly disabled
|
|
128
131
|
_optimizeObject = !_config.disableOptimizeObj && isChromium();
|
|
@@ -576,6 +579,7 @@ export default class PostChannel extends BaseTelemetryPlugin implements IChannel
|
|
|
576
579
|
_maxEventSendAttempts = MaxSendAttempts;
|
|
577
580
|
_maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;
|
|
578
581
|
_evtNamespace = null;
|
|
582
|
+
_timeoutWrapper = createTimeoutWrapper();
|
|
579
583
|
_httpManager = new HttpManager(MaxNumberEventPerBatch, MaxConnections, MaxRequestRetriesBeforeBackoff, {
|
|
580
584
|
requeue: _requeueEvents,
|
|
581
585
|
send: _sendingEvent,
|
|
@@ -583,7 +587,7 @@ export default class PostChannel extends BaseTelemetryPlugin implements IChannel
|
|
|
583
587
|
drop: _eventsDropped,
|
|
584
588
|
rspFail: _eventsResponseFail,
|
|
585
589
|
oth: _otherEvent
|
|
586
|
-
});
|
|
590
|
+
}, _timeoutWrapper);
|
|
587
591
|
|
|
588
592
|
_initializeProfiles();
|
|
589
593
|
_clearQueues();
|
|
@@ -601,11 +605,11 @@ export default class PostChannel extends BaseTelemetryPlugin implements IChannel
|
|
|
601
605
|
timerMultiplier = retryPolicyGetMillisToBackoffForRetry(_currentBackoffCount - 1);
|
|
602
606
|
}
|
|
603
607
|
|
|
604
|
-
return
|
|
608
|
+
return _timeoutWrapper.set(theTimerFunc, timeOut * timerMultiplier);
|
|
605
609
|
}
|
|
606
610
|
function _clearScheduledTimer() {
|
|
607
611
|
if (_scheduledTimerId !== null) {
|
|
608
|
-
|
|
612
|
+
_timeoutWrapper.clear(_scheduledTimerId);
|
|
609
613
|
_scheduledTimerId = null;
|
|
610
614
|
_timerCount = 0;
|
|
611
615
|
}
|
|
@@ -617,7 +621,7 @@ export default class PostChannel extends BaseTelemetryPlugin implements IChannel
|
|
|
617
621
|
|
|
618
622
|
// Cancel all flush callbacks
|
|
619
623
|
if (_flushCallbackTimerId) {
|
|
620
|
-
|
|
624
|
+
_timeoutWrapper.clear(_flushCallbackTimerId);
|
|
621
625
|
_flushCallbackTimerId = null;
|
|
622
626
|
}
|
|
623
627
|
|
|
@@ -1008,6 +1012,21 @@ export default class PostChannel extends BaseTelemetryPlugin implements IChannel
|
|
|
1008
1012
|
_autoFlushBatchLimit = 0;
|
|
1009
1013
|
}
|
|
1010
1014
|
}
|
|
1015
|
+
|
|
1016
|
+
// Provided for backward compatibility they are not "expected" to be in current use but they are public
|
|
1017
|
+
objDefineAccessors(_self, "_setTimeoutOverride",
|
|
1018
|
+
() => _timeoutWrapper.set,
|
|
1019
|
+
(value: TimeoutSetFunc<any>) => {
|
|
1020
|
+
// Recreate the timeout wrapper
|
|
1021
|
+
_timeoutWrapper = createTimeoutWrapper(value, _timeoutWrapper.clear);
|
|
1022
|
+
});
|
|
1023
|
+
|
|
1024
|
+
objDefineAccessors(_self, "_clearTimeoutOverride",
|
|
1025
|
+
() => _timeoutWrapper.clear,
|
|
1026
|
+
(value: TimeoutClearFunc<any>) => {
|
|
1027
|
+
// Recreate the timeout wrapper
|
|
1028
|
+
_timeoutWrapper = createTimeoutWrapper(_timeoutWrapper.set, value);
|
|
1029
|
+
});
|
|
1011
1030
|
});
|
|
1012
1031
|
}
|
|
1013
1032
|
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TimeoutOverrideWrapper.ts
|
|
3
|
+
* @author Nev Wylie (newylie)
|
|
4
|
+
* @copyright Microsoft 2022
|
|
5
|
+
* Simple internal timeout wrapper
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
/** Signature of a setTimeout-compatible scheduling function; returns a timer handle of type T. */
export type TimeoutSetFunc<T = any> = (callback: (...args: any[]) => void, ms: number, ...args: any[]) => T;

/** Signature of a clearTimeout-compatible cancellation function taking the handle produced by the paired set function. */
export type TimeoutClearFunc<T = any> = (timeoutId?: T) => void;

/**
 * A matched pair of set/clear timer functions used to override the
 * global setTimeout/clearTimeout.
 */
export interface ITimeoutOverrideWrapper<T = any> {
    set: TimeoutSetFunc<T>;
    clear: TimeoutClearFunc<T>;
}
|
|
15
|
+
|
|
16
|
+
export function defaultSetTimeout<T = any>(callback: (...args: any[]) => void, ms: number, ...args: any[]): T {
|
|
17
|
+
return setTimeout(callback, ms, args) as any;
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
export function defaultClearTimeout<T = any>(timeoutId?: T): void {
|
|
21
|
+
clearTimeout(timeoutId as any);
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
export function createTimeoutWrapper<T = any>(argSetTimeout?: TimeoutSetFunc<T>, argClearTimeout?: TimeoutClearFunc<T>) {
|
|
25
|
+
return {
|
|
26
|
+
set: argSetTimeout || defaultSetTimeout,
|
|
27
|
+
clear: argClearTimeout || defaultClearTimeout
|
|
28
|
+
}
|
|
29
|
+
}
|