@microsoft/1ds-post-js 4.0.1 → 4.0.2-nightly3.2307-25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/README.md +10 -26
  2. package/bundle/es5/{ms.post-4.0.1.gbl.js → ms.post-4.0.2-nightly3.2307-25.gbl.js} +507 -426
  3. package/bundle/es5/ms.post-4.0.2-nightly3.2307-25.gbl.js.map +1 -0
  4. package/bundle/es5/ms.post-4.0.2-nightly3.2307-25.gbl.min.js +7 -0
  5. package/bundle/es5/ms.post-4.0.2-nightly3.2307-25.gbl.min.js.map +1 -0
  6. package/bundle/es5/ms.post-4.0.2-nightly3.2307-25.integrity.json +46 -0
  7. package/bundle/es5/{ms.post-4.0.1.js → ms.post-4.0.2-nightly3.2307-25.js} +507 -426
  8. package/bundle/es5/ms.post-4.0.2-nightly3.2307-25.js.map +1 -0
  9. package/bundle/es5/ms.post-4.0.2-nightly3.2307-25.min.js +7 -0
  10. package/bundle/es5/ms.post-4.0.2-nightly3.2307-25.min.js.map +1 -0
  11. package/bundle/es5/ms.post.gbl.js +506 -425
  12. package/bundle/es5/ms.post.gbl.js.map +1 -1
  13. package/bundle/es5/ms.post.gbl.min.js +2 -2
  14. package/bundle/es5/ms.post.gbl.min.js.map +1 -1
  15. package/bundle/es5/ms.post.integrity.json +17 -17
  16. package/bundle/es5/ms.post.js +506 -425
  17. package/bundle/es5/ms.post.js.map +1 -1
  18. package/bundle/es5/ms.post.min.js +2 -2
  19. package/bundle/es5/ms.post.min.js.map +1 -1
  20. package/dist/es5/ms.post.js +504 -423
  21. package/dist/es5/ms.post.js.map +1 -1
  22. package/dist/es5/ms.post.min.js +2 -2
  23. package/dist/es5/ms.post.min.js.map +1 -1
  24. package/dist-es5/BatchNotificationActions.js +1 -1
  25. package/dist-es5/ClockSkewManager.js +8 -8
  26. package/dist-es5/ClockSkewManager.js.map +1 -1
  27. package/dist-es5/DataModels.js +1 -1
  28. package/dist-es5/EventBatch.js +13 -12
  29. package/dist-es5/EventBatch.js.map +1 -1
  30. package/dist-es5/HttpManager.js +173 -173
  31. package/dist-es5/HttpManager.js.map +1 -1
  32. package/dist-es5/Index.js +3 -3
  33. package/dist-es5/Index.js.map +1 -1
  34. package/dist-es5/InternalConstants.js +1 -1
  35. package/dist-es5/KillSwitch.js +8 -8
  36. package/dist-es5/KillSwitch.js.map +1 -1
  37. package/dist-es5/PostChannel.js +92 -90
  38. package/dist-es5/PostChannel.js.map +1 -1
  39. package/dist-es5/RetryPolicy.js +1 -1
  40. package/dist-es5/Serializer.js +4 -5
  41. package/dist-es5/Serializer.js.map +1 -1
  42. package/dist-es5/TimeoutOverrideWrapper.js +1 -1
  43. package/dist-es5/__DynamicConstants.js +83 -0
  44. package/dist-es5/__DynamicConstants.js.map +1 -0
  45. package/dist-es5/typings/XDomainRequest.js +1 -1
  46. package/package.json +39 -23
  47. package/types/1ds-post-js.d.ts +5 -3
  48. package/types/1ds-post-js.namespaced.d.ts +5 -3
  49. package/bundle/es5/ms.post-4.0.1.gbl.js.map +0 -1
  50. package/bundle/es5/ms.post-4.0.1.gbl.min.js +0 -7
  51. package/bundle/es5/ms.post-4.0.1.gbl.min.js.map +0 -1
  52. package/bundle/es5/ms.post-4.0.1.integrity.json +0 -46
  53. package/bundle/es5/ms.post-4.0.1.js.map +0 -1
  54. package/bundle/es5/ms.post-4.0.1.min.js +0 -7
  55. package/bundle/es5/ms.post-4.0.1.min.js.map +0 -1
@@ -1 +1 @@
1
- {"version":3,"file":"PostChannel.js.map","sources":["PostChannel.js"],"sourcesContent":["import { __extends } from \"tslib\";\r\n/**\r\n* PostManager.ts\r\n* @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)\r\n* @copyright Microsoft 2018-2020\r\n*/\r\nimport dynamicProto from \"@microsoft/dynamicproto-js\";\r\nimport { BaseTelemetryPlugin, EventsDiscardedReason, _throwInternal, addPageHideEventListener, addPageShowEventListener, addPageUnloadEventListener, arrForEach, createProcessTelemetryContext, createUniqueNamespace, doPerf, getWindow, isChromium, isGreaterThanZero, isNumber, mergeEvtNamespace, objForEachKey, onConfigChange, optimizeObject, proxyFunctions, removePageHideEventListener, removePageShowEventListener, removePageUnloadEventListener, setProcessTelemetryTimings } from \"@microsoft/1ds-core-js\";\r\nimport { createPromise } from \"@nevware21/ts-async\";\r\nimport { objDeepFreeze } from \"@nevware21/ts-utils\";\r\nimport { BE_PROFILE, NRT_PROFILE, RT_PROFILE } from \"./DataModels\";\r\nimport { EventBatch } from \"./EventBatch\";\r\nimport { HttpManager } from \"./HttpManager\";\r\nimport { STR_MSA_DEVICE_TICKET, STR_TRACE, STR_USER } from \"./InternalConstants\";\r\nimport { retryPolicyGetMillisToBackoffForRetry } from \"./RetryPolicy\";\r\nimport { createTimeoutWrapper } from \"./TimeoutOverrideWrapper\";\r\nvar FlushCheckTimer = 0.250; // This needs to be in seconds, so this is 250ms\r\nvar MaxNumberEventPerBatch = 500;\r\nvar EventsDroppedAtOneTime = 20;\r\nvar MaxSendAttempts = 6;\r\nvar MaxSyncUnloadSendAttempts = 2; // Assuming 2 based on beforeunload and unload\r\nvar MaxBackoffCount = 4;\r\nvar MaxConnections = 2;\r\nvar MaxRequestRetriesBeforeBackoff = 1;\r\nvar MaxEventsLimitInMem = 10000;\r\nvar strEventsDiscarded = \"eventsDiscarded\";\r\nvar undefValue = undefined;\r\n/**\r\n * The default settings for the config.\r\n * WE MUST include all defaults here to ensure that the config is created with all of 
the properties\r\n * defined as dynamic.\r\n */\r\nvar defaultPostChannelConfig = objDeepFreeze({\r\n eventsLimitInMem: { isVal: isGreaterThanZero, v: MaxEventsLimitInMem },\r\n immediateEventLimit: { isVal: isGreaterThanZero, v: 500 },\r\n autoFlushEventsLimit: { isVal: isGreaterThanZero, v: 0 },\r\n disableAutoBatchFlushLimit: false,\r\n httpXHROverride: { isVal: isOverrideFn, v: undefValue },\r\n overrideInstrumentationKey: undefValue,\r\n overrideEndpointUrl: undefValue,\r\n disableTelemetry: false,\r\n ignoreMc1Ms0CookieProcessing: false,\r\n setTimeoutOverride: undefValue,\r\n clearTimeoutOverride: undefValue,\r\n payloadPreprocessor: undefValue,\r\n payloadListener: undefValue,\r\n disableEventTimings: undefValue,\r\n valueSanitizer: undefValue,\r\n stringifyObjects: undefValue,\r\n enableCompoundKey: undefValue,\r\n disableOptimizeObj: false,\r\n // disableCacheHeader: undefValue, // See Task #7178858 - Collector requires a change to support this\r\n transports: undefValue,\r\n unloadTransports: undefValue,\r\n useSendBeacon: undefValue,\r\n disableFetchKeepAlive: undefValue,\r\n avoidOptions: false,\r\n xhrTimeout: undefValue,\r\n disableXhrSync: undefValue,\r\n alwaysUseXhrOverride: false,\r\n maxEventRetryAttempts: { isVal: isNumber, v: MaxSendAttempts },\r\n maxUnloadEventRetryAttempts: { isVal: isNumber, v: MaxSyncUnloadSendAttempts },\r\n addNoResponse: undefValue\r\n});\r\nfunction isOverrideFn(httpXHROverride) {\r\n return httpXHROverride && httpXHROverride.sendPOST;\r\n}\r\n/**\r\n * Class that manages adding events to inbound queues and batching of events\r\n * into requests.\r\n */\r\nvar PostChannel = /** @class */ (function (_super) {\r\n __extends(PostChannel, _super);\r\n function PostChannel() {\r\n var _this = _super.call(this) || this;\r\n _this.identifier = \"PostChannel\";\r\n _this.priority = 1011;\r\n _this.version = '4.0.1';\r\n var _postConfig;\r\n var _isTeardownCalled = false;\r\n var _flushCallbackQueue = [];\r\n var 
_flushCallbackTimer;\r\n var _paused = false;\r\n var _immediateQueueSize = 0;\r\n var _immediateQueueSizeLimit;\r\n var _queueSize = 0;\r\n var _queueSizeLimit;\r\n var _profiles = {};\r\n var _currentProfile = RT_PROFILE;\r\n var _scheduledTimer;\r\n var _immediateTimer;\r\n var _currentBackoffCount;\r\n var _timerCount;\r\n var _httpManager;\r\n var _batchQueues;\r\n var _autoFlushEventsLimit;\r\n // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])\r\n var _autoFlushBatchLimit;\r\n var _delayedBatchSendLatency;\r\n var _delayedBatchReason;\r\n var _optimizeObject;\r\n var _isPageUnloadTriggered;\r\n var _maxEventSendAttempts;\r\n var _maxUnloadEventSendAttempts;\r\n var _evtNamespace;\r\n var _timeoutWrapper;\r\n var _ignoreMc1Ms0CookieProcessing;\r\n var _disableAutoBatchFlushLimit;\r\n var _notificationManager;\r\n var _unloadHandlersAdded;\r\n var _overrideInstrumentationKey;\r\n var _disableTelemetry;\r\n dynamicProto(PostChannel, _this, function (_self, _base) {\r\n _initDefaults();\r\n // Special internal method to allow the DebugPlugin to hook embedded objects\r\n _self[\"_getDbgPlgTargets\"] = function () {\r\n return [_httpManager, _postConfig];\r\n };\r\n _self.initialize = function (theConfig, core, extensions) {\r\n doPerf(core, function () { return \"PostChannel:initialize\"; }, function () {\r\n _base.initialize(theConfig, core, extensions);\r\n _notificationManager = core.getNotifyMgr();\r\n try {\r\n _evtNamespace = mergeEvtNamespace(createUniqueNamespace(_self.identifier), core.evtNamespace && core.evtNamespace());\r\n _self._addHook(onConfigChange(theConfig, function (details) {\r\n var coreConfig = details.cfg;\r\n var ctx = createProcessTelemetryContext(null, coreConfig, core);\r\n _postConfig = ctx.getExtCfg(_self.identifier, defaultPostChannelConfig);\r\n _timeoutWrapper = createTimeoutWrapper(_postConfig.setTimeoutOverride, 
_postConfig.clearTimeoutOverride);\r\n // Only try and use the optimizeObject() if this appears to be a chromium based browser and it has not been explicitly disabled\r\n _optimizeObject = !_postConfig.disableOptimizeObj && isChromium();\r\n _ignoreMc1Ms0CookieProcessing = _postConfig.ignoreMc1Ms0CookieProcessing;\r\n _hookWParam(core); // _hookWParam uses _ignoreMc1Ms0CookieProcessing \r\n _queueSizeLimit = _postConfig.eventsLimitInMem;\r\n _immediateQueueSizeLimit = _postConfig.immediateEventLimit;\r\n _autoFlushEventsLimit = _postConfig.autoFlushEventsLimit;\r\n _maxEventSendAttempts = _postConfig.maxEventRetryAttempts;\r\n _maxUnloadEventSendAttempts = _postConfig.maxUnloadEventRetryAttempts;\r\n _disableAutoBatchFlushLimit = _postConfig.disableAutoBatchFlushLimit;\r\n _setAutoLimits();\r\n // Override iKey if provided in Post config if provided for during initialization\r\n _overrideInstrumentationKey = _postConfig.overrideInstrumentationKey;\r\n // DisableTelemetry was defined in the config provided during initialization\r\n _disableTelemetry = !!_postConfig.disableTelemetry;\r\n if (_unloadHandlersAdded) {\r\n _removeUnloadHandlers();\r\n }\r\n var excludePageUnloadEvents = coreConfig.disablePageUnloadEvents || [];\r\n // When running in Web browsers try to send all telemetry if page is unloaded\r\n _unloadHandlersAdded = addPageUnloadEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);\r\n _unloadHandlersAdded = addPageHideEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace) || _unloadHandlersAdded;\r\n _unloadHandlersAdded = addPageShowEventListener(_handleShowEvents, coreConfig.disablePageShowEvents, _evtNamespace) || _unloadHandlersAdded;\r\n }));\r\n // only initialize the manager once\r\n _httpManager.initialize(theConfig, _self.core, _self);\r\n }\r\n catch (e) {\r\n // resetting the initialized state because of failure\r\n _self.setInitialized(false);\r\n throw e;\r\n }\r\n }, function () { return ({ 
theConfig: theConfig, core: core, extensions: extensions }); });\r\n };\r\n _self.processTelemetry = function (ev, itemCtx) {\r\n setProcessTelemetryTimings(ev, _self.identifier);\r\n itemCtx = itemCtx || _self._getTelCtx(itemCtx);\r\n var event = ev;\r\n if (!_disableTelemetry && !_isTeardownCalled) {\r\n // Override iKey if provided in Post config if provided for during initialization\r\n if (_overrideInstrumentationKey) {\r\n event.iKey = _overrideInstrumentationKey;\r\n }\r\n _addEventToQueues(event, true);\r\n if (_isPageUnloadTriggered) {\r\n // Unload event has been received so we need to try and flush new events\r\n _releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);\r\n }\r\n else {\r\n _scheduleTimer();\r\n }\r\n }\r\n _self.processNext(event, itemCtx);\r\n };\r\n _self._doTeardown = function (unloadCtx, unloadState) {\r\n _releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);\r\n _isTeardownCalled = true;\r\n _httpManager.teardown();\r\n _removeUnloadHandlers();\r\n // Just register to remove all events associated with this namespace\r\n _initDefaults();\r\n };\r\n function _removeUnloadHandlers() {\r\n removePageUnloadEventListener(null, _evtNamespace);\r\n removePageHideEventListener(null, _evtNamespace);\r\n removePageShowEventListener(null, _evtNamespace);\r\n }\r\n function _hookWParam(core) {\r\n var existingGetWParamMethod = core.getWParam;\r\n core.getWParam = function () {\r\n var wparam = 0;\r\n if (_ignoreMc1Ms0CookieProcessing) {\r\n wparam = wparam | 2;\r\n }\r\n return wparam | existingGetWParamMethod.call(core);\r\n };\r\n }\r\n // Moving event handlers out from the initialize closure so that any local variables can be garbage collected\r\n function _handleUnloadEvents(evt) {\r\n var theEvt = evt || getWindow().event; // IE 8 does not pass the event\r\n if (theEvt.type !== \"beforeunload\") {\r\n // Only set the unload trigger if not beforeunload event as beforeunload can 
be cancelled while the other events can't\r\n _isPageUnloadTriggered = true;\r\n _httpManager.setUnloading(_isPageUnloadTriggered);\r\n }\r\n _releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);\r\n }\r\n function _handleShowEvents(evt) {\r\n // Handle the page becoming visible again\r\n _isPageUnloadTriggered = false;\r\n _httpManager.setUnloading(_isPageUnloadTriggered);\r\n }\r\n function _addEventToQueues(event, append) {\r\n // If send attempt field is undefined we should set it to 0.\r\n if (!event.sendAttempt) {\r\n event.sendAttempt = 0;\r\n }\r\n // Add default latency\r\n if (!event.latency) {\r\n event.latency = 1 /* EventLatencyValue.Normal */;\r\n }\r\n // Remove extra AI properties if present\r\n if (event.ext && event.ext[STR_TRACE]) {\r\n delete (event.ext[STR_TRACE]);\r\n }\r\n if (event.ext && event.ext[STR_USER] && event.ext[STR_USER][\"id\"]) {\r\n delete (event.ext[STR_USER][\"id\"]);\r\n }\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event.ext = optimizeObject(event.ext);\r\n if (event.baseData) {\r\n event.baseData = optimizeObject(event.baseData);\r\n }\r\n if (event.data) {\r\n event.data = optimizeObject(event.data);\r\n }\r\n }\r\n if (event.sync) {\r\n // If the transmission is backed off then do not send synchronous events.\r\n // We will convert these events to Real time latency instead.\r\n if (_currentBackoffCount || _paused) {\r\n event.latency = 3 /* EventLatencyValue.RealTime */;\r\n event.sync = false;\r\n }\r\n else {\r\n // Log the event synchronously\r\n if (_httpManager) {\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event = optimizeObject(event);\r\n }\r\n _httpManager.sendSynchronousBatch(EventBatch.create(event.iKey, [event]), event.sync === true ? 
1 /* EventSendType.Synchronous */ : event.sync, 3 /* SendRequestReason.SyncEvent */);\r\n return;\r\n }\r\n }\r\n }\r\n var evtLatency = event.latency;\r\n var queueSize = _queueSize;\r\n var queueLimit = _queueSizeLimit;\r\n if (evtLatency === 4 /* EventLatencyValue.Immediate */) {\r\n queueSize = _immediateQueueSize;\r\n queueLimit = _immediateQueueSizeLimit;\r\n }\r\n var eventDropped = false;\r\n // Only add the event if the queue isn't full or it's a direct event (which don't add to the queue sizes)\r\n if (queueSize < queueLimit) {\r\n eventDropped = !_addEventToProperQueue(event, append);\r\n }\r\n else {\r\n var dropLatency = 1 /* EventLatencyValue.Normal */;\r\n var dropNumber = EventsDroppedAtOneTime;\r\n if (evtLatency === 4 /* EventLatencyValue.Immediate */) {\r\n // Only drop other immediate events as they are not technically sharing the general queue\r\n dropLatency = 4 /* EventLatencyValue.Immediate */;\r\n dropNumber = 1;\r\n }\r\n // Drop old event from lower or equal latency\r\n eventDropped = true;\r\n if (_dropEventWithLatencyOrLess(event.iKey, event.latency, dropLatency, dropNumber)) {\r\n eventDropped = !_addEventToProperQueue(event, append);\r\n }\r\n }\r\n if (eventDropped) {\r\n // Can't drop events from current queues because the all the slots are taken by queues that are being flushed.\r\n _notifyEvents(strEventsDiscarded, [event], EventsDiscardedReason.QueueFull);\r\n }\r\n }\r\n _self.setEventQueueLimits = function (eventLimit, autoFlushLimit) {\r\n _postConfig.eventsLimitInMem = _queueSizeLimit = isGreaterThanZero(eventLimit) ? eventLimit : MaxEventsLimitInMem;\r\n _postConfig.autoFlushEventsLimit = _autoFlushEventsLimit = isGreaterThanZero(autoFlushLimit) ? 
autoFlushLimit : 0;\r\n _setAutoLimits();\r\n // We only do this check here as during normal event addition if the queue is > then events start getting dropped\r\n var doFlush = _queueSize > eventLimit;\r\n if (!doFlush && _autoFlushBatchLimit > 0) {\r\n // Check the auto flush max batch size\r\n for (var latency = 1 /* EventLatencyValue.Normal */; !doFlush && latency <= 3 /* EventLatencyValue.RealTime */; latency++) {\r\n var batchQueue = _batchQueues[latency];\r\n if (batchQueue && batchQueue.batches) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n if (theBatch && theBatch.count() >= _autoFlushBatchLimit) {\r\n // If any 1 batch is > than the limit then trigger an auto flush\r\n doFlush = true;\r\n }\r\n });\r\n }\r\n }\r\n }\r\n _performAutoFlush(true, doFlush);\r\n };\r\n _self.pause = function () {\r\n _clearScheduledTimer();\r\n _paused = true;\r\n _httpManager.pause();\r\n };\r\n _self.resume = function () {\r\n _paused = false;\r\n _httpManager.resume();\r\n _scheduleTimer();\r\n };\r\n _self._loadTransmitProfiles = function (profiles) {\r\n _resetTransmitProfiles();\r\n objForEachKey(profiles, function (profileName, profileValue) {\r\n var profLen = profileValue.length;\r\n if (profLen >= 2) {\r\n var directValue = (profLen > 2 ? profileValue[2] : 0);\r\n profileValue.splice(0, profLen - 2);\r\n // Make sure if a higher latency is set to not send then don't send lower latency\r\n if (profileValue[1] < 0) {\r\n profileValue[0] = -1;\r\n }\r\n // Make sure each latency is multiple of the latency higher then it. 
If not a multiple\r\n // we round up so that it becomes a multiple.\r\n if (profileValue[1] > 0 && profileValue[0] > 0) {\r\n var timerMultiplier = profileValue[0] / profileValue[1];\r\n profileValue[0] = Math.ceil(timerMultiplier) * profileValue[1];\r\n }\r\n // Add back the direct profile timeout\r\n if (directValue >= 0 && profileValue[1] >= 0 && directValue > profileValue[1]) {\r\n // Make sure if it's not disabled (< 0) then make sure it's not larger than RealTime\r\n directValue = profileValue[1];\r\n }\r\n profileValue.push(directValue);\r\n _profiles[profileName] = profileValue;\r\n }\r\n });\r\n };\r\n _self.flush = function (async, callback, sendReason) {\r\n if (async === void 0) { async = true; }\r\n var result;\r\n if (!_paused) {\r\n sendReason = sendReason || 1 /* SendRequestReason.ManualFlush */;\r\n if (async) {\r\n if (!callback) {\r\n result = createPromise(function (resolve) {\r\n // Set the callback to the promise resolve callback\r\n callback = resolve;\r\n });\r\n }\r\n if (_flushCallbackTimer == null) {\r\n // Clear the normal schedule timer as we are going to try and flush ASAP\r\n _clearScheduledTimer();\r\n // Move all queued events to the HttpManager so that we don't discard new events (Auto flush scenario)\r\n _queueBatches(1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, sendReason);\r\n _flushCallbackTimer = _createTimer(function () {\r\n _flushCallbackTimer = null;\r\n _flushImpl(callback, sendReason);\r\n }, 0);\r\n }\r\n else {\r\n // Even if null (no callback) this will ensure after the flushImpl finishes waiting\r\n // for a completely idle connection it will attempt to re-flush any queued events on the next cycle\r\n _flushCallbackQueue.push(callback);\r\n }\r\n }\r\n else {\r\n // Clear the normal schedule timer as we are going to try and flush ASAP\r\n var cleared = _clearScheduledTimer();\r\n // Now cause all queued events to be sent synchronously\r\n _sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal 
*/, 1 /* EventSendType.Synchronous */, sendReason);\r\n callback && callback();\r\n if (cleared) {\r\n // restart the normal event timer if it was cleared\r\n _scheduleTimer();\r\n }\r\n }\r\n }\r\n return result;\r\n };\r\n _self.setMsaAuthTicket = function (ticket) {\r\n _httpManager.addHeader(STR_MSA_DEVICE_TICKET, ticket);\r\n };\r\n _self.hasEvents = _hasEvents;\r\n _self._setTransmitProfile = function (profileName) {\r\n if (_currentProfile !== profileName && _profiles[profileName] !== undefined) {\r\n _clearScheduledTimer();\r\n _currentProfile = profileName;\r\n _scheduleTimer();\r\n }\r\n };\r\n proxyFunctions(_self, function () { return _httpManager; }, [\"addResponseHandler\"]);\r\n /**\r\n * Batch and send events currently in the queue for the given latency.\r\n * @param latency - Latency for which to send events.\r\n */\r\n function _sendEventsForLatencyAndAbove(latency, sendType, sendReason) {\r\n var queued = _queueBatches(latency, sendType, sendReason);\r\n // Always trigger the request as while the post channel may not have queued additional events, the httpManager may already have waiting events\r\n _httpManager.sendQueuedRequests(sendType, sendReason);\r\n return queued;\r\n }\r\n function _hasEvents() {\r\n return _queueSize > 0;\r\n }\r\n /**\r\n * Try to schedule the timer after which events will be sent. 
If there are\r\n * no events to be sent, or there is already a timer scheduled, or the\r\n * http manager doesn't have any idle connections this method is no-op.\r\n */\r\n function _scheduleTimer() {\r\n // If we had previously attempted to send requests, but the http manager didn't have any idle connections then the requests where delayed\r\n // so try and requeue then again now\r\n if (_delayedBatchSendLatency >= 0 && _queueBatches(_delayedBatchSendLatency, 0 /* EventSendType.Batched */, _delayedBatchReason)) {\r\n _httpManager.sendQueuedRequests(0 /* EventSendType.Batched */, _delayedBatchReason);\r\n }\r\n if (_immediateQueueSize > 0 && !_immediateTimer && !_paused) {\r\n // During initialization _profiles enforce that the direct [2] is less than real time [1] timer value\r\n // If the immediateTimeout is disabled the immediate events will be sent with Real Time events\r\n var immediateTimeOut = _profiles[_currentProfile][2];\r\n if (immediateTimeOut >= 0) {\r\n _immediateTimer = _createTimer(function () {\r\n _immediateTimer = null;\r\n // Only try to send direct events\r\n _sendEventsForLatencyAndAbove(4 /* EventLatencyValue.Immediate */, 0 /* EventSendType.Batched */, 1 /* SendRequestReason.NormalSchedule */);\r\n _scheduleTimer();\r\n }, immediateTimeOut);\r\n }\r\n }\r\n // During initialization the _profiles enforce that the normal [0] is a multiple of the real time [1] timer value\r\n var timeOut = _profiles[_currentProfile][1];\r\n if (!_scheduledTimer && !_flushCallbackTimer && timeOut >= 0 && !_paused) {\r\n if (_hasEvents()) {\r\n _scheduledTimer = _createTimer(function () {\r\n _scheduledTimer = null;\r\n _sendEventsForLatencyAndAbove(_timerCount === 0 ? 
3 /* EventLatencyValue.RealTime */ : 1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, 1 /* SendRequestReason.NormalSchedule */);\r\n // Increment the count for next cycle\r\n _timerCount++;\r\n _timerCount %= 2;\r\n _scheduleTimer();\r\n }, timeOut);\r\n }\r\n else {\r\n _timerCount = 0;\r\n }\r\n }\r\n }\r\n _self._backOffTransmission = function () {\r\n if (_currentBackoffCount < MaxBackoffCount) {\r\n _currentBackoffCount++;\r\n _clearScheduledTimer();\r\n _scheduleTimer();\r\n }\r\n };\r\n _self._clearBackOff = function () {\r\n if (_currentBackoffCount) {\r\n _currentBackoffCount = 0;\r\n _clearScheduledTimer();\r\n _scheduleTimer();\r\n }\r\n };\r\n function _initDefaults() {\r\n _postConfig = null;\r\n _isTeardownCalled = false;\r\n _flushCallbackQueue = [];\r\n _flushCallbackTimer = null;\r\n _paused = false;\r\n _immediateQueueSize = 0;\r\n _immediateQueueSizeLimit = 500;\r\n _queueSize = 0;\r\n _queueSizeLimit = MaxEventsLimitInMem;\r\n _profiles = {};\r\n _currentProfile = RT_PROFILE;\r\n _scheduledTimer = null;\r\n _immediateTimer = null;\r\n _currentBackoffCount = 0;\r\n _timerCount = 0;\r\n _batchQueues = {};\r\n _autoFlushEventsLimit = 0;\r\n _unloadHandlersAdded = false;\r\n // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])\r\n _autoFlushBatchLimit = 0;\r\n _delayedBatchSendLatency = -1;\r\n _delayedBatchReason = null;\r\n _optimizeObject = true;\r\n _isPageUnloadTriggered = false;\r\n _maxEventSendAttempts = MaxSendAttempts;\r\n _maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;\r\n _evtNamespace = null;\r\n _overrideInstrumentationKey = null;\r\n _disableTelemetry = false;\r\n _timeoutWrapper = createTimeoutWrapper();\r\n _httpManager = new HttpManager(MaxNumberEventPerBatch, MaxConnections, MaxRequestRetriesBeforeBackoff, {\r\n requeue: _requeueEvents,\r\n send: _sendingEvent,\r\n sent: _eventsSentEvent,\r\n 
drop: _eventsDropped,\r\n rspFail: _eventsResponseFail,\r\n oth: _otherEvent\r\n });\r\n _initializeProfiles();\r\n _clearQueues();\r\n _setAutoLimits();\r\n }\r\n function _createTimer(theTimerFunc, timeOut) {\r\n // If the transmission is backed off make the timer at least 1 sec to allow for back off.\r\n if (timeOut === 0 && _currentBackoffCount) {\r\n timeOut = 1;\r\n }\r\n var timerMultiplier = 1000;\r\n if (_currentBackoffCount) {\r\n timerMultiplier = retryPolicyGetMillisToBackoffForRetry(_currentBackoffCount - 1);\r\n }\r\n return _timeoutWrapper.set(theTimerFunc, timeOut * timerMultiplier);\r\n }\r\n function _clearScheduledTimer() {\r\n if (_scheduledTimer !== null) {\r\n _scheduledTimer.cancel();\r\n _scheduledTimer = null;\r\n _timerCount = 0;\r\n return true;\r\n }\r\n return false;\r\n }\r\n // Try to send all queued events using beacons if available\r\n function _releaseAllQueues(sendType, sendReason) {\r\n _clearScheduledTimer();\r\n // Cancel all flush callbacks\r\n if (_flushCallbackTimer) {\r\n _flushCallbackTimer.cancel();\r\n _flushCallbackTimer = null;\r\n }\r\n if (!_paused) {\r\n // Queue all the remaining requests to be sent. The requests will be sent using HTML5 Beacons if they are available.\r\n _sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal */, sendType, sendReason);\r\n }\r\n }\r\n /**\r\n * Add empty queues for all latencies in the inbound queues map. This is called\r\n * when Transmission Manager is being flushed. 
This ensures that new events added\r\n * after flush are stored separately till we flush the current events.\r\n */\r\n function _clearQueues() {\r\n _batchQueues[4 /* EventLatencyValue.Immediate */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[3 /* EventLatencyValue.RealTime */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[2 /* EventLatencyValue.CostDeferred */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[1 /* EventLatencyValue.Normal */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n }\r\n function _getEventBatch(iKey, latency, create) {\r\n var batchQueue = _batchQueues[latency];\r\n if (!batchQueue) {\r\n latency = 1 /* EventLatencyValue.Normal */;\r\n batchQueue = _batchQueues[latency];\r\n }\r\n var eventBatch = batchQueue.iKeyMap[iKey];\r\n if (!eventBatch && create) {\r\n eventBatch = EventBatch.create(iKey);\r\n batchQueue.batches.push(eventBatch);\r\n batchQueue.iKeyMap[iKey] = eventBatch;\r\n }\r\n return eventBatch;\r\n }\r\n function _performAutoFlush(isAsync, doFlush) {\r\n // Only perform the auto flush check if the httpManager has an idle connection and we are not in a backoff situation\r\n if (_httpManager.canSendRequest() && !_currentBackoffCount) {\r\n if (_autoFlushEventsLimit > 0 && _queueSize > _autoFlushEventsLimit) {\r\n // Force flushing\r\n doFlush = true;\r\n }\r\n if (doFlush && _flushCallbackTimer == null) {\r\n // Auto flush the queue, adding a callback to avoid the creation of a promise\r\n _self.flush(isAsync, function () { }, 20 /* SendRequestReason.MaxQueuedEvents */);\r\n }\r\n }\r\n }\r\n function _addEventToProperQueue(event, append) {\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event = optimizeObject(event);\r\n }\r\n var latency = event.latency;\r\n var eventBatch = _getEventBatch(event.iKey, latency, true);\r\n if (eventBatch.addEvent(event)) {\r\n if (latency !== 4 /* EventLatencyValue.Immediate */) {\r\n _queueSize++;\r\n // 
Check for auto flushing based on total events in the queue, but not for requeued or retry events\r\n if (append && event.sendAttempt === 0) {\r\n // Force the flushing of the batch if the batch (specific iKey / latency combination) reaches it's auto flush limit\r\n _performAutoFlush(!event.sync, _autoFlushBatchLimit > 0 && eventBatch.count() >= _autoFlushBatchLimit);\r\n }\r\n }\r\n else {\r\n // Direct events don't need auto flushing as they are scheduled (by default) for immediate delivery\r\n _immediateQueueSize++;\r\n }\r\n return true;\r\n }\r\n return false;\r\n }\r\n function _dropEventWithLatencyOrLess(iKey, latency, currentLatency, dropNumber) {\r\n while (currentLatency <= latency) {\r\n var eventBatch = _getEventBatch(iKey, latency, true);\r\n if (eventBatch && eventBatch.count() > 0) {\r\n // Dropped oldest events from lowest possible latency\r\n var droppedEvents = eventBatch.split(0, dropNumber);\r\n var droppedCount = droppedEvents.count();\r\n if (droppedCount > 0) {\r\n if (currentLatency === 4 /* EventLatencyValue.Immediate */) {\r\n _immediateQueueSize -= droppedCount;\r\n }\r\n else {\r\n _queueSize -= droppedCount;\r\n }\r\n _notifyBatchEvents(strEventsDiscarded, [droppedEvents], EventsDiscardedReason.QueueFull);\r\n return true;\r\n }\r\n }\r\n currentLatency++;\r\n }\r\n // Unable to drop any events -- lets just make sure the queue counts are correct to avoid exhaustion\r\n _resetQueueCounts();\r\n return false;\r\n }\r\n /**\r\n * Internal helper to reset the queue counts, used as a backstop to avoid future queue exhaustion errors\r\n * that might occur because of counting issues.\r\n */\r\n function _resetQueueCounts() {\r\n var immediateQueue = 0;\r\n var normalQueue = 0;\r\n var _loop_1 = function (latency) {\r\n var batchQueue = _batchQueues[latency];\r\n if (batchQueue && batchQueue.batches) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n if (latency === 4 /* EventLatencyValue.Immediate */) {\r\n immediateQueue += 
theBatch.count();\r\n }\r\n else {\r\n normalQueue += theBatch.count();\r\n }\r\n });\r\n }\r\n };\r\n for (var latency = 1 /* EventLatencyValue.Normal */; latency <= 4 /* EventLatencyValue.Immediate */; latency++) {\r\n _loop_1(latency);\r\n }\r\n _queueSize = normalQueue;\r\n _immediateQueueSize = immediateQueue;\r\n }\r\n function _queueBatches(latency, sendType, sendReason) {\r\n var eventsQueued = false;\r\n var isAsync = sendType === 0 /* EventSendType.Batched */;\r\n // Only queue batches (to the HttpManager) if this is a sync request or the httpManager has an idle connection\r\n // Thus keeping the events within the PostChannel until the HttpManager has a connection available\r\n // This is so we can drop \"old\" events if the queue is getting full because we can't successfully send events\r\n if (!isAsync || _httpManager.canSendRequest()) {\r\n doPerf(_self.core, function () { return \"PostChannel._queueBatches\"; }, function () {\r\n var droppedEvents = [];\r\n var latencyToProcess = 4 /* EventLatencyValue.Immediate */;\r\n while (latencyToProcess >= latency) {\r\n var batchQueue = _batchQueues[latencyToProcess];\r\n if (batchQueue && batchQueue.batches && batchQueue.batches.length > 0) {\r\n arrForEach(batchQueue.batches, function (theBatch) {\r\n // Add the batch to the http manager to send the requests\r\n if (!_httpManager.addBatch(theBatch)) {\r\n // The events from this iKey are being dropped (killed)\r\n droppedEvents = droppedEvents.concat(theBatch.events());\r\n }\r\n else {\r\n eventsQueued = eventsQueued || (theBatch && theBatch.count() > 0);\r\n }\r\n if (latencyToProcess === 4 /* EventLatencyValue.Immediate */) {\r\n _immediateQueueSize -= theBatch.count();\r\n }\r\n else {\r\n _queueSize -= theBatch.count();\r\n }\r\n });\r\n // Remove all batches from this Queue\r\n batchQueue.batches = [];\r\n batchQueue.iKeyMap = {};\r\n }\r\n latencyToProcess--;\r\n }\r\n if (droppedEvents.length > 0) {\r\n _notifyEvents(strEventsDiscarded, 
droppedEvents, EventsDiscardedReason.KillSwitch);\r\n }\r\n if (eventsQueued && _delayedBatchSendLatency >= latency) {\r\n // We have queued events at the same level as the delayed values so clear the setting\r\n _delayedBatchSendLatency = -1;\r\n _delayedBatchReason = 0 /* SendRequestReason.Undefined */;\r\n }\r\n }, function () { return ({ latency: latency, sendType: sendType, sendReason: sendReason }); }, !isAsync);\r\n }\r\n else {\r\n // remember the min latency so that we can re-trigger later\r\n _delayedBatchSendLatency = _delayedBatchSendLatency >= 0 ? Math.min(_delayedBatchSendLatency, latency) : latency;\r\n _delayedBatchReason = Math.max(_delayedBatchReason, sendReason);\r\n }\r\n return eventsQueued;\r\n }\r\n /**\r\n * This is the callback method is called as part of the manual flushing process.\r\n * @param callback\r\n * @param sendReason\r\n */\r\n function _flushImpl(callback, sendReason) {\r\n // Add any additional queued events and cause all queued events to be sent asynchronously\r\n _sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, sendReason);\r\n // All events (should) have been queue -- lets just make sure the queue counts are correct to avoid queue exhaustion (previous bug #9685112)\r\n _resetQueueCounts();\r\n _waitForIdleManager(function () {\r\n // Only called AFTER the httpManager does not have any outstanding requests\r\n if (callback) {\r\n callback();\r\n }\r\n if (_flushCallbackQueue.length > 0) {\r\n _flushCallbackTimer = _createTimer(function () {\r\n _flushCallbackTimer = null;\r\n _flushImpl(_flushCallbackQueue.shift(), sendReason);\r\n }, 0);\r\n }\r\n else {\r\n // No more flush requests\r\n _flushCallbackTimer = null;\r\n // Restart the normal timer schedule\r\n _scheduleTimer();\r\n }\r\n });\r\n }\r\n function _waitForIdleManager(callback) {\r\n if (_httpManager.isCompletelyIdle()) {\r\n callback();\r\n }\r\n else {\r\n _flushCallbackTimer = _createTimer(function () {\r\n 
_flushCallbackTimer = null;\r\n _waitForIdleManager(callback);\r\n }, FlushCheckTimer);\r\n }\r\n }\r\n /**\r\n * Resets the transmit profiles to the default profiles of Real Time, Near Real Time\r\n * and Best Effort. This removes all the custom profiles that were loaded.\r\n */\r\n function _resetTransmitProfiles() {\r\n _clearScheduledTimer();\r\n _initializeProfiles();\r\n _currentProfile = RT_PROFILE;\r\n _scheduleTimer();\r\n }\r\n function _initializeProfiles() {\r\n _profiles = {};\r\n _profiles[RT_PROFILE] = [2, 1, 0];\r\n _profiles[NRT_PROFILE] = [6, 3, 0];\r\n _profiles[BE_PROFILE] = [18, 9, 0];\r\n }\r\n /**\r\n * The notification handler for requeue events\r\n * @ignore\r\n */\r\n function _requeueEvents(batches, reason) {\r\n var droppedEvents = [];\r\n var maxSendAttempts = _maxEventSendAttempts;\r\n if (_isPageUnloadTriggered) {\r\n // If a page unlaod has been triggered reduce the number of times we try to \"retry\"\r\n maxSendAttempts = _maxUnloadEventSendAttempts;\r\n }\r\n arrForEach(batches, function (theBatch) {\r\n if (theBatch && theBatch.count() > 0) {\r\n arrForEach(theBatch.events(), function (theEvent) {\r\n if (theEvent) {\r\n // Check if the request being added back is for a sync event in which case mark it no longer a sync event\r\n if (theEvent.sync) {\r\n theEvent.latency = 4 /* EventLatencyValue.Immediate */;\r\n theEvent.sync = false;\r\n }\r\n if (theEvent.sendAttempt < maxSendAttempts) {\r\n // Reset the event timings\r\n setProcessTelemetryTimings(theEvent, _self.identifier);\r\n _addEventToQueues(theEvent, false);\r\n }\r\n else {\r\n droppedEvents.push(theEvent);\r\n }\r\n }\r\n });\r\n }\r\n });\r\n if (droppedEvents.length > 0) {\r\n _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.NonRetryableStatus);\r\n }\r\n if (_isPageUnloadTriggered) {\r\n // Unload event has been received so we need to try and flush new events\r\n _releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload 
*/);\r\n }\r\n }\r\n function _callNotification(evtName, theArgs) {\r\n var manager = (_notificationManager || {});\r\n var notifyFunc = manager[evtName];\r\n if (notifyFunc) {\r\n try {\r\n notifyFunc.apply(manager, theArgs);\r\n }\r\n catch (e) {\r\n _throwInternal(_self.diagLog(), 1 /* eLoggingSeverity.CRITICAL */, 74 /* _eInternalMessageId.NotificationException */, evtName + \" notification failed: \" + e);\r\n }\r\n }\r\n }\r\n function _notifyEvents(evtName, theEvents) {\r\n var extraArgs = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n extraArgs[_i - 2] = arguments[_i];\r\n }\r\n if (theEvents && theEvents.length > 0) {\r\n _callNotification(evtName, [theEvents].concat(extraArgs));\r\n }\r\n }\r\n function _notifyBatchEvents(evtName, batches) {\r\n var extraArgs = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n extraArgs[_i - 2] = arguments[_i];\r\n }\r\n if (batches && batches.length > 0) {\r\n arrForEach(batches, function (theBatch) {\r\n if (theBatch && theBatch.count() > 0) {\r\n _callNotification(evtName, [theBatch.events()].concat(extraArgs));\r\n }\r\n });\r\n }\r\n }\r\n /**\r\n * The notification handler for when batches are about to be sent\r\n * @ignore\r\n */\r\n function _sendingEvent(batches, reason, isSyncRequest) {\r\n if (batches && batches.length > 0) {\r\n _callNotification(\"eventsSendRequest\", [(reason >= 1000 /* EventBatchNotificationReason.SendingUndefined */ && reason <= 1999 /* EventBatchNotificationReason.SendingEventMax */ ?\r\n reason - 1000 /* EventBatchNotificationReason.SendingUndefined */ :\r\n 0 /* SendRequestReason.Undefined */), isSyncRequest !== true]);\r\n }\r\n }\r\n /**\r\n * This event represents that a batch of events have been successfully sent and a response received\r\n * @param batches The notification handler for when the batches have been successfully sent\r\n * @param reason For this event the reason will always be EventBatchNotificationReason.Complete\r\n */\r\n function 
_eventsSentEvent(batches, reason) {\r\n _notifyBatchEvents(\"eventsSent\", batches, reason);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _eventsDropped(batches, reason) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, (reason >= 8000 /* EventBatchNotificationReason.EventsDropped */ && reason <= 8999 /* EventBatchNotificationReason.EventsDroppedMax */ ?\r\n reason - 8000 /* EventBatchNotificationReason.EventsDropped */ :\r\n EventsDiscardedReason.Unknown));\r\n }\r\n function _eventsResponseFail(batches) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.NonRetryableStatus);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _otherEvent(batches, reason) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.Unknown);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _setAutoLimits() {\r\n if (!_disableAutoBatchFlushLimit) {\r\n _autoFlushBatchLimit = Math.max(MaxNumberEventPerBatch * (MaxConnections + 1), _queueSizeLimit / 6);\r\n }\r\n else {\r\n _autoFlushBatchLimit = 0;\r\n }\r\n }\r\n });\r\n return _this;\r\n }\r\n /**\r\n * Start the queue manager to batch and send events via post.\r\n * @param config - The core configuration.\r\n */\r\n PostChannel.prototype.initialize = function (coreConfig, core, extensions) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Add an event to the appropriate inbound queue based on its latency.\r\n * @param ev - The event to be added to the queue.\r\n * @param itemCtx - This is the context for the current request, ITelemetryPlugin instances\r\n * can optionally use this to access the current core instance or define / pass additional information\r\n * to later plugins (vs appending items to the telemetry item)\r\n */\r\n 
PostChannel.prototype.processTelemetry = function (ev, itemCtx) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Sets the event queue limits at runtime (after initialization), if the number of queued events is greater than the\r\n * eventLimit or autoFlushLimit then a flush() operation will be scheduled.\r\n * @param eventLimit The number of events that can be kept in memory before the SDK starts to drop events. If the value passed is less than or\r\n * equal to zero the value will be reset to the default (10,000).\r\n * @param autoFlushLimit When defined, once this number of events has been queued the system perform a flush() to send the queued events\r\n * without waiting for the normal schedule timers. Passing undefined, null or a value less than or equal to zero will disable the auto flush.\r\n */\r\n PostChannel.prototype.setEventQueueLimits = function (eventLimit, autoFlushLimit) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Pause the transmission of any requests\r\n */\r\n PostChannel.prototype.pause = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Resumes transmission of events.\r\n */\r\n PostChannel.prototype.resume = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Add handler to be executed with request response text.\r\n */\r\n PostChannel.prototype.addResponseHandler = function (responseHanlder) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return null;\r\n };\r\n /**\r\n * Flush to send data immediately; channel should default to sending data asynchronously. 
If executing asynchronously (the default) and\r\n * you DO NOT pass a callback function then a [IPromise](https://nevware21.github.io/ts-async/typedoc/interfaces/IPromise.html)\r\n * will be returned which will resolve once the flush is complete. The actual implementation of the `IPromise`\r\n * will be a native Promise (if supported) or the default as supplied by [ts-async library](https://github.com/nevware21/ts-async)\r\n * @param async - send data asynchronously when true\r\n * @param callBack - if specified, notify caller when send is complete, the channel should return true to indicate to the caller that it will be called.\r\n * If the caller doesn't return true the caller should assume that it may never be called.\r\n * @param sendReason - specify the reason that you are calling \"flush\" defaults to ManualFlush (1) if not specified\r\n * @returns - If a callback is provided `true` to indicate that callback will be called after the flush is complete otherwise the caller\r\n * should assume that any provided callback will never be called, Nothing or if occurring asynchronously a\r\n * [IPromise](https://nevware21.github.io/ts-async/typedoc/interfaces/IPromise.html) which will be resolved once the unload is complete,\r\n * the [IPromise](https://nevware21.github.io/ts-async/typedoc/interfaces/IPromise.html) will only be returned when no callback is provided\r\n * and async is true.\r\n */\r\n PostChannel.prototype.flush = function (async, callBack, sendReason) {\r\n if (async === void 0) { async = true; }\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Set AuthMsaDeviceTicket header\r\n * @param ticket - Ticket value.\r\n */\r\n PostChannel.prototype.setMsaAuthTicket = function (ticket) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Check if there are any events waiting to be scheduled for sending.\r\n * @returns True if there are 
events, false otherwise.\r\n */\r\n PostChannel.prototype.hasEvents = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return false;\r\n };\r\n /**\r\n * Load custom transmission profiles. Each profile should have timers for real time, and normal and can\r\n * optionally specify the immediate latency time in ms (defaults to 0 when not defined). Each profile should\r\n * make sure that a each normal latency timer is a multiple of the real-time latency and the immediate\r\n * is smaller than the real-time.\r\n * Setting the timer value to -1 means that the events for that latency will not be scheduled to be sent.\r\n * Note that once a latency has been set to not send, all latencies below it will also not be sent. The\r\n * timers should be in the form of [normal, high, [immediate]].\r\n * e.g Custom:\r\n * [10,5] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms\r\n * [10,5,1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 1ms\r\n * [10,5,0] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms\r\n * [10,5,-1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate events will not be\r\n * scheduled on their own and but they will be included with real-time or normal events as the first events in a batch.\r\n * This also removes any previously loaded custom profiles.\r\n * @param profiles - A dictionary containing the transmit profiles.\r\n */\r\n PostChannel.prototype._loadTransmitProfiles = function (profiles) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Set the transmit profile to be used. 
This will change the transmission timers\r\n * based on the transmit profile.\r\n * @param profileName - The name of the transmit profile to be used.\r\n */\r\n PostChannel.prototype._setTransmitProfile = function (profileName) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Backs off transmission. This exponentially increases all the timers.\r\n */\r\n PostChannel.prototype._backOffTransmission = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Clears backoff for transmission.\r\n */\r\n PostChannel.prototype._clearBackOff = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n return PostChannel;\r\n}(BaseTelemetryPlugin));\r\nexport { PostChannel };\r\n//# sourceMappingURL=PostChannel.js.map"],"names":[],"mappings":";;;;;AAAA,gFAAkC;AAClC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;;;;;;;;;;wDAwHM,CAAC;;;;;;uBACgB;AACvB;AACA;AACA"}
1
+ {"version":3,"file":"PostChannel.js.map","sources":["PostChannel.js"],"sourcesContent":["import { __extends } from \"tslib\";\r\n/**\r\n* PostManager.ts\r\n* @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)\r\n* @copyright Microsoft 2018-2020\r\n*/\r\nimport dynamicProto from \"@microsoft/dynamicproto-js\";\r\nimport { BaseTelemetryPlugin, EventsDiscardedReason, _throwInternal, addPageHideEventListener, addPageShowEventListener, addPageUnloadEventListener, arrForEach, createProcessTelemetryContext, createUniqueNamespace, doPerf, getWindow, isChromium, isGreaterThanZero, isNumber, mergeEvtNamespace, objForEachKey, onConfigChange, optimizeObject, proxyFunctions, removePageHideEventListener, removePageShowEventListener, removePageUnloadEventListener, setProcessTelemetryTimings } from \"@microsoft/1ds-core-js\";\r\nimport { createPromise } from \"@nevware21/ts-async\";\r\nimport { objDeepFreeze } from \"@nevware21/ts-utils\";\r\nimport { BE_PROFILE, NRT_PROFILE, RT_PROFILE } from \"./DataModels\";\r\nimport { EventBatch } from \"./EventBatch\";\r\nimport { HttpManager } from \"./HttpManager\";\r\nimport { STR_MSA_DEVICE_TICKET, STR_TRACE, STR_USER } from \"./InternalConstants\";\r\nimport { retryPolicyGetMillisToBackoffForRetry } from \"./RetryPolicy\";\r\nimport { createTimeoutWrapper } from \"./TimeoutOverrideWrapper\";\r\nimport { _DYN_AUTO_FLUSH_EVENTS_LI13, _DYN_BASE_DATA, _DYN_BATCHES, _DYN_CAN_SEND_REQUEST, _DYN_CLEAR_TIMEOUT_OVERRI3, _DYN_CONCAT, _DYN_COUNT, _DYN_DATA, _DYN_DISABLE_AUTO_BATCH_F14, _DYN_DISABLE_OPTIMIZE_OBJ, _DYN_DISABLE_TELEMETRY, _DYN_EVENTS, _DYN_EVENTS_LIMIT_IN_MEM, _DYN_GET_WPARAM, _DYN_IDENTIFIER, _DYN_IGNORE_MC1_MS0_COOKI12, _DYN_INITIALIZE, _DYN_IS_COMPLETELY_IDLE, _DYN_I_KEY, _DYN_LATENCY, _DYN_LENGTH, _DYN_OVERRIDE_INSTRUMENTA15, _DYN_PUSH, _DYN_SEND_ATTEMPT, _DYN_SEND_POST, _DYN_SEND_QUEUED_REQUESTS, _DYN_SEND_SYNCHRONOUS_BAT9, _DYN_SET_TIMEOUT_OVERRIDE, _DYN_SET_UNLOADING, _DYN_SPLICE, _DYN_SPLIT, 
_DYN_SYNC, _DYN__BACK_OFF_TRANSMISSI11 } from \"./__DynamicConstants\";\r\nvar FlushCheckTimer = 0.250; // This needs to be in seconds, so this is 250ms\r\nvar MaxNumberEventPerBatch = 500;\r\nvar EventsDroppedAtOneTime = 20;\r\nvar MaxSendAttempts = 6;\r\nvar MaxSyncUnloadSendAttempts = 2; // Assuming 2 based on beforeunload and unload\r\nvar MaxBackoffCount = 4;\r\nvar MaxConnections = 2;\r\nvar MaxRequestRetriesBeforeBackoff = 1;\r\nvar MaxEventsLimitInMem = 10000;\r\nvar strEventsDiscarded = \"eventsDiscarded\";\r\nvar undefValue = undefined;\r\n/**\r\n * The default settings for the config.\r\n * WE MUST include all defaults here to ensure that the config is created with all of the properties\r\n * defined as dynamic.\r\n */\r\nvar defaultPostChannelConfig = objDeepFreeze({\r\n eventsLimitInMem: { isVal: isGreaterThanZero, v: MaxEventsLimitInMem },\r\n immediateEventLimit: { isVal: isGreaterThanZero, v: 500 },\r\n autoFlushEventsLimit: { isVal: isGreaterThanZero, v: 0 },\r\n disableAutoBatchFlushLimit: false,\r\n httpXHROverride: { isVal: isOverrideFn, v: undefValue },\r\n overrideInstrumentationKey: undefValue,\r\n overrideEndpointUrl: undefValue,\r\n disableTelemetry: false,\r\n ignoreMc1Ms0CookieProcessing: false,\r\n setTimeoutOverride: undefValue,\r\n clearTimeoutOverride: undefValue,\r\n payloadPreprocessor: undefValue,\r\n payloadListener: undefValue,\r\n disableEventTimings: undefValue,\r\n valueSanitizer: undefValue,\r\n stringifyObjects: undefValue,\r\n enableCompoundKey: undefValue,\r\n disableOptimizeObj: false,\r\n // disableCacheHeader: undefValue, // See Task #7178858 - Collector requires a change to support this\r\n transports: undefValue,\r\n unloadTransports: undefValue,\r\n useSendBeacon: undefValue,\r\n disableFetchKeepAlive: undefValue,\r\n avoidOptions: false,\r\n xhrTimeout: undefValue,\r\n disableXhrSync: undefValue,\r\n alwaysUseXhrOverride: false,\r\n maxEventRetryAttempts: { isVal: isNumber, v: MaxSendAttempts },\r\n 
maxUnloadEventRetryAttempts: { isVal: isNumber, v: MaxSyncUnloadSendAttempts },\r\n addNoResponse: undefValue\r\n});\r\nfunction isOverrideFn(httpXHROverride) {\r\n return httpXHROverride && httpXHROverride[_DYN_SEND_POST /* @min:%2esendPOST */];\r\n}\r\n/**\r\n * Class that manages adding events to inbound queues and batching of events\r\n * into requests.\r\n * @group Classes\r\n * @group Entrypoint\r\n */\r\nvar PostChannel = /** @class */ (function (_super) {\r\n __extends(PostChannel, _super);\r\n function PostChannel() {\r\n var _this = _super.call(this) || this;\r\n _this.identifier = \"PostChannel\";\r\n _this.priority = 1011;\r\n _this.version = '4.0.2-nightly3.2307-25';\r\n var _postConfig;\r\n var _isTeardownCalled = false;\r\n var _flushCallbackQueue = [];\r\n var _flushCallbackTimer;\r\n var _paused = false;\r\n var _immediateQueueSize = 0;\r\n var _immediateQueueSizeLimit;\r\n var _queueSize = 0;\r\n var _queueSizeLimit;\r\n var _profiles = {};\r\n var _currentProfile = RT_PROFILE;\r\n var _scheduledTimer;\r\n var _immediateTimer;\r\n var _currentBackoffCount;\r\n var _timerCount;\r\n var _httpManager;\r\n var _batchQueues;\r\n var _autoFlushEventsLimit;\r\n // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])\r\n var _autoFlushBatchLimit;\r\n var _delayedBatchSendLatency;\r\n var _delayedBatchReason;\r\n var _optimizeObject;\r\n var _isPageUnloadTriggered;\r\n var _maxEventSendAttempts;\r\n var _maxUnloadEventSendAttempts;\r\n var _evtNamespace;\r\n var _timeoutWrapper;\r\n var _ignoreMc1Ms0CookieProcessing;\r\n var _disableAutoBatchFlushLimit;\r\n var _notificationManager;\r\n var _unloadHandlersAdded;\r\n var _overrideInstrumentationKey;\r\n var _disableTelemetry;\r\n dynamicProto(PostChannel, _this, function (_self, _base) {\r\n _initDefaults();\r\n // Special internal method to allow the DebugPlugin to hook embedded objects\r\n 
_self[\"_getDbgPlgTargets\"] = function () {\r\n return [_httpManager, _postConfig];\r\n };\r\n _self[_DYN_INITIALIZE /* @min:%2einitialize */] = function (theConfig, core, extensions) {\r\n doPerf(core, function () { return \"PostChannel:initialize\"; }, function () {\r\n _base[_DYN_INITIALIZE /* @min:%2einitialize */](theConfig, core, extensions);\r\n _notificationManager = core.getNotifyMgr();\r\n try {\r\n _evtNamespace = mergeEvtNamespace(createUniqueNamespace(_self[_DYN_IDENTIFIER /* @min:%2eidentifier */]), core.evtNamespace && core.evtNamespace());\r\n _self._addHook(onConfigChange(theConfig, function (details) {\r\n var coreConfig = details.cfg;\r\n var ctx = createProcessTelemetryContext(null, coreConfig, core);\r\n _postConfig = ctx.getExtCfg(_self[_DYN_IDENTIFIER /* @min:%2eidentifier */], defaultPostChannelConfig);\r\n _timeoutWrapper = createTimeoutWrapper(_postConfig[_DYN_SET_TIMEOUT_OVERRIDE /* @min:%2esetTimeoutOverride */], _postConfig[_DYN_CLEAR_TIMEOUT_OVERRI3 /* @min:%2eclearTimeoutOverride */]);\r\n // Only try and use the optimizeObject() if this appears to be a chromium based browser and it has not been explicitly disabled\r\n _optimizeObject = !_postConfig[_DYN_DISABLE_OPTIMIZE_OBJ /* @min:%2edisableOptimizeObj */] && isChromium();\r\n _ignoreMc1Ms0CookieProcessing = _postConfig[_DYN_IGNORE_MC1_MS0_COOKI12 /* @min:%2eignoreMc1Ms0CookieProcessing */];\r\n _hookWParam(core); // _hookWParam uses _ignoreMc1Ms0CookieProcessing\r\n _queueSizeLimit = _postConfig[_DYN_EVENTS_LIMIT_IN_MEM /* @min:%2eeventsLimitInMem */];\r\n _immediateQueueSizeLimit = _postConfig.immediateEventLimit;\r\n _autoFlushEventsLimit = _postConfig[_DYN_AUTO_FLUSH_EVENTS_LI13 /* @min:%2eautoFlushEventsLimit */];\r\n _maxEventSendAttempts = _postConfig.maxEventRetryAttempts;\r\n _maxUnloadEventSendAttempts = _postConfig.maxUnloadEventRetryAttempts;\r\n _disableAutoBatchFlushLimit = _postConfig[_DYN_DISABLE_AUTO_BATCH_F14 /* @min:%2edisableAutoBatchFlushLimit */];\r\n 
_setAutoLimits();\r\n // Override iKey if provided in Post config if provided for during initialization\r\n _overrideInstrumentationKey = _postConfig[_DYN_OVERRIDE_INSTRUMENTA15 /* @min:%2eoverrideInstrumentationKey */];\r\n // DisableTelemetry was defined in the config provided during initialization\r\n _disableTelemetry = !!_postConfig[_DYN_DISABLE_TELEMETRY /* @min:%2edisableTelemetry */];\r\n if (_unloadHandlersAdded) {\r\n _removeUnloadHandlers();\r\n }\r\n var excludePageUnloadEvents = coreConfig.disablePageUnloadEvents || [];\r\n // When running in Web browsers try to send all telemetry if page is unloaded\r\n _unloadHandlersAdded = addPageUnloadEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);\r\n _unloadHandlersAdded = addPageHideEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace) || _unloadHandlersAdded;\r\n _unloadHandlersAdded = addPageShowEventListener(_handleShowEvents, coreConfig.disablePageShowEvents, _evtNamespace) || _unloadHandlersAdded;\r\n }));\r\n // only initialize the manager once\r\n _httpManager[_DYN_INITIALIZE /* @min:%2einitialize */](theConfig, _self.core, _self);\r\n }\r\n catch (e) {\r\n // resetting the initialized state because of failure\r\n _self.setInitialized(false);\r\n throw e;\r\n }\r\n }, function () { return ({ theConfig: theConfig, core: core, extensions: extensions }); });\r\n };\r\n _self.processTelemetry = function (ev, itemCtx) {\r\n setProcessTelemetryTimings(ev, _self[_DYN_IDENTIFIER /* @min:%2eidentifier */]);\r\n itemCtx = itemCtx || _self._getTelCtx(itemCtx);\r\n var event = ev;\r\n if (!_disableTelemetry && !_isTeardownCalled) {\r\n // Override iKey if provided in Post config if provided for during initialization\r\n if (_overrideInstrumentationKey) {\r\n event[_DYN_I_KEY /* @min:%2eiKey */] = _overrideInstrumentationKey;\r\n }\r\n _addEventToQueues(event, true);\r\n if (_isPageUnloadTriggered) {\r\n // Unload event has been received so we need to try and flush new 
events\r\n _releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);\r\n }\r\n else {\r\n _scheduleTimer();\r\n }\r\n }\r\n _self.processNext(event, itemCtx);\r\n };\r\n _self._doTeardown = function (unloadCtx, unloadState) {\r\n _releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);\r\n _isTeardownCalled = true;\r\n _httpManager.teardown();\r\n _removeUnloadHandlers();\r\n // Just register to remove all events associated with this namespace\r\n _initDefaults();\r\n };\r\n function _removeUnloadHandlers() {\r\n removePageUnloadEventListener(null, _evtNamespace);\r\n removePageHideEventListener(null, _evtNamespace);\r\n removePageShowEventListener(null, _evtNamespace);\r\n }\r\n function _hookWParam(core) {\r\n var existingGetWParamMethod = core[_DYN_GET_WPARAM /* @min:%2egetWParam */];\r\n core[_DYN_GET_WPARAM /* @min:%2egetWParam */] = function () {\r\n var wparam = 0;\r\n if (_ignoreMc1Ms0CookieProcessing) {\r\n wparam = wparam | 2;\r\n }\r\n return wparam | existingGetWParamMethod.call(core);\r\n };\r\n }\r\n // Moving event handlers out from the initialize closure so that any local variables can be garbage collected\r\n function _handleUnloadEvents(evt) {\r\n var theEvt = evt || getWindow().event; // IE 8 does not pass the event\r\n if (theEvt.type !== \"beforeunload\") {\r\n // Only set the unload trigger if not beforeunload event as beforeunload can be cancelled while the other events can't\r\n _isPageUnloadTriggered = true;\r\n _httpManager[_DYN_SET_UNLOADING /* @min:%2esetUnloading */](_isPageUnloadTriggered);\r\n }\r\n _releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 /* SendRequestReason.Unload */);\r\n }\r\n function _handleShowEvents(evt) {\r\n // Handle the page becoming visible again\r\n _isPageUnloadTriggered = false;\r\n _httpManager[_DYN_SET_UNLOADING /* @min:%2esetUnloading */](_isPageUnloadTriggered);\r\n }\r\n function _addEventToQueues(event, append) {\r\n // If send attempt field 
is undefined we should set it to 0.\r\n if (!event[_DYN_SEND_ATTEMPT /* @min:%2esendAttempt */]) {\r\n event[_DYN_SEND_ATTEMPT /* @min:%2esendAttempt */] = 0;\r\n }\r\n // Add default latency\r\n if (!event[_DYN_LATENCY /* @min:%2elatency */]) {\r\n event[_DYN_LATENCY /* @min:%2elatency */] = 1 /* EventLatencyValue.Normal */;\r\n }\r\n // Remove extra AI properties if present\r\n if (event.ext && event.ext[STR_TRACE]) {\r\n delete (event.ext[STR_TRACE]);\r\n }\r\n if (event.ext && event.ext[STR_USER] && event.ext[STR_USER][\"id\"]) {\r\n delete (event.ext[STR_USER][\"id\"]);\r\n }\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event.ext = optimizeObject(event.ext);\r\n if (event[_DYN_BASE_DATA /* @min:%2ebaseData */]) {\r\n event[_DYN_BASE_DATA /* @min:%2ebaseData */] = optimizeObject(event[_DYN_BASE_DATA /* @min:%2ebaseData */]);\r\n }\r\n if (event[_DYN_DATA /* @min:%2edata */]) {\r\n event[_DYN_DATA /* @min:%2edata */] = optimizeObject(event[_DYN_DATA /* @min:%2edata */]);\r\n }\r\n }\r\n if (event[_DYN_SYNC /* @min:%2esync */]) {\r\n // If the transmission is backed off then do not send synchronous events.\r\n // We will convert these events to Real time latency instead.\r\n if (_currentBackoffCount || _paused) {\r\n event[_DYN_LATENCY /* @min:%2elatency */] = 3 /* EventLatencyValue.RealTime */;\r\n event[_DYN_SYNC /* @min:%2esync */] = false;\r\n }\r\n else {\r\n // Log the event synchronously\r\n if (_httpManager) {\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event = optimizeObject(event);\r\n }\r\n _httpManager[_DYN_SEND_SYNCHRONOUS_BAT9 /* @min:%2esendSynchronousBatch */](EventBatch.create(event[_DYN_I_KEY /* @min:%2eiKey */], [event]), event[_DYN_SYNC /* @min:%2esync */] === true ? 
1 /* EventSendType.Synchronous */ : event[_DYN_SYNC /* @min:%2esync */], 3 /* SendRequestReason.SyncEvent */);\r\n return;\r\n }\r\n }\r\n }\r\n var evtLatency = event[_DYN_LATENCY /* @min:%2elatency */];\r\n var queueSize = _queueSize;\r\n var queueLimit = _queueSizeLimit;\r\n if (evtLatency === 4 /* EventLatencyValue.Immediate */) {\r\n queueSize = _immediateQueueSize;\r\n queueLimit = _immediateQueueSizeLimit;\r\n }\r\n var eventDropped = false;\r\n // Only add the event if the queue isn't full or it's a direct event (which don't add to the queue sizes)\r\n if (queueSize < queueLimit) {\r\n eventDropped = !_addEventToProperQueue(event, append);\r\n }\r\n else {\r\n var dropLatency = 1 /* EventLatencyValue.Normal */;\r\n var dropNumber = EventsDroppedAtOneTime;\r\n if (evtLatency === 4 /* EventLatencyValue.Immediate */) {\r\n // Only drop other immediate events as they are not technically sharing the general queue\r\n dropLatency = 4 /* EventLatencyValue.Immediate */;\r\n dropNumber = 1;\r\n }\r\n // Drop old event from lower or equal latency\r\n eventDropped = true;\r\n if (_dropEventWithLatencyOrLess(event[_DYN_I_KEY /* @min:%2eiKey */], event[_DYN_LATENCY /* @min:%2elatency */], dropLatency, dropNumber)) {\r\n eventDropped = !_addEventToProperQueue(event, append);\r\n }\r\n }\r\n if (eventDropped) {\r\n // Can't drop events from current queues because the all the slots are taken by queues that are being flushed.\r\n _notifyEvents(strEventsDiscarded, [event], EventsDiscardedReason.QueueFull);\r\n }\r\n }\r\n _self.setEventQueueLimits = function (eventLimit, autoFlushLimit) {\r\n _postConfig[_DYN_EVENTS_LIMIT_IN_MEM /* @min:%2eeventsLimitInMem */] = _queueSizeLimit = isGreaterThanZero(eventLimit) ? eventLimit : MaxEventsLimitInMem;\r\n _postConfig[_DYN_AUTO_FLUSH_EVENTS_LI13 /* @min:%2eautoFlushEventsLimit */] = _autoFlushEventsLimit = isGreaterThanZero(autoFlushLimit) ? 
autoFlushLimit : 0;\r\n _setAutoLimits();\r\n // We only do this check here as during normal event addition if the queue is > then events start getting dropped\r\n var doFlush = _queueSize > eventLimit;\r\n if (!doFlush && _autoFlushBatchLimit > 0) {\r\n // Check the auto flush max batch size\r\n for (var latency = 1 /* EventLatencyValue.Normal */; !doFlush && latency <= 3 /* EventLatencyValue.RealTime */; latency++) {\r\n var batchQueue = _batchQueues[latency];\r\n if (batchQueue && batchQueue[_DYN_BATCHES /* @min:%2ebatches */]) {\r\n arrForEach(batchQueue[_DYN_BATCHES /* @min:%2ebatches */], function (theBatch) {\r\n if (theBatch && theBatch[_DYN_COUNT /* @min:%2ecount */]() >= _autoFlushBatchLimit) {\r\n // If any 1 batch is > than the limit then trigger an auto flush\r\n doFlush = true;\r\n }\r\n });\r\n }\r\n }\r\n }\r\n _performAutoFlush(true, doFlush);\r\n };\r\n _self.pause = function () {\r\n _clearScheduledTimer();\r\n _paused = true;\r\n _httpManager.pause();\r\n };\r\n _self.resume = function () {\r\n _paused = false;\r\n _httpManager.resume();\r\n _scheduleTimer();\r\n };\r\n _self._loadTransmitProfiles = function (profiles) {\r\n _resetTransmitProfiles();\r\n objForEachKey(profiles, function (profileName, profileValue) {\r\n var profLen = profileValue[_DYN_LENGTH /* @min:%2elength */];\r\n if (profLen >= 2) {\r\n var directValue = (profLen > 2 ? profileValue[2] : 0);\r\n profileValue[_DYN_SPLICE /* @min:%2esplice */](0, profLen - 2);\r\n // Make sure if a higher latency is set to not send then don't send lower latency\r\n if (profileValue[1] < 0) {\r\n profileValue[0] = -1;\r\n }\r\n // Make sure each latency is multiple of the latency higher then it. 
If not a multiple\r\n // we round up so that it becomes a multiple.\r\n if (profileValue[1] > 0 && profileValue[0] > 0) {\r\n var timerMultiplier = profileValue[0] / profileValue[1];\r\n profileValue[0] = Math.ceil(timerMultiplier) * profileValue[1];\r\n }\r\n // Add back the direct profile timeout\r\n if (directValue >= 0 && profileValue[1] >= 0 && directValue > profileValue[1]) {\r\n // Make sure if it's not disabled (< 0) then make sure it's not larger than RealTime\r\n directValue = profileValue[1];\r\n }\r\n profileValue[_DYN_PUSH /* @min:%2epush */](directValue);\r\n _profiles[profileName] = profileValue;\r\n }\r\n });\r\n };\r\n _self.flush = function (async, callback, sendReason) {\r\n if (async === void 0) { async = true; }\r\n var result;\r\n if (!_paused) {\r\n sendReason = sendReason || 1 /* SendRequestReason.ManualFlush */;\r\n if (async) {\r\n if (!callback) {\r\n result = createPromise(function (resolve) {\r\n // Set the callback to the promise resolve callback\r\n callback = resolve;\r\n });\r\n }\r\n if (_flushCallbackTimer == null) {\r\n // Clear the normal schedule timer as we are going to try and flush ASAP\r\n _clearScheduledTimer();\r\n // Move all queued events to the HttpManager so that we don't discard new events (Auto flush scenario)\r\n _queueBatches(1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, sendReason);\r\n _flushCallbackTimer = _createTimer(function () {\r\n _flushCallbackTimer = null;\r\n _flushImpl(callback, sendReason);\r\n }, 0);\r\n }\r\n else {\r\n // Even if null (no callback) this will ensure after the flushImpl finishes waiting\r\n // for a completely idle connection it will attempt to re-flush any queued events on the next cycle\r\n _flushCallbackQueue[_DYN_PUSH /* @min:%2epush */](callback);\r\n }\r\n }\r\n else {\r\n // Clear the normal schedule timer as we are going to try and flush ASAP\r\n var cleared = _clearScheduledTimer();\r\n // Now cause all queued events to be sent synchronously\r\n 
_sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal */, 1 /* EventSendType.Synchronous */, sendReason);\r\n callback && callback();\r\n if (cleared) {\r\n // restart the normal event timer if it was cleared\r\n _scheduleTimer();\r\n }\r\n }\r\n }\r\n return result;\r\n };\r\n _self.setMsaAuthTicket = function (ticket) {\r\n _httpManager.addHeader(STR_MSA_DEVICE_TICKET, ticket);\r\n };\r\n _self.hasEvents = _hasEvents;\r\n _self._setTransmitProfile = function (profileName) {\r\n if (_currentProfile !== profileName && _profiles[profileName] !== undefined) {\r\n _clearScheduledTimer();\r\n _currentProfile = profileName;\r\n _scheduleTimer();\r\n }\r\n };\r\n proxyFunctions(_self, function () { return _httpManager; }, [\"addResponseHandler\"]);\r\n /**\r\n * Batch and send events currently in the queue for the given latency.\r\n * @param latency - Latency for which to send events.\r\n */\r\n function _sendEventsForLatencyAndAbove(latency, sendType, sendReason) {\r\n var queued = _queueBatches(latency, sendType, sendReason);\r\n // Always trigger the request as while the post channel may not have queued additional events, the httpManager may already have waiting events\r\n _httpManager[_DYN_SEND_QUEUED_REQUESTS /* @min:%2esendQueuedRequests */](sendType, sendReason);\r\n return queued;\r\n }\r\n function _hasEvents() {\r\n return _queueSize > 0;\r\n }\r\n /**\r\n * Try to schedule the timer after which events will be sent. 
If there are\r\n * no events to be sent, or there is already a timer scheduled, or the\r\n * http manager doesn't have any idle connections this method is no-op.\r\n */\r\n function _scheduleTimer() {\r\n // If we had previously attempted to send requests, but the http manager didn't have any idle connections then the requests where delayed\r\n // so try and requeue then again now\r\n if (_delayedBatchSendLatency >= 0 && _queueBatches(_delayedBatchSendLatency, 0 /* EventSendType.Batched */, _delayedBatchReason)) {\r\n _httpManager[_DYN_SEND_QUEUED_REQUESTS /* @min:%2esendQueuedRequests */](0 /* EventSendType.Batched */, _delayedBatchReason);\r\n }\r\n if (_immediateQueueSize > 0 && !_immediateTimer && !_paused) {\r\n // During initialization _profiles enforce that the direct [2] is less than real time [1] timer value\r\n // If the immediateTimeout is disabled the immediate events will be sent with Real Time events\r\n var immediateTimeOut = _profiles[_currentProfile][2];\r\n if (immediateTimeOut >= 0) {\r\n _immediateTimer = _createTimer(function () {\r\n _immediateTimer = null;\r\n // Only try to send direct events\r\n _sendEventsForLatencyAndAbove(4 /* EventLatencyValue.Immediate */, 0 /* EventSendType.Batched */, 1 /* SendRequestReason.NormalSchedule */);\r\n _scheduleTimer();\r\n }, immediateTimeOut);\r\n }\r\n }\r\n // During initialization the _profiles enforce that the normal [0] is a multiple of the real time [1] timer value\r\n var timeOut = _profiles[_currentProfile][1];\r\n if (!_scheduledTimer && !_flushCallbackTimer && timeOut >= 0 && !_paused) {\r\n if (_hasEvents()) {\r\n _scheduledTimer = _createTimer(function () {\r\n _scheduledTimer = null;\r\n _sendEventsForLatencyAndAbove(_timerCount === 0 ? 
3 /* EventLatencyValue.RealTime */ : 1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, 1 /* SendRequestReason.NormalSchedule */);\r\n // Increment the count for next cycle\r\n _timerCount++;\r\n _timerCount %= 2;\r\n _scheduleTimer();\r\n }, timeOut);\r\n }\r\n else {\r\n _timerCount = 0;\r\n }\r\n }\r\n }\r\n _self[_DYN__BACK_OFF_TRANSMISSI11 /* @min:%2e_backOffTransmission */] = function () {\r\n if (_currentBackoffCount < MaxBackoffCount) {\r\n _currentBackoffCount++;\r\n _clearScheduledTimer();\r\n _scheduleTimer();\r\n }\r\n };\r\n _self._clearBackOff = function () {\r\n if (_currentBackoffCount) {\r\n _currentBackoffCount = 0;\r\n _clearScheduledTimer();\r\n _scheduleTimer();\r\n }\r\n };\r\n function _initDefaults() {\r\n _postConfig = null;\r\n _isTeardownCalled = false;\r\n _flushCallbackQueue = [];\r\n _flushCallbackTimer = null;\r\n _paused = false;\r\n _immediateQueueSize = 0;\r\n _immediateQueueSizeLimit = 500;\r\n _queueSize = 0;\r\n _queueSizeLimit = MaxEventsLimitInMem;\r\n _profiles = {};\r\n _currentProfile = RT_PROFILE;\r\n _scheduledTimer = null;\r\n _immediateTimer = null;\r\n _currentBackoffCount = 0;\r\n _timerCount = 0;\r\n _batchQueues = {};\r\n _autoFlushEventsLimit = 0;\r\n _unloadHandlersAdded = false;\r\n // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])\r\n _autoFlushBatchLimit = 0;\r\n _delayedBatchSendLatency = -1;\r\n _delayedBatchReason = null;\r\n _optimizeObject = true;\r\n _isPageUnloadTriggered = false;\r\n _maxEventSendAttempts = MaxSendAttempts;\r\n _maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;\r\n _evtNamespace = null;\r\n _overrideInstrumentationKey = null;\r\n _disableTelemetry = false;\r\n _timeoutWrapper = createTimeoutWrapper();\r\n _httpManager = new HttpManager(MaxNumberEventPerBatch, MaxConnections, MaxRequestRetriesBeforeBackoff, {\r\n requeue: _requeueEvents,\r\n send: 
_sendingEvent,\r\n sent: _eventsSentEvent,\r\n drop: _eventsDropped,\r\n rspFail: _eventsResponseFail,\r\n oth: _otherEvent\r\n });\r\n _initializeProfiles();\r\n _clearQueues();\r\n _setAutoLimits();\r\n }\r\n function _createTimer(theTimerFunc, timeOut) {\r\n // If the transmission is backed off make the timer at least 1 sec to allow for back off.\r\n if (timeOut === 0 && _currentBackoffCount) {\r\n timeOut = 1;\r\n }\r\n var timerMultiplier = 1000;\r\n if (_currentBackoffCount) {\r\n timerMultiplier = retryPolicyGetMillisToBackoffForRetry(_currentBackoffCount - 1);\r\n }\r\n return _timeoutWrapper.set(theTimerFunc, timeOut * timerMultiplier);\r\n }\r\n function _clearScheduledTimer() {\r\n if (_scheduledTimer !== null) {\r\n _scheduledTimer.cancel();\r\n _scheduledTimer = null;\r\n _timerCount = 0;\r\n return true;\r\n }\r\n return false;\r\n }\r\n // Try to send all queued events using beacons if available\r\n function _releaseAllQueues(sendType, sendReason) {\r\n _clearScheduledTimer();\r\n // Cancel all flush callbacks\r\n if (_flushCallbackTimer) {\r\n _flushCallbackTimer.cancel();\r\n _flushCallbackTimer = null;\r\n }\r\n if (!_paused) {\r\n // Queue all the remaining requests to be sent. The requests will be sent using HTML5 Beacons if they are available.\r\n _sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal */, sendType, sendReason);\r\n }\r\n }\r\n /**\r\n * Add empty queues for all latencies in the inbound queues map. This is called\r\n * when Transmission Manager is being flushed. 
This ensures that new events added\r\n * after flush are stored separately till we flush the current events.\r\n */\r\n function _clearQueues() {\r\n _batchQueues[4 /* EventLatencyValue.Immediate */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[3 /* EventLatencyValue.RealTime */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[2 /* EventLatencyValue.CostDeferred */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n _batchQueues[1 /* EventLatencyValue.Normal */] = {\r\n batches: [],\r\n iKeyMap: {}\r\n };\r\n }\r\n function _getEventBatch(iKey, latency, create) {\r\n var batchQueue = _batchQueues[latency];\r\n if (!batchQueue) {\r\n latency = 1 /* EventLatencyValue.Normal */;\r\n batchQueue = _batchQueues[latency];\r\n }\r\n var eventBatch = batchQueue.iKeyMap[iKey];\r\n if (!eventBatch && create) {\r\n eventBatch = EventBatch.create(iKey);\r\n batchQueue.batches[_DYN_PUSH /* @min:%2epush */](eventBatch);\r\n batchQueue.iKeyMap[iKey] = eventBatch;\r\n }\r\n return eventBatch;\r\n }\r\n function _performAutoFlush(isAsync, doFlush) {\r\n // Only perform the auto flush check if the httpManager has an idle connection and we are not in a backoff situation\r\n if (_httpManager[_DYN_CAN_SEND_REQUEST /* @min:%2ecanSendRequest */]() && !_currentBackoffCount) {\r\n if (_autoFlushEventsLimit > 0 && _queueSize > _autoFlushEventsLimit) {\r\n // Force flushing\r\n doFlush = true;\r\n }\r\n if (doFlush && _flushCallbackTimer == null) {\r\n // Auto flush the queue, adding a callback to avoid the creation of a promise\r\n _self.flush(isAsync, function () { }, 20 /* SendRequestReason.MaxQueuedEvents */);\r\n }\r\n }\r\n }\r\n function _addEventToProperQueue(event, append) {\r\n // v8 performance optimization for iterating over the keys\r\n if (_optimizeObject) {\r\n event = optimizeObject(event);\r\n }\r\n var latency = event[_DYN_LATENCY /* @min:%2elatency */];\r\n var eventBatch = _getEventBatch(event[_DYN_I_KEY /* @min:%2eiKey */], latency, true);\r\n if 
(eventBatch.addEvent(event)) {\r\n if (latency !== 4 /* EventLatencyValue.Immediate */) {\r\n _queueSize++;\r\n // Check for auto flushing based on total events in the queue, but not for requeued or retry events\r\n if (append && event[_DYN_SEND_ATTEMPT /* @min:%2esendAttempt */] === 0) {\r\n // Force the flushing of the batch if the batch (specific iKey / latency combination) reaches it's auto flush limit\r\n _performAutoFlush(!event.sync, _autoFlushBatchLimit > 0 && eventBatch[_DYN_COUNT /* @min:%2ecount */]() >= _autoFlushBatchLimit);\r\n }\r\n }\r\n else {\r\n // Direct events don't need auto flushing as they are scheduled (by default) for immediate delivery\r\n _immediateQueueSize++;\r\n }\r\n return true;\r\n }\r\n return false;\r\n }\r\n function _dropEventWithLatencyOrLess(iKey, latency, currentLatency, dropNumber) {\r\n while (currentLatency <= latency) {\r\n var eventBatch = _getEventBatch(iKey, latency, true);\r\n if (eventBatch && eventBatch[_DYN_COUNT /* @min:%2ecount */]() > 0) {\r\n // Dropped oldest events from lowest possible latency\r\n var droppedEvents = eventBatch[_DYN_SPLIT /* @min:%2esplit */](0, dropNumber);\r\n var droppedCount = droppedEvents[_DYN_COUNT /* @min:%2ecount */]();\r\n if (droppedCount > 0) {\r\n if (currentLatency === 4 /* EventLatencyValue.Immediate */) {\r\n _immediateQueueSize -= droppedCount;\r\n }\r\n else {\r\n _queueSize -= droppedCount;\r\n }\r\n _notifyBatchEvents(strEventsDiscarded, [droppedEvents], EventsDiscardedReason.QueueFull);\r\n return true;\r\n }\r\n }\r\n currentLatency++;\r\n }\r\n // Unable to drop any events -- lets just make sure the queue counts are correct to avoid exhaustion\r\n _resetQueueCounts();\r\n return false;\r\n }\r\n /**\r\n * Internal helper to reset the queue counts, used as a backstop to avoid future queue exhaustion errors\r\n * that might occur because of counting issues.\r\n */\r\n function _resetQueueCounts() {\r\n var immediateQueue = 0;\r\n var normalQueue = 0;\r\n var _loop_1 = 
function (latency) {\r\n var batchQueue = _batchQueues[latency];\r\n if (batchQueue && batchQueue[_DYN_BATCHES /* @min:%2ebatches */]) {\r\n arrForEach(batchQueue[_DYN_BATCHES /* @min:%2ebatches */], function (theBatch) {\r\n if (latency === 4 /* EventLatencyValue.Immediate */) {\r\n immediateQueue += theBatch[_DYN_COUNT /* @min:%2ecount */]();\r\n }\r\n else {\r\n normalQueue += theBatch[_DYN_COUNT /* @min:%2ecount */]();\r\n }\r\n });\r\n }\r\n };\r\n for (var latency = 1 /* EventLatencyValue.Normal */; latency <= 4 /* EventLatencyValue.Immediate */; latency++) {\r\n _loop_1(latency);\r\n }\r\n _queueSize = normalQueue;\r\n _immediateQueueSize = immediateQueue;\r\n }\r\n function _queueBatches(latency, sendType, sendReason) {\r\n var eventsQueued = false;\r\n var isAsync = sendType === 0 /* EventSendType.Batched */;\r\n // Only queue batches (to the HttpManager) if this is a sync request or the httpManager has an idle connection\r\n // Thus keeping the events within the PostChannel until the HttpManager has a connection available\r\n // This is so we can drop \"old\" events if the queue is getting full because we can't successfully send events\r\n if (!isAsync || _httpManager[_DYN_CAN_SEND_REQUEST /* @min:%2ecanSendRequest */]()) {\r\n doPerf(_self.core, function () { return \"PostChannel._queueBatches\"; }, function () {\r\n var droppedEvents = [];\r\n var latencyToProcess = 4 /* EventLatencyValue.Immediate */;\r\n while (latencyToProcess >= latency) {\r\n var batchQueue = _batchQueues[latencyToProcess];\r\n if (batchQueue && batchQueue.batches && batchQueue.batches[_DYN_LENGTH /* @min:%2elength */] > 0) {\r\n arrForEach(batchQueue[_DYN_BATCHES /* @min:%2ebatches */], function (theBatch) {\r\n // Add the batch to the http manager to send the requests\r\n if (!_httpManager.addBatch(theBatch)) {\r\n // The events from this iKey are being dropped (killed)\r\n droppedEvents = droppedEvents[_DYN_CONCAT /* @min:%2econcat */](theBatch[_DYN_EVENTS /* @min:%2eevents 
*/]());\r\n }\r\n else {\r\n eventsQueued = eventsQueued || (theBatch && theBatch[_DYN_COUNT /* @min:%2ecount */]() > 0);\r\n }\r\n if (latencyToProcess === 4 /* EventLatencyValue.Immediate */) {\r\n _immediateQueueSize -= theBatch[_DYN_COUNT /* @min:%2ecount */]();\r\n }\r\n else {\r\n _queueSize -= theBatch[_DYN_COUNT /* @min:%2ecount */]();\r\n }\r\n });\r\n // Remove all batches from this Queue\r\n batchQueue[_DYN_BATCHES /* @min:%2ebatches */] = [];\r\n batchQueue.iKeyMap = {};\r\n }\r\n latencyToProcess--;\r\n }\r\n if (droppedEvents[_DYN_LENGTH /* @min:%2elength */] > 0) {\r\n _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.KillSwitch);\r\n }\r\n if (eventsQueued && _delayedBatchSendLatency >= latency) {\r\n // We have queued events at the same level as the delayed values so clear the setting\r\n _delayedBatchSendLatency = -1;\r\n _delayedBatchReason = 0 /* SendRequestReason.Undefined */;\r\n }\r\n }, function () { return ({ latency: latency, sendType: sendType, sendReason: sendReason }); }, !isAsync);\r\n }\r\n else {\r\n // remember the min latency so that we can re-trigger later\r\n _delayedBatchSendLatency = _delayedBatchSendLatency >= 0 ? 
Math.min(_delayedBatchSendLatency, latency) : latency;\r\n _delayedBatchReason = Math.max(_delayedBatchReason, sendReason);\r\n }\r\n return eventsQueued;\r\n }\r\n /**\r\n * This is the callback method is called as part of the manual flushing process.\r\n * @param callback\r\n * @param sendReason\r\n */\r\n function _flushImpl(callback, sendReason) {\r\n // Add any additional queued events and cause all queued events to be sent asynchronously\r\n _sendEventsForLatencyAndAbove(1 /* EventLatencyValue.Normal */, 0 /* EventSendType.Batched */, sendReason);\r\n // All events (should) have been queue -- lets just make sure the queue counts are correct to avoid queue exhaustion (previous bug #9685112)\r\n _resetQueueCounts();\r\n _waitForIdleManager(function () {\r\n // Only called AFTER the httpManager does not have any outstanding requests\r\n if (callback) {\r\n callback();\r\n }\r\n if (_flushCallbackQueue[_DYN_LENGTH /* @min:%2elength */] > 0) {\r\n _flushCallbackTimer = _createTimer(function () {\r\n _flushCallbackTimer = null;\r\n _flushImpl(_flushCallbackQueue.shift(), sendReason);\r\n }, 0);\r\n }\r\n else {\r\n // No more flush requests\r\n _flushCallbackTimer = null;\r\n // Restart the normal timer schedule\r\n _scheduleTimer();\r\n }\r\n });\r\n }\r\n function _waitForIdleManager(callback) {\r\n if (_httpManager[_DYN_IS_COMPLETELY_IDLE /* @min:%2eisCompletelyIdle */]()) {\r\n callback();\r\n }\r\n else {\r\n _flushCallbackTimer = _createTimer(function () {\r\n _flushCallbackTimer = null;\r\n _waitForIdleManager(callback);\r\n }, FlushCheckTimer);\r\n }\r\n }\r\n /**\r\n * Resets the transmit profiles to the default profiles of Real Time, Near Real Time\r\n * and Best Effort. 
This removes all the custom profiles that were loaded.\r\n */\r\n function _resetTransmitProfiles() {\r\n _clearScheduledTimer();\r\n _initializeProfiles();\r\n _currentProfile = RT_PROFILE;\r\n _scheduleTimer();\r\n }\r\n function _initializeProfiles() {\r\n _profiles = {};\r\n _profiles[RT_PROFILE] = [2, 1, 0];\r\n _profiles[NRT_PROFILE] = [6, 3, 0];\r\n _profiles[BE_PROFILE] = [18, 9, 0];\r\n }\r\n /**\r\n * The notification handler for requeue events\r\n * @ignore\r\n */\r\n function _requeueEvents(batches, reason) {\r\n var droppedEvents = [];\r\n var maxSendAttempts = _maxEventSendAttempts;\r\n if (_isPageUnloadTriggered) {\r\n // If a page unlaod has been triggered reduce the number of times we try to \"retry\"\r\n maxSendAttempts = _maxUnloadEventSendAttempts;\r\n }\r\n arrForEach(batches, function (theBatch) {\r\n if (theBatch && theBatch[_DYN_COUNT /* @min:%2ecount */]() > 0) {\r\n arrForEach(theBatch[_DYN_EVENTS /* @min:%2eevents */](), function (theEvent) {\r\n if (theEvent) {\r\n // Check if the request being added back is for a sync event in which case mark it no longer a sync event\r\n if (theEvent[_DYN_SYNC /* @min:%2esync */]) {\r\n theEvent[_DYN_LATENCY /* @min:%2elatency */] = 4 /* EventLatencyValue.Immediate */;\r\n theEvent[_DYN_SYNC /* @min:%2esync */] = false;\r\n }\r\n if (theEvent[_DYN_SEND_ATTEMPT /* @min:%2esendAttempt */] < maxSendAttempts) {\r\n // Reset the event timings\r\n setProcessTelemetryTimings(theEvent, _self[_DYN_IDENTIFIER /* @min:%2eidentifier */]);\r\n _addEventToQueues(theEvent, false);\r\n }\r\n else {\r\n droppedEvents[_DYN_PUSH /* @min:%2epush */](theEvent);\r\n }\r\n }\r\n });\r\n }\r\n });\r\n if (droppedEvents[_DYN_LENGTH /* @min:%2elength */] > 0) {\r\n _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.NonRetryableStatus);\r\n }\r\n if (_isPageUnloadTriggered) {\r\n // Unload event has been received so we need to try and flush new events\r\n _releaseAllQueues(2 /* EventSendType.SendBeacon */, 2 
/* SendRequestReason.Unload */);\r\n }\r\n }\r\n function _callNotification(evtName, theArgs) {\r\n var manager = (_notificationManager || {});\r\n var notifyFunc = manager[evtName];\r\n if (notifyFunc) {\r\n try {\r\n notifyFunc.apply(manager, theArgs);\r\n }\r\n catch (e) {\r\n _throwInternal(_self.diagLog(), 1 /* eLoggingSeverity.CRITICAL */, 74 /* _eInternalMessageId.NotificationException */, evtName + \" notification failed: \" + e);\r\n }\r\n }\r\n }\r\n function _notifyEvents(evtName, theEvents) {\r\n var extraArgs = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n extraArgs[_i - 2] = arguments[_i];\r\n }\r\n if (theEvents && theEvents[_DYN_LENGTH /* @min:%2elength */] > 0) {\r\n _callNotification(evtName, [theEvents][_DYN_CONCAT /* @min:%2econcat */](extraArgs));\r\n }\r\n }\r\n function _notifyBatchEvents(evtName, batches) {\r\n var extraArgs = [];\r\n for (var _i = 2; _i < arguments.length; _i++) {\r\n extraArgs[_i - 2] = arguments[_i];\r\n }\r\n if (batches && batches[_DYN_LENGTH /* @min:%2elength */] > 0) {\r\n arrForEach(batches, function (theBatch) {\r\n if (theBatch && theBatch[_DYN_COUNT /* @min:%2ecount */]() > 0) {\r\n _callNotification(evtName, [theBatch.events()][_DYN_CONCAT /* @min:%2econcat */](extraArgs));\r\n }\r\n });\r\n }\r\n }\r\n /**\r\n * The notification handler for when batches are about to be sent\r\n * @ignore\r\n */\r\n function _sendingEvent(batches, reason, isSyncRequest) {\r\n if (batches && batches[_DYN_LENGTH /* @min:%2elength */] > 0) {\r\n _callNotification(\"eventsSendRequest\", [(reason >= 1000 /* EventBatchNotificationReason.SendingUndefined */ && reason <= 1999 /* EventBatchNotificationReason.SendingEventMax */ ?\r\n reason - 1000 /* EventBatchNotificationReason.SendingUndefined */ :\r\n 0 /* SendRequestReason.Undefined */), isSyncRequest !== true]);\r\n }\r\n }\r\n /**\r\n * This event represents that a batch of events have been successfully sent and a response received\r\n * @param batches The notification 
handler for when the batches have been successfully sent\r\n * @param reason For this event the reason will always be EventBatchNotificationReason.Complete\r\n */\r\n function _eventsSentEvent(batches, reason) {\r\n _notifyBatchEvents(\"eventsSent\", batches, reason);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _eventsDropped(batches, reason) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, (reason >= 8000 /* EventBatchNotificationReason.EventsDropped */ && reason <= 8999 /* EventBatchNotificationReason.EventsDroppedMax */ ?\r\n reason - 8000 /* EventBatchNotificationReason.EventsDropped */ :\r\n EventsDiscardedReason.Unknown));\r\n }\r\n function _eventsResponseFail(batches) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.NonRetryableStatus);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _otherEvent(batches, reason) {\r\n _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.Unknown);\r\n // Try and schedule the processing timer if we have events\r\n _scheduleTimer();\r\n }\r\n function _setAutoLimits() {\r\n if (!_disableAutoBatchFlushLimit) {\r\n _autoFlushBatchLimit = Math.max(MaxNumberEventPerBatch * (MaxConnections + 1), _queueSizeLimit / 6);\r\n }\r\n else {\r\n _autoFlushBatchLimit = 0;\r\n }\r\n }\r\n });\r\n return _this;\r\n }\r\n /**\r\n * Start the queue manager to batch and send events via post.\r\n * @param config - The core configuration.\r\n */\r\n PostChannel.prototype.initialize = function (coreConfig, core, extensions) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Add an event to the appropriate inbound queue based on its latency.\r\n * @param ev - The event to be added to the queue.\r\n * @param itemCtx - This is the context for the current request, ITelemetryPlugin instances\r\n * can optionally use this to access 
the current core instance or define / pass additional information\r\n * to later plugins (vs appending items to the telemetry item)\r\n */\r\n PostChannel.prototype.processTelemetry = function (ev, itemCtx) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Sets the event queue limits at runtime (after initialization), if the number of queued events is greater than the\r\n * eventLimit or autoFlushLimit then a flush() operation will be scheduled.\r\n * @param eventLimit The number of events that can be kept in memory before the SDK starts to drop events. If the value passed is less than or\r\n * equal to zero the value will be reset to the default (10,000).\r\n * @param autoFlushLimit When defined, once this number of events has been queued the system perform a flush() to send the queued events\r\n * without waiting for the normal schedule timers. Passing undefined, null or a value less than or equal to zero will disable the auto flush.\r\n */\r\n PostChannel.prototype.setEventQueueLimits = function (eventLimit, autoFlushLimit) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Pause the transmission of any requests\r\n */\r\n PostChannel.prototype.pause = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Resumes transmission of events.\r\n */\r\n PostChannel.prototype.resume = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Add handler to be executed with request response text.\r\n */\r\n PostChannel.prototype.addResponseHandler = function (responseHanlder) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return null;\r\n };\r\n /**\r\n * Flush to send data immediately; channel should default to sending data asynchronously. 
If executing asynchronously (the default) and\r\n * you DO NOT pass a callback function then a [IPromise](https://nevware21.github.io/ts-async/typedoc/interfaces/IPromise.html)\r\n * will be returned which will resolve once the flush is complete. The actual implementation of the `IPromise`\r\n * will be a native Promise (if supported) or the default as supplied by [ts-async library](https://github.com/nevware21/ts-async)\r\n * @param async - send data asynchronously when true\r\n * @param callBack - if specified, notify caller when send is complete, the channel should return true to indicate to the caller that it will be called.\r\n * If the caller doesn't return true the caller should assume that it may never be called.\r\n * @param sendReason - specify the reason that you are calling \"flush\" defaults to ManualFlush (1) if not specified\r\n * @returns - If a callback is provided `true` to indicate that callback will be called after the flush is complete otherwise the caller\r\n * should assume that any provided callback will never be called, Nothing or if occurring asynchronously a\r\n * [IPromise](https://nevware21.github.io/ts-async/typedoc/interfaces/IPromise.html) which will be resolved once the unload is complete,\r\n * the [IPromise](https://nevware21.github.io/ts-async/typedoc/interfaces/IPromise.html) will only be returned when no callback is provided\r\n * and async is true.\r\n */\r\n PostChannel.prototype.flush = function (async, callBack, sendReason) {\r\n if (async === void 0) { async = true; }\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Set AuthMsaDeviceTicket header\r\n * @param ticket - Ticket value.\r\n */\r\n PostChannel.prototype.setMsaAuthTicket = function (ticket) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Check if there are any events waiting to be scheduled for sending.\r\n * @returns True if there are 
events, false otherwise.\r\n */\r\n PostChannel.prototype.hasEvents = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return false;\r\n };\r\n /**\r\n * Load custom transmission profiles. Each profile should have timers for real time, and normal and can\r\n * optionally specify the immediate latency time in ms (defaults to 0 when not defined). Each profile should\r\n * make sure that a each normal latency timer is a multiple of the real-time latency and the immediate\r\n * is smaller than the real-time.\r\n * Setting the timer value to -1 means that the events for that latency will not be scheduled to be sent.\r\n * Note that once a latency has been set to not send, all latencies below it will also not be sent. The\r\n * timers should be in the form of [normal, high, [immediate]].\r\n * e.g Custom:\r\n * [10,5] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms\r\n * [10,5,1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 1ms\r\n * [10,5,0] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms\r\n * [10,5,-1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate events will not be\r\n * scheduled on their own and but they will be included with real-time or normal events as the first events in a batch.\r\n * This also removes any previously loaded custom profiles.\r\n * @param profiles - A dictionary containing the transmit profiles.\r\n */\r\n PostChannel.prototype._loadTransmitProfiles = function (profiles) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Set the transmit profile to be used. 
This will change the transmission timers\r\n * based on the transmit profile.\r\n * @param profileName - The name of the transmit profile to be used.\r\n */\r\n PostChannel.prototype._setTransmitProfile = function (profileName) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Backs off transmission. This exponentially increases all the timers.\r\n */\r\n PostChannel.prototype._backOffTransmission = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n /**\r\n * Clears backoff for transmission.\r\n */\r\n PostChannel.prototype._clearBackOff = function () {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n };\r\n return PostChannel;\r\n}(BaseTelemetryPlugin));\r\nexport { PostChannel };\r\n//# sourceMappingURL=PostChannel.js.map"],"names":[],"mappings":";;;;;AAAA,gFAAkC;AAClC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;;;;;;;;;;wDAwHM,CAAC;;;;;uBACgB;AACvB;AACA;AACA"}
@@ -1,5 +1,5 @@
1
1
  /*
2
- * 1DS JS SDK POST plugin, 4.0.1
2
+ * 1DS JS SDK POST plugin, 4.0.2-nightly3.2307-25
3
3
  * Copyright (c) Microsoft and contributors. All rights reserved.
4
4
  * (Microsoft Internal Only)
5
5
  */
@@ -1,5 +1,5 @@
1
1
  /*
2
- * 1DS JS SDK POST plugin, 4.0.1
2
+ * 1DS JS SDK POST plugin, 4.0.2-nightly3.2307-25
3
3
  * Copyright (c) Microsoft and contributors. All rights reserved.
4
4
  * (Microsoft Internal Only)
5
5
  */
@@ -128,11 +128,11 @@ var Serializer = /** @class */ (function () {
128
128
  }
129
129
  lp++;
130
130
  }
131
- if (sizeExceeded && sizeExceeded.length > 0) {
131
+ if (sizeExceeded.length > 0) {
132
132
  payload.sizeExceed.push(EventBatch.create(theBatch.iKey(), sizeExceeded));
133
133
  // Remove the exceeded events from the batch
134
134
  }
135
- if (failedEvts && failedEvts.length > 0) {
135
+ if (failedEvts.length > 0) {
136
136
  payload.failedEvts.push(EventBatch.create(theBatch.iKey(), failedEvts));
137
137
  // Remove the failed events from the batch
138
138
  }
@@ -263,9 +263,8 @@ var Serializer = /** @class */ (function () {
263
263
  // Removed Stub for Serializer.prototype.getEventBlob.
264
264
  // Removed Stub for Serializer.prototype.handleField.
265
265
  // Removed Stub for Serializer.prototype.getSanitizer.
266
- // This is a workaround for an IE8 bug when using dynamicProto() with classes that don't have any
266
+ // This is a workaround for an IE bug when using dynamicProto() with classes that don't have any
267
267
  // non-dynamic functions or static properties/functions when using uglify-js to minify the resulting code.
268
- // this will be removed when ES3 support is dropped.
269
268
  Serializer.__ieDyn=1;
270
269
 
271
270
  return Serializer;
@@ -1 +1 @@
1
- {"version":3,"file":"Serializer.js.map","sources":["Serializer.js"],"sourcesContent":["/**\r\n* Serializer.ts\r\n* @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)\r\n* @copyright Microsoft 2018-2020\r\n*/\r\n// @skip-file-minify\r\nimport dynamicProto from \"@microsoft/dynamicproto-js\";\r\nimport { arrIndexOf, doPerf, getCommonSchemaMetaData, getTenantId, isArray, isValueAssigned, objForEachKey, sanitizeProperty, strStartsWith } from \"@microsoft/1ds-core-js\";\r\nimport { EventBatch } from \"./EventBatch\";\r\nimport { STR_EMPTY } from \"./InternalConstants\";\r\nimport { strSubstr } from \"@nevware21/ts-utils\";\r\n/**\r\n * Note: This is an optimization for V8-based browsers. When V8 concatenates a string,\r\n * the strings are only joined logically using a \"cons string\" or \"constructed/concatenated\r\n * string\". These containers keep references to one another and can result in very large\r\n * memory usage. For example, if a 2MB string is constructed by concatenating 4 bytes\r\n * together at a time, the memory usage will be ~44MB; so ~22x increase. The strings are\r\n * only joined together when an operation requiring their joining takes place, such as\r\n * substr(). 
This function is called when adding data to this buffer to ensure these\r\n * types of strings are periodically joined to reduce the memory footprint.\r\n * Setting to every 20 events as the JSON.stringify() may have joined many strings\r\n * and calling this too much causes a minor delay while processing.\r\n */\r\nvar _MAX_STRING_JOINS = 20;\r\nvar RequestSizeLimitBytes = 3984588; // approx 3.8 Mb\r\nvar BeaconRequestSizeLimitBytes = 65000; // approx 64kb (the current Edge, Firefox and Chrome max limit)\r\nvar MaxRecordSize = 2000000; // approx 2 Mb\r\nvar MaxBeaconRecordSize = Math.min(MaxRecordSize, BeaconRequestSizeLimitBytes);\r\nvar metadata = \"metadata\";\r\nvar f = \"f\";\r\nvar rCheckDot = /\\./;\r\n/**\r\n* Class to handle serialization of event and request.\r\n* Currently uses Bond for serialization. Please note that this may be subject to change.\r\n*/\r\nvar Serializer = /** @class */ (function () {\r\n function Serializer(perfManager, valueSanitizer, stringifyObjects, enableCompoundKey) {\r\n var strData = \"data\";\r\n var strBaseData = \"baseData\";\r\n var strExt = \"ext\";\r\n var _checkForCompoundkey = !!enableCompoundKey;\r\n var _processSubMetaData = true;\r\n var _theSanitizer = valueSanitizer;\r\n var _isReservedCache = {};\r\n dynamicProto(Serializer, this, function (_self) {\r\n _self.createPayload = function (retryCnt, isTeardown, isSync, isReducedPayload, sendReason, sendType) {\r\n return {\r\n apiKeys: [],\r\n payloadBlob: STR_EMPTY,\r\n overflow: null,\r\n sizeExceed: [],\r\n failedEvts: [],\r\n batches: [],\r\n numEvents: 0,\r\n retryCnt: retryCnt,\r\n isTeardown: isTeardown,\r\n isSync: isSync,\r\n isBeacon: isReducedPayload,\r\n sendType: sendType,\r\n sendReason: sendReason\r\n };\r\n };\r\n _self.appendPayload = function (payload, theBatch, maxEventsPerBatch) {\r\n var canAddEvents = payload && theBatch && !payload.overflow;\r\n if (canAddEvents) {\r\n doPerf(perfManager, function () { return \"Serializer:appendPayload\"; }, 
function () {\r\n var theEvents = theBatch.events();\r\n var payloadBlob = payload.payloadBlob;\r\n var payloadEvents = payload.numEvents;\r\n var eventsAdded = false;\r\n var sizeExceeded = [];\r\n var failedEvts = [];\r\n var isBeaconPayload = payload.isBeacon;\r\n var requestMaxSize = isBeaconPayload ? BeaconRequestSizeLimitBytes : RequestSizeLimitBytes;\r\n var recordMaxSize = isBeaconPayload ? MaxBeaconRecordSize : MaxRecordSize;\r\n var lp = 0;\r\n var joinCount = 0;\r\n while (lp < theEvents.length) {\r\n var theEvent = theEvents[lp];\r\n if (theEvent) {\r\n if (payloadEvents >= maxEventsPerBatch) {\r\n // Maximum events per payload reached, so don't add any more\r\n payload.overflow = theBatch.split(lp);\r\n break;\r\n }\r\n var eventBlob = _self.getEventBlob(theEvent);\r\n if (eventBlob && eventBlob.length <= recordMaxSize) {\r\n // This event will fit into the payload\r\n var blobLength = eventBlob.length;\r\n var currentSize = payloadBlob.length;\r\n if (currentSize + blobLength > requestMaxSize) {\r\n // Request or batch size exceeded, so don't add any more to the payload\r\n payload.overflow = theBatch.split(lp);\r\n break;\r\n }\r\n if (payloadBlob) {\r\n payloadBlob += \"\\n\";\r\n }\r\n payloadBlob += eventBlob;\r\n joinCount++;\r\n // v8 memory optimization only\r\n if (joinCount > _MAX_STRING_JOINS) {\r\n // this substr() should cause the constructed string to join\r\n strSubstr(payloadBlob, 0, 1);\r\n joinCount = 0;\r\n }\r\n eventsAdded = true;\r\n payloadEvents++;\r\n }\r\n else {\r\n if (eventBlob) {\r\n // Single event size exceeded so remove from the batch\r\n sizeExceeded.push(theEvent);\r\n }\r\n else {\r\n failedEvts.push(theEvent);\r\n }\r\n // We also need to remove this event from the existing array, otherwise a notification will be sent\r\n // indicating that it was successfully sent\r\n theEvents.splice(lp, 1);\r\n lp--;\r\n }\r\n }\r\n lp++;\r\n }\r\n if (sizeExceeded && sizeExceeded.length > 0) {\r\n 
payload.sizeExceed.push(EventBatch.create(theBatch.iKey(), sizeExceeded));\r\n // Remove the exceeded events from the batch\r\n }\r\n if (failedEvts && failedEvts.length > 0) {\r\n payload.failedEvts.push(EventBatch.create(theBatch.iKey(), failedEvts));\r\n // Remove the failed events from the batch\r\n }\r\n if (eventsAdded) {\r\n payload.batches.push(theBatch);\r\n payload.payloadBlob = payloadBlob;\r\n payload.numEvents = payloadEvents;\r\n var apiKey = theBatch.iKey();\r\n if (arrIndexOf(payload.apiKeys, apiKey) === -1) {\r\n payload.apiKeys.push(apiKey);\r\n }\r\n }\r\n }, function () { return ({ payload: payload, theBatch: { iKey: theBatch.iKey(), evts: theBatch.events() }, max: maxEventsPerBatch }); });\r\n }\r\n return canAddEvents;\r\n };\r\n _self.getEventBlob = function (eventData) {\r\n try {\r\n return doPerf(perfManager, function () { return \"Serializer.getEventBlob\"; }, function () {\r\n var serializedEvent = {};\r\n // Adding as dynamic keys for v8 performance\r\n serializedEvent.name = eventData.name;\r\n serializedEvent.time = eventData.time;\r\n serializedEvent.ver = eventData.ver;\r\n serializedEvent.iKey = \"o:\" + getTenantId(eventData.iKey);\r\n // Assigning local var so usage in part b/c don't throw if there is no ext\r\n var serializedExt = {};\r\n // Part A\r\n var eventExt = eventData[strExt];\r\n if (eventExt) {\r\n // Only assign ext if the event had one (There are tests covering this use case)\r\n serializedEvent[strExt] = serializedExt;\r\n objForEachKey(eventExt, function (key, value) {\r\n var data = serializedExt[key] = {};\r\n // Don't include a metadata callback as we don't currently set metadata Part A fields\r\n _processPathKeys(value, data, \"ext.\" + key, true, null, null, true);\r\n });\r\n }\r\n var serializedData = serializedEvent[strData] = {};\r\n serializedData.baseType = eventData.baseType;\r\n var serializedBaseData = serializedData[strBaseData] = {};\r\n // Part B\r\n _processPathKeys(eventData.baseData, 
serializedBaseData, strBaseData, false, [strBaseData], function (pathKeys, name, value) {\r\n _addJSONPropertyMetaData(serializedExt, pathKeys, name, value);\r\n }, _processSubMetaData);\r\n // Part C\r\n _processPathKeys(eventData.data, serializedData, strData, false, [], function (pathKeys, name, value) {\r\n _addJSONPropertyMetaData(serializedExt, pathKeys, name, value);\r\n }, _processSubMetaData);\r\n return JSON.stringify(serializedEvent);\r\n }, function () { return ({ item: eventData }); });\r\n }\r\n catch (e) {\r\n return null;\r\n }\r\n };\r\n function _isReservedField(path, name) {\r\n var result = _isReservedCache[path];\r\n if (result === undefined) {\r\n if (path.length >= 7) {\r\n // Do not allow the changing of fields located in the ext.metadata or ext.web extension\r\n result = strStartsWith(path, \"ext.metadata\") || strStartsWith(path, \"ext.web\");\r\n }\r\n _isReservedCache[path] = result;\r\n }\r\n return result;\r\n }\r\n function _processPathKeys(srcObj, target, thePath, checkReserved, metadataPathKeys, metadataCallback, processSubKeys) {\r\n objForEachKey(srcObj, function (key, srcValue) {\r\n var prop = null;\r\n if (srcValue || isValueAssigned(srcValue)) {\r\n var path = thePath;\r\n var name_1 = key;\r\n var theMetaPathKeys = metadataPathKeys;\r\n var destObj = target;\r\n // Handle keys with embedded '.', like \"TestObject.testProperty\"\r\n if (_checkForCompoundkey && !checkReserved && rCheckDot.test(key)) {\r\n var subKeys = key.split(\".\");\r\n var keyLen = subKeys.length;\r\n if (keyLen > 1) {\r\n if (theMetaPathKeys) {\r\n // Create a copy of the meta path keys so we can add the extra ones\r\n theMetaPathKeys = theMetaPathKeys.slice();\r\n }\r\n for (var lp = 0; lp < keyLen - 1; lp++) {\r\n var subKey = subKeys[lp];\r\n // Add/reuse the sub key object\r\n destObj = destObj[subKey] = destObj[subKey] || {};\r\n path += \".\" + subKey;\r\n if (theMetaPathKeys) {\r\n theMetaPathKeys.push(subKey);\r\n }\r\n }\r\n name_1 = 
subKeys[keyLen - 1];\r\n }\r\n }\r\n var isReserved = checkReserved && _isReservedField(path, name_1);\r\n if (!isReserved && _theSanitizer && _theSanitizer.handleField(path, name_1)) {\r\n prop = _theSanitizer.value(path, name_1, srcValue, stringifyObjects);\r\n }\r\n else {\r\n prop = sanitizeProperty(name_1, srcValue, stringifyObjects);\r\n }\r\n if (prop) {\r\n // Set the value\r\n var newValue = prop.value;\r\n destObj[name_1] = newValue;\r\n if (metadataCallback) {\r\n metadataCallback(theMetaPathKeys, name_1, prop);\r\n }\r\n if (processSubKeys && typeof newValue === \"object\" && !isArray(newValue)) {\r\n var newPath = theMetaPathKeys;\r\n if (newPath) {\r\n newPath = newPath.slice();\r\n newPath.push(name_1);\r\n }\r\n // Make sure we process sub objects as well (for value sanitization and metadata)\r\n _processPathKeys(srcValue, newValue, path + \".\" + name_1, checkReserved, newPath, metadataCallback, processSubKeys);\r\n }\r\n }\r\n }\r\n });\r\n }\r\n });\r\n }\r\n /**\r\n * Create a serializer payload package\r\n * @param retryCnt The retry count for the events in this payload\r\n * @param isTeardown Is this payload being created as part of a teardown request\r\n * @param isSync Should this payload be sent as a synchronous request\r\n * @param isReducedPayload Is this payload going to be sent via sendBeacon() API\r\n * @param sendReason The reason the payload is being sent\r\n * @param sendType Identifies how this payload will be sent\r\n */\r\n Serializer.prototype.createPayload = function (retryCnt, isTeardown, isSync, isReducedPayload, sendReason, sendType) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return null;\r\n };\r\n /**\r\n * Append the batched events into the payload\r\n * @param payload The serialized payload detail object\r\n * @param theBatch The batched events to append to the payload\r\n * @param maxEventsPerBatch The maximum number of events to allow in the payload\r\n * @returns True 
if the events from the new batch where attempted to be added to the payload otherwise false\r\n */\r\n Serializer.prototype.appendPayload = function (payload, theBatch, maxEventsPerBatch) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return false;\r\n };\r\n /**\r\n * Bond serialize the event.\r\n * @param eventData - The event that needs to be serialized.\r\n * @returns The serialized json event.\r\n */\r\n Serializer.prototype.getEventBlob = function (eventData) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return null;\r\n };\r\n /**\r\n * Does this field value sanitizer handle this path / field combination\r\n * @param path - The field path\r\n * @param name - The name of the field\r\n */\r\n Serializer.prototype.handleField = function (path, name) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return false;\r\n };\r\n /**\r\n * Get the field sanitizer for this type of field based on the field type, value kind and/or event property type\r\n * @param path - The field path\r\n * @param name - The name of the field\r\n * @param theType - The type of field\r\n * @param theKind - The value kind of the field\r\n * @param propType - The property type of the field\r\n */\r\n Serializer.prototype.getSanitizer = function (path, name, theType, theKind, propType) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return null;\r\n };\r\n return Serializer;\r\n}());\r\nexport { Serializer };\r\n/**\r\n * @ignore\r\n */\r\nfunction _addJSONPropertyMetaData(json, propKeys, name, propertyValue) {\r\n if (propertyValue && json) {\r\n var encodedTypeValue = getCommonSchemaMetaData(propertyValue.value, propertyValue.kind, propertyValue.propertyType);\r\n if (encodedTypeValue > -1) {\r\n // Add the root metadata\r\n var metaData = json[metadata];\r\n if (!metaData) {\r\n // Sets the root 'f'\r\n 
metaData = json[metadata] = { f: {} };\r\n }\r\n var metaTarget = metaData[f];\r\n if (!metaTarget) {\r\n // This can occur if someone has manually added an ext.metadata object\r\n // Such as ext.metadata.privLevel and ext.metadata.privTags\r\n metaTarget = metaData[f] = {};\r\n }\r\n // Traverse the metadata path and build each object (contains an 'f' key) -- if required\r\n if (propKeys) {\r\n for (var lp = 0; lp < propKeys.length; lp++) {\r\n var key = propKeys[lp];\r\n if (!metaTarget[key]) {\r\n metaTarget[key] = { f: {} };\r\n }\r\n var newTarget = metaTarget[key][f];\r\n if (!newTarget) {\r\n // Not expected, but can occur if the metadata context was pre-created as part of the event\r\n newTarget = metaTarget[key][f] = {};\r\n }\r\n metaTarget = newTarget;\r\n }\r\n }\r\n metaTarget = metaTarget[name] = {};\r\n if (isArray(propertyValue.value)) {\r\n metaTarget[\"a\"] = {\r\n t: encodedTypeValue\r\n };\r\n }\r\n else {\r\n metaTarget[\"t\"] = encodedTypeValue;\r\n }\r\n }\r\n }\r\n}\r\n//# 
sourceMappingURL=Serializer.js.map"],"names":[],"mappings":";;;;;AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;;sDAqDM,CAAC;;;;;;sBACe;AACtB;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA"}
1
+ {"version":3,"file":"Serializer.js.map","sources":["Serializer.js"],"sourcesContent":["/**\r\n* Serializer.ts\r\n* @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)\r\n* @copyright Microsoft 2018-2020\r\n*/\r\n// @skip-file-minify\r\nimport dynamicProto from \"@microsoft/dynamicproto-js\";\r\nimport { arrIndexOf, doPerf, getCommonSchemaMetaData, getTenantId, isArray, isValueAssigned, objForEachKey, sanitizeProperty, strStartsWith } from \"@microsoft/1ds-core-js\";\r\nimport { EventBatch } from \"./EventBatch\";\r\nimport { STR_EMPTY } from \"./InternalConstants\";\r\nimport { strSubstr } from \"@nevware21/ts-utils\";\r\n/**\r\n * Note: This is an optimization for V8-based browsers. When V8 concatenates a string,\r\n * the strings are only joined logically using a \"cons string\" or \"constructed/concatenated\r\n * string\". These containers keep references to one another and can result in very large\r\n * memory usage. For example, if a 2MB string is constructed by concatenating 4 bytes\r\n * together at a time, the memory usage will be ~44MB; so ~22x increase. The strings are\r\n * only joined together when an operation requiring their joining takes place, such as\r\n * substr(). 
This function is called when adding data to this buffer to ensure these\r\n * types of strings are periodically joined to reduce the memory footprint.\r\n * Setting to every 20 events as the JSON.stringify() may have joined many strings\r\n * and calling this too much causes a minor delay while processing.\r\n */\r\nvar _MAX_STRING_JOINS = 20;\r\nvar RequestSizeLimitBytes = 3984588; // approx 3.8 Mb\r\nvar BeaconRequestSizeLimitBytes = 65000; // approx 64kb (the current Edge, Firefox and Chrome max limit)\r\nvar MaxRecordSize = 2000000; // approx 2 Mb\r\nvar MaxBeaconRecordSize = Math.min(MaxRecordSize, BeaconRequestSizeLimitBytes);\r\nvar metadata = \"metadata\";\r\nvar f = \"f\";\r\nvar rCheckDot = /\\./;\r\n/**\r\n* Class to handle serialization of event and request.\r\n* Currently uses Bond for serialization. Please note that this may be subject to change.\r\n*/\r\nvar Serializer = /** @class */ (function () {\r\n function Serializer(perfManager, valueSanitizer, stringifyObjects, enableCompoundKey) {\r\n var strData = \"data\";\r\n var strBaseData = \"baseData\";\r\n var strExt = \"ext\";\r\n var _checkForCompoundkey = !!enableCompoundKey;\r\n var _processSubMetaData = true;\r\n var _theSanitizer = valueSanitizer;\r\n var _isReservedCache = {};\r\n dynamicProto(Serializer, this, function (_self) {\r\n _self.createPayload = function (retryCnt, isTeardown, isSync, isReducedPayload, sendReason, sendType) {\r\n return {\r\n apiKeys: [],\r\n payloadBlob: STR_EMPTY,\r\n overflow: null,\r\n sizeExceed: [],\r\n failedEvts: [],\r\n batches: [],\r\n numEvents: 0,\r\n retryCnt: retryCnt,\r\n isTeardown: isTeardown,\r\n isSync: isSync,\r\n isBeacon: isReducedPayload,\r\n sendType: sendType,\r\n sendReason: sendReason\r\n };\r\n };\r\n _self.appendPayload = function (payload, theBatch, maxEventsPerBatch) {\r\n var canAddEvents = payload && theBatch && !payload.overflow;\r\n if (canAddEvents) {\r\n doPerf(perfManager, function () { return \"Serializer:appendPayload\"; }, 
function () {\r\n var theEvents = theBatch.events();\r\n var payloadBlob = payload.payloadBlob;\r\n var payloadEvents = payload.numEvents;\r\n var eventsAdded = false;\r\n var sizeExceeded = [];\r\n var failedEvts = [];\r\n var isBeaconPayload = payload.isBeacon;\r\n var requestMaxSize = isBeaconPayload ? BeaconRequestSizeLimitBytes : RequestSizeLimitBytes;\r\n var recordMaxSize = isBeaconPayload ? MaxBeaconRecordSize : MaxRecordSize;\r\n var lp = 0;\r\n var joinCount = 0;\r\n while (lp < theEvents.length) {\r\n var theEvent = theEvents[lp];\r\n if (theEvent) {\r\n if (payloadEvents >= maxEventsPerBatch) {\r\n // Maximum events per payload reached, so don't add any more\r\n payload.overflow = theBatch.split(lp);\r\n break;\r\n }\r\n var eventBlob = _self.getEventBlob(theEvent);\r\n if (eventBlob && eventBlob.length <= recordMaxSize) {\r\n // This event will fit into the payload\r\n var blobLength = eventBlob.length;\r\n var currentSize = payloadBlob.length;\r\n if (currentSize + blobLength > requestMaxSize) {\r\n // Request or batch size exceeded, so don't add any more to the payload\r\n payload.overflow = theBatch.split(lp);\r\n break;\r\n }\r\n if (payloadBlob) {\r\n payloadBlob += \"\\n\";\r\n }\r\n payloadBlob += eventBlob;\r\n joinCount++;\r\n // v8 memory optimization only\r\n if (joinCount > _MAX_STRING_JOINS) {\r\n // this substr() should cause the constructed string to join\r\n strSubstr(payloadBlob, 0, 1);\r\n joinCount = 0;\r\n }\r\n eventsAdded = true;\r\n payloadEvents++;\r\n }\r\n else {\r\n if (eventBlob) {\r\n // Single event size exceeded so remove from the batch\r\n sizeExceeded.push(theEvent);\r\n }\r\n else {\r\n failedEvts.push(theEvent);\r\n }\r\n // We also need to remove this event from the existing array, otherwise a notification will be sent\r\n // indicating that it was successfully sent\r\n theEvents.splice(lp, 1);\r\n lp--;\r\n }\r\n }\r\n lp++;\r\n }\r\n if (sizeExceeded.length > 0) {\r\n 
payload.sizeExceed.push(EventBatch.create(theBatch.iKey(), sizeExceeded));\r\n // Remove the exceeded events from the batch\r\n }\r\n if (failedEvts.length > 0) {\r\n payload.failedEvts.push(EventBatch.create(theBatch.iKey(), failedEvts));\r\n // Remove the failed events from the batch\r\n }\r\n if (eventsAdded) {\r\n payload.batches.push(theBatch);\r\n payload.payloadBlob = payloadBlob;\r\n payload.numEvents = payloadEvents;\r\n var apiKey = theBatch.iKey();\r\n if (arrIndexOf(payload.apiKeys, apiKey) === -1) {\r\n payload.apiKeys.push(apiKey);\r\n }\r\n }\r\n }, function () { return ({ payload: payload, theBatch: { iKey: theBatch.iKey(), evts: theBatch.events() }, max: maxEventsPerBatch }); });\r\n }\r\n return canAddEvents;\r\n };\r\n _self.getEventBlob = function (eventData) {\r\n try {\r\n return doPerf(perfManager, function () { return \"Serializer.getEventBlob\"; }, function () {\r\n var serializedEvent = {};\r\n // Adding as dynamic keys for v8 performance\r\n serializedEvent.name = eventData.name;\r\n serializedEvent.time = eventData.time;\r\n serializedEvent.ver = eventData.ver;\r\n serializedEvent.iKey = \"o:\" + getTenantId(eventData.iKey);\r\n // Assigning local var so usage in part b/c don't throw if there is no ext\r\n var serializedExt = {};\r\n // Part A\r\n var eventExt = eventData[strExt];\r\n if (eventExt) {\r\n // Only assign ext if the event had one (There are tests covering this use case)\r\n serializedEvent[strExt] = serializedExt;\r\n objForEachKey(eventExt, function (key, value) {\r\n var data = serializedExt[key] = {};\r\n // Don't include a metadata callback as we don't currently set metadata Part A fields\r\n _processPathKeys(value, data, \"ext.\" + key, true, null, null, true);\r\n });\r\n }\r\n var serializedData = serializedEvent[strData] = {};\r\n serializedData.baseType = eventData.baseType;\r\n var serializedBaseData = serializedData[strBaseData] = {};\r\n // Part B\r\n _processPathKeys(eventData.baseData, serializedBaseData, 
strBaseData, false, [strBaseData], function (pathKeys, name, value) {\r\n _addJSONPropertyMetaData(serializedExt, pathKeys, name, value);\r\n }, _processSubMetaData);\r\n // Part C\r\n _processPathKeys(eventData.data, serializedData, strData, false, [], function (pathKeys, name, value) {\r\n _addJSONPropertyMetaData(serializedExt, pathKeys, name, value);\r\n }, _processSubMetaData);\r\n return JSON.stringify(serializedEvent);\r\n }, function () { return ({ item: eventData }); });\r\n }\r\n catch (e) {\r\n return null;\r\n }\r\n };\r\n function _isReservedField(path, name) {\r\n var result = _isReservedCache[path];\r\n if (result === undefined) {\r\n if (path.length >= 7) {\r\n // Do not allow the changing of fields located in the ext.metadata or ext.web extension\r\n result = strStartsWith(path, \"ext.metadata\") || strStartsWith(path, \"ext.web\");\r\n }\r\n _isReservedCache[path] = result;\r\n }\r\n return result;\r\n }\r\n function _processPathKeys(srcObj, target, thePath, checkReserved, metadataPathKeys, metadataCallback, processSubKeys) {\r\n objForEachKey(srcObj, function (key, srcValue) {\r\n var prop = null;\r\n if (srcValue || isValueAssigned(srcValue)) {\r\n var path = thePath;\r\n var name_1 = key;\r\n var theMetaPathKeys = metadataPathKeys;\r\n var destObj = target;\r\n // Handle keys with embedded '.', like \"TestObject.testProperty\"\r\n if (_checkForCompoundkey && !checkReserved && rCheckDot.test(key)) {\r\n var subKeys = key.split(\".\");\r\n var keyLen = subKeys.length;\r\n if (keyLen > 1) {\r\n if (theMetaPathKeys) {\r\n // Create a copy of the meta path keys so we can add the extra ones\r\n theMetaPathKeys = theMetaPathKeys.slice();\r\n }\r\n for (var lp = 0; lp < keyLen - 1; lp++) {\r\n var subKey = subKeys[lp];\r\n // Add/reuse the sub key object\r\n destObj = destObj[subKey] = destObj[subKey] || {};\r\n path += \".\" + subKey;\r\n if (theMetaPathKeys) {\r\n theMetaPathKeys.push(subKey);\r\n }\r\n }\r\n name_1 = subKeys[keyLen - 1];\r\n }\r\n 
}\r\n var isReserved = checkReserved && _isReservedField(path, name_1);\r\n if (!isReserved && _theSanitizer && _theSanitizer.handleField(path, name_1)) {\r\n prop = _theSanitizer.value(path, name_1, srcValue, stringifyObjects);\r\n }\r\n else {\r\n prop = sanitizeProperty(name_1, srcValue, stringifyObjects);\r\n }\r\n if (prop) {\r\n // Set the value\r\n var newValue = prop.value;\r\n destObj[name_1] = newValue;\r\n if (metadataCallback) {\r\n metadataCallback(theMetaPathKeys, name_1, prop);\r\n }\r\n if (processSubKeys && typeof newValue === \"object\" && !isArray(newValue)) {\r\n var newPath = theMetaPathKeys;\r\n if (newPath) {\r\n newPath = newPath.slice();\r\n newPath.push(name_1);\r\n }\r\n // Make sure we process sub objects as well (for value sanitization and metadata)\r\n _processPathKeys(srcValue, newValue, path + \".\" + name_1, checkReserved, newPath, metadataCallback, processSubKeys);\r\n }\r\n }\r\n }\r\n });\r\n }\r\n });\r\n }\r\n /**\r\n * Create a serializer payload package\r\n * @param retryCnt The retry count for the events in this payload\r\n * @param isTeardown Is this payload being created as part of a teardown request\r\n * @param isSync Should this payload be sent as a synchronous request\r\n * @param isReducedPayload Is this payload going to be sent via sendBeacon() API\r\n * @param sendReason The reason the payload is being sent\r\n * @param sendType Identifies how this payload will be sent\r\n */\r\n Serializer.prototype.createPayload = function (retryCnt, isTeardown, isSync, isReducedPayload, sendReason, sendType) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return null;\r\n };\r\n /**\r\n * Append the batched events into the payload\r\n * @param payload The serialized payload detail object\r\n * @param theBatch The batched events to append to the payload\r\n * @param maxEventsPerBatch The maximum number of events to allow in the payload\r\n * @returns True if the events from the new 
batch where attempted to be added to the payload otherwise false\r\n */\r\n Serializer.prototype.appendPayload = function (payload, theBatch, maxEventsPerBatch) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return false;\r\n };\r\n /**\r\n * Bond serialize the event.\r\n * @param eventData - The event that needs to be serialized.\r\n * @returns The serialized json event.\r\n */\r\n Serializer.prototype.getEventBlob = function (eventData) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return null;\r\n };\r\n /**\r\n * Does this field value sanitizer handle this path / field combination\r\n * @param path - The field path\r\n * @param name - The name of the field\r\n */\r\n Serializer.prototype.handleField = function (path, name) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return false;\r\n };\r\n /**\r\n * Get the field sanitizer for this type of field based on the field type, value kind and/or event property type\r\n * @param path - The field path\r\n * @param name - The name of the field\r\n * @param theType - The type of field\r\n * @param theKind - The value kind of the field\r\n * @param propType - The property type of the field\r\n */\r\n Serializer.prototype.getSanitizer = function (path, name, theType, theKind, propType) {\r\n // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging\r\n return null;\r\n };\r\n return Serializer;\r\n}());\r\nexport { Serializer };\r\n/**\r\n * @ignore\r\n */\r\nfunction _addJSONPropertyMetaData(json, propKeys, name, propertyValue) {\r\n if (propertyValue && json) {\r\n var encodedTypeValue = getCommonSchemaMetaData(propertyValue.value, propertyValue.kind, propertyValue.propertyType);\r\n if (encodedTypeValue > -1) {\r\n // Add the root metadata\r\n var metaData = json[metadata];\r\n if (!metaData) {\r\n // Sets the root 'f'\r\n metaData = json[metadata] = { 
f: {} };\r\n }\r\n var metaTarget = metaData[f];\r\n if (!metaTarget) {\r\n // This can occur if someone has manually added an ext.metadata object\r\n // Such as ext.metadata.privLevel and ext.metadata.privTags\r\n metaTarget = metaData[f] = {};\r\n }\r\n // Traverse the metadata path and build each object (contains an 'f' key) -- if required\r\n if (propKeys) {\r\n for (var lp = 0; lp < propKeys.length; lp++) {\r\n var key = propKeys[lp];\r\n if (!metaTarget[key]) {\r\n metaTarget[key] = { f: {} };\r\n }\r\n var newTarget = metaTarget[key][f];\r\n if (!newTarget) {\r\n // Not expected, but can occur if the metadata context was pre-created as part of the event\r\n newTarget = metaTarget[key][f] = {};\r\n }\r\n metaTarget = newTarget;\r\n }\r\n }\r\n metaTarget = metaTarget[name] = {};\r\n if (isArray(propertyValue.value)) {\r\n metaTarget[\"a\"] = {\r\n t: encodedTypeValue\r\n };\r\n }\r\n else {\r\n metaTarget[\"t\"] = encodedTypeValue;\r\n }\r\n }\r\n }\r\n}\r\n//# sourceMappingURL=Serializer.js.map"],"names":[],"mappings":";;;;;AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAC
A;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;;sDAqDM,CAAC;;;;;sBACe;AACtB;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA"}
@@ -1,5 +1,5 @@
1
1
  /*
2
- * 1DS JS SDK POST plugin, 4.0.1
2
+ * 1DS JS SDK POST plugin, 4.0.2-nightly3.2307-25
3
3
  * Copyright (c) Microsoft and contributors. All rights reserved.
4
4
  * (Microsoft Internal Only)
5
5
  */
@@ -0,0 +1,83 @@
1
+ /*
2
+ * 1DS JS SDK POST plugin, 4.0.2-nightly3.2307-25
3
+ * Copyright (c) Microsoft and contributors. All rights reserved.
4
+ * (Microsoft Internal Only)
5
+ */
6
+
7
+ // Licensed under the MIT License.
8
+ // @skip-file-minify
9
+ // ##############################################################
10
+ // AUTO GENERATED FILE: This file is Auto Generated during build.
11
+ // ##############################################################
12
+ // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
13
+ // Note: DON'T Export these const from the package as we are still targeting ES3 this will export a mutable variables that someone could change!!!
14
+ // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15
+ export var _DYN_ALLOW_REQUEST_SENDIN0 = "allowRequestSending"; // Count: 3
16
+ export var _DYN_FIRST_REQUEST_SENT = "firstRequestSent"; // Count: 2
17
+ export var _DYN_SHOULD_ADD_CLOCK_SKE1 = "shouldAddClockSkewHeaders"; // Count: 2
18
+ export var _DYN_GET_CLOCK_SKEW_HEADE2 = "getClockSkewHeaderValue"; // Count: 2
19
+ export var _DYN_SET_CLOCK_SKEW = "setClockSkew"; // Count: 3
20
+ export var _DYN_DATA = "data"; // Count: 12
21
+ export var _DYN_LENGTH = "length"; // Count: 37
22
+ export var _DYN_CONCAT = "concat"; // Count: 9
23
+ export var _DYN_I_KEY = "iKey"; // Count: 10
24
+ export var _DYN_COUNT = "count"; // Count: 18
25
+ export var _DYN_EVENTS = "events"; // Count: 8
26
+ export var _DYN_PUSH = "push"; // Count: 13
27
+ export var _DYN_SPLIT = "split"; // Count: 7
28
+ export var _DYN_SPLICE = "splice"; // Count: 4
29
+ export var _DYN_TO_LOWER_CASE = "toLowerCase"; // Count: 5
30
+ export var _DYN_HDRS = "hdrs"; // Count: 6
31
+ export var _DYN_USE_HDRS = "useHdrs"; // Count: 5
32
+ export var _DYN_INITIALIZE = "initialize"; // Count: 4
33
+ export var _DYN_SET_TIMEOUT_OVERRIDE = "setTimeoutOverride"; // Count: 3
34
+ export var _DYN_CLEAR_TIMEOUT_OVERRI3 = "clearTimeoutOverride"; // Count: 3
35
+ export var _DYN_PAYLOAD_PREPROCESSOR = "payloadPreprocessor"; // Count: 2
36
+ export var _DYN_OVERRIDE_ENDPOINT_UR4 = "overrideEndpointUrl"; // Count: 3
37
+ export var _DYN_AVOID_OPTIONS = "avoidOptions"; // Count: 3
38
+ export var _DYN_DISABLE_EVENT_TIMING5 = "disableEventTimings"; // Count: 2
39
+ export var _DYN_STRINGIFY_OBJECTS = "stringifyObjects"; // Count: 2
40
+ export var _DYN_ENABLE_COMPOUND_KEY = "enableCompoundKey"; // Count: 4
41
+ export var _DYN_DISABLE_XHR_SYNC = "disableXhrSync"; // Count: 6
42
+ export var _DYN_DISABLE_FETCH_KEEP_A6 = "disableFetchKeepAlive"; // Count: 5
43
+ export var _DYN_USE_SEND_BEACON = "useSendBeacon"; // Count: 3
44
+ export var _DYN_ALWAYS_USE_XHR_OVERR7 = "alwaysUseXhrOverride"; // Count: 3
45
+ export var _DYN_UNLOAD_TRANSPORTS = "unloadTransports"; // Count: 2
46
+ export var _DYN_URL_STRING = "urlString"; // Count: 7
47
+ export var _DYN_TIMEOUT = "timeout"; // Count: 9
48
+ export var _DYN_ONTIMEOUT = "ontimeout"; // Count: 3
49
+ export var _DYN__SEND_REASON = "_sendReason"; // Count: 5
50
+ export var _DYN_HEADERS = "headers"; // Count: 12
51
+ export var _DYN_GET_RESPONSE_HEADER = "getResponseHeader"; // Count: 2
52
+ export var _DYN_GET_ALL_RESPONSE_HEA8 = "getAllResponseHeaders"; // Count: 2
53
+ export var _DYN__THE_PAYLOAD = "_thePayload"; // Count: 6
54
+ export var _DYN_BATCHES = "batches"; // Count: 15
55
+ export var _DYN_SEND_TYPE = "sendType"; // Count: 13
56
+ export var _DYN_CAN_SEND_REQUEST = "canSendRequest"; // Count: 3
57
+ export var _DYN_SEND_QUEUED_REQUESTS = "sendQueuedRequests"; // Count: 5
58
+ export var _DYN_IS_COMPLETELY_IDLE = "isCompletelyIdle"; // Count: 2
59
+ export var _DYN_SET_UNLOADING = "setUnloading"; // Count: 3
60
+ export var _DYN_IS_TENANT_KILLED = "isTenantKilled"; // Count: 3
61
+ export var _DYN_SEND_SYNCHRONOUS_BAT9 = "sendSynchronousBatch"; // Count: 2
62
+ export var _DYN__TRANSPORT = "_transport"; // Count: 4
63
+ export var _DYN_GET_WPARAM = "getWParam"; // Count: 4
64
+ export var _DYN_IS_BEACON = "isBeacon"; // Count: 4
65
+ export var _DYN_TIMINGS = "timings"; // Count: 4
66
+ export var _DYN_IS_TEARDOWN = "isTeardown"; // Count: 3
67
+ export var _DYN_IS_SYNC = "isSync"; // Count: 4
68
+ export var _DYN_SEND_POST = "sendPOST"; // Count: 3
69
+ export var _DYN_SET_KILL_SWITCH_TENA10 = "setKillSwitchTenants"; // Count: 2
70
+ export var _DYN__BACK_OFF_TRANSMISSI11 = "_backOffTransmission"; // Count: 2
71
+ export var _DYN_IDENTIFIER = "identifier"; // Count: 4
72
+ export var _DYN_DISABLE_OPTIMIZE_OBJ = "disableOptimizeObj"; // Count: 2
73
+ export var _DYN_IGNORE_MC1_MS0_COOKI12 = "ignoreMc1Ms0CookieProcessing"; // Count: 2
74
+ export var _DYN_EVENTS_LIMIT_IN_MEM = "eventsLimitInMem"; // Count: 2
75
+ export var _DYN_AUTO_FLUSH_EVENTS_LI13 = "autoFlushEventsLimit"; // Count: 2
76
+ export var _DYN_DISABLE_AUTO_BATCH_F14 = "disableAutoBatchFlushLimit"; // Count: 2
77
+ export var _DYN_OVERRIDE_INSTRUMENTA15 = "overrideInstrumentationKey"; // Count: 2
78
+ export var _DYN_DISABLE_TELEMETRY = "disableTelemetry"; // Count: 2
79
+ export var _DYN_SEND_ATTEMPT = "sendAttempt"; // Count: 4
80
+ export var _DYN_LATENCY = "latency"; // Count: 7
81
+ export var _DYN_BASE_DATA = "baseData"; // Count: 3
82
+ export var _DYN_SYNC = "sync"; // Count: 7
83
+ //# sourceMappingURL=__DynamicConstants.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"__DynamicConstants.js.map","sources":["__DynamicConstants.js"],"sourcesContent":["// Copyright (c) Microsoft Corporation. All rights reserved.\r\n// Licensed under the MIT License.\r\n// @skip-file-minify\r\n// ##############################################################\r\n// AUTO GENERATED FILE: This file is Auto Generated during build.\r\n// ##############################################################\r\n// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n// Note: DON'T Export these const from the package as we are still targeting ES3 this will export a mutable variables that someone could change!!!\r\n// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\nexport var _DYN_ALLOW_REQUEST_SENDIN0 = \"allowRequestSending\"; // Count: 3\r\nexport var _DYN_FIRST_REQUEST_SENT = \"firstRequestSent\"; // Count: 2\r\nexport var _DYN_SHOULD_ADD_CLOCK_SKE1 = \"shouldAddClockSkewHeaders\"; // Count: 2\r\nexport var _DYN_GET_CLOCK_SKEW_HEADE2 = \"getClockSkewHeaderValue\"; // Count: 2\r\nexport var _DYN_SET_CLOCK_SKEW = \"setClockSkew\"; // Count: 3\r\nexport var _DYN_DATA = \"data\"; // Count: 12\r\nexport var _DYN_LENGTH = \"length\"; // Count: 37\r\nexport var _DYN_CONCAT = \"concat\"; // Count: 9\r\nexport var _DYN_I_KEY = \"iKey\"; // Count: 10\r\nexport var _DYN_COUNT = \"count\"; // Count: 18\r\nexport var _DYN_EVENTS = \"events\"; // Count: 8\r\nexport var _DYN_PUSH = \"push\"; // Count: 13\r\nexport var _DYN_SPLIT = \"split\"; // Count: 7\r\nexport var _DYN_SPLICE = \"splice\"; // Count: 4\r\nexport var _DYN_TO_LOWER_CASE = \"toLowerCase\"; // Count: 5\r\nexport var _DYN_HDRS = \"hdrs\"; // Count: 6\r\nexport var _DYN_USE_HDRS = \"useHdrs\"; // Count: 5\r\nexport var _DYN_INITIALIZE = \"initialize\"; // Count: 4\r\nexport var _DYN_SET_TIMEOUT_OVERRIDE 
= \"setTimeoutOverride\"; // Count: 3\r\nexport var _DYN_CLEAR_TIMEOUT_OVERRI3 = \"clearTimeoutOverride\"; // Count: 3\r\nexport var _DYN_PAYLOAD_PREPROCESSOR = \"payloadPreprocessor\"; // Count: 2\r\nexport var _DYN_OVERRIDE_ENDPOINT_UR4 = \"overrideEndpointUrl\"; // Count: 3\r\nexport var _DYN_AVOID_OPTIONS = \"avoidOptions\"; // Count: 3\r\nexport var _DYN_DISABLE_EVENT_TIMING5 = \"disableEventTimings\"; // Count: 2\r\nexport var _DYN_STRINGIFY_OBJECTS = \"stringifyObjects\"; // Count: 2\r\nexport var _DYN_ENABLE_COMPOUND_KEY = \"enableCompoundKey\"; // Count: 4\r\nexport var _DYN_DISABLE_XHR_SYNC = \"disableXhrSync\"; // Count: 6\r\nexport var _DYN_DISABLE_FETCH_KEEP_A6 = \"disableFetchKeepAlive\"; // Count: 5\r\nexport var _DYN_USE_SEND_BEACON = \"useSendBeacon\"; // Count: 3\r\nexport var _DYN_ALWAYS_USE_XHR_OVERR7 = \"alwaysUseXhrOverride\"; // Count: 3\r\nexport var _DYN_UNLOAD_TRANSPORTS = \"unloadTransports\"; // Count: 2\r\nexport var _DYN_URL_STRING = \"urlString\"; // Count: 7\r\nexport var _DYN_TIMEOUT = \"timeout\"; // Count: 9\r\nexport var _DYN_ONTIMEOUT = \"ontimeout\"; // Count: 3\r\nexport var _DYN__SEND_REASON = \"_sendReason\"; // Count: 5\r\nexport var _DYN_HEADERS = \"headers\"; // Count: 12\r\nexport var _DYN_GET_RESPONSE_HEADER = \"getResponseHeader\"; // Count: 2\r\nexport var _DYN_GET_ALL_RESPONSE_HEA8 = \"getAllResponseHeaders\"; // Count: 2\r\nexport var _DYN__THE_PAYLOAD = \"_thePayload\"; // Count: 6\r\nexport var _DYN_BATCHES = \"batches\"; // Count: 15\r\nexport var _DYN_SEND_TYPE = \"sendType\"; // Count: 13\r\nexport var _DYN_CAN_SEND_REQUEST = \"canSendRequest\"; // Count: 3\r\nexport var _DYN_SEND_QUEUED_REQUESTS = \"sendQueuedRequests\"; // Count: 5\r\nexport var _DYN_IS_COMPLETELY_IDLE = \"isCompletelyIdle\"; // Count: 2\r\nexport var _DYN_SET_UNLOADING = \"setUnloading\"; // Count: 3\r\nexport var _DYN_IS_TENANT_KILLED = \"isTenantKilled\"; // Count: 3\r\nexport var _DYN_SEND_SYNCHRONOUS_BAT9 = \"sendSynchronousBatch\"; // 
Count: 2\r\nexport var _DYN__TRANSPORT = \"_transport\"; // Count: 4\r\nexport var _DYN_GET_WPARAM = \"getWParam\"; // Count: 4\r\nexport var _DYN_IS_BEACON = \"isBeacon\"; // Count: 4\r\nexport var _DYN_TIMINGS = \"timings\"; // Count: 4\r\nexport var _DYN_IS_TEARDOWN = \"isTeardown\"; // Count: 3\r\nexport var _DYN_IS_SYNC = \"isSync\"; // Count: 4\r\nexport var _DYN_SEND_POST = \"sendPOST\"; // Count: 3\r\nexport var _DYN_SET_KILL_SWITCH_TENA10 = \"setKillSwitchTenants\"; // Count: 2\r\nexport var _DYN__BACK_OFF_TRANSMISSI11 = \"_backOffTransmission\"; // Count: 2\r\nexport var _DYN_IDENTIFIER = \"identifier\"; // Count: 4\r\nexport var _DYN_DISABLE_OPTIMIZE_OBJ = \"disableOptimizeObj\"; // Count: 2\r\nexport var _DYN_IGNORE_MC1_MS0_COOKI12 = \"ignoreMc1Ms0CookieProcessing\"; // Count: 2\r\nexport var _DYN_EVENTS_LIMIT_IN_MEM = \"eventsLimitInMem\"; // Count: 2\r\nexport var _DYN_AUTO_FLUSH_EVENTS_LI13 = \"autoFlushEventsLimit\"; // Count: 2\r\nexport var _DYN_DISABLE_AUTO_BATCH_F14 = \"disableAutoBatchFlushLimit\"; // Count: 2\r\nexport var _DYN_OVERRIDE_INSTRUMENTA15 = \"overrideInstrumentationKey\"; // Count: 2\r\nexport var _DYN_DISABLE_TELEMETRY = \"disableTelemetry\"; // Count: 2\r\nexport var _DYN_SEND_ATTEMPT = \"sendAttempt\"; // Count: 4\r\nexport var _DYN_LATENCY = \"latency\"; // Count: 7\r\nexport var _DYN_BASE_DATA = \"baseData\"; // Count: 3\r\nexport var _DYN_SYNC = \"sync\"; // Count: 7\r\n//# sourceMappingURL=__DynamicConstants.js.map"],"names":[],"mappings":";;;;;AAA4D;AAC5D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA"}
@@ -1,5 +1,5 @@
1
1
  /*
2
- * 1DS JS SDK POST plugin, 4.0.1
2
+ * 1DS JS SDK POST plugin, 4.0.2-nightly3.2307-25
3
3
  * Copyright (c) Microsoft and contributors. All rights reserved.
4
4
  * (Microsoft Internal Only)
5
5
  */