@microsoft/1ds-post-js 3.2.11 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/README.md +1 -1
  2. package/bundle/es5/ms.post-4.0.0.gbl.js +5816 -0
  3. package/bundle/es5/ms.post-4.0.0.gbl.js.map +1 -0
  4. package/bundle/es5/ms.post-4.0.0.gbl.min.js +7 -0
  5. package/bundle/es5/ms.post-4.0.0.gbl.min.js.map +1 -0
  6. package/bundle/es5/ms.post-4.0.0.integrity.json +46 -0
  7. package/bundle/es5/ms.post-4.0.0.js +5820 -0
  8. package/bundle/es5/ms.post-4.0.0.js.map +1 -0
  9. package/bundle/es5/ms.post-4.0.0.min.js +7 -0
  10. package/bundle/es5/ms.post-4.0.0.min.js.map +1 -0
  11. package/bundle/es5/ms.post.gbl.js +5816 -0
  12. package/bundle/es5/ms.post.gbl.js.map +1 -0
  13. package/bundle/es5/ms.post.gbl.min.js +7 -0
  14. package/bundle/es5/ms.post.gbl.min.js.map +1 -0
  15. package/bundle/es5/ms.post.integrity.json +46 -0
  16. package/bundle/es5/ms.post.js +5820 -0
  17. package/bundle/es5/ms.post.js.map +1 -0
  18. package/bundle/es5/ms.post.min.js +7 -0
  19. package/bundle/es5/ms.post.min.js.map +1 -0
  20. package/{bundle → dist/es5}/ms.post.js +2942 -1670
  21. package/dist/es5/ms.post.js.map +1 -0
  22. package/dist/es5/ms.post.min.js +7 -0
  23. package/dist/es5/ms.post.min.js.map +1 -0
  24. package/{dist-esm/src → dist-es5}/BatchNotificationActions.js +1 -1
  25. package/{dist-esm/src → dist-es5}/ClockSkewManager.js +2 -2
  26. package/{dist-esm/src → dist-es5}/ClockSkewManager.js.map +1 -1
  27. package/{dist-esm/src → dist-es5}/DataModels.js +1 -1
  28. package/{dist-esm/src → dist-es5}/EventBatch.js +1 -1
  29. package/{dist-esm/src → dist-es5}/HttpManager.js +175 -94
  30. package/dist-es5/HttpManager.js.map +1 -0
  31. package/{dist-esm/src → dist-es5}/Index.js +2 -2
  32. package/dist-es5/Index.js.map +1 -0
  33. package/{dist-esm/src → dist-es5}/InternalConstants.js +1 -1
  34. package/{dist-esm/src → dist-es5}/KillSwitch.js +2 -2
  35. package/{dist-esm/src → dist-es5}/KillSwitch.js.map +1 -1
  36. package/{dist-esm/src → dist-es5}/PostChannel.js +166 -144
  37. package/dist-es5/PostChannel.js.map +1 -0
  38. package/{dist-esm/src → dist-es5}/RetryPolicy.js +1 -1
  39. package/{dist-esm/src → dist-es5}/Serializer.js +3 -2
  40. package/dist-es5/Serializer.js.map +1 -0
  41. package/dist-es5/TimeoutOverrideWrapper.js +24 -0
  42. package/dist-es5/TimeoutOverrideWrapper.js.map +1 -0
  43. package/{dist-esm/src → dist-es5}/typings/XDomainRequest.js +1 -1
  44. package/package.json +15 -10
  45. package/tsconfig.json +5 -2
  46. package/{src/DataModels.ts → types/1ds-post-js.d.ts} +408 -467
  47. package/types/1ds-post-js.namespaced.d.ts +404 -0
  48. package/bundle/ms.post-3.2.11.gbl.js +0 -4524
  49. package/bundle/ms.post-3.2.11.gbl.js.map +0 -1
  50. package/bundle/ms.post-3.2.11.gbl.min.js +0 -7
  51. package/bundle/ms.post-3.2.11.gbl.min.js.map +0 -1
  52. package/bundle/ms.post-3.2.11.integrity.json +0 -46
  53. package/bundle/ms.post-3.2.11.js +0 -4527
  54. package/bundle/ms.post-3.2.11.js.map +0 -1
  55. package/bundle/ms.post-3.2.11.min.js +0 -7
  56. package/bundle/ms.post-3.2.11.min.js.map +0 -1
  57. package/bundle/ms.post.gbl.js +0 -4524
  58. package/bundle/ms.post.gbl.js.map +0 -1
  59. package/bundle/ms.post.gbl.min.js +0 -7
  60. package/bundle/ms.post.gbl.min.js.map +0 -1
  61. package/bundle/ms.post.integrity.json +0 -46
  62. package/bundle/ms.post.js.map +0 -1
  63. package/bundle/ms.post.min.js +0 -7
  64. package/bundle/ms.post.min.js.map +0 -1
  65. package/dist/ms.post.js +0 -2144
  66. package/dist/ms.post.js.map +0 -1
  67. package/dist/ms.post.min.js +0 -7
  68. package/dist/ms.post.min.js.map +0 -1
  69. package/dist-esm/src/BatchNotificationActions.d.ts +0 -36
  70. package/dist-esm/src/ClockSkewManager.d.ts +0 -38
  71. package/dist-esm/src/DataModels.d.ts +0 -405
  72. package/dist-esm/src/EventBatch.d.ts +0 -47
  73. package/dist-esm/src/HttpManager.d.ts +0 -88
  74. package/dist-esm/src/HttpManager.js.map +0 -1
  75. package/dist-esm/src/Index.d.ts +0 -9
  76. package/dist-esm/src/Index.js.map +0 -1
  77. package/dist-esm/src/InternalConstants.d.ts +0 -28
  78. package/dist-esm/src/KillSwitch.d.ts +0 -26
  79. package/dist-esm/src/PostChannel.d.ts +0 -101
  80. package/dist-esm/src/PostChannel.js.map +0 -1
  81. package/dist-esm/src/RetryPolicy.d.ts +0 -21
  82. package/dist-esm/src/Serializer.d.ts +0 -108
  83. package/dist-esm/src/Serializer.js.map +0 -1
  84. package/dist-esm/src/TimeoutOverrideWrapper.d.ts +0 -18
  85. package/dist-esm/src/TimeoutOverrideWrapper.js +0 -28
  86. package/dist-esm/src/TimeoutOverrideWrapper.js.map +0 -1
  87. package/dist-esm/src/typings/XDomainRequest.d.ts +0 -17
  88. package/src/BatchNotificationActions.ts +0 -44
  89. package/src/ClockSkewManager.ts +0 -127
  90. package/src/EventBatch.ts +0 -137
  91. package/src/HttpManager.ts +0 -1379
  92. package/src/Index.ts +0 -18
  93. package/src/InternalConstants.ts +0 -42
  94. package/src/KillSwitch.ts +0 -84
  95. package/src/PostChannel.ts +0 -1163
  96. package/src/RetryPolicy.ts +0 -46
  97. package/src/Serializer.ts +0 -487
  98. package/src/TimeoutOverrideWrapper.ts +0 -29
  99. package/src/typings/XDomainRequest.ts +0 -23
  100. /package/{dist-esm/src → dist-es5}/BatchNotificationActions.js.map +0 -0
  101. /package/{dist-esm/src → dist-es5}/DataModels.js.map +0 -0
  102. /package/{dist-esm/src → dist-es5}/EventBatch.js.map +0 -0
  103. /package/{dist-esm/src → dist-es5}/InternalConstants.js.map +0 -0
  104. /package/{dist-esm/src → dist-es5}/RetryPolicy.js.map +0 -0
  105. /package/{dist-esm/src → dist-es5}/typings/XDomainRequest.js.map +0 -0
@@ -1,1163 +0,0 @@
1
- /**
2
- * PostManager.ts
3
- * @author Abhilash Panwar (abpanwar); Hector Hernandez (hectorh); Nev Wylie (newylie)
4
- * @copyright Microsoft 2018-2020
5
- */
6
- import dynamicProto from "@microsoft/dynamicproto-js";
7
- import {
8
- BaseTelemetryPlugin, EventLatencyValue, EventSendType, EventsDiscardedReason, IAppInsightsCore, IChannelControls,
9
- IExtendedAppInsightsCore, IExtendedConfiguration, IPlugin, IProcessTelemetryContext, IProcessTelemetryUnloadContext, ITelemetryItem,
10
- ITelemetryUnloadState, NotificationManager, SendRequestReason, _eInternalMessageId, _throwInternal, addPageHideEventListener,
11
- addPageShowEventListener, addPageUnloadEventListener, arrForEach, createUniqueNamespace, doPerf, eLoggingSeverity, getWindow, isChromium,
12
- isNumber, isValueAssigned, mergeEvtNamespace, objDefineAccessors, objForEachKey, optimizeObject, removePageHideEventListener,
13
- removePageShowEventListener, removePageUnloadEventListener, setProcessTelemetryTimings
14
- } from "@microsoft/1ds-core-js";
15
- import {
16
- BE_PROFILE, EventBatchNotificationReason, IChannelConfiguration, IPostChannel, IPostTransmissionTelemetryItem, IXHROverride, NRT_PROFILE,
17
- RT_PROFILE
18
- } from "./DataModels";
19
- import { EventBatch } from "./EventBatch";
20
- import { HttpManager } from "./HttpManager";
21
- import { STR_MSA_DEVICE_TICKET, STR_TRACE, STR_USER } from "./InternalConstants";
22
- import { retryPolicyGetMillisToBackoffForRetry } from "./RetryPolicy";
23
- import { ITimeoutOverrideWrapper, TimeoutClearFunc, TimeoutSetFunc, createTimeoutWrapper } from "./TimeoutOverrideWrapper";
24
-
25
- const FlushCheckTimer = 0.250; // This needs to be in seconds, so this is 250ms
26
- const MaxNumberEventPerBatch = 500;
27
- const EventsDroppedAtOneTime = 20;
28
- const MaxSendAttempts = 6;
29
- const MaxSyncUnloadSendAttempts = 2; // Assuming 2 based on beforeunload and unload
30
- const MaxBackoffCount = 4;
31
- const MaxConnections = 2;
32
- const MaxRequestRetriesBeforeBackoff = 1;
33
-
34
- const strEventsDiscarded = "eventsDiscarded";
35
- const strOverrideInstrumentationKey = "overrideInstrumentationKey";
36
-
37
- const strMaxEventRetryAttempts = "maxEventRetryAttempts";
38
- const strMaxUnloadEventRetryAttempts = "maxUnloadEventRetryAttempts";
39
-
40
- const strAddUnloadCb = "addUnloadCb";
41
-
42
- interface IPostChannelBatchQueue {
43
- /**
44
- * This is the actual queue of event batches
45
- */
46
- batches: EventBatch[];
47
-
48
- /**
49
- * This is just a lookup map using the iKey to link to the batch in the batches queue
50
- */
51
- iKeyMap: { [iKey: string]: EventBatch };
52
- }
53
-
54
- /**
55
- * Class that manages adding events to inbound queues and batching of events
56
- * into requests.
57
- */
58
- export default class PostChannel extends BaseTelemetryPlugin implements IChannelControls, IPostChannel {
59
-
60
- public identifier = "PostChannel";
61
- public priority = 1011;
62
- public version = "#version#";
63
- public _notificationManager: NotificationManager | undefined;
64
-
65
- /** @deprecated This property is not intended to be used directly please let us know if you have taken a dependency on this property as it may be removed in a future release */
66
- public _setTimeoutOverride: typeof setTimeout;
67
-
68
- /** @deprecated This property is not intended to be used directly please let us know if you have taken a dependency on this property as it may be removed in a future release */
69
- public _clearTimeoutOverride: typeof clearTimeout;
70
-
71
- constructor() {
72
- super();
73
-
74
- let _config: IChannelConfiguration;
75
- let _isTeardownCalled = false;
76
- let _flushCallbackQueue: Array<() => void> = [];
77
- let _flushCallbackTimerId: any = null;
78
- let _paused = false;
79
- let _immediateQueueSize = 0;
80
- let _immediateQueueSizeLimit = 500;
81
- let _queueSize = 0;
82
- let _queueSizeLimit = 10000;
83
- let _profiles: { [profileName: string]: number[] } = {};
84
- let _currentProfile = RT_PROFILE;
85
- let _scheduledTimerId: any = null;
86
- let _immediateTimerId: any = null;
87
- let _currentBackoffCount = 0;
88
- let _timerCount = 0;
89
- let _xhrOverride: IXHROverride | undefined;
90
- let _httpManager: HttpManager;
91
- let _batchQueues: { [eventLatency: number]: IPostChannelBatchQueue } = {};
92
- let _autoFlushEventsLimit: number | undefined;
93
- // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])
94
- let _autoFlushBatchLimit: number;
95
- let _delayedBatchSendLatency = -1;
96
- let _delayedBatchReason: SendRequestReason;
97
- let _optimizeObject: boolean = true;
98
- let _isPageUnloadTriggered = false;
99
- let _maxEventSendAttempts: number = MaxSendAttempts;
100
- let _maxUnloadEventSendAttempts: number = MaxSyncUnloadSendAttempts;
101
- let _evtNamespace: string | string[];
102
- let _timeoutWrapper: ITimeoutOverrideWrapper;
103
-
104
- dynamicProto(PostChannel, this, (_self, _base) => {
105
- _initDefaults();
106
-
107
- // Special internal method to allow the DebugPlugin to hook embedded objects
108
- _self["_getDbgPlgTargets"] = () => {
109
- return [_httpManager];
110
- };
111
-
112
- _self.initialize = (coreConfig: IExtendedConfiguration, core: IAppInsightsCore, extensions: IPlugin[]) => {
113
- doPerf(core, () => "PostChannel:initialize", () => {
114
- let extendedCore = core as IExtendedAppInsightsCore;
115
- _base.initialize(coreConfig, core, extensions);
116
- try {
117
- let hasAddUnloadCb = !!core[strAddUnloadCb];
118
- _evtNamespace = mergeEvtNamespace(createUniqueNamespace(_self.identifier), core.evtNamespace && core.evtNamespace());
119
-
120
- let ctx = _self._getTelCtx();
121
- coreConfig.extensionConfig[_self.identifier] = coreConfig.extensionConfig[_self.identifier] || {};
122
- _config = ctx.getExtCfg(_self.identifier);
123
- _timeoutWrapper = createTimeoutWrapper(_config.setTimeoutOverride, _config.clearTimeoutOverride);
124
-
125
- // Only try and use the optimizeObject() if this appears to be a chromium based browser and it has not been explicitly disabled
126
- _optimizeObject = !_config.disableOptimizeObj && isChromium();
127
-
128
- _hookWParam(extendedCore);
129
-
130
- if (_config.eventsLimitInMem > 0) {
131
- _queueSizeLimit = _config.eventsLimitInMem;
132
- }
133
-
134
- if (_config.immediateEventLimit > 0) {
135
- _immediateQueueSizeLimit = _config.immediateEventLimit;
136
- }
137
-
138
- if (_config.autoFlushEventsLimit > 0) {
139
- _autoFlushEventsLimit = _config.autoFlushEventsLimit;
140
- }
141
-
142
- if (isNumber(_config[strMaxEventRetryAttempts])) {
143
- _maxEventSendAttempts = _config[strMaxEventRetryAttempts];
144
- }
145
-
146
- if (isNumber(_config[strMaxUnloadEventRetryAttempts])) {
147
- _maxUnloadEventSendAttempts = _config[strMaxUnloadEventRetryAttempts];
148
- }
149
-
150
- _setAutoLimits();
151
-
152
- if (_config.httpXHROverride && _config.httpXHROverride.sendPOST) {
153
- _xhrOverride = _config.httpXHROverride;
154
- }
155
- if (isValueAssigned(coreConfig.anonCookieName)) {
156
- _httpManager.addQueryStringParameter("anoncknm", coreConfig.anonCookieName);
157
- }
158
- _httpManager.sendHook = _config.payloadPreprocessor;
159
- _httpManager.sendListener = _config.payloadListener;
160
-
161
- // Override endpointUrl if provided in Post config
162
- let endpointUrl = _config.overrideEndpointUrl ? _config.overrideEndpointUrl : coreConfig.endpointUrl;
163
- _self._notificationManager = core.getNotifyMgr();
164
- _httpManager.initialize(endpointUrl, _self.core as IExtendedAppInsightsCore, _self, _xhrOverride, _config);
165
-
166
- let excludePageUnloadEvents = coreConfig.disablePageUnloadEvents || [];
167
-
168
- // When running in Web browsers try to send all telemetry if page is unloaded
169
- addPageUnloadEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);
170
- addPageHideEventListener(_handleUnloadEvents, excludePageUnloadEvents, _evtNamespace);
171
- addPageShowEventListener(_handleShowEvents, coreConfig.disablePageShowEvents, _evtNamespace);
172
- } catch (e) {
173
- // resetting the initialized state because of failure
174
- _self.setInitialized(false);
175
- throw e;
176
- }
177
- }, () => ({ coreConfig, core, extensions }));
178
- };
179
-
180
- _self.processTelemetry = (ev: ITelemetryItem, itemCtx?: IProcessTelemetryContext): void => {
181
- setProcessTelemetryTimings(ev, _self.identifier);
182
- itemCtx = _self._getTelCtx(itemCtx);
183
-
184
- // Get the channel instance from the current request/instance
185
- let channelConfig: IChannelConfiguration = itemCtx.getExtCfg(_self.identifier);
186
-
187
- // DisableTelemetry was defined in the config provided during initialization
188
- let disableTelemetry = !!_config.disableTelemetry;
189
- if (channelConfig) {
190
- // DisableTelemetry is defined in the config for this request/instance
191
- disableTelemetry = disableTelemetry || !!channelConfig.disableTelemetry;
192
- }
193
-
194
- var event = ev as IPostTransmissionTelemetryItem;
195
- if (!disableTelemetry && !_isTeardownCalled) {
196
- // Override iKey if provided in Post config if provided for during initialization
197
- if (_config[strOverrideInstrumentationKey]) {
198
- event.iKey = _config[strOverrideInstrumentationKey];
199
- }
200
-
201
- // Override iKey if provided in Post config if provided for this instance
202
- if (channelConfig && channelConfig[strOverrideInstrumentationKey]) {
203
- event.iKey = channelConfig[strOverrideInstrumentationKey];
204
- }
205
-
206
- _addEventToQueues(event, true);
207
-
208
- if (_isPageUnloadTriggered) {
209
- // Unload event has been received so we need to try and flush new events
210
- _releaseAllQueues(EventSendType.SendBeacon, SendRequestReason.Unload);
211
- } else {
212
- _scheduleTimer();
213
- }
214
- }
215
-
216
- _self.processNext(event, itemCtx);
217
- };
218
-
219
- _self._doTeardown = (unloadCtx?: IProcessTelemetryUnloadContext, unloadState?: ITelemetryUnloadState) => {
220
- _releaseAllQueues(EventSendType.SendBeacon, SendRequestReason.Unload);
221
- _isTeardownCalled = true;
222
- _httpManager.teardown();
223
-
224
- removePageUnloadEventListener(null, _evtNamespace);
225
- removePageHideEventListener(null, _evtNamespace);
226
- removePageShowEventListener(null, _evtNamespace);
227
-
228
- // Just register to remove all events associated with this namespace
229
- _initDefaults();
230
- };
231
-
232
- function _hookWParam(extendedCore: IExtendedAppInsightsCore) {
233
- var existingGetWParamMethod = extendedCore.getWParam;
234
- extendedCore.getWParam = () => {
235
- var wparam = 0;
236
- if (_config.ignoreMc1Ms0CookieProcessing) {
237
- wparam = wparam | 2;
238
- }
239
- return wparam | existingGetWParamMethod();
240
- };
241
- }
242
-
243
- // Moving event handlers out from the initialize closure so that any local variables can be garbage collected
244
- function _handleUnloadEvents(evt: any) {
245
- let theEvt = evt || getWindow().event; // IE 8 does not pass the event
246
- if (theEvt.type !== "beforeunload") {
247
- // Only set the unload trigger if not beforeunload event as beforeunload can be cancelled while the other events can't
248
- _isPageUnloadTriggered = true;
249
- _httpManager.setUnloading(_isPageUnloadTriggered);
250
- }
251
-
252
- _releaseAllQueues(EventSendType.SendBeacon, SendRequestReason.Unload);
253
- }
254
-
255
- function _handleShowEvents(evt: any) {
256
- // Handle the page becoming visible again
257
- _isPageUnloadTriggered = false;
258
- _httpManager.setUnloading(_isPageUnloadTriggered);
259
- }
260
-
261
- function _addEventToQueues(event: IPostTransmissionTelemetryItem, append: boolean) {
262
- // If send attempt field is undefined we should set it to 0.
263
- if (!event.sendAttempt) {
264
- event.sendAttempt = 0;
265
- }
266
- // Add default latency
267
- if (!event.latency) {
268
- event.latency = EventLatencyValue.Normal;
269
- }
270
-
271
- // Remove extra AI properties if present
272
- if (event.ext && event.ext[STR_TRACE]) {
273
- delete (event.ext[STR_TRACE]);
274
- }
275
- if (event.ext && event.ext[STR_USER] && event.ext[STR_USER]["id"]) {
276
- delete (event.ext[STR_USER]["id"]);
277
- }
278
-
279
- // v8 performance optimization for iterating over the keys
280
- if (_optimizeObject) {
281
- event.ext = optimizeObject(event.ext);
282
- if (event.baseData) {
283
- event.baseData = optimizeObject(event.baseData);
284
- }
285
- if (event.data) {
286
- event.data = optimizeObject(event.data);
287
- }
288
- }
289
-
290
- if (event.sync) {
291
- // If the transmission is backed off then do not send synchronous events.
292
- // We will convert these events to Real time latency instead.
293
- if (_currentBackoffCount || _paused) {
294
- event.latency = EventLatencyValue.RealTime;
295
- event.sync = false;
296
- } else {
297
- // Log the event synchronously
298
- if (_httpManager) {
299
- // v8 performance optimization for iterating over the keys
300
- if (_optimizeObject) {
301
- event = optimizeObject(event);
302
- }
303
-
304
- _httpManager.sendSynchronousBatch(
305
- EventBatch.create(event.iKey, [event]),
306
- event.sync === true ? EventSendType.Synchronous : event.sync as EventSendType,
307
- SendRequestReason.SyncEvent);
308
- return;
309
- }
310
- }
311
- }
312
-
313
- let evtLatency = event.latency;
314
- let queueSize = _queueSize;
315
- let queueLimit = _queueSizeLimit;
316
- if (evtLatency === EventLatencyValue.Immediate) {
317
- queueSize = _immediateQueueSize;
318
- queueLimit = _immediateQueueSizeLimit;
319
- }
320
-
321
- let eventDropped = false;
322
- // Only add the event if the queue isn't full or it's a direct event (which don't add to the queue sizes)
323
- if (queueSize < queueLimit) {
324
- eventDropped = !_addEventToProperQueue(event, append);
325
- } else {
326
- let dropLatency = EventLatencyValue.Normal;
327
- let dropNumber = EventsDroppedAtOneTime;
328
- if (evtLatency === EventLatencyValue.Immediate) {
329
- // Only drop other immediate events as they are not technically sharing the general queue
330
- dropLatency = EventLatencyValue.Immediate;
331
- dropNumber = 1;
332
- }
333
-
334
- // Drop old event from lower or equal latency
335
- eventDropped = true;
336
- if (_dropEventWithLatencyOrLess(event.iKey, event.latency, dropLatency, dropNumber)) {
337
- eventDropped = !_addEventToProperQueue(event, append);
338
- }
339
- }
340
-
341
- if (eventDropped) {
342
- // Can't drop events from current queues because the all the slots are taken by queues that are being flushed.
343
- _notifyEvents(strEventsDiscarded, [event], EventsDiscardedReason.QueueFull);
344
- }
345
- }
346
-
347
- _self.setEventQueueLimits = (eventLimit: number, autoFlushLimit?: number) => {
348
- _queueSizeLimit = eventLimit > 0 ? eventLimit : 10000;
349
- _autoFlushEventsLimit = autoFlushLimit > 0 ? autoFlushLimit : 0;
350
- _setAutoLimits();
351
-
352
- // We only do this check here as during normal event addition if the queue is > then events start getting dropped
353
- let doFlush = _queueSize > eventLimit;
354
-
355
- if (!doFlush && _autoFlushBatchLimit > 0) {
356
- // Check the auto flush max batch size
357
- for (let latency = EventLatencyValue.Normal; !doFlush && latency <= EventLatencyValue.RealTime; latency++) {
358
- let batchQueue: IPostChannelBatchQueue = _batchQueues[latency];
359
- if (batchQueue && batchQueue.batches) {
360
- arrForEach(batchQueue.batches, (theBatch) => {
361
- if (theBatch && theBatch.count() >= _autoFlushBatchLimit) {
362
- // If any 1 batch is > than the limit then trigger an auto flush
363
- doFlush = true;
364
- }
365
- });
366
- }
367
- }
368
- }
369
-
370
- _performAutoFlush(true, doFlush);
371
- };
372
-
373
- _self.pause = () => {
374
- _clearScheduledTimer();
375
- _paused = true;
376
- _httpManager.pause();
377
- };
378
-
379
- _self.resume = () => {
380
- _paused = false;
381
- _httpManager.resume();
382
- _scheduleTimer();
383
- };
384
-
385
- _self.addResponseHandler = (responseHandler: (responseText: string) => void) => {
386
- _httpManager._responseHandlers.push(responseHandler);
387
- };
388
-
389
- _self._loadTransmitProfiles = (profiles: { [profileName: string]: number[] }) => {
390
- _resetTransmitProfiles();
391
- objForEachKey(profiles, (profileName, profileValue) => {
392
- let profLen = profileValue.length;
393
- if (profLen >= 2) {
394
- let directValue = (profLen > 2 ? profileValue[2] : 0);
395
- profileValue.splice(0, profLen - 2);
396
- // Make sure if a higher latency is set to not send then don't send lower latency
397
- if (profileValue[1] < 0) {
398
- profileValue[0] = -1;
399
- }
400
-
401
- // Make sure each latency is multiple of the latency higher then it. If not a multiple
402
- // we round up so that it becomes a multiple.
403
- if (profileValue[1] > 0 && profileValue[0] > 0) {
404
- let timerMultiplier = profileValue[0] / profileValue[1];
405
- profileValue[0] = Math.ceil(timerMultiplier) * profileValue[1];
406
- }
407
-
408
- // Add back the direct profile timeout
409
- if (directValue >= 0 && profileValue[1] >= 0 && directValue > profileValue[1]) {
410
- // Make sure if it's not disabled (< 0) then make sure it's not larger than RealTime
411
- directValue = profileValue[1];
412
- }
413
- profileValue.push(directValue);
414
- _profiles[profileName] = profileValue;
415
- }
416
- });
417
- };
418
-
419
- _self.flush = (async = true, callback?: () => void, sendReason?: SendRequestReason) => {
420
- if (!_paused) {
421
-
422
- sendReason = sendReason || SendRequestReason.ManualFlush;
423
-
424
- if (async) {
425
- if (_flushCallbackTimerId == null) {
426
- // Clear the normal schedule timer as we are going to try and flush ASAP
427
- _clearScheduledTimer();
428
-
429
- // Move all queued events to the HttpManager so that we don't discard new events (Auto flush scenario)
430
- _queueBatches(EventLatencyValue.Normal, EventSendType.Batched, sendReason);
431
-
432
- _flushCallbackTimerId = _createTimer(() => {
433
- _flushCallbackTimerId = null;
434
- _flushImpl(callback, sendReason);
435
- }, 0);
436
- } else {
437
- // Even if null (no callback) this will ensure after the flushImpl finishes waiting
438
- // for a completely idle connection it will attempt to re-flush any queued events on the next cycle
439
- _flushCallbackQueue.push(callback);
440
- }
441
- } else {
442
- // Clear the normal schedule timer as we are going to try and flush ASAP
443
- let cleared = _clearScheduledTimer();
444
-
445
- // Now cause all queued events to be sent synchronously
446
- _sendEventsForLatencyAndAbove(EventLatencyValue.Normal, EventSendType.Synchronous, sendReason);
447
-
448
- if (callback !== null && callback !== undefined) {
449
- callback();
450
- }
451
-
452
- if (cleared) {
453
- // restart the normal event timer if it was cleared
454
- _scheduleTimer();
455
- }
456
- }
457
- }
458
- };
459
-
460
- _self.setMsaAuthTicket = (ticket: string) => {
461
- _httpManager.addHeader(STR_MSA_DEVICE_TICKET, ticket);
462
- };
463
-
464
- _self.hasEvents = _hasEvents;
465
-
466
- _self._setTransmitProfile = (profileName: string) => {
467
- if (_currentProfile !== profileName && _profiles[profileName] !== undefined) {
468
- _clearScheduledTimer();
469
- _currentProfile = profileName;
470
- _scheduleTimer();
471
- }
472
- };
473
-
474
- /**
475
- * Batch and send events currently in the queue for the given latency.
476
- * @param latency - Latency for which to send events.
477
- */
478
- function _sendEventsForLatencyAndAbove(latency: number, sendType: EventSendType, sendReason: SendRequestReason): boolean {
479
- let queued = _queueBatches(latency, sendType, sendReason);
480
-
481
- // Always trigger the request as while the post channel may not have queued additional events, the httpManager may already have waiting events
482
- _httpManager.sendQueuedRequests(sendType, sendReason);
483
-
484
- return queued;
485
- }
486
-
487
- function _hasEvents(): boolean {
488
- return _queueSize > 0;
489
- }
490
-
491
- /**
492
- * Try to schedule the timer after which events will be sent. If there are
493
- * no events to be sent, or there is already a timer scheduled, or the
494
- * http manager doesn't have any idle connections this method is no-op.
495
- */
496
- function _scheduleTimer() {
497
- // If we had previously attempted to send requests, but the http manager didn't have any idle connections then the requests where delayed
498
- // so try and requeue then again now
499
- if (_delayedBatchSendLatency >= 0 && _queueBatches(_delayedBatchSendLatency, EventSendType.Batched, _delayedBatchReason)) {
500
- _httpManager.sendQueuedRequests(EventSendType.Batched, _delayedBatchReason);
501
- }
502
-
503
- if (_immediateQueueSize > 0 && !_immediateTimerId && !_paused) {
504
- // During initialization _profiles enforce that the direct [2] is less than real time [1] timer value
505
- // If the immediateTimeout is disabled the immediate events will be sent with Real Time events
506
- let immediateTimeOut = _profiles[_currentProfile][2];
507
- if (immediateTimeOut >= 0) {
508
- _immediateTimerId = _createTimer(() => {
509
- _immediateTimerId = null;
510
- // Only try to send direct events
511
- _sendEventsForLatencyAndAbove(EventLatencyValue.Immediate, EventSendType.Batched, SendRequestReason.NormalSchedule);
512
- _scheduleTimer();
513
- }, immediateTimeOut);
514
- }
515
- }
516
-
517
- // During initialization the _profiles enforce that the normal [0] is a multiple of the real time [1] timer value
518
- let timeOut = _profiles[_currentProfile][1];
519
- if (!_scheduledTimerId && !_flushCallbackTimerId && timeOut >= 0 && !_paused) {
520
- if (_hasEvents()) {
521
- _scheduledTimerId = _createTimer(() => {
522
- _scheduledTimerId = null;
523
- _sendEventsForLatencyAndAbove(_timerCount === 0 ? EventLatencyValue.RealTime : EventLatencyValue.Normal, EventSendType.Batched, SendRequestReason.NormalSchedule);
524
-
525
- // Increment the count for next cycle
526
- _timerCount++;
527
- _timerCount %= 2;
528
-
529
- _scheduleTimer();
530
- }, timeOut);
531
- } else {
532
- _timerCount = 0;
533
- }
534
- }
535
- }
536
-
537
- _self._backOffTransmission = () => {
538
- if (_currentBackoffCount < MaxBackoffCount) {
539
- _currentBackoffCount++;
540
- _clearScheduledTimer();
541
- _scheduleTimer();
542
- }
543
- };
544
-
545
- _self._clearBackOff = () => {
546
- if (_currentBackoffCount) {
547
- _currentBackoffCount = 0;
548
- _clearScheduledTimer();
549
- _scheduleTimer();
550
- }
551
- };
552
-
553
- function _initDefaults() {
554
- _config = null;
555
- _isTeardownCalled = false;
556
- _flushCallbackQueue = [];
557
- _flushCallbackTimerId = null;
558
- _paused = false;
559
- _immediateQueueSize = 0;
560
- _immediateQueueSizeLimit = 500;
561
- _queueSize = 0;
562
- _queueSizeLimit = 10000;
563
- _profiles = {};
564
- _currentProfile = RT_PROFILE;
565
- _scheduledTimerId = null;
566
- _immediateTimerId = null;
567
- _currentBackoffCount = 0;
568
- _timerCount = 0;
569
- _xhrOverride = null;
570
- _batchQueues = {};
571
- _autoFlushEventsLimit = undefined;
572
-
573
- // either MaxBatchSize * (1+ Max Connections) or _queueLimit / 6 (where 3 latency Queues [normal, realtime, cost deferred] * 2 [allow half full -- allow for retry])
574
- _autoFlushBatchLimit = 0;
575
- _delayedBatchSendLatency = -1;
576
- _delayedBatchReason = null;
577
- _optimizeObject = true;
578
- _isPageUnloadTriggered = false;
579
- _maxEventSendAttempts = MaxSendAttempts;
580
- _maxUnloadEventSendAttempts = MaxSyncUnloadSendAttempts;
581
- _evtNamespace = null;
582
- _timeoutWrapper = createTimeoutWrapper();
583
- _httpManager = new HttpManager(MaxNumberEventPerBatch, MaxConnections, MaxRequestRetriesBeforeBackoff, {
584
- requeue: _requeueEvents,
585
- send: _sendingEvent,
586
- sent: _eventsSentEvent,
587
- drop: _eventsDropped,
588
- rspFail: _eventsResponseFail,
589
- oth: _otherEvent
590
- }, _timeoutWrapper);
591
-
592
- _initializeProfiles();
593
- _clearQueues();
594
- _setAutoLimits();
595
- }
596
-
597
- function _createTimer(theTimerFunc: () => void, timeOut: number): any {
598
- // If the transmission is backed off make the timer at least 1 sec to allow for back off.
599
- if (timeOut === 0 && _currentBackoffCount) {
600
- timeOut = 1;
601
- }
602
-
603
- let timerMultiplier = 1000;
604
- if (_currentBackoffCount) {
605
- timerMultiplier = retryPolicyGetMillisToBackoffForRetry(_currentBackoffCount - 1);
606
- }
607
-
608
- return _timeoutWrapper.set(theTimerFunc, timeOut * timerMultiplier);
609
- }
610
- function _clearScheduledTimer() {
611
- if (_scheduledTimerId !== null) {
612
- _timeoutWrapper.clear(_scheduledTimerId);
613
- _scheduledTimerId = null;
614
- _timerCount = 0;
615
- return true;
616
- }
617
-
618
- return false;
619
- }
620
-
621
- // Try to send all queued events using beacons if available
622
- function _releaseAllQueues(sendType: EventSendType, sendReason: SendRequestReason) {
623
- _clearScheduledTimer();
624
-
625
- // Cancel all flush callbacks
626
- if (_flushCallbackTimerId) {
627
- _timeoutWrapper.clear(_flushCallbackTimerId);
628
- _flushCallbackTimerId = null;
629
- }
630
-
631
- if (!_paused) {
632
- // Queue all the remaining requests to be sent. The requests will be sent using HTML5 Beacons if they are available.
633
- _sendEventsForLatencyAndAbove(EventLatencyValue.Normal, sendType, sendReason);
634
- }
635
- }
636
-
637
- /**
638
- * Add empty queues for all latencies in the inbound queues map. This is called
639
- * when Transmission Manager is being flushed. This ensures that new events added
640
- * after flush are stored separately till we flush the current events.
641
- */
642
- function _clearQueues() {
643
- _batchQueues[EventLatencyValue.Immediate] = {
644
- batches: [],
645
- iKeyMap: {}
646
- };
647
- _batchQueues[EventLatencyValue.RealTime] = {
648
- batches: [],
649
- iKeyMap: {}
650
- };
651
- _batchQueues[EventLatencyValue.CostDeferred] = {
652
- batches: [],
653
- iKeyMap: {}
654
- };
655
- _batchQueues[EventLatencyValue.Normal] = {
656
- batches: [],
657
- iKeyMap: {}
658
- };
659
- }
660
-
661
- function _getEventBatch(iKey: string, latency: number, create: boolean) {
662
- let batchQueue: IPostChannelBatchQueue = _batchQueues[latency];
663
- if (!batchQueue) {
664
- latency = EventLatencyValue.Normal;
665
- batchQueue = _batchQueues[latency];
666
- }
667
-
668
- let eventBatch = batchQueue.iKeyMap[iKey];
669
- if (!eventBatch && create) {
670
- eventBatch = EventBatch.create(iKey);
671
- batchQueue.batches.push(eventBatch);
672
- batchQueue.iKeyMap[iKey] = eventBatch;
673
- }
674
-
675
- return eventBatch;
676
- }
677
-
678
- function _performAutoFlush(isAsync: boolean, doFlush?: boolean) {
679
- // Only perform the auto flush check if the httpManager has an idle connection and we are not in a backoff situation
680
- if (_httpManager.canSendRequest() && !_currentBackoffCount) {
681
- if (_autoFlushEventsLimit > 0 && _queueSize > _autoFlushEventsLimit) {
682
- // Force flushing
683
- doFlush = true;
684
- }
685
-
686
- if (doFlush && _flushCallbackTimerId == null) {
687
- // Auto flush the queue
688
- _self.flush(isAsync, null, SendRequestReason.MaxQueuedEvents);
689
- }
690
- }
691
- }
692
-
693
- function _addEventToProperQueue(event: IPostTransmissionTelemetryItem, append: boolean): boolean {
694
- // v8 performance optimization for iterating over the keys
695
- if (_optimizeObject) {
696
- event = optimizeObject(event);
697
- }
698
-
699
- const latency = event.latency;
700
- let eventBatch = _getEventBatch(event.iKey, latency, true);
701
- if (eventBatch.addEvent(event)) {
702
- if (latency !== EventLatencyValue.Immediate) {
703
- _queueSize++;
704
-
705
- // Check for auto flushing based on total events in the queue, but not for requeued or retry events
706
- if (append && event.sendAttempt === 0) {
707
- // Force the flushing of the batch if the batch (specific iKey / latency combination) reaches it's auto flush limit
708
- _performAutoFlush(!event.sync, _autoFlushBatchLimit > 0 && eventBatch.count() >= _autoFlushBatchLimit);
709
- }
710
- } else {
711
- // Direct events don't need auto flushing as they are scheduled (by default) for immediate delivery
712
- _immediateQueueSize++;
713
- }
714
-
715
- return true;
716
- }
717
-
718
- return false;
719
- }
720
-
721
- function _dropEventWithLatencyOrLess(iKey: string, latency: number, currentLatency: number, dropNumber: number): boolean {
722
- while (currentLatency <= latency) {
723
- let eventBatch = _getEventBatch(iKey, latency, true);
724
- if (eventBatch && eventBatch.count() > 0) {
725
- // Dropped oldest events from lowest possible latency
726
- let droppedEvents = eventBatch.split(0, dropNumber);
727
- let droppedCount = droppedEvents.count();
728
- if (droppedCount > 0) {
729
- if (currentLatency === EventLatencyValue.Immediate) {
730
- _immediateQueueSize -= droppedCount;
731
- } else {
732
- _queueSize -= droppedCount;
733
- }
734
-
735
- _notifyBatchEvents(strEventsDiscarded, [droppedEvents], EventsDiscardedReason.QueueFull);
736
- return true;
737
- }
738
- }
739
-
740
- currentLatency++;
741
- }
742
-
743
- // Unable to drop any events -- lets just make sure the queue counts are correct to avoid exhaustion
744
- _resetQueueCounts();
745
-
746
- return false;
747
- }
748
-
749
- /**
750
- * Internal helper to reset the queue counts, used as a backstop to avoid future queue exhaustion errors
751
- * that might occur because of counting issues.
752
- */
753
- function _resetQueueCounts() {
754
- let immediateQueue = 0;
755
- let normalQueue = 0;
756
- for (let latency = EventLatencyValue.Normal; latency <= EventLatencyValue.Immediate; latency++) {
757
- let batchQueue: IPostChannelBatchQueue = _batchQueues[latency];
758
- if (batchQueue && batchQueue.batches) {
759
- arrForEach(batchQueue.batches, (theBatch) => {
760
- if (latency === EventLatencyValue.Immediate) {
761
- immediateQueue += theBatch.count();
762
- } else {
763
- normalQueue += theBatch.count();
764
- }
765
- });
766
- }
767
- }
768
-
769
- _queueSize = normalQueue;
770
- _immediateQueueSize = immediateQueue;
771
- }
772
-
773
- function _queueBatches(latency: number, sendType: EventSendType, sendReason: SendRequestReason): boolean {
774
- let eventsQueued = false;
775
- let isAsync = sendType === EventSendType.Batched;
776
-
777
- // Only queue batches (to the HttpManager) if this is a sync request or the httpManager has an idle connection
778
- // Thus keeping the events within the PostChannel until the HttpManager has a connection available
779
- // This is so we can drop "old" events if the queue is getting full because we can't successfully send events
780
- if (!isAsync || _httpManager.canSendRequest()) {
781
- doPerf(_self.core, () => "PostChannel._queueBatches", () => {
782
- let droppedEvents = [];
783
- let latencyToProcess = EventLatencyValue.Immediate;
784
- while (latencyToProcess >= latency) {
785
- let batchQueue: IPostChannelBatchQueue = _batchQueues[latencyToProcess];
786
- if (batchQueue && batchQueue.batches && batchQueue.batches.length > 0) {
787
- arrForEach(batchQueue.batches, (theBatch) => {
788
- // Add the batch to the http manager to send the requests
789
- if (!_httpManager.addBatch(theBatch)) {
790
- // The events from this iKey are being dropped (killed)
791
- droppedEvents = droppedEvents.concat(theBatch.events());
792
- } else {
793
- eventsQueued = eventsQueued || (theBatch && theBatch.count() > 0);
794
- }
795
-
796
- if (latencyToProcess === EventLatencyValue.Immediate) {
797
- _immediateQueueSize -= theBatch.count();
798
- } else {
799
- _queueSize -= theBatch.count();
800
- }
801
- });
802
-
803
- // Remove all batches from this Queue
804
- batchQueue.batches = [];
805
- batchQueue.iKeyMap = {};
806
- }
807
-
808
- latencyToProcess--;
809
- }
810
-
811
- if (droppedEvents.length > 0) {
812
- _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.KillSwitch);
813
- }
814
-
815
- if (eventsQueued && _delayedBatchSendLatency >= latency) {
816
- // We have queued events at the same level as the delayed values so clear the setting
817
- _delayedBatchSendLatency = -1;
818
- _delayedBatchReason = SendRequestReason.Undefined;
819
- }
820
- }, () => ({ latency, sendType, sendReason }), !isAsync);
821
- } else {
822
- // remember the min latency so that we can re-trigger later
823
- _delayedBatchSendLatency = _delayedBatchSendLatency >= 0 ? Math.min(_delayedBatchSendLatency, latency) : latency;
824
- _delayedBatchReason = Math.max(_delayedBatchReason, sendReason);
825
- }
826
-
827
- return eventsQueued;
828
- }
829
-
830
- /**
831
- * This is the callback method is called as part of the manual flushing process.
832
- * @param callback
833
- * @param sendReason
834
- */
835
- function _flushImpl(callback: () => void, sendReason: SendRequestReason) {
836
- // Add any additional queued events and cause all queued events to be sent asynchronously
837
- _sendEventsForLatencyAndAbove(EventLatencyValue.Normal, EventSendType.Batched, sendReason);
838
-
839
- // All events (should) have been queue -- lets just make sure the queue counts are correct to avoid queue exhaustion (previous bug #9685112)
840
- _resetQueueCounts();
841
-
842
- _waitForIdleManager(() => {
843
- // Only called AFTER the httpManager does not have any outstanding requests
844
- if (callback) {
845
- callback();
846
- }
847
-
848
- if (_flushCallbackQueue.length > 0) {
849
- _flushCallbackTimerId = _createTimer(() => {
850
- _flushCallbackTimerId = null;
851
- _flushImpl(_flushCallbackQueue.shift(), sendReason);
852
- }, 0);
853
- } else {
854
- // No more flush requests
855
- _flushCallbackTimerId = null;
856
-
857
- // Restart the normal timer schedule
858
- _scheduleTimer();
859
- }
860
- });
861
- }
862
-
863
- function _waitForIdleManager(callback: () => void) {
864
- if (_httpManager.isCompletelyIdle()) {
865
- callback();
866
- } else {
867
- _flushCallbackTimerId = _createTimer(() => {
868
- _flushCallbackTimerId = null;
869
- _waitForIdleManager(callback);
870
- }, FlushCheckTimer);
871
- }
872
- }
873
-
874
- /**
875
- * Resets the transmit profiles to the default profiles of Real Time, Near Real Time
876
- * and Best Effort. This removes all the custom profiles that were loaded.
877
- */
878
- function _resetTransmitProfiles() {
879
- _clearScheduledTimer();
880
- _initializeProfiles();
881
- _currentProfile = RT_PROFILE;
882
- _scheduleTimer();
883
- }
884
-
885
- function _initializeProfiles() {
886
- _profiles = {};
887
- _profiles[RT_PROFILE] = [2, 1, 0];
888
- _profiles[NRT_PROFILE] = [6, 3, 0];
889
- _profiles[BE_PROFILE] = [18, 9, 0];
890
- }
891
-
892
- /**
893
- * The notification handler for requeue events
894
- * @ignore
895
- */
896
- function _requeueEvents(batches: EventBatch[], reason?: number) {
897
- let droppedEvents: IPostTransmissionTelemetryItem[] = [];
898
- let maxSendAttempts = _maxEventSendAttempts;
899
- if (_isPageUnloadTriggered) {
900
- // If a page unlaod has been triggered reduce the number of times we try to "retry"
901
- maxSendAttempts = _maxUnloadEventSendAttempts;
902
- }
903
-
904
- arrForEach(batches, (theBatch) => {
905
- if (theBatch && theBatch.count() > 0) {
906
-
907
- arrForEach(theBatch.events(), (theEvent: IPostTransmissionTelemetryItem) => {
908
- if (theEvent) {
909
- // Check if the request being added back is for a sync event in which case mark it no longer a sync event
910
- if (theEvent.sync) {
911
- theEvent.latency = EventLatencyValue.Immediate;
912
- theEvent.sync = false;
913
- }
914
-
915
- if (theEvent.sendAttempt < maxSendAttempts) {
916
- // Reset the event timings
917
- setProcessTelemetryTimings(theEvent, _self.identifier);
918
- _addEventToQueues(theEvent, false);
919
- } else {
920
- droppedEvents.push(theEvent);
921
- }
922
- }
923
- });
924
- }
925
- });
926
-
927
- if (droppedEvents.length > 0) {
928
- _notifyEvents(strEventsDiscarded, droppedEvents, EventsDiscardedReason.NonRetryableStatus);
929
- }
930
-
931
- if (_isPageUnloadTriggered) {
932
- // Unload event has been received so we need to try and flush new events
933
- _releaseAllQueues(EventSendType.SendBeacon, SendRequestReason.Unload);
934
- }
935
- }
936
-
937
- function _callNotification(evtName: string, theArgs: any[]) {
938
- let manager = (_self._notificationManager || ({} as NotificationManager));
939
- let notifyFunc = manager[evtName];
940
- if (notifyFunc) {
941
- try {
942
- notifyFunc.apply(manager, theArgs);
943
- } catch (e) {
944
- _throwInternal(_self.diagLog(),
945
- eLoggingSeverity.CRITICAL,
946
- _eInternalMessageId.NotificationException,
947
- evtName + " notification failed: " + e);
948
- }
949
- }
950
- }
951
-
952
- function _notifyEvents(evtName: string, theEvents: IPostTransmissionTelemetryItem[], ...extraArgs) {
953
- if (theEvents && theEvents.length > 0) {
954
- _callNotification(evtName, [theEvents].concat(extraArgs));
955
- }
956
- }
957
-
958
- function _notifyBatchEvents(evtName: string, batches: EventBatch[], ...extraArgs) {
959
- if (batches && batches.length > 0) {
960
- arrForEach(batches, (theBatch) => {
961
- if (theBatch && theBatch.count() > 0) {
962
- _callNotification(evtName, [theBatch.events()].concat(extraArgs));
963
- }
964
- });
965
- }
966
- }
967
-
968
- /**
969
- * The notification handler for when batches are about to be sent
970
- * @ignore
971
- */
972
- function _sendingEvent(batches: EventBatch[], reason?: number, isSyncRequest?: boolean) {
973
- if (batches && batches.length > 0) {
974
- _callNotification(
975
- "eventsSendRequest",
976
- [(reason >= EventBatchNotificationReason.SendingUndefined && reason <= EventBatchNotificationReason.SendingEventMax ?
977
- reason - EventBatchNotificationReason.SendingUndefined :
978
- SendRequestReason.Undefined), isSyncRequest !== true]);
979
- }
980
- }
981
-
982
- /**
983
- * This event represents that a batch of events have been successfully sent and a response received
984
- * @param batches The notification handler for when the batches have been successfully sent
985
- * @param reason For this event the reason will always be EventBatchNotificationReason.Complete
986
- */
987
- function _eventsSentEvent(batches: EventBatch[], reason?: number) {
988
- _notifyBatchEvents("eventsSent", batches, reason);
989
-
990
- // Try and schedule the processing timer if we have events
991
- _scheduleTimer();
992
- }
993
-
994
- function _eventsDropped(batches: EventBatch[], reason?: number) {
995
- _notifyBatchEvents(
996
- strEventsDiscarded,
997
- batches,
998
- (reason >= EventBatchNotificationReason.EventsDropped && reason <= EventBatchNotificationReason.EventsDroppedMax ?
999
- reason - EventBatchNotificationReason.EventsDropped :
1000
- EventsDiscardedReason.Unknown));
1001
- }
1002
-
1003
- function _eventsResponseFail(batches: EventBatch[]) {
1004
- _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.NonRetryableStatus);
1005
-
1006
- // Try and schedule the processing timer if we have events
1007
- _scheduleTimer();
1008
- }
1009
-
1010
- function _otherEvent(batches: EventBatch[], reason?: number) {
1011
- _notifyBatchEvents(strEventsDiscarded, batches, EventsDiscardedReason.Unknown);
1012
-
1013
- // Try and schedule the processing timer if we have events
1014
- _scheduleTimer();
1015
- }
1016
-
1017
- function _setAutoLimits() {
1018
- if (!_config || !_config.disableAutoBatchFlushLimit) {
1019
- _autoFlushBatchLimit = Math.max(MaxNumberEventPerBatch * (MaxConnections + 1), _queueSizeLimit / 6);
1020
- } else {
1021
- _autoFlushBatchLimit = 0;
1022
- }
1023
- }
1024
-
1025
- // Provided for backward compatibility they are not "expected" to be in current use but they are public
1026
- objDefineAccessors(_self, "_setTimeoutOverride",
1027
- () => _timeoutWrapper.set,
1028
- (value: TimeoutSetFunc<any>) => {
1029
- // Recreate the timeout wrapper
1030
- _timeoutWrapper = createTimeoutWrapper(value, _timeoutWrapper.clear);
1031
- });
1032
-
1033
- objDefineAccessors(_self, "_clearTimeoutOverride",
1034
- () => _timeoutWrapper.clear,
1035
- (value: TimeoutClearFunc<any>) => {
1036
- // Recreate the timeout wrapper
1037
- _timeoutWrapper = createTimeoutWrapper(_timeoutWrapper.set, value);
1038
- });
1039
- });
1040
- }
1041
-
1042
- /**
1043
- * Start the queue manager to batch and send events via post.
1044
- * @param config - The core configuration.
1045
- */
1046
- public initialize(coreConfig: IExtendedConfiguration, core: IAppInsightsCore, extensions: IPlugin[]) {
1047
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1048
- }
1049
-
1050
- /**
1051
- * Add an event to the appropriate inbound queue based on its latency.
1052
- * @param ev - The event to be added to the queue.
1053
- * @param itemCtx - This is the context for the current request, ITelemetryPlugin instances
1054
- * can optionally use this to access the current core instance or define / pass additional information
1055
- * to later plugins (vs appending items to the telemetry item)
1056
- */
1057
- public processTelemetry(ev: ITelemetryItem, itemCtx?: IProcessTelemetryContext): void {
1058
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1059
- }
1060
-
1061
- /**
1062
- * Sets the event queue limits at runtime (after initialization), if the number of queued events is greater than the
1063
- * eventLimit or autoFlushLimit then a flush() operation will be scheduled.
1064
- * @param eventLimit The number of events that can be kept in memory before the SDK starts to drop events. If the value passed is less than or
1065
- * equal to zero the value will be reset to the default (10,000).
1066
- * @param autoFlushLimit When defined, once this number of events has been queued the system perform a flush() to send the queued events
1067
- * without waiting for the normal schedule timers. Passing undefined, null or a value less than or equal to zero will disable the auto flush.
1068
- */
1069
- public setEventQueueLimits(eventLimit: number, autoFlushLimit?: number) {
1070
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1071
- }
1072
-
1073
- /**
1074
- * Pause the transmission of any requests
1075
- */
1076
- public pause() {
1077
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1078
- }
1079
-
1080
- /**
1081
- * Resumes transmission of events.
1082
- */
1083
- public resume() {
1084
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1085
- }
1086
-
1087
- /**
1088
- * Add handler to be executed with request response text.
1089
- */
1090
- public addResponseHandler(responseHanlder: (responseText: string) => void) {
1091
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1092
- }
1093
-
1094
- /**
1095
- * Flush to send data immediately; channel should default to sending data asynchronously
1096
- * @param async - send data asynchronously when true
1097
- * @param callback - if specified, notify caller when send is complete
1098
- */
1099
- public flush(async = true, callback?: () => void, sendReason?: SendRequestReason) {
1100
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1101
- }
1102
-
1103
- /**
1104
- * Set AuthMsaDeviceTicket header
1105
- * @param ticket - Ticket value.
1106
- */
1107
- public setMsaAuthTicket(ticket: string) {
1108
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1109
- }
1110
-
1111
- /**
1112
- * Check if there are any events waiting to be scheduled for sending.
1113
- * @returns True if there are events, false otherwise.
1114
- */
1115
- public hasEvents(): boolean {
1116
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1117
- return false;
1118
- }
1119
-
1120
- /**
1121
- * Load custom transmission profiles. Each profile should have timers for real time, and normal and can
1122
- * optionally specify the immediate latency time in ms (defaults to 0 when not defined). Each profile should
1123
- * make sure that a each normal latency timer is a multiple of the real-time latency and the immediate
1124
- * is smaller than the real-time.
1125
- * Setting the timer value to -1 means that the events for that latency will not be scheduled to be sent.
1126
- * Note that once a latency has been set to not send, all latencies below it will also not be sent. The
1127
- * timers should be in the form of [normal, high, [immediate]].
1128
- * e.g Custom:
1129
- * [10,5] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms
1130
- * [10,5,1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 1ms
1131
- * [10,5,0] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate will default to 0ms
1132
- * [10,5,-1] - Sets the normal latency time to 10 seconds and real-time to 5 seconds; Immediate events will not be
1133
- * scheduled on their own and but they will be included with real-time or normal events as the first events in a batch.
1134
- * This also removes any previously loaded custom profiles.
1135
- * @param profiles - A dictionary containing the transmit profiles.
1136
- */
1137
- public _loadTransmitProfiles(profiles: { [profileName: string]: number[] }) {
1138
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1139
- }
1140
-
1141
- /**
1142
- * Set the transmit profile to be used. This will change the transmission timers
1143
- * based on the transmit profile.
1144
- * @param profileName - The name of the transmit profile to be used.
1145
- */
1146
- public _setTransmitProfile(profileName: string) {
1147
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1148
- }
1149
-
1150
- /**
1151
- * Backs off transmission. This exponentially increases all the timers.
1152
- */
1153
- public _backOffTransmission() {
1154
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1155
- }
1156
-
1157
- /**
1158
- * Clears backoff for transmission.
1159
- */
1160
- public _clearBackOff() {
1161
- // @DynamicProtoStub - DO NOT add any code as this will be removed during packaging
1162
- }
1163
- }