@v-tilt/browser 1.0.11 → 1.1.0

@@ -0,0 +1,78 @@
+ /**
+  * Request Queue - Event Batching (PostHog-style)
+  *
+  * Batches multiple events together and sends them at configurable intervals.
+  * This reduces the number of HTTP requests significantly for active users.
+  *
+  * Features:
+  * - Configurable flush interval (default 3 seconds)
+  * - Batches events by URL/batchKey
+  * - Uses sendBeacon on page unload for reliable delivery
+  * - Converts absolute timestamps to relative offsets before sending
+  */
+ import type { TrackingEvent } from "./types";
+ export declare const DEFAULT_FLUSH_INTERVAL_MS = 3000;
+ export interface QueuedRequest {
+     url: string;
+     event: TrackingEvent;
+     batchKey?: string;
+     transport?: "xhr" | "sendBeacon";
+ }
+ export interface BatchedRequest {
+     url: string;
+     events: TrackingEvent[];
+     batchKey?: string;
+     transport?: "xhr" | "sendBeacon";
+ }
+ export interface RequestQueueConfig {
+     flush_interval_ms?: number;
+ }
+ export declare class RequestQueue {
+     private _isPaused;
+     private _queue;
+     private _flushTimeout?;
+     private _flushTimeoutMs;
+     private _sendRequest;
+     constructor(sendRequest: (req: BatchedRequest) => void, config?: RequestQueueConfig);
+     /**
+      * Get the current queue length
+      */
+     get length(): number;
+     /**
+      * Enqueue an event for batched sending
+      */
+     enqueue(req: QueuedRequest): void;
+     /**
+      * Flush all queued events immediately using sendBeacon
+      * Called on page unload to ensure events are delivered
+      */
+     unload(): void;
+     /**
+      * Enable the queue and start flushing
+      */
+     enable(): void;
+     /**
+      * Pause the queue (stops flushing but keeps events)
+      */
+     pause(): void;
+     /**
+      * Force an immediate flush
+      */
+     flush(): void;
+     /**
+      * Set up the flush timeout
+      */
+     private _setFlushTimeout;
+     /**
+      * Clear the flush timeout
+      */
+     private _clearFlushTimeout;
+     /**
+      * Flush all queued events now
+      */
+     private _flushNow;
+     /**
+      * Format the queue into batched requests by URL/batchKey
+      */
+     private _formatQueue;
+ }
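
A minimal lifecycle sketch against the declarations above. The sender callback, endpoint, and pagehide wiring are illustrative, not the SDK's internals (inside VTilt this role is played by its private batch sender); the event object is a placeholder for the package's TrackingEvent shape.

```ts
import { RequestQueue, type BatchedRequest } from "./request-queue";

// Illustrative sender: the real SDK routes this through its HTTP/sendBeacon helpers.
const queue = new RequestQueue((req: BatchedRequest) => {
  console.log(`sending ${req.events.length} event(s) to ${req.url} via ${req.transport ?? "default"}`);
}, { flush_interval_ms: 1000 }); // clamped to 250ms–5s by the compiled implementation later in this diff

queue.enable(); // the queue starts paused; enable() begins the flush timer
queue.enqueue({ url: "/e", event: { name: "pageview", timestamp: new Date().toISOString() } as any });

// On page unload, drain whatever is left; unload() marks requests for sendBeacon delivery.
window.addEventListener("pagehide", () => queue.unload());
```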
@@ -0,0 +1,54 @@
+ /**
+  * Request utilities for vTilt tracking
+  *
+  * Handles HTTP requests with:
+  * - GZip compression (via fflate)
+  * - Multiple transport methods (fetch, XHR, sendBeacon)
+  * - Automatic fallback between transports
+  *
+  * Based on PostHog's request.ts pattern
+  */
+ /**
+  * Compression methods supported by the SDK
+  */
+ export declare enum Compression {
+     GZipJS = "gzip-js",
+     None = "none"
+ }
+ /**
+  * Response from a request
+  */
+ export interface RequestResponse {
+     statusCode: number;
+     text?: string;
+     json?: any;
+ }
+ /**
+  * Options for making a request
+  */
+ export interface RequestOptions {
+     url: string;
+     data?: any;
+     method?: "POST" | "GET";
+     headers?: Record<string, string>;
+     transport?: "XHR" | "fetch" | "sendBeacon";
+     compression?: Compression;
+     timeout?: number;
+     callback?: (response: RequestResponse) => void;
+ }
+ /**
+  * JSON stringify with BigInt support
+  */
+ export declare const jsonStringify: (data: any) => string;
+ /**
+  * Main request function - handles transport selection and dispatching
+  */
+ export declare const request: (options: RequestOptions) => void;
+ /**
+  * Promise-based request wrapper
+  */
+ export declare const requestAsync: (options: Omit<RequestOptions, "callback">) => Promise<RequestResponse>;
+ /**
+  * Check if compression is supported and beneficial
+  */
+ export declare const shouldCompress: (data: any) => boolean;
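
A usage sketch of the declared request helpers. Only the signatures come from the diff above; the endpoint, payload shape, and timeout value are placeholders.

```ts
import { requestAsync, shouldCompress, Compression } from "./request";

async function sendBatch(events: unknown[]): Promise<void> {
  const data = { events };
  const response = await requestAsync({
    url: "https://example.invalid/batch", // placeholder endpoint
    method: "POST",
    data,
    transport: "fetch",
    // Only request gzip when the helper judges it worthwhile (e.g. payload size).
    compression: shouldCompress(data) ? Compression.GZipJS : Compression.None,
    timeout: 10000,
  });
  if (response.statusCode >= 400) {
    console.warn("vTilt batch failed with status", response.statusCode);
  }
}
```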
@@ -0,0 +1,64 @@
+ /**
+  * Retry Queue - Exponential Backoff (PostHog-style)
+  *
+  * Retries failed requests with jittered exponential backoff.
+  * Detects online/offline status and pauses retries when offline.
+  *
+  * Features:
+  * - Exponential backoff: 3s, 6s, 12s, 24s... up to 30 minutes
+  * - Jitter: +/- 50% to prevent thundering herd
+  * - Online/offline detection
+  * - Max 10 retries before giving up
+  * - Uses sendBeacon on page unload for final attempt
+  */
+ import type { BatchedRequest } from "./request-queue";
+ /**
+  * Generates a jittered exponential backoff delay in milliseconds
+  *
+  * Base value is 3 seconds, doubled with each retry up to 30 minutes max.
+  * Each value has +/- 50% jitter.
+  *
+  * @param retriesPerformedSoFar - Number of retries already attempted
+  * @returns Delay in milliseconds
+  */
+ export declare function pickNextRetryDelay(retriesPerformedSoFar: number): number;
+ export interface RetryQueueConfig {
+     sendRequest: (req: BatchedRequest) => Promise<{
+         statusCode: number;
+     }>;
+     sendBeacon: (req: BatchedRequest) => void;
+ }
+ export declare class RetryQueue {
+     private _isPolling;
+     private _poller?;
+     private _pollIntervalMs;
+     private _queue;
+     private _areWeOnline;
+     private _sendRequest;
+     private _sendBeacon;
+     constructor(config: RetryQueueConfig);
+     /**
+      * Get current queue length
+      */
+     get length(): number;
+     /**
+      * Enqueue a failed request for retry
+      */
+     enqueue(request: BatchedRequest, retriesPerformedSoFar?: number): void;
+     /**
+      * Attempt to send a request with retry on failure
+      */
+     retriableRequest(request: BatchedRequest): Promise<void>;
+     /**
+      * Start polling for retries
+      */
+     private _poll;
+     /**
+      * Flush ready items from the queue
+      */
+     private _flush;
+     /**
+      * Flush all queued requests using sendBeacon on page unload
+      */
+     unload(): void;
+ }
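
A sketch of the backoff schedule the comments describe: base 3 s, doubled per retry, capped at 30 minutes, with +/- 50% jitter. The real body of `pickNextRetryDelay` is not shown in this diff; this only illustrates the documented formula.

```ts
const BASE_DELAY_MS = 3000;
const MAX_DELAY_MS = 30 * 60 * 1000;

// Illustrative only; named differently from pickNextRetryDelay on purpose.
function illustrativeRetryDelay(retriesPerformedSoFar: number): number {
  const raw = Math.min(MAX_DELAY_MS, BASE_DELAY_MS * 2 ** retriesPerformedSoFar);
  const jitter = 0.5 + Math.random(); // uniform in [0.5, 1.5) => +/- 50%
  return raw * jitter;
}

// Retries 0..3 target roughly 3s, 6s, 12s, 24s before jitter is applied.
```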
package/dist/types.d.ts CHANGED
@@ -13,6 +13,7 @@ export interface VTiltConfig {
      globalAttributes?: Record<string, string>;
      persistence?: "localStorage" | "cookie";
      crossSubdomainCookie?: boolean;
+     disable_compression?: boolean;
  }
  export interface SessionData {
      value: string;
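
A sketch of a config object using the new flag. Only `disable_compression` is new in this diff; the other values are existing options shown as context lines, and their exact semantics are assumed from their names.

```ts
import type { VTiltConfig } from "./types";

const config: Partial<VTiltConfig> = {
  persistence: "localStorage",
  crossSubdomainCookie: false,
  // New in 1.1.0: opt out of GZip compression of outgoing payloads.
  disable_compression: true,
};
```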
package/dist/vtilt.d.ts CHANGED
@@ -1,20 +1,21 @@
  import { VTiltConfig, EventPayload } from "./types";
  import { HistoryAutocapture } from "./extensions/history-autocapture";
- interface QueuedRequest {
-     url: string;
-     event: any;
- }
+ import { type QueuedRequest } from "./request-queue";
  export declare class VTilt {
      private configManager;
      private sessionManager;
      private userManager;
      private webVitalsManager;
+     private requestQueue;
+     private retryQueue;
+     private rateLimiter;
      historyAutocapture?: HistoryAutocapture;
      __loaded: boolean;
      private _initialPageviewCaptured;
      private _visibilityStateListener;
      __request_queue: QueuedRequest[];
      private _hasWarnedAboutConfig;
+     private _setOncePropertiesSent;
      constructor(config?: Partial<VTiltConfig>);
      /**
       * Initializes a new instance of the VTilt tracking object.
@@ -52,6 +53,11 @@ export declare class VTilt {
       * This internal method should only be called by `init()`.
       */
      private _init;
+     /**
+      * Set up handler to flush event queue on page unload
+      * Uses both beforeunload and pagehide for maximum compatibility
+      */
+     private _setupUnloadHandler;
      /**
       * Returns a string representation of the instance name
       * Used for debugging and logging
@@ -77,8 +83,26 @@ export declare class VTilt {
      /**
       * Send HTTP request
       * This is the central entry point for all tracking requests
+      * Events are batched and sent every 3 seconds for better performance
       */
      private sendRequest;
+     /**
+      * Send a batched request with multiple events
+      * Called by RequestQueue when flushing
+      * Uses RetryQueue for automatic retry on failure
+      */
+     private _sendBatchedRequest;
+     /**
+      * Send HTTP request and return status code
+      * Uses GZip compression for payloads > 1KB
+      * Used by RetryQueue for retryable requests
+      */
+     private _sendHttpRequest;
+     /**
+      * Send request using sendBeacon for reliable delivery on page unload
+      * Uses GZip compression for payloads > 1KB
+      */
+     private _sendBeaconRequest;
      /**
       * Send a queued request (called after DOM is loaded)
       */
@@ -92,8 +116,16 @@ export declare class VTilt {
       *
       * @param name - Event name
       * @param payload - Event payload
+      * @param options - Optional capture options
+      */
+     capture(name: string, payload: EventPayload, options?: {
+         skip_client_rate_limiting?: boolean;
+     }): void;
+     /**
+      * Internal capture method that bypasses rate limiting
+      * Used for system events like rate limit warnings
       */
-     capture(name: string, payload: EventPayload): void;
+     private _captureInternal;
      /**
       * Track a custom event (alias for capture)
       */
@@ -214,7 +246,7 @@ export declare class VTilt {
       */
      _execute_array(array: any[]): void;
      /**
-      * Called when DOM is loaded - processes queued requests
+      * Called when DOM is loaded - processes queued requests and enables batching
       */
      _dom_loaded(): void;
  }
@@ -254,4 +286,3 @@ export declare function init_as_module(): VTilt;
   * ]
   */
  export declare function init_from_snippet(): void;
- export {};
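
A sketch of the new options bag on `capture()`, per the signature above. It assumes an already-initialized `VTilt` instance named `vtilt`; the event name and payload are placeholders, since the EventPayload shape is not shown in this diff.

```ts
declare const vtilt: import("./vtilt").VTilt; // assumed pre-initialized instance

vtilt.capture("checkout_completed", { value: 42 } as any, {
  // Bypass the client-side token bucket for this one event, e.g. for a burst
  // you know is legitimate. The option name comes from the type declaration.
  skip_client_rate_limiting: true,
});
```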
@@ -0,0 +1,52 @@
+ /**
+  * Rate Limiter - Token Bucket Algorithm (PostHog-style)
+  *
+  * Prevents runaway loops from flooding the server with events.
+  * Uses a token bucket algorithm with configurable rate and burst limits.
+  *
+  * Features:
+  * - Configurable events per second (default: 10)
+  * - Configurable burst limit (default: 100)
+  * - Token replenishment over time
+  * - Warning event when rate limited
+  */
+ export declare const RATE_LIMIT_WARNING_EVENT = "$$client_ingestion_warning";
+ export interface RateLimitBucket {
+     tokens: number;
+     last: number;
+ }
+ export interface RateLimiterConfig {
+     eventsPerSecond?: number;
+     eventsBurstLimit?: number;
+     persistence?: {
+         get: (key: string) => RateLimitBucket | null;
+         set: (key: string, value: RateLimitBucket) => void;
+     };
+     captureWarning?: (message: string) => void;
+ }
+ export declare class RateLimiter {
+     private eventsPerSecond;
+     private eventsBurstLimit;
+     private lastEventRateLimited;
+     private persistence?;
+     private captureWarning?;
+     constructor(config?: RateLimiterConfig);
+     /**
+      * Check if the client should be rate limited
+      *
+      * @param checkOnly - If true, don't consume a token (just check)
+      * @returns Object with isRateLimited flag and remaining tokens
+      */
+     checkRateLimit(checkOnly?: boolean): {
+         isRateLimited: boolean;
+         remainingTokens: number;
+     };
+     /**
+      * Check if an event should be allowed (consumes a token if allowed)
+      */
+     shouldAllowEvent(): boolean;
+     /**
+      * Get remaining tokens without consuming
+      */
+     getRemainingTokens(): number;
+ }
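
A sketch wiring `RateLimiter` to localStorage through the persistence hooks declared above. The storage wiring and warning handler are illustrative; the SDK's own persistence layer is not shown here.

```ts
import { RateLimiter, type RateLimitBucket } from "./rate-limiter";

const limiter = new RateLimiter({
  eventsPerSecond: 10,
  eventsBurstLimit: 100,
  persistence: {
    get: (key: string): RateLimitBucket | null => {
      const raw = window.localStorage.getItem(key);
      return raw ? (JSON.parse(raw) as RateLimitBucket) : null;
    },
    set: (key: string, value: RateLimitBucket): void => {
      window.localStorage.setItem(key, JSON.stringify(value));
    },
  },
  captureWarning: (message) => console.warn(message),
});

if (limiter.shouldAllowEvent()) {
  // safe to enqueue the event
}
```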
@@ -0,0 +1,80 @@
+ "use strict";
+ /**
+  * Rate Limiter - Token Bucket Algorithm (PostHog-style)
+  *
+  * Prevents runaway loops from flooding the server with events.
+  * Uses a token bucket algorithm with configurable rate and burst limits.
+  *
+  * Features:
+  * - Configurable events per second (default: 10)
+  * - Configurable burst limit (default: 100)
+  * - Token replenishment over time
+  * - Warning event when rate limited
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.RateLimiter = exports.RATE_LIMIT_WARNING_EVENT = void 0;
+ const RATE_LIMIT_STORAGE_KEY = "vt_rate_limit";
+ exports.RATE_LIMIT_WARNING_EVENT = "$$client_ingestion_warning";
+ class RateLimiter {
+     constructor(config = {}) {
+         var _a, _b;
+         this.lastEventRateLimited = false;
+         this.eventsPerSecond = (_a = config.eventsPerSecond) !== null && _a !== void 0 ? _a : 10;
+         this.eventsBurstLimit = Math.max((_b = config.eventsBurstLimit) !== null && _b !== void 0 ? _b : this.eventsPerSecond * 10, this.eventsPerSecond);
+         this.persistence = config.persistence;
+         this.captureWarning = config.captureWarning;
+         // Initialize lastEventRateLimited from current state
+         this.lastEventRateLimited = this.checkRateLimit(true).isRateLimited;
+     }
+     /**
+      * Check if the client should be rate limited
+      *
+      * @param checkOnly - If true, don't consume a token (just check)
+      * @returns Object with isRateLimited flag and remaining tokens
+      */
+     checkRateLimit(checkOnly = false) {
+         var _a, _b, _c, _d;
+         const now = Date.now();
+         // Get current bucket state from persistence or create new
+         const bucket = (_b = (_a = this.persistence) === null || _a === void 0 ? void 0 : _a.get(RATE_LIMIT_STORAGE_KEY)) !== null && _b !== void 0 ? _b : {
+             tokens: this.eventsBurstLimit,
+             last: now,
+         };
+         // Replenish tokens based on time elapsed
+         const secondsElapsed = (now - bucket.last) / 1000;
+         bucket.tokens += secondsElapsed * this.eventsPerSecond;
+         bucket.last = now;
+         // Cap tokens at burst limit
+         if (bucket.tokens > this.eventsBurstLimit) {
+             bucket.tokens = this.eventsBurstLimit;
+         }
+         const isRateLimited = bucket.tokens < 1;
+         // Consume a token if not just checking
+         if (!isRateLimited && !checkOnly) {
+             bucket.tokens = Math.max(0, bucket.tokens - 1);
+         }
+         // Capture warning event when first rate limited
+         if (isRateLimited && !this.lastEventRateLimited && !checkOnly) {
+             (_c = this.captureWarning) === null || _c === void 0 ? void 0 : _c.call(this, `vTilt client rate limited. Config: ${this.eventsPerSecond} events/second, ${this.eventsBurstLimit} burst limit.`);
+         }
+         this.lastEventRateLimited = isRateLimited;
+         (_d = this.persistence) === null || _d === void 0 ? void 0 : _d.set(RATE_LIMIT_STORAGE_KEY, bucket);
+         return {
+             isRateLimited,
+             remainingTokens: bucket.tokens,
+         };
+     }
+     /**
+      * Check if an event should be allowed (consumes a token if allowed)
+      */
+     shouldAllowEvent() {
+         return !this.checkRateLimit(false).isRateLimited;
+     }
+     /**
+      * Get remaining tokens without consuming
+      */
+     getRemainingTokens() {
+         return this.checkRateLimit(true).remainingTokens;
+     }
+ }
+ exports.RateLimiter = RateLimiter;
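
A worked example of the replenishment arithmetic above (10 events/sec, burst 100): once the bucket is drained, each elapsed second restores 10 tokens, so a steady 10 events/sec is sustainable and anything faster is dropped until tokens accumulate again.

```ts
const eventsPerSecond = 10;
const burstLimit = 100;

let tokens = 0;               // bucket just drained
const secondsElapsed = 0.5;   // half a second later
tokens = Math.min(burstLimit, tokens + secondsElapsed * eventsPerSecond);
console.log(tokens);          // 5 -> five more events may pass before limiting resumes
```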
@@ -0,0 +1,156 @@
+ "use strict";
+ /**
+  * Request Queue - Event Batching (PostHog-style)
+  *
+  * Batches multiple events together and sends them at configurable intervals.
+  * This reduces the number of HTTP requests significantly for active users.
+  *
+  * Features:
+  * - Configurable flush interval (default 3 seconds)
+  * - Batches events by URL/batchKey
+  * - Uses sendBeacon on page unload for reliable delivery
+  * - Converts absolute timestamps to relative offsets before sending
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.RequestQueue = exports.DEFAULT_FLUSH_INTERVAL_MS = void 0;
+ exports.DEFAULT_FLUSH_INTERVAL_MS = 3000;
+ /**
+  * Clamp a value to a range
+  */
+ function clampToRange(value, min, max, defaultValue) {
+     if (typeof value !== "number" || isNaN(value)) {
+         return defaultValue;
+     }
+     return Math.min(Math.max(value, min), max);
+ }
+ class RequestQueue {
+     constructor(sendRequest, config) {
+         // We start in a paused state and only start flushing when enabled
+         this._isPaused = true;
+         this._queue = [];
+         this._flushTimeoutMs = clampToRange((config === null || config === void 0 ? void 0 : config.flush_interval_ms) || exports.DEFAULT_FLUSH_INTERVAL_MS, 250, // Min 250ms
+         5000, // Max 5 seconds
+         exports.DEFAULT_FLUSH_INTERVAL_MS);
+         this._sendRequest = sendRequest;
+     }
+     /**
+      * Get the current queue length
+      */
+     get length() {
+         return this._queue.length;
+     }
+     /**
+      * Enqueue an event for batched sending
+      */
+     enqueue(req) {
+         this._queue.push(req);
+         // Start flush timer if not already running
+         if (!this._flushTimeout) {
+             this._setFlushTimeout();
+         }
+     }
+     /**
+      * Flush all queued events immediately using sendBeacon
+      * Called on page unload to ensure events are delivered
+      */
+     unload() {
+         this._clearFlushTimeout();
+         if (this._queue.length === 0) {
+             return;
+         }
+         const requests = this._formatQueue();
+         // Send each batched request using sendBeacon for reliable delivery
+         for (const key in requests) {
+             const req = requests[key];
+             this._sendRequest({ ...req, transport: "sendBeacon" });
+         }
+     }
+     /**
+      * Enable the queue and start flushing
+      */
+     enable() {
+         this._isPaused = false;
+         this._setFlushTimeout();
+     }
+     /**
+      * Pause the queue (stops flushing but keeps events)
+      */
+     pause() {
+         this._isPaused = true;
+         this._clearFlushTimeout();
+     }
+     /**
+      * Force an immediate flush
+      */
+     flush() {
+         this._clearFlushTimeout();
+         this._flushNow();
+         this._setFlushTimeout();
+     }
+     /**
+      * Set up the flush timeout
+      */
+     _setFlushTimeout() {
+         if (this._isPaused) {
+             return;
+         }
+         this._flushTimeout = setTimeout(() => {
+             this._clearFlushTimeout();
+             this._flushNow();
+             // Restart the timeout for continuous flushing
+             if (this._queue.length > 0) {
+                 this._setFlushTimeout();
+             }
+         }, this._flushTimeoutMs);
+     }
+     /**
+      * Clear the flush timeout
+      */
+     _clearFlushTimeout() {
+         if (this._flushTimeout) {
+             clearTimeout(this._flushTimeout);
+             this._flushTimeout = undefined;
+         }
+     }
+     /**
+      * Flush all queued events now
+      */
+     _flushNow() {
+         if (this._queue.length === 0) {
+             return;
+         }
+         const requests = this._formatQueue();
+         const now = Date.now();
+         for (const key in requests) {
+             const req = requests[key];
+             // Convert absolute timestamps to relative offsets
+             // This helps with clock skew between client and server
+             req.events.forEach((event) => {
+                 const eventTime = new Date(event.timestamp).getTime();
+                 event.$offset = Math.abs(eventTime - now);
+             });
+             this._sendRequest(req);
+         }
+     }
+     /**
+      * Format the queue into batched requests by URL/batchKey
+      */
+     _formatQueue() {
+         const requests = {};
+         this._queue.forEach((request) => {
+             const key = request.batchKey || request.url;
+             if (!requests[key]) {
+                 requests[key] = {
+                     url: request.url,
+                     events: [],
+                     batchKey: request.batchKey,
+                 };
+             }
+             requests[key].events.push(request.event);
+         });
+         // Clear the queue
+         this._queue = [];
+         return requests;
+     }
+ }
+ exports.RequestQueue = RequestQueue;
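
A sketch of how `_formatQueue` groups events, based on the implementation above: events sharing a `batchKey` (or, failing that, a URL) end up in one batched request per flush. The URLs and event objects are placeholders for the SDK's TrackingEvent shape.

```ts
import { RequestQueue } from "./request-queue";

const sent: string[] = [];
const queue = new RequestQueue((req) => {
  sent.push(`${req.url}: ${req.events.length} event(s)`);
});

queue.enable();
queue.enqueue({ url: "/e", event: { name: "a", timestamp: new Date().toISOString() } as any });
queue.enqueue({ url: "/e", event: { name: "b", timestamp: new Date().toISOString() } as any });
queue.enqueue({ url: "/vitals", event: { name: "c", timestamp: new Date().toISOString() } as any, batchKey: "vitals" });
queue.flush();

console.log(sent); // ["/e: 2 event(s)", "/vitals: 1 event(s)"]
```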