@sentienguard/apm 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,214 @@
1
+ /**
2
+ * Transport Layer
3
+ * Handles periodic flush of aggregated metrics to backend.
4
+ *
5
+ * Rules:
6
+ * - Use async, non-blocking HTTP
7
+ * - Never block application requests
8
+ * - Retry with exponential backoff
9
+ * - Drop data on sustained failure
10
+ * - Never exceed payload size limits
11
+ *
12
+ * Data loss is acceptable. App slowdown is not.
13
+ */
14
+
15
+ import https from 'https';
16
+ import http from 'http';
17
+ import { getAggregator } from './aggregator.js';
18
+ import config, { debug, warn, isEnabled } from './config.js';
19
+
20
// ---- Module state ----

// Active setInterval handle, or null when the flush timer is stopped.
let flushInterval = null;
// Guards against double-starting the flush timer.
let isRunning = false;
// Count of back-to-back flush failures; reset to 0 on any success.
let consecutiveFailures = 0;

// Max consecutive failures before we stop retrying
const MAX_CONSECUTIVE_FAILURES = 5;

// Base retry delay (doubles with each failure)
const BASE_RETRY_DELAY_MS = 1000;

// Max payload size (1MB default)
// NOTE(review): `||` means a configured 0 falls back to 1MB — presumably
// intentional, since a 0-byte limit would drop every payload; confirm.
const MAX_PAYLOAD_SIZE = config.maxPayloadSize || 1024 * 1024;
32
+
33
/**
 * Send data to backend using native http/https.
 *
 * @param {object} payload - Aggregated metrics; must be JSON-serializable.
 * @returns {Promise<{statusCode: number, data: string}>} Resolves on any 2xx
 *   response; rejects on oversized payload, invalid endpoint URL, non-2xx
 *   status, socket error, or timeout.
 */
function sendToBackend(payload) {
  return new Promise((resolve, reject) => {
    const data = JSON.stringify(payload);

    // Enforce the size limit on actual wire bytes, not UTF-16 code units.
    // `data.length` undercounts multi-byte characters, which previously let
    // payloads of up to ~3x the configured limit through. Buffer.byteLength
    // also matches the Content-Length header we send below.
    const byteLength = Buffer.byteLength(data);
    if (byteLength > MAX_PAYLOAD_SIZE) {
      warn(`Payload too large (${byteLength} bytes), dropping`);
      return reject(new Error('Payload too large'));
    }

    let url;
    try {
      url = new URL(config.endpoint);
    } catch {
      return reject(new Error('Invalid endpoint URL'));
    }

    const isHttps = url.protocol === 'https:';
    const transport = isHttps ? https : http;

    const options = {
      hostname: url.hostname,
      port: url.port || (isHttps ? 443 : 80),
      path: url.pathname + url.search,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Content-Length': byteLength,
        'X-APM-Key': config.apiKey,
        'User-Agent': '@sentienguard/apm/1.0.0'
      },
      // Timeout to prevent hanging
      timeout: 5000
    };

    const req = transport.request(options, (res) => {
      let responseData = '';

      res.on('data', (chunk) => {
        responseData += chunk;
      });

      res.on('end', () => {
        if (res.statusCode >= 200 && res.statusCode < 300) {
          resolve({ statusCode: res.statusCode, data: responseData });
        } else {
          reject(new Error(`HTTP ${res.statusCode}: ${responseData}`));
        }
      });
    });

    req.on('error', (error) => {
      reject(error);
    });

    req.on('timeout', () => {
      // destroy() tears down the socket; the subsequent 'error' event is
      // harmless since a settled promise ignores further reject() calls.
      req.destroy();
      reject(new Error('Request timeout'));
    });

    // Send data
    req.write(data);
    req.end();
  });
}
102
+
103
// Timestamp (ms since epoch) before which flushes are skipped after a failure.
let retryBackoffUntil = 0;

/**
 * Perform a flush of aggregated metrics.
 * Non-blocking, async operation.
 *
 * Implements the exponential-backoff rule from the module header: after a
 * failed flush, subsequent flush ticks are skipped until the backoff window
 * (BASE_RETRY_DELAY_MS doubled per consecutive failure) has elapsed. Metrics
 * keep accumulating in the aggregator meanwhile; data loss is acceptable,
 * hammering a struggling backend is not.
 */
async function flush() {
  if (!isEnabled()) {
    return;
  }

  // Respect the backoff window set by the previous failure.
  if (consecutiveFailures > 0 && Date.now() < retryBackoffUntil) {
    debug('In retry backoff, skipping flush');
    return;
  }

  const aggregator = getAggregator();

  // Skip if no data
  if (!aggregator.hasData()) {
    debug('No data to flush');
    return;
  }

  // Get and reset metrics
  const payload = aggregator.flush();

  debug(`Flushing ${payload.requests.length} request metrics, ${payload.dependencies.length} dependency metrics`);

  try {
    const startTime = Date.now();
    await sendToBackend(payload);
    const duration = Date.now() - startTime;

    debug(`Flush successful in ${duration}ms`);

    // Reset failure state on success
    consecutiveFailures = 0;
    retryBackoffUntil = 0;

  } catch (error) {
    consecutiveFailures++;
    warn(`Flush failed (attempt ${consecutiveFailures}): ${error.message}`);

    // Exponential backoff: 1s, 2s, 4s, ... capped at the
    // MAX_CONSECUTIVE_FAILURES exponent so the delay stays bounded.
    const exponent = Math.min(consecutiveFailures - 1, MAX_CONSECUTIVE_FAILURES);
    retryBackoffUntil = Date.now() + BASE_RETRY_DELAY_MS * 2 ** exponent;

    // If too many failures, warn but continue (data loss is acceptable)
    if (consecutiveFailures >= MAX_CONSECUTIVE_FAILURES) {
      warn('Max consecutive failures reached, data will be dropped until backend recovers');
    }
  }
}
145
+
146
/**
 * Start the periodic flush timer.
 *
 * No-op when already running or when the SDK is disabled. The timer is
 * unref'd so it never keeps the process alive on its own.
 */
export function startFlushing() {
  if (isRunning) {
    debug('Flushing already started');
    return;
  }

  if (!isEnabled()) {
    debug('SDK disabled, not starting flush timer');
    return;
  }

  const intervalMs = config.flushInterval * 1000;

  // Defer the actual flush to the next tick so the timer callback itself
  // never does work inline on the event loop turn that fired it.
  flushInterval = setInterval(() => setImmediate(flush), intervalMs);

  // Ensure timer doesn't prevent process exit (unref may be absent in
  // non-Node timer implementations).
  flushInterval.unref?.();

  isRunning = true;
  debug(`Flush timer started (every ${config.flushInterval}s)`);
}
175
+
176
/**
 * Stop the periodic flush timer.
 *
 * Safe to call when not running; idempotent.
 */
export function stopFlushing() {
  if (!isRunning) {
    return;
  }

  if (flushInterval !== null) {
    clearInterval(flushInterval);
    flushInterval = null;
  }

  isRunning = false;
  debug('Flush timer stopped');
}
190
+
191
/**
 * Perform a final flush before shutdown.
 *
 * Await this during graceful shutdown. Failures are logged and swallowed —
 * shutdown must never be blocked or aborted by telemetry delivery.
 *
 * @returns {Promise<void>}
 */
export async function finalFlush() {
  debug('Performing final flush');
  try {
    await flush();
  } catch (error) {
    // Single template-string form, matching every other warn() call site in
    // this module; the original two-argument call relied on warn accepting
    // varargs.
    warn(`Final flush failed: ${error.message}`);
  }
}
203
+
204
/**
 * Manual flush trigger (for testing)
 */
export { flush };

// Default export mirrors the named exports, for consumers that prefer
// importing a single transport object.
export default {
  startFlushing,
  stopFlushing,
  finalFlush,
  flush
};