@depup/elastic-apm-node 4.15.0-depup.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154):
  1. package/LICENSE +26 -0
  2. package/NOTICE.md +442 -0
  3. package/README.md +48 -0
  4. package/changes.json +78 -0
  5. package/index.d.ts +398 -0
  6. package/index.js +11 -0
  7. package/lib/InflightEventSet.js +53 -0
  8. package/lib/activation-method.js +119 -0
  9. package/lib/agent.js +941 -0
  10. package/lib/apm-client/apm-client.js +313 -0
  11. package/lib/apm-client/http-apm-client/CHANGELOG.md +271 -0
  12. package/lib/apm-client/http-apm-client/README.md +485 -0
  13. package/lib/apm-client/http-apm-client/central-config.js +41 -0
  14. package/lib/apm-client/http-apm-client/container-info.js +111 -0
  15. package/lib/apm-client/http-apm-client/detect-hostname.js +96 -0
  16. package/lib/apm-client/http-apm-client/index.js +1975 -0
  17. package/lib/apm-client/http-apm-client/logging.js +31 -0
  18. package/lib/apm-client/http-apm-client/ndjson.js +20 -0
  19. package/lib/apm-client/http-apm-client/truncate.js +434 -0
  20. package/lib/apm-client/noop-apm-client.js +73 -0
  21. package/lib/async-hooks-polyfill.js +58 -0
  22. package/lib/cloud-metadata/aws.js +175 -0
  23. package/lib/cloud-metadata/azure.js +123 -0
  24. package/lib/cloud-metadata/callback-coordination.js +159 -0
  25. package/lib/cloud-metadata/gcp.js +133 -0
  26. package/lib/cloud-metadata/index.js +175 -0
  27. package/lib/config/config.js +458 -0
  28. package/lib/config/normalizers.js +701 -0
  29. package/lib/config/schema.js +1007 -0
  30. package/lib/constants.js +35 -0
  31. package/lib/errors.js +303 -0
  32. package/lib/filters/sanitize-field-names.js +69 -0
  33. package/lib/http-request.js +249 -0
  34. package/lib/instrumentation/azure-functions.js +519 -0
  35. package/lib/instrumentation/context.js +56 -0
  36. package/lib/instrumentation/dropped-spans-stats.js +112 -0
  37. package/lib/instrumentation/elasticsearch-shared.js +63 -0
  38. package/lib/instrumentation/express-utils.js +91 -0
  39. package/lib/instrumentation/generic-span.js +322 -0
  40. package/lib/instrumentation/http-shared.js +424 -0
  41. package/lib/instrumentation/ids.js +39 -0
  42. package/lib/instrumentation/index.js +1127 -0
  43. package/lib/instrumentation/modules/@apollo/server.js +30 -0
  44. package/lib/instrumentation/modules/@aws-sdk/client-dynamodb.js +143 -0
  45. package/lib/instrumentation/modules/@aws-sdk/client-s3.js +230 -0
  46. package/lib/instrumentation/modules/@aws-sdk/client-sns.js +197 -0
  47. package/lib/instrumentation/modules/@aws-sdk/client-sqs.js +336 -0
  48. package/lib/instrumentation/modules/@elastic/elasticsearch.js +343 -0
  49. package/lib/instrumentation/modules/@hapi/hapi.js +221 -0
  50. package/lib/instrumentation/modules/@opentelemetry/api.js +86 -0
  51. package/lib/instrumentation/modules/@opentelemetry/sdk-metrics.js +79 -0
  52. package/lib/instrumentation/modules/@redis/client/dist/lib/client/commands-queue.js +178 -0
  53. package/lib/instrumentation/modules/@redis/client/dist/lib/client/index.js +49 -0
  54. package/lib/instrumentation/modules/@smithy/smithy-client.js +198 -0
  55. package/lib/instrumentation/modules/_lambda-handler.js +40 -0
  56. package/lib/instrumentation/modules/apollo-server-core.js +49 -0
  57. package/lib/instrumentation/modules/aws-sdk/dynamodb.js +155 -0
  58. package/lib/instrumentation/modules/aws-sdk/s3.js +184 -0
  59. package/lib/instrumentation/modules/aws-sdk/sns.js +232 -0
  60. package/lib/instrumentation/modules/aws-sdk/sqs.js +361 -0
  61. package/lib/instrumentation/modules/aws-sdk.js +76 -0
  62. package/lib/instrumentation/modules/bluebird.js +93 -0
  63. package/lib/instrumentation/modules/cassandra-driver.js +280 -0
  64. package/lib/instrumentation/modules/elasticsearch.js +191 -0
  65. package/lib/instrumentation/modules/express-graphql.js +66 -0
  66. package/lib/instrumentation/modules/express-queue.js +28 -0
  67. package/lib/instrumentation/modules/express.js +162 -0
  68. package/lib/instrumentation/modules/fastify.js +172 -0
  69. package/lib/instrumentation/modules/finalhandler.js +41 -0
  70. package/lib/instrumentation/modules/generic-pool.js +85 -0
  71. package/lib/instrumentation/modules/graphql.js +256 -0
  72. package/lib/instrumentation/modules/handlebars.js +22 -0
  73. package/lib/instrumentation/modules/http.js +112 -0
  74. package/lib/instrumentation/modules/http2.js +320 -0
  75. package/lib/instrumentation/modules/https.js +68 -0
  76. package/lib/instrumentation/modules/ioredis.js +94 -0
  77. package/lib/instrumentation/modules/jade.js +18 -0
  78. package/lib/instrumentation/modules/kafkajs.js +476 -0
  79. package/lib/instrumentation/modules/knex.js +91 -0
  80. package/lib/instrumentation/modules/koa-router.js +74 -0
  81. package/lib/instrumentation/modules/koa.js +15 -0
  82. package/lib/instrumentation/modules/memcached.js +99 -0
  83. package/lib/instrumentation/modules/mimic-response.js +45 -0
  84. package/lib/instrumentation/modules/mongodb/lib/cmap/connection_pool.js +40 -0
  85. package/lib/instrumentation/modules/mongodb-core.js +206 -0
  86. package/lib/instrumentation/modules/mongodb.js +259 -0
  87. package/lib/instrumentation/modules/mysql.js +200 -0
  88. package/lib/instrumentation/modules/mysql2.js +140 -0
  89. package/lib/instrumentation/modules/pg.js +148 -0
  90. package/lib/instrumentation/modules/pug.js +18 -0
  91. package/lib/instrumentation/modules/redis.js +176 -0
  92. package/lib/instrumentation/modules/restify.js +52 -0
  93. package/lib/instrumentation/modules/tedious.js +159 -0
  94. package/lib/instrumentation/modules/undici.js +270 -0
  95. package/lib/instrumentation/modules/ws.js +59 -0
  96. package/lib/instrumentation/noop-transaction.js +81 -0
  97. package/lib/instrumentation/run-context/AbstractRunContextManager.js +215 -0
  98. package/lib/instrumentation/run-context/AsyncHooksRunContextManager.js +106 -0
  99. package/lib/instrumentation/run-context/AsyncLocalStorageRunContextManager.js +73 -0
  100. package/lib/instrumentation/run-context/BasicRunContextManager.js +82 -0
  101. package/lib/instrumentation/run-context/RunContext.js +151 -0
  102. package/lib/instrumentation/run-context/index.js +23 -0
  103. package/lib/instrumentation/shimmer.js +123 -0
  104. package/lib/instrumentation/span-compression.js +239 -0
  105. package/lib/instrumentation/span.js +621 -0
  106. package/lib/instrumentation/template-shared.js +43 -0
  107. package/lib/instrumentation/timer.js +84 -0
  108. package/lib/instrumentation/transaction.js +571 -0
  109. package/lib/lambda.js +992 -0
  110. package/lib/load-source-map.js +100 -0
  111. package/lib/logging.js +212 -0
  112. package/lib/metrics/index.js +92 -0
  113. package/lib/metrics/platforms/generic/index.js +40 -0
  114. package/lib/metrics/platforms/generic/process-cpu.js +22 -0
  115. package/lib/metrics/platforms/generic/process-top.js +157 -0
  116. package/lib/metrics/platforms/generic/stats.js +34 -0
  117. package/lib/metrics/platforms/generic/system-cpu.js +51 -0
  118. package/lib/metrics/platforms/linux/index.js +19 -0
  119. package/lib/metrics/platforms/linux/stats.js +213 -0
  120. package/lib/metrics/queue.js +90 -0
  121. package/lib/metrics/registry.js +52 -0
  122. package/lib/metrics/reporter.js +119 -0
  123. package/lib/metrics/runtime.js +77 -0
  124. package/lib/middleware/connect.js +16 -0
  125. package/lib/opentelemetry-bridge/OTelBridgeNonRecordingSpan.js +150 -0
  126. package/lib/opentelemetry-bridge/OTelBridgeRunContext.js +124 -0
  127. package/lib/opentelemetry-bridge/OTelContextManager.js +82 -0
  128. package/lib/opentelemetry-bridge/OTelSpan.js +344 -0
  129. package/lib/opentelemetry-bridge/OTelTracer.js +201 -0
  130. package/lib/opentelemetry-bridge/OTelTracerProvider.js +25 -0
  131. package/lib/opentelemetry-bridge/README.md +244 -0
  132. package/lib/opentelemetry-bridge/index.js +15 -0
  133. package/lib/opentelemetry-bridge/oblog.js +23 -0
  134. package/lib/opentelemetry-bridge/opentelemetry-core-mini/README.md +3 -0
  135. package/lib/opentelemetry-bridge/opentelemetry-core-mini/internal/validators.js +52 -0
  136. package/lib/opentelemetry-bridge/opentelemetry-core-mini/trace/TraceState.js +109 -0
  137. package/lib/opentelemetry-bridge/otelutils.js +99 -0
  138. package/lib/opentelemetry-bridge/setup.js +76 -0
  139. package/lib/opentelemetry-metrics/ElasticApmMetricExporter.js +285 -0
  140. package/lib/opentelemetry-metrics/index.js +50 -0
  141. package/lib/parsers.js +225 -0
  142. package/lib/propwrap.js +147 -0
  143. package/lib/stacktraces.js +537 -0
  144. package/lib/symbols.js +15 -0
  145. package/lib/tracecontext/index.js +118 -0
  146. package/lib/tracecontext/traceparent.js +185 -0
  147. package/lib/tracecontext/tracestate.js +388 -0
  148. package/lib/wildcard-matcher.js +52 -0
  149. package/loader.mjs +7 -0
  150. package/package.json +299 -0
  151. package/start.d.ts +8 -0
  152. package/start.js +29 -0
  153. package/types/aws-lambda.d.ts +98 -0
  154. package/types/connect.d.ts +23 -0
@@ -0,0 +1,1975 @@
1
+ /*
2
+ * Copyright Elasticsearch B.V. and other contributors where applicable.
3
+ * Licensed under the BSD 2-Clause License; you may not use this file except in
4
+ * compliance with the BSD 2-Clause License.
5
+ */
6
+
7
+ 'use strict';
8
+
9
+ const assert = require('assert');
10
+ const crypto = require('crypto');
11
+ const fs = require('fs');
12
+ const http = require('http');
13
+ const https = require('https');
14
+ const util = require('util');
15
+ const { performance } = require('perf_hooks');
16
+ const { URL } = require('url');
17
+ const zlib = require('zlib');
18
+
19
+ const HttpAgentKeepAlive = require('agentkeepalive');
20
+ const HttpsAgentKeepAlive = HttpAgentKeepAlive.HttpsAgent;
21
+ const Filters = require('object-filter-sequence');
22
+ const querystring = require('querystring');
23
+ const Writable = require('readable-stream').Writable;
24
+ const getContainerInfo = require('./container-info');
25
+ const eos = require('end-of-stream');
26
+ const semver = require('semver');
27
+ const streamToBuffer = require('fast-stream-to-buffer');
28
+ const StreamChopper = require('stream-chopper');
29
+
30
+ const { detectHostname } = require('./detect-hostname');
31
+ const ndjson = require('./ndjson');
32
+ const { NoopLogger } = require('./logging');
33
+ const truncate = require('./truncate');
34
+ const { getCentralConfigIntervalS } = require('./central-config');
35
+
36
// Public API: the HTTP client used by the APM agent to talk to APM server.
module.exports = {
  HttpApmClient: Client,
};
39
+
40
// Sentinel symbols written into the client stream to request special flush
// handling. `kLambdaEndFlush` additionally signals the end of a Lambda
// invocation.
const kFlush = Symbol('flush');
const kLambdaEndFlush = Symbol('lambdaEndFlush');

// Is `obj` one of the flush sentinels rather than a regular APM event?
function isFlushMarker(obj) {
  switch (obj) {
    case kFlush:
    case kLambdaEndFlush:
      return true;
    default:
      return false;
  }
}
47
+
48
// Options that `Client.prototype.config` requires to be present.
const requiredOpts = ['agentName', 'agentVersion', 'serviceName', 'userAgent'];

// Get handles on uninstrumented functions for making HTTP(S) requests before
// the APM agent has a chance to wrap them. This allows the Client to make
// requests to APM server without interfering with the APM agent's tracing
// of the user application.
const httpGet = http.get;
const httpRequest = http.request;
const httpsGet = https.get;
const httpsRequest = https.request;

// Container/pod info detected synchronously once at module load; used in
// `config()` to default `containerId` and the kubernetes fields.
const containerInfo = getContainerInfo.sync();

// True when running in an AWS Lambda execution environment.
const isLambdaExecutionEnvironment = !!process.env.AWS_LAMBDA_FUNCTION_NAME;
62
+
63
// All sockets on the agent are unreffed when they are created. This means that
// when the user process's event loop is done, and these are the only handles
// left, the process 'beforeExit' event will be emitted. By listening for this
// we can make sure to end the requests properly before process exit. This way
// we don't keep the process running until the `time` timeout happens.
//
// An exception to this is AWS Lambda which, in some cases (sync function
// handlers that use a callback), will wait for 'beforeExit' to freeze the
// Lambda instance VM *for later re-use*. This means we never want to shutdown
// the `Client` on 'beforeExit'.
//
// Each Client registers itself in this array in its constructor (see
// `this._index` there).
const clientsToAutoEnd = [];
if (!isLambdaExecutionEnvironment) {
  process.once('beforeExit', function () {
    clientsToAutoEnd.forEach(function (client) {
      if (!client) {
        // Clients remove themselves from the array when they end.
        return;
      }
      client._gracefulExit();
    });
  });
}
85
+
86
// `Client` is an object-mode Writable stream: APM events are `.write()`n to
// it, then encoded and forwarded to APM server.
util.inherits(Client, Writable);

// Event-type markers passed as the encoding argument when writing objects to
// the client stream; `_encode` receives these (e.g. `Client.encoding.METADATA`
// in `_resetEncodedMetadata`) to know how to serialize the object.
Client.encoding = Object.freeze({
  METADATA: Symbol('metadata'),
  TRANSACTION: Symbol('transaction'),
  SPAN: Symbol('span'),
  ERROR: Symbol('error'),
  METRICSET: Symbol('metricset'),
});
95
+
96
/**
 * Construct an HTTP APM client.
 *
 * The client is an object-mode Writable stream: APM events written to it are
 * encoded, gzipped via a StreamChopper, and sent in intake requests to APM
 * server.
 *
 * @param {Object} opts - Configuration; see `Client.prototype.config` for
 *    defaults and processing. Must include `agentName`, `agentVersion`,
 *    `serviceName`, and `userAgent` (see `requiredOpts`). At most one of
 *    `cloudMetadataFetcher`, `expectExtraMetadata`, or `extraMetadata` may
 *    be given.
 * @throws {Error} If more than one of the metadata options is configured,
 *    or (via `config()`) if required options are missing.
 */
function Client(opts) {
  // Support calling without `new`.
  if (!(this instanceof Client)) return new Client(opts);

  Writable.call(this, { objectMode: true });

  this._corkTimer = null;
  this._agent = null;
  this._activeIntakeReq = false;
  this._onIntakeReqConcluded = null;
  this._transport = null;
  this._configTimer = null;
  this._backoffReconnectCount = 0;
  this._intakeRequestGracefulExitFn = null; // set in makeIntakeRequest
  this._encodedMetadata = null;
  this._cloudMetadata = null;
  this._extraMetadata = null;
  this._metadataFilters = new Filters();
  // _lambdaActive indicates if a Lambda function invocation is active. It is
  // only meaningful if `isLambdaExecutionEnvironment`.
  this._lambdaActive = false;
  // Whether to forward `.lambdaRegisterTransaction()` calls to the Lambda
  // extension. This will be set false if a previous attempt failed.
  this._lambdaShouldRegisterTransactions = true;

  // Internal runtime stats for developer debugging/tuning.
  this._numEvents = 0; // number of events given to the client
  this._numEventsDropped = 0; // number of events dropped because overloaded
  this._numEventsEnqueued = 0; // number of events written through to chopper
  this.sent = 0; // number of events sent to APM server (not necessarily accepted)
  this._slowWriteBatch = {
    // data on slow or the slowest _writeBatch
    numOver10Ms: 0,
    // Data for the slowest _writeBatch:
    encodeTimeMs: 0,
    fullTimeMs: 0,
    numEvents: 0,
    numBytes: 0,
  };

  // `config()` validates required options, applies defaults, and sets up the
  // HTTP(S) transport and keep-alive agent.
  this.config(opts);
  this._log = this._conf.logger || new NoopLogger();

  // `_apmServerVersion` is one of:
  //  - `undefined`: the version has not yet been fetched
  //  - `null`: the APM server version is unknown, could not be determined
  //  - a semver.SemVer instance
  this._apmServerVersion = this._conf.apmServerVersion
    ? new semver.SemVer(this._conf.apmServerVersion)
    : undefined;
  if (!this._apmServerVersion) {
    this._fetchApmServerVersion();
  }

  // The three extra-metadata options are mutually exclusive; count how many
  // were provided.
  const numExtraMdOpts = [
    this._conf.cloudMetadataFetcher,
    this._conf.expectExtraMetadata,
    this._conf.extraMetadata,
  ].reduce((accum, curr) => (curr ? accum + 1 : accum), 0);
  if (numExtraMdOpts > 1) {
    throw new Error(
      'it is an error to configure a Client with more than one of "cloudMetadataFetcher", "expectExtraMetadata", or "extraMetadata"',
    );
  } else if (this._conf.cloudMetadataFetcher) {
    // Start stream in corked mode, uncork when cloud metadata is fetched and
    // assigned. Also, the _maybeUncork will not uncork until _encodedMetadata
    // is set.
    this._log.trace('corking (cloudMetadataFetcher)');
    this.cork();
    this._fetchAndEncodeMetadata(() => {
      // _fetchAndEncodeMetadata will have set/memoized the encoded
      // metadata to the _encodedMetadata property.

      // This reverses the cork() call in the constructor above. "Maybe" uncork,
      // in case the client has been destroyed before this callback is called.
      this._maybeUncork();
      this._log.trace('uncorked (cloudMetadataFetcher)');

      // the `cloud-metadata` event allows listeners to know when the
      // agent has finished fetching and encoding its metadata for the
      // first time
      this.emit('cloud-metadata', this._encodedMetadata);
    });
  } else if (this._conf.expectExtraMetadata) {
    // Uncorking will happen in the expected `.setExtraMetadata()` call.
    this._log.trace('corking (expectExtraMetadata)');
    this.cork();
  } else if (this._conf.extraMetadata) {
    this.setExtraMetadata(this._conf.extraMetadata);
  } else {
    this._resetEncodedMetadata();
  }

  // The chopper splits the outgoing event stream into gzipped chunks bounded
  // by `size` bytes / `time` ms; each chunk becomes one intake request.
  this._chopper = new StreamChopper({
    size: this._conf.size,
    time: this._conf.time,
    type: StreamChopper.overflow,
    transform() {
      return zlib.createGzip({
        level: zlib.constants.Z_BEST_SPEED,
      });
    },
  });
  const onIntakeError = (err) => {
    if (this.destroyed === false) {
      this.emit('request-error', err);
    }
  };
  this._chopper.on('stream', getChoppedStreamHandler(this, onIntakeError));

  // We don't expect the chopper stream to end until the client is ending.
  // Make sure to clean up if this does happen unexpectedly.
  const fail = () => {
    if (this._writableState.ending === false) this.destroy();
  };
  eos(this._chopper, fail);

  // Register for auto-end on process 'beforeExit' (see `clientsToAutoEnd`).
  this._index = clientsToAutoEnd.length;
  clientsToAutoEnd.push(this);

  // The 'beforeExit' event is significant in Lambda invocation completion
  // handling, so we log it for debugging.
  if (isLambdaExecutionEnvironment && this._log.isLevelEnabled('trace')) {
    process.prependListener('beforeExit', () => {
      this._log.trace('process "beforeExit"');
    });
  }

  if (this._conf.centralConfig) {
    this._pollConfig();
  }
}
227
+
228
// Return a snapshot of the client's internal runtime counters, for developer
// debugging/tuning. (See the counter comments in the constructor.)
Client.prototype._getStats = function () {
  const stats = {
    numEvents: this._numEvents,
    numEventsDropped: this._numEventsDropped,
    numEventsEnqueued: this._numEventsEnqueued,
    numEventsSent: this.sent,
    slowWriteBatch: this._slowWriteBatch,
    backoffReconnectCount: this._backoffReconnectCount,
  };
  return stats;
};
239
+
240
/**
 * (Re)configure the client. Called from the constructor, and may be called
 * again later with partial `opts`; new values are merged over the existing
 * `this._conf`.
 *
 * @param {Object} [opts] - Config options to merge into `this._conf`.
 * @throws {Error} If any of `requiredOpts` is missing, or if `serverUrl`
 *    has a protocol other than "http:" or "https:".
 */
Client.prototype.config = function (opts) {
  this._conf = Object.assign(this._conf || {}, opts);

  this._conf.globalLabels = normalizeGlobalLabels(this._conf.globalLabels);

  const missing = requiredOpts.filter((name) => !this._conf[name]);
  if (missing.length > 0)
    throw new Error('Missing required option(s): ' + missing.join(', '));

  // default values
  if (!this._conf.size && this._conf.size !== 0) this._conf.size = 750 * 1024;
  if (!this._conf.time && this._conf.time !== 0) this._conf.time = 10000;
  if (!this._conf.serverTimeout && this._conf.serverTimeout !== 0)
    this._conf.serverTimeout = 15000;
  if (!this._conf.serverUrl) this._conf.serverUrl = 'http://127.0.0.1:8200';
  if (!this._conf.truncateKeywordsAt) this._conf.truncateKeywordsAt = 1024;
  if (!this._conf.truncateStringsAt) this._conf.truncateStringsAt = 1024;
  if (!this._conf.truncateCustomKeysAt) this._conf.truncateCustomKeysAt = 1024;
  if (!this._conf.truncateLongFieldsAt) this._conf.truncateLongFieldsAt = 10000;
  // The deprecated `truncateErrorMessagesAt` will be honored if specified.
  if (!this._conf.bufferWindowTime) this._conf.bufferWindowTime = 20;
  if (!this._conf.bufferWindowSize) this._conf.bufferWindowSize = 50;
  if (!this._conf.maxQueueSize) this._conf.maxQueueSize = 1024;
  if (!this._conf.intakeResTimeout) this._conf.intakeResTimeout = 10000;
  if (!this._conf.intakeResTimeoutOnEnd)
    this._conf.intakeResTimeoutOnEnd = 1000;
  this._conf.keepAlive = this._conf.keepAlive !== false;
  this._conf.centralConfig = this._conf.centralConfig || false;
  // `in` checks (rather than truthiness) so explicit falsy values are kept.
  if (!('keepAliveMsecs' in this._conf)) this._conf.keepAliveMsecs = 1000;
  if (!('maxSockets' in this._conf)) this._conf.maxSockets = Infinity;
  if (!('maxFreeSockets' in this._conf)) this._conf.maxFreeSockets = 256;
  if (!('freeSocketTimeout' in this._conf)) this._conf.freeSocketTimeout = 4000;

  // processed values
  this._conf.serverUrl = new URL(this._conf.serverUrl);

  this._conf.detectedHostname = detectHostname();

  // Default container/k8s identifiers from info detected at module load.
  if (containerInfo) {
    if (!this._conf.containerId && containerInfo.containerId) {
      this._conf.containerId = containerInfo.containerId;
    }
    if (!this._conf.kubernetesPodUID && containerInfo.podId) {
      this._conf.kubernetesPodUID = containerInfo.podId;
    }
    if (!this._conf.kubernetesPodName && containerInfo.podId) {
      // https://kubernetes.io/docs/concepts/workloads/pods/#working-with-pods
      // suggests a pod name should just be the shorter "DNS label", and my
      // guess is k8s defaults a pod name to just the *short* hostname, not
      // the FQDN.
      this._conf.kubernetesPodName = this._conf.detectedHostname.split(
        '.',
        1,
      )[0];
    }
  }

  // Pick the transport (and its uninstrumented request/get handles) based on
  // the server URL's protocol.
  let AgentKeepAlive;
  switch (this._conf.serverUrl.protocol) {
    case 'http:':
      this._transport = http;
      this._transportRequest = httpRequest;
      this._transportGet = httpGet;
      AgentKeepAlive = HttpAgentKeepAlive;
      break;
    case 'https:':
      this._transport = https;
      this._transportRequest = httpsRequest;
      this._transportGet = httpsGet;
      AgentKeepAlive = HttpsAgentKeepAlive;
      break;
    default:
      throw new Error('Unknown protocol ' + this._conf.serverUrl.protocol);
  }

  // Only reset `this._agent` if the serverUrl has changed to avoid
  // unnecessarily abandoning keep-alive connections.
  if (!this._agent || (opts && 'serverUrl' in opts)) {
    if (this._agent) {
      this._agent.destroy();
    }
    this._agent = new AgentKeepAlive({
      keepAlive: this._conf.keepAlive,
      keepAliveMsecs: this._conf.keepAliveMsecs,
      freeSocketTimeout: this._conf.freeSocketTimeout,
      timeout: this._conf.serverTimeout,
      maxSockets: this._conf.maxSockets,
      maxFreeSockets: this._conf.maxFreeSockets,
    });
  }

  // http request options
  this._conf.requestIntake = getIntakeRequestOptions(this._conf, this._agent);
  this._conf.requestConfig = getConfigRequestOptions(this._conf, this._agent);
  this._conf.requestSignalLambdaEnd = getSignalLambdaEndRequestOptions(
    this._conf,
    this._agent,
  );
  this._conf.requestRegisterTransaction = getRegisterTransactionRequestOptions(
    this._conf,
    this._agent,
  );

  // fixes bug where cached/memoized _encodedMetadata wouldn't be
  // updated when client was reconfigured
  if (this._encodedMetadata) {
    this._resetEncodedMetadata();
  }
};
349
+
350
/**
 * Set extra additional metadata to be sent to APM Server in intake requests.
 *
 * Calling this repeatedly is additive: later objects are deep-merged into the
 * accumulated extra metadata. If the Client was configured with
 * `expectExtraMetadata: true`, this also uncorks the client so intake
 * requests can begin.
 *
 * @param {Object} extraMetadata - Metadata to merge in.
 */
Client.prototype.setExtraMetadata = function (extraMetadata) {
  if (this._extraMetadata) {
    metadataMergeDeep(this._extraMetadata, extraMetadata);
  } else {
    // First call: take the object as-is.
    this._extraMetadata = extraMetadata;
  }
  this._resetEncodedMetadata();

  if (this._conf.expectExtraMetadata) {
    this._log.trace('maybe uncork (expectExtraMetadata)');
    this._maybeUncork();
  }
};
371
+
372
/**
 * Add a filter function used to filter the "metadata" object sent to APM
 * server. See the APM Agent `addMetadataFilter` documentation for details.
 * https://www.elastic.co/guide/en/apm/agent/nodejs/current/agent-api.html#apm-add-metadata-filter
 *
 * @param {Function} fn - The metadata filter to append.
 */
Client.prototype.addMetadataFilter = function (fn) {
  assert.strictEqual(typeof fn, 'function', 'fn arg must be a function');
  this._metadataFilters.push(fn);
  if (!this._encodedMetadata) {
    // Metadata has not been encoded yet; the filter will be applied when
    // `_resetEncodedMetadata` first runs.
    return;
  }
  this._resetEncodedMetadata();
};
384
+
385
/**
 * (Re)build the memoized `_encodedMetadata` from `this._conf`,
 * `this._cloudMetadata`, `this._extraMetadata`, and any registered
 * `this._metadataFilters`.
 *
 * This is the only code path that may assign `_encodedMetadata`.
 *
 * @throws {Error} If the metadata object cannot be encoded.
 */
Client.prototype._resetEncodedMetadata = function () {
  // Deep-clone the cloud/extra metadata before merging so the originals are
  // never modified by (a) adding `.cloud` and (b) filtering. This is not
  // perf-sensitive code, so the JSON cycle inside `deepClone` suffices.
  let md = metadataFromConf(this._conf, this);
  if (this._cloudMetadata) {
    md.cloud = deepClone(this._cloudMetadata);
  }
  if (this._extraMetadata) {
    metadataMergeDeep(md, deepClone(this._extraMetadata));
  }

  // Apply possible filters from the APM agent's `apm.addMetadataFilter()`.
  const filters = this._metadataFilters;
  if (filters && filters.length > 0) {
    md = filters.process(md);
  }

  this._encodedMetadata = this._encode({ metadata: md }, Client.encoding.METADATA);
  if (!this._encodedMetadata) {
    // The APM client cannot function without encoded metadata. Handling this
    // could be improved (e.g. log details and disable the APM agent). However,
    // this suffices for now as we have never observed a metadata encoding
    // failure.
    throw new Error(
      'could not encode metadata (trace-level logging will include details)',
    );
  }
  this._log.trace(
    { _encodedMetadata: this._encodedMetadata },
    '_resetEncodedMetadata',
  );
};
422
+
423
// Poll APM server once for central configuration. On success, emits a
// 'config' event with the parsed config object; on failure, emits
// 'request-error'. Always re-schedules the next poll (via
// `_scheduleNextConfigPoll`, using the response's max-age when available).
Client.prototype._pollConfig = function () {
  const opts = this._conf.requestConfig;
  // Send the previous ETag so the server can reply 304 when nothing changed.
  if (this._conf.lastConfigEtag) {
    opts.headers['If-None-Match'] = this._conf.lastConfigEtag;
  }

  const req = this._transportGet(opts, (res) => {
    res.on('error', (err) => {
      // Not sure this event can ever be emitted, but just in case
      res.destroy(err);
    });

    this._scheduleNextConfigPoll(getMaxAge(res));

    // Spec: https://github.com/elastic/apm/blob/main/specs/agents/configuration.md#dealing-with-errors
    if (res.statusCode === 304) {
      this._log.trace('_pollConfig: no new central config since last poll');
      res.resume();
      return;
    } else if (res.statusCode === 403) {
      this._log.debug('_pollConfig: central config not enabled in APM Server');
      res.resume();
      return;
    } else if (res.statusCode === 404) {
      // Either a very old APM server, or early fully-managed (aka serverless).
      this._log.debug(
        '_pollConfig: APM server does not support central config',
      );
      res.resume();
      return;
    }

    streamToBuffer(res, (err, buf) => {
      if (err) {
        this.emit('request-error', processConfigErrorResponse(res, buf, err));
        return;
      }

      if (res.statusCode === 200) {
        // 200: New config available (or no config for the given service.name / service.environment)
        const etag = res.headers.etag;
        if (etag) this._conf.lastConfigEtag = etag;

        let config;
        try {
          config = JSON.parse(buf);
        } catch (parseErr) {
          this.emit(
            'request-error',
            processConfigErrorResponse(res, buf, parseErr),
          );
          return;
        }
        this.emit('config', config);
      } else {
        this.emit('request-error', processConfigErrorResponse(res, buf));
      }
    });
  });

  req.on('error', (err) => {
    // Request failed before a response arrived; still schedule the next poll.
    this._scheduleNextConfigPoll();
    this.emit('request-error', err);
  });
};
488
+
489
// Schedule the next central-config poll, unless one is already pending.
// `seconds` is an optional server-suggested interval; the actual delay is
// determined by `getCentralConfigIntervalS`. The timer is unreffed so it
// does not keep the process alive.
Client.prototype._scheduleNextConfigPoll = function (seconds) {
  if (this._configTimer !== null) {
    return; // a poll is already scheduled
  }

  const intervalS = getCentralConfigIntervalS(seconds);
  const timer = setTimeout(() => {
    this._configTimer = null;
    this._pollConfig();
  }, intervalS * 1000);
  timer.unref();
  this._configTimer = timer;
};
500
+
501
// Re-ref all of the keep-alive agent's open socket handles (they are created
// unreffed) so they keep the event loop alive again.
Client.prototype._ref = function () {
  for (const remote of Object.keys(this._agent.sockets)) {
    for (const socket of this._agent.sockets[remote]) {
      socket.ref();
    }
  }
};
509
+
510
// Writable `_write` implementation: encode a single APM event object and pass
// it to the stream chopper. Flush markers get special handling via
// `_writeFlush`.
//
// @param {Object|Symbol} obj - An APM event object, or a flush marker.
// @param {Symbol} enc - One of `Client.encoding.*`.
// @param {Function} cb - Stream write callback; must always be called.
Client.prototype._write = function (obj, enc, cb) {
  if (isFlushMarker(obj)) {
    this._writeFlush(obj, cb);
  } else {
    const t = process.hrtime();
    const chunk = this._encode(obj, enc);
    if (!chunk) {
      // Encoding failed: drop the event, but still invoke `cb`. (Bug fix:
      // returning without calling the write callback would permanently stall
      // this Writable stream -- Node streams require `_write` to signal
      // completion via the callback in every code path.)
      cb();
      return;
    }
    this._numEventsEnqueued++;
    this._chopper.write(chunk, cb);
    this._log.trace(
      {
        fullTimeMs: deltaMs(t),
        numEvents: 1,
        numBytes: chunk.length,
      },
      '_write: encode object',
    );
  }
};
531
+
532
// Writable `_writev` implementation: process a queue of buffered writes.
// Regular events are encoded and written to the chopper in batches of at most
// `MAX_WRITE_BATCH_SIZE`; flush markers found in the queue are handled
// specially via `_writeFlush`. `processBatch` re-invokes itself (as the
// batch/flush callback) until the whole queue is consumed, then calls `cb`.
Client.prototype._writev = function (objs, cb) {
  // Limit the size of individual writes to manageable batches, primarily to
  // limit large sync pauses due to `_encode`ing in `_writeBatch`. This value
  // is not particularly well tuned. It was selected to get sync pauses under
  // 10ms on a developer machine.
  const MAX_WRITE_BATCH_SIZE = 32;

  let offset = 0;

  const processBatch = () => {
    // Stop consuming the queue if the client was destroyed mid-processing.
    if (this.destroyed) {
      cb();
      return;
    }

    // Find the first flush marker (if any) within the next batch window.
    let flushIdx = -1;
    const limit = Math.min(objs.length, offset + MAX_WRITE_BATCH_SIZE);
    for (let i = offset; i < limit; i++) {
      if (isFlushMarker(objs[i].chunk)) {
        flushIdx = i;
        break;
      }
    }

    if (
      offset === 0 &&
      flushIdx === -1 &&
      objs.length <= MAX_WRITE_BATCH_SIZE
    ) {
      // A shortcut if there is no flush marker and the whole `objs` fits in a batch.
      this._writeBatch(objs, cb);
    } else if (flushIdx === -1) {
      // No flush marker in this batch.
      this._writeBatch(
        objs.slice(offset, limit),
        limit === objs.length ? cb : processBatch,
      );
      offset = limit;
    } else if (flushIdx > offset) {
      // There are some events in the queue before a flush marker.
      this._writeBatch(objs.slice(offset, flushIdx), processBatch);
      offset = flushIdx;
    } else if (flushIdx === objs.length - 1) {
      // The next item is a flush marker, and it is the *last* item in the queue.
      this._writeFlush(objs[flushIdx].chunk, cb);
    } else {
      // The next item in the queue is a flush.
      this._writeFlush(objs[flushIdx].chunk, processBatch);
      offset++;
    }
  };

  processBatch();
};
586
+
587
// Write a batch of events (excluding specially handled "flush" events) to
// the stream chopper, and record slow-batch timing stats.
//
// @param {Array<{chunk: Object, encoding: Symbol}>} objs - Buffered writes.
// @param {Function} cb - Completion callback; must always be called.
Client.prototype._writeBatch = function (objs, cb) {
  const t = process.hrtime();
  const chunks = [];
  for (let i = 0; i < objs.length; i++) {
    const obj = objs[i];
    const encoded = this._encode(obj.chunk, obj.encoding);
    if (encoded) {
      chunks.push(encoded);
    }
  }
  if (chunks.length === 0) {
    // Every event in this batch failed to encode. Still invoke `cb`. (Bug
    // fix: returning without calling the callback would stall the stream and
    // break `_writev`'s `processBatch` continuation chain.)
    cb();
    return;
  }
  const chunk = chunks.join('');
  const encodeTimeMs = deltaMs(t);

  this._numEventsEnqueued += chunks.length;
  this._chopper.write(chunk, cb);
  const fullTimeMs = deltaMs(t);

  // Track the slowest batch seen, plus a count of batches over 10ms.
  if (fullTimeMs > this._slowWriteBatch.fullTimeMs) {
    this._slowWriteBatch.encodeTimeMs = encodeTimeMs;
    this._slowWriteBatch.fullTimeMs = fullTimeMs;
    this._slowWriteBatch.numEvents = objs.length;
    this._slowWriteBatch.numBytes = chunk.length;
  }
  if (fullTimeMs > 10) {
    this._slowWriteBatch.numOver10Ms++;
  }
  this._log.trace(
    {
      encodeTimeMs,
      fullTimeMs,
      numEvents: chunks.length,
      numBytes: chunk.length,
    },
    '_writeBatch',
  );
};
628
+
629
// Handle one of the special "flush" marker objects from the write queue:
// conclude (or schedule conclusion of) the current intake request and run the
// flush callback once it has concluded.
Client.prototype._writeFlush = function (flushMarker, cb) {
  const isLambdaEnd = flushMarker === kLambdaEndFlush;
  this._log.trace(
    { activeIntakeReq: this._activeIntakeReq, lambdaEnd: isLambdaEnd },
    '_writeFlush',
  );

  let onFlushed = cb;
  if (isLambdaExecutionEnvironment && isLambdaEnd) {
    // Signal the Elastic AWS Lambda extension that it is done passing data
    // for this invocation, then call `cb()` so the wrapped Lambda handler
    // can finish.
    onFlushed = () => {
      this._signalLambdaEnd(cb);
    };
  }

  if (this._activeIntakeReq) {
    // An intake request is currently in flight: stash the callback so it is
    // run when that request concludes, and ask the chopper to rotate.
    this._onIntakeReqConcluded = onFlushed;
    this._chopper.chop();
  } else {
    this._chopper.chop(onFlushed);
  }
};
655
+
656
// Possibly cork the stream to buffer up events, and possibly trigger an
// uncork once the buffered amount crosses `bufferWindowSize`.
Client.prototype._maybeCork = function () {
  if (this._writableState.corked) {
    // Already corked: release the buffer once it has grown large enough.
    if (this._writableState.length >= this._conf.bufferWindowSize) {
      this._maybeUncork();
    }
    return;
  }

  if (isLambdaExecutionEnvironment && !this._lambdaActive) {
    // Outside an active Lambda invocation: hold writes until the next one.
    this.cork();
    return;
  }

  if (this._conf.bufferWindowTime === -1) {
    // Time-based buffering is disabled.
    return;
  }

  this.cork();
  if (this._corkTimer && this._corkTimer.refresh) {
    // the refresh function was added in Node 10.2.0
    this._corkTimer.refresh();
  } else {
    this._corkTimer = setTimeout(() => {
      this.uncork();
    }, this._conf.bufferWindowTime);
  }
};
675
+
676
// Uncork the stream if it is safe to do so: metadata must be available, and
// in a Lambda environment an invocation must currently be active.
Client.prototype._maybeUncork = function () {
  // The client must remain corked until cloud metadata has been
  // fetched-or-skipped.
  if (!this._encodedMetadata) {
    return;
  }
  // In a Lambda env, we must only uncork when an invocation is active,
  // otherwise we could start an intake request just before the VM is frozen.
  if (isLambdaExecutionEnvironment && !this._lambdaActive) {
    return;
  }
  if (!this._writableState.corked) {
    return;
  }

  // Wait till next tick, so that the current write that triggered the call
  // to `_maybeUncork` has time to be added to the queue. If we didn't do
  // this, that last write would trigger a single call to `_write`.
  process.nextTick(() => {
    const lambdaIdle = isLambdaExecutionEnvironment && !this._lambdaActive;
    if (this.destroyed === false && !lambdaIdle) {
      this.uncork();
    }
  });

  if (this._corkTimer) {
    clearTimeout(this._corkTimer);
    this._corkTimer = null;
  }
};
706
+
707
// Serialize one queued event object to an ndjson line, truncating its fields
// per this client's config. Returns the ndjson string, or `null` if the
// object could not be encoded (a warning is logged in that case).
Client.prototype._encode = function (obj, enc) {
  // Map each known encoding to the event attribute name and the matching
  // truncation function.
  const codecs = {
    [Client.encoding.SPAN]: { attr: 'span', trunc: truncate.span },
    [Client.encoding.TRANSACTION]: {
      attr: 'transaction',
      trunc: truncate.transaction,
    },
    [Client.encoding.METADATA]: { attr: 'metadata', trunc: truncate.metadata },
    [Client.encoding.ERROR]: { attr: 'error', trunc: truncate.error },
    [Client.encoding.METRICSET]: {
      attr: 'metricset',
      trunc: truncate.metricset,
    },
  };
  const codec = codecs[enc] || {};
  const outAttr = codec.attr;
  const truncFunc = codec.trunc;
  const thing = obj[outAttr];

  const out = {};
  try {
    out[outAttr] = truncFunc(thing, this._conf);
  } catch (err) {
    this._log.warn(
      {
        err,
        // Only log full problematic object at TRACE level to limit noise.
        thing: this._log.isLevelEnabled('trace') ? thing : '[REDACTED]',
        thing_id: thing?.id,
        thing_name: thing?.name,
      },
      `could not encode "${outAttr}" object`,
    );
    return null;
  }

  return ndjson.serialize(out);
};
758
+
759
// Mark the start of a Lambda invocation: sets the flag that `_maybeCork`/
// `_maybeUncork` consult before letting intake requests proceed.
Client.prototype.lambdaStart = function () {
  this._lambdaActive = true;
};
762
+
763
/**
 * Whether the APM agent -- when in a Lambda environment -- should bother
 * calling `.lambdaRegisterTransaction(...)`. The flag is cleared by
 * `lambdaRegisterTransaction` itself after an unsuccessful registration.
 *
 * @returns {boolean}
 */
Client.prototype.lambdaShouldRegisterTransactions = function () {
  return this._lambdaShouldRegisterTransactions;
};
772
+
773
/**
 * Tell the local Lambda extension about the just-started transaction. This
 * allows the extension to report the transaction in certain error cases
 * where the APM agent isn't able to *end* the transaction and report it,
 * e.g. if the function is about to timeout, or if the process crashes.
 *
 * The expected request is as follows, and a 200 status code is expected in
 * response:
 *
 *    POST /register/transaction
 *    Content-Type: application/vnd.elastic.apm.transaction+ndjson
 *    x-elastic-aws-request-id: ${awsRequestId}
 *
 *    {"metadata":{...}}
 *    {"transaction":{...partial transaction data...}}
 *
 * On any failure (encode error, non-200 response, timeout, request error)
 * this method disables further registration attempts by clearing
 * `this._lambdaShouldRegisterTransactions`.
 *
 * @param {object} trans - a mostly complete APM Transaction object. It should
 *    have a default `outcome` value. `duration` and `result` (and possibly
 *    `outcome`) fields will be set by the Elastic Lambda extension if this
 *    transaction is used.
 * @param {import('crypto').UUID} awsRequestId
 * @returns {Promise || undefined} So this can, and should, be `await`ed.
 *    If returning a promise, it will only resolve, never reject. Returns
 *    `undefined` (no request is made) outside a Lambda environment or when
 *    registration has been disabled.
 */
Client.prototype.lambdaRegisterTransaction = function (trans, awsRequestId) {
  if (!isLambdaExecutionEnvironment) {
    return;
  }
  if (!this._lambdaShouldRegisterTransactions) {
    return;
  }
  assert(this._encodedMetadata, '_encodedMetadata is set');

  // We expect to be talking to the localhost Elastic Lambda extension, so we
  // want a shorter timeout than `_conf.serverTimeout`.
  const TIMEOUT_MS = 5000;
  const startTime = performance.now();

  return new Promise((resolve, reject) => {
    this._log.trace(
      { awsRequestId, traceId: trans.trace_id, transId: trans.id },
      'lambdaRegisterTransaction start',
    );

    // Single completion path: logs the outcome and resolves the promise.
    // May be called more than once (e.g. non-200 response followed by a
    // request "error" event); extra `resolve()` calls are harmless no-ops.
    const finish = (errOrErrMsg) => {
      const durationMs = performance.now() - startTime;
      if (errOrErrMsg) {
        this._log.debug(
          { awsRequestId, err: errOrErrMsg, durationMs },
          'lambdaRegisterTransaction unsuccessful',
        );
        // Do not attempt registration again for this process.
        this._lambdaShouldRegisterTransactions = false;
      } else {
        this._log.trace(
          { awsRequestId, durationMs },
          'lambdaRegisterTransaction success',
        );
      }
      resolve(); // always resolve, never reject
    };

    var out = this._encode({ transaction: trans }, Client.encoding.TRANSACTION);
    if (!out) {
      finish('could not encode transaction');
      return;
    }

    // Every `POST /register/transaction` request must set the
    // `x-elastic-aws-request-id` header. Instead of creating a new options obj
    // each time, we just modify in-place.
    this._conf.requestRegisterTransaction.headers['x-elastic-aws-request-id'] =
      awsRequestId;

    const req = this._transportRequest(
      this._conf.requestRegisterTransaction,
      (res) => {
        res.on('error', (err) => {
          // Not sure this event can ever be emitted, but just in case.
          res.destroy(err);
        });
        // Drain the response body; we only care about the status code.
        res.resume();
        if (res.statusCode !== 200) {
          finish(`unexpected response status code: ${res.statusCode}`);
          return;
        }
        res.on('end', function () {
          finish();
        });
      },
    );
    req.setTimeout(TIMEOUT_MS);
    req.on('timeout', () => {
      // `.destroy(err)` surfaces as the "error" event handled below.
      req.destroy(
        new Error(`timeout (${TIMEOUT_MS}ms) registering lambda transaction`),
      );
    });
    req.on('error', (err) => {
      finish(err);
    });
    // Metadata must always be the first ndjson line of the request body.
    req.write(this._encodedMetadata);
    req.write(out);
    req.end();
  });
};
877
+
878
// With the cork/uncork handling on this stream, `this.write`ing on this
// stream when already destroyed will lead to:
//    Error: Cannot call write after a stream was destroyed
// when the `_corkTimer` expires. The `send*` methods below consult this
// before every write.
Client.prototype._isUnsafeToWrite = function () {
  return this.destroyed;
};
885
+
886
// Count an incoming event and decide whether it must be dropped because the
// internal write queue has reached `maxQueueSize`. Increments `_numEvents`
// always, and `_numEventsDropped` when dropping.
Client.prototype._shouldDropEvent = function () {
  this._numEvents++;
  if (this._writableState.length < this._conf.maxQueueSize) {
    return false;
  }
  this._numEventsDropped++;
  return true;
};
894
+
895
// Enqueue a span for sending, unless the stream is destroyed or the internal
// queue is full (in which case the event is counted as dropped).
Client.prototype.sendSpan = function (span, cb) {
  const rejected = this._isUnsafeToWrite() || this._shouldDropEvent();
  if (rejected) {
    return;
  }
  this._maybeCork();
  return this.write({ span }, Client.encoding.SPAN, cb);
};
902
+
903
// Enqueue a transaction for sending, unless the stream is destroyed or the
// internal queue is full (in which case the event is counted as dropped).
Client.prototype.sendTransaction = function (transaction, cb) {
  const rejected = this._isUnsafeToWrite() || this._shouldDropEvent();
  if (rejected) {
    return;
  }
  this._maybeCork();
  return this.write({ transaction }, Client.encoding.TRANSACTION, cb);
};
910
+
911
// Enqueue an error event for sending, unless the stream is destroyed or the
// internal queue is full (in which case the event is counted as dropped).
Client.prototype.sendError = function (error, cb) {
  const rejected = this._isUnsafeToWrite() || this._shouldDropEvent();
  if (rejected) {
    return;
  }
  this._maybeCork();
  return this.write({ error }, Client.encoding.ERROR, cb);
};
918
+
919
// Enqueue a metricset for sending, unless the stream is destroyed or the
// internal queue is full (in which case the event is counted as dropped).
Client.prototype.sendMetricSet = function (metricset, cb) {
  const rejected = this._isUnsafeToWrite() || this._shouldDropEvent();
  if (rejected) {
    return;
  }
  this._maybeCork();
  return this.write({ metricset }, Client.encoding.METRICSET, cb);
};
926
+
927
/**
 * If possible, start a flush of currently queued APM events to APM server.
 *
 * "If possible," because there are some guards on uncorking. See `_maybeUncork`.
 *
 * @param {Object} opts - Optional.
 *    - {Boolean} opts.lambdaEnd - Optional. Default false. Setting this true
 *      tells the client to also handle the end of a Lambda function invocation.
 * @param {Function} cb - Optional. `cb()` will be called when the data has
 *    be sent to APM Server (or failed in the attempt).
 */
Client.prototype.flush = function (opts, cb) {
  // Support both `flush(cb)` and `flush(opts, cb)` call signatures.
  if (typeof opts === 'function') {
    cb = opts;
    opts = {};
  }
  opts = opts || {};
  const lambdaEnd = !!opts.lambdaEnd;

  // Write the special "flush" signal. We do this so that the order of writes
  // and flushes are kept. If we where to just flush the client right here, the
  // internal Writable buffer might still contain data that hasn't yet been
  // given to the _write function.

  if (!(lambdaEnd && isLambdaExecutionEnvironment && this._lambdaActive)) {
    this._maybeUncork();
    return this.write(kFlush, cb);
  }

  // Lambda-end flush: to flush the current data and ensure that subsequently
  // sent events *in the same tick* do not start a new intake request, we must
  // uncork synchronously -- rather than the nextTick uncork done in
  // `_maybeUncork()`.
  assert(
    this._encodedMetadata,
    'client.flush({lambdaEnd:true}) must not be called before metadata has been set',
  );
  const rv = this.write(kLambdaEndFlush, cb);
  this.uncork();
  this._lambdaActive = false;
  return rv;
};
969
+
970
// A handler that can be called on process "beforeExit" to attempt quick and
// orderly shutdown of the client. It attempts to ensure that the current
// active intake API request to APM server is completed quickly.
Client.prototype._gracefulExit = function () {
  this._log.trace('_gracefulExit');

  // Ask any in-flight intake request to finish up quickly.
  this._intakeRequestGracefulExitFn?.();

  // Calling _ref here, instead of relying on the _ref call in `_final`,
  // is necessary because `client.end()` does *not* result in the Client's
  // `_final()` being called when the process is exiting.
  this._ref();
  this.end();
};
986
+
987
// Writable `_final` hook: stop the central-config poll timer, drop the global
// auto-end reference, re-ref handles so the final flush can complete, and end
// the stream chopper.
Client.prototype._final = function (cb) {
  this._log.trace('_final');
  const pendingConfigTimer = this._configTimer;
  if (pendingConfigTimer) {
    clearTimeout(pendingConfigTimer);
    this._configTimer = null;
  }
  // Remove global reference to ease garbage collection.
  clientsToAutoEnd[this._index] = null;
  this._ref();
  this._chopper.end();
  cb();
};
998
+
999
// Writable `_destroy` hook: cancel outstanding timers, drop the global
// auto-end reference, and tear down the chopper and the keep-alive agent.
Client.prototype._destroy = function (err, cb) {
  this._log.trace({ err }, '_destroy');
  for (const timerName of ['_configTimer', '_corkTimer']) {
    if (this[timerName]) {
      clearTimeout(this[timerName]);
      this[timerName] = null;
    }
  }
  // Remove global reference to ease garbage collection.
  clientsToAutoEnd[this._index] = null;
  this._chopper.destroy();
  this._agent.destroy();
  cb(err);
};
1014
+
1015
// Return the appropriate backoff delay (in milliseconds) before a next possible
// request to APM server.
// Spec: https://github.com/elastic/apm/blob/main/specs/agents/transport.md#transport-errors
//
// In a Lambda environment, a backoff delay can be harmful: The backoff
// setTimeout is unref'd, to not hold the process open. A subsequent Lambda
// function invocation during that timer will result in no active handles and
// a process "beforeExit" event. That event is interpreted by the Lambda Runtime
// as "the Lambda function callback was never called", and it terminates the
// function and responds with `null`. The solution is to never backoff in a
// Lambda environment -- we expect and assume the Lambda extension is working,
// and pass responsibility for backoff to the extension.
Client.prototype._getBackoffDelay = function (isErr) {
  let count;
  if (isErr && !isLambdaExecutionEnvironment) {
    count = this._backoffReconnectCount;
    this._backoffReconnectCount += 1;
  } else {
    count = 0;
    this._backoffReconnectCount = 0;
  }

  // min(reconnectCount++, 6) ** 2 ± 10%
  const baseS = Math.min(count, 6) ** 2;
  const jitterS = baseS * (0.2 * Math.random() - 0.1);
  return (baseS + jitterS) * 1000;
};
1042
+
1043
// Return the StreamChopper "onStream" handler for this `client`: a function
// that, given a fresh gzip stream of encoded events, performs one intake API
// request. `onerror(err)` is invoked when the request concludes with an error.
function getChoppedStreamHandler(client, onerror) {
  // Make a request to the apm-server intake API.
  // https://www.elastic.co/guide/en/apm/server/current/events-api.html
  //
  // In normal operation this works as follows:
  // - The StreamChopper (`this._chopper`) calls this function with a newly
  //   created Gzip stream, to which it writes encoded event data.
  // - It `gzipStream.end()`s the stream when:
  //   (a) approximately `apiRequestSize` of data have been written,
  //   (b) `apiRequestTime` seconds have passed, or
  //   (c) `_chopper.chop()` is explicitly called via `client.flush()`,
  //       e.g. used by the Node.js APM agent after `client.sendError()`.
  // - This function makes the HTTP POST to the apm-server, pipes the gzipStream
  //   to it, and waits for the completion of the request and the apm-server
  //   response.
  // - Then it calls the given `next` callback to signal StreamChopper that
  //   another chopped stream can be created, when there is more the send.
  //
  // Of course, things can go wrong. Here are the known ways this pipeline can
  // conclude.
  // - intake response success - A successful response from the APM server. This
  //   is the normal operation case described above.
  // - gzipStream error - An "error" event on the gzip stream.
  // - intake request error - An "error" event on the intake HTTP request, e.g.
  //   ECONNREFUSED or ECONNRESET.
  // - intakeResTimeout - A timer started *after* we are finished sending data
  //   to the APM server by which we require a response (including its body). By
  //   default this is 10s -- a very long time to allow for a slow or far
  //   apm-server. If we hit this, APM server is problematic anyway, so the
  //   delay doesn't add to the problems.
  // - serverTimeout - An idle timeout value (default 30s) set on the socket.
  //   This is a catch-all fallback for an otherwised wedged connection. If this
  //   is being hit, there is some major issue in the application (possibly a
  //   bug in the APM agent).
  // - process completion - The Client takes pains to always `.unref()` its
  //   handles to never keep a using process open if it is ready to exit. When
  //   the process is ready to exit, the following happens:
  //   - The "beforeExit" handler above will call `client._gracefulExit()` ...
  //   - ... which calls `client._ref()` to *hold the process open* to
  //     complete this request, and `client.end()` to end the `gzipStream` so
  //     this request can complete soon.
  //   - We then expect this request to complete quickly and the process will
  //     then finish exiting. A subtlety is if the APM server is not responding
  //     then we'll wait on the shorter `intakeResTimeoutOnEnd` (by default 1s).
  return function makeIntakeRequest(gzipStream, next) {
    // Per-request id used only to correlate trace-level log records.
    const reqId = crypto.randomBytes(16).toString('hex');
    const log = client._log.child({ reqId });
    const startTime = process.hrtime();
    // `timeline` collects [deltaMs, label, errMsg] entries for trace logging.
    const timeline = [];
    let bytesWritten = 0;
    let intakeRes;
    let intakeReqSocket = null;
    let intakeResTimer = null;
    let intakeRequestGracefulExitCalled = false;
    const intakeResTimeout = client._conf.intakeResTimeout;
    const intakeResTimeoutOnEnd = client._conf.intakeResTimeoutOnEnd;

    // `_activeIntakeReq` is used to coordinate the callback to `client.flush(db)`.
    client._activeIntakeReq = true;

    // Handle conclusion of this intake request. Each "part" is expected to call
    // `completePart()` at least once -- multiple calls are okay for cases like
    // the "error" and "close" events on a stream being called. When a part
    // errors or all parts are completed, then we can conclude.
    let concluded = false;
    const completedFromPart = {
      gzipStream: false,
      intakeReq: false,
      intakeRes: false,
    };
    let numToComplete = Object.keys(completedFromPart).length;
    const completePart = (part, err) => {
      log.trace({ err, concluded }, 'completePart %s', part);
      timeline.push([
        deltaMs(startTime),
        `completePart ${part}`,
        err && err.message,
      ]);
      assert(part in completedFromPart, `'${part}' is in completedFromPart`);

      if (concluded) {
        return;
      }

      // If this is the final part to complete, then we are ready to conclude.
      let allPartsCompleted = false;
      if (!completedFromPart[part]) {
        completedFromPart[part] = true;
        numToComplete--;
        if (numToComplete === 0) {
          allPartsCompleted = true;
        }
      }
      if (!err && !allPartsCompleted) {
        return;
      }

      // Conclude.
      concluded = true;
      if (err) {
        // There was an error: clean up resources.

        // Note that in Node v8, destroying the gzip stream results in it
        // emitting an "error" event as follows. No harm, however.
        //    Error: gzip stream error: zlib binding closed
        //        at Gzip._transform (zlib.js:369:15)
        //        ...
        destroyStream(gzipStream);
        intakeReq.destroy();
        if (intakeResTimer) {
          log.trace('cancel intakeResTimer');
          clearTimeout(intakeResTimer);
          intakeResTimer = null;
        }
      }
      client._intakeRequestGracefulExitFn = null;

      client.sent = client._numEventsEnqueued;
      client._activeIntakeReq = false;
      const backoffDelayMs = client._getBackoffDelay(!!err);
      if (err) {
        log.trace(
          { timeline, bytesWritten, backoffDelayMs, err },
          'conclude intake request: error',
        );
        onerror(err);
      } else {
        log.trace(
          { timeline, bytesWritten, backoffDelayMs },
          'conclude intake request: success',
        );
      }
      // Run any callback stashed by `_writeFlush` for this request.
      if (client._onIntakeReqConcluded) {
        client._onIntakeReqConcluded();
        client._onIntakeReqConcluded = null;
      }

      if (backoffDelayMs > 0) {
        setTimeout(next, backoffDelayMs).unref();
      } else {
        setImmediate(next);
      }
    };

    // Provide a function on the client for it to signal this intake request
    // to gracefully shutdown, i.e. finish up quickly.
    client._intakeRequestGracefulExitFn = () => {
      intakeRequestGracefulExitCalled = true;
      if (intakeReqSocket) {
        log.trace('_intakeRequestGracefulExitFn: re-ref intakeReqSocket');
        intakeReqSocket.ref();
      }
      if (intakeResTimer) {
        log.trace(
          '_intakeRequestGracefulExitFn: reset intakeResTimer to short timeout',
        );
        clearTimeout(intakeResTimer);
        intakeResTimer = setTimeout(() => {
          completePart(
            'intakeRes',
            new Error(
              'intake response timeout: APM server did not respond ' +
                `within ${
                  intakeResTimeoutOnEnd / 1000
                }s of graceful exit signal`,
            ),
          );
        }, intakeResTimeoutOnEnd).unref();
      }
    };

    // Start the request and set its timeout.
    const intakeReq = client._transportRequest(client._conf.requestIntake);
    if (Number.isFinite(client._conf.serverTimeout)) {
      intakeReq.setTimeout(client._conf.serverTimeout);
    }
    // TODO: log intakeReq and intakeRes when
    // https://github.com/elastic/ecs-logging-nodejs/issues/67 is implemented.
    log.trace('intake request start');

    // Handle events on the intake request.
    // https://nodejs.org/api/http.html#http_http_request_options_callback docs
    // emitted events on the req and res objects for different scenarios.
    intakeReq.on('timeout', () => {
      log.trace('intakeReq "timeout"');
      // `.destroy(err)` will result in an "error" event.
      intakeReq.destroy(
        new Error(
          `APM Server response timeout (${client._conf.serverTimeout}ms)`,
        ),
      );
    });

    intakeReq.on('socket', function (socket) {
      intakeReqSocket = socket;
      // Unref the socket for this request so that the Client does not keep
      // the node process running if it otherwise would be done. (This is
      // tested by the "unref-client" test in test/side-effects.js.)
      //
      // The HTTP keep-alive agent will unref sockets when unused, and ref them
      // during a request. Given that the normal makeIntakeRequest behaviour
      // is to keep a request open for up to 10s (`apiRequestTime`), we must
      // manually unref the socket.
      //
      // The exception is when in a Lambda environment, where we *do* want to
      // keep the node process running to complete this intake request.
      // Otherwise a 'beforeExit' event can be sent, which the Lambda runtime
      // interprets as "the Lambda handler callback was never called".
      if (!isLambdaExecutionEnvironment && !intakeRequestGracefulExitCalled) {
        log.trace('intakeReq "socket": unref it');
        intakeReqSocket.unref();
      }
    });

    intakeReq.on('response', (intakeRes_) => {
      intakeRes = intakeRes_;
      log.trace(
        { statusCode: intakeRes.statusCode, reqFinished: intakeReq.finished },
        'intakeReq "response"',
      );
      let err;
      const chunks = [];

      if (!intakeReq.finished) {
        // Premature response from APM server. Typically this is for errors
        // like "queue is full", for which the response body will be parsed
        // below. However, set an `err` as a fallback for the unexpected case
        // that is with a 2xx response.
        if (intakeRes.statusCode >= 200 && intakeRes.statusCode < 300) {
          err = new Error(
            `premature apm-server response with statusCode=${intakeRes.statusCode}`,
          );
        }
        // There is no point (though no harm) in sending more data to the APM
        // server. In case reading the error response body takes a while, pause
        // the gzip stream until it is destroyed in `completePart()`.
        gzipStream.pause();
      }

      // Handle events on the intake response.
      intakeRes.on('error', (intakeResErr) => {
        // I am not aware of a way to get an "error" event on the
        // IncomingMessage (see also https://stackoverflow.com/q/53691119), but
        // handling it here is preferable to an uncaughtException.
        intakeResErr = wrapError(intakeResErr, 'intake response error event');
        completePart('intakeRes', intakeResErr);
      });
      intakeRes.on('data', (chunk) => {
        chunks.push(chunk);
      });
      // intakeRes.on('close', () => { log.trace('intakeRes "close"') })
      // intakeRes.on('aborted', () => { log.trace('intakeRes "aborted"') })
      intakeRes.on('end', () => {
        log.trace('intakeRes "end"');
        if (intakeResTimer) {
          clearTimeout(intakeResTimer);
          intakeResTimer = null;
        }
        if (intakeRes.statusCode < 200 || intakeRes.statusCode > 299) {
          err = processIntakeErrorResponse(intakeRes, Buffer.concat(chunks));
        }
        completePart('intakeRes', err);
      });
    });

    // intakeReq.on('abort', () => { log.trace('intakeReq "abort"') })
    // intakeReq.on('close', () => { log.trace('intakeReq "close"') })
    intakeReq.on('finish', () => {
      log.trace('intakeReq "finish"');
      completePart('intakeReq');
    });
    intakeReq.on('error', (err) => {
      log.trace('intakeReq "error"');
      completePart('intakeReq', err);
    });

    // Handle events on the gzip stream.
    gzipStream.on('data', (chunk) => {
      bytesWritten += chunk.length;
    });
    gzipStream.on('error', (gzipErr) => {
      log.trace('gzipStream "error"');
      gzipErr = wrapError(gzipErr, 'gzip stream error');
      completePart('gzipStream', gzipErr);
    });
    gzipStream.on('finish', () => {
      // If the apm-server is not reading its input and the gzip data is large
      // enough to fill buffers, then the gzip stream will emit "finish", but
      // not "end". Therefore, this "finish" event is the best indicator that
      // the ball is now in the apm-server's court.
      //
      // We now start a timer waiting on the response, provided we still expect
      // one (we don't if the request has already errored out, e.g.
      // ECONNREFUSED) and it hasn't already completed (e.g. if it replied
      // quickly with "queue is full").
      log.trace('gzipStream "finish"');
      if (!completedFromPart.intakeReq && !completedFromPart.intakeRes) {
        const timeout =
          client._writableState.ending || intakeRequestGracefulExitCalled
            ? intakeResTimeoutOnEnd
            : intakeResTimeout;
        log.trace({ timeout }, 'start intakeResTimer');
        intakeResTimer = setTimeout(() => {
          completePart(
            'intakeRes',
            new Error(
              'intake response timeout: APM server did not respond ' +
                `within ${timeout / 1000}s of gzip stream finish`,
            ),
          );
        }, timeout).unref();
      }
    });
    // Watch the gzip "end" event for its completion, because the "close" event
    // that we would prefer to use, *does not get emitted* for the
    // `client.sendSpan(callback) + client.flush()` test case with
    // *node v12-only*.
    gzipStream.on('end', () => {
      log.trace('gzipStream "end"');
      completePart('gzipStream');
    });
    // gzipStream.on('close', () => { log.trace('gzipStream "close"') })

    // Hook up writing data to a file (only intended for local debugging).
    // Append the intake data to `payloadLogFile`, if given. This is only
    // intended for local debugging because it can have a significant perf
    // impact.
    if (client._conf.payloadLogFile) {
      const payloadLogStream = fs.createWriteStream(
        client._conf.payloadLogFile,
        { flags: 'a' },
      );
      gzipStream.pipe(zlib.createGunzip()).pipe(payloadLogStream);
    }

    // Send the metadata object (always first) and hook up the streams.
    assert(client._encodedMetadata, 'client._encodedMetadata is set');
    gzipStream.write(client._encodedMetadata);
    gzipStream.pipe(intakeReq);
  };
}
1384
+
1385
/**
 * Some behaviors in the APM depend on the APM Server version. These are
 * exposed as `Client#supports...` boolean methods.
 *
 * These `Client#supports...` method names, if not always the implementation,
 * intentionally match those from the Java agent:
 * https://github.com/elastic/apm-agent-java/blob/master/apm-agent-core/src/main/java/co/elastic/apm/agent/report/ApmServerClient.java#L322-L349
 */
Client.prototype.supportsKeepingUnsampledTransaction = function () {
  // Default to assuming we are using a pre-8.0 APM Server if we haven't
  // yet fetched the version. There is no harm in sending unsampled
  // transactions to APM Server >=v8.0.
  return !this._apmServerVersion || this._apmServerVersion.major < 8;
};
1403
Client.prototype.supportsActivationMethodField = function () {
  // APM server 8.7.0 had a bug where continuing to send `activation_method` is
  // harmful.
  if (!this._apmServerVersion) {
    // Optimistically assume APM server isn't v8.7.0.
    return true;
  }
  return semver.gte(this._apmServerVersion, '8.7.1');
};
1412
Client.prototype.supportsConfiguredAndDetectedHostname = function () {
  if (!this._apmServerVersion) {
    // Optimistically assume APM server is >=7.4.
    return true;
  }
  return semver.gte(this._apmServerVersion, '7.4.0');
};
1419
+
1420
/**
 * Signal to the Elastic AWS Lambda extension that a lambda function execution
 * is done.
 * https://github.com/elastic/apm/blob/main/specs/agents/tracing-instrumentation-aws-lambda.md#data-flushing
 *
 * @param {Function} cb() is called when finished. There are no arguments.
 */
Client.prototype._signalLambdaEnd = function (cb) {
  this._log.trace('_signalLambdaEnd start');
  const startTime = performance.now();

  // Single completion path: log the outcome, then call `cb` (no arguments).
  const done = (errOrErrMsg) => {
    const durationMs = performance.now() - startTime;
    if (errOrErrMsg) {
      this._log.error(
        { err: errOrErrMsg, durationMs },
        '_signalLambdaEnd error',
      );
    } else {
      this._log.trace({ durationMs }, '_signalLambdaEnd success');
    }
    cb();
  };

  // We expect to be talking to the localhost Elastic Lambda extension, so we
  // want a shorter timeout than `_conf.serverTimeout`.
  const TIMEOUT_MS = 5000;

  const req = this._transportRequest(
    this._conf.requestSignalLambdaEnd,
    (res) => {
      res.on('error', (err) => {
        // Not sure this event can ever be emitted, but just in case.
        res.destroy(err);
      });
      // Drain the body; only the status code matters.
      res.resume();
      if (res.statusCode !== 202) {
        done(`unexpected response status code: ${res.statusCode}`);
        return;
      }
      res.on('end', () => {
        done();
      });
    },
  );
  req.setTimeout(TIMEOUT_MS);
  req.on('timeout', () => {
    // `.destroy(err)` surfaces as the "error" event below.
    req.destroy(
      new Error(`timeout (${TIMEOUT_MS}ms) signaling Lambda invocation done`),
    );
  });
  req.on('error', done);
  req.end();
};
1475
+
1476
/**
 * Fetch the APM Server version and set `this._apmServerVersion`.
 * https://www.elastic.co/guide/en/apm/server/current/server-info.html
 *
 * If fetching/parsing fails then the APM server version will be set to `null`
 * to indicate "unknown version".
 */
Client.prototype._fetchApmServerVersion = function () {
  // Record "unknown version", re-encode metadata accordingly, and report the
  // failure (debug log in a Lambda env, 'request-error' event elsewhere).
  const setVerUnknownAndNotify = (errmsg) => {
    this._apmServerVersion = null; // means "unknown version"
    this._resetEncodedMetadata();
    if (isLambdaExecutionEnvironment) {
      // In a Lambda environment, where the process can be frozen, it is not
      // unusual for this request to hit an error. As long as APM Server version
      // fetching is not critical to tracing of Lambda invocations, then it is
      // preferable to not add an error message to the users log.
      this._log.debug('verfetch: ' + errmsg);
    } else {
      this.emit('request-error', new Error(errmsg));
    }
  };
  const headers = getHeaders(this._conf);
  // Explicitly do *not* pass in `this._agent` -- the keep-alive http.Agent
  // used for intake requests -- because the socket.ref() handling in
  // `Client#_ref()` conflicts with the socket.unref() below.
  const reqOpts = getBasicRequestOptions('GET', '/', headers, this._conf);
  reqOpts.timeout = 30000;

  const req = this._transportGet(reqOpts, (res) => {
    res.on('error', (err) => {
      // Not sure this event can ever be emitted, but just in case
      res.destroy(err);
    });

    if (res.statusCode !== 200) {
      res.resume();
      setVerUnknownAndNotify(
        `unexpected status from APM Server information endpoint: ${res.statusCode}`,
      );
      return;
    }

    // Collect the full body, then parse it on 'end'.
    const chunks = [];
    res.on('data', (chunk) => {
      chunks.push(chunk);
    });
    res.on('end', () => {
      if (chunks.length === 0) {
        setVerUnknownAndNotify(
          'APM Server information endpoint returned no body, often this indicates authentication ("apiKey" or "secretToken") is incorrect',
        );
        return;
      }

      let serverInfo;
      try {
        serverInfo = JSON.parse(Buffer.concat(chunks));
      } catch (parseErr) {
        setVerUnknownAndNotify(
          `could not parse APM Server information endpoint body: ${parseErr.message}`,
        );
        return;
      }

      if (serverInfo) {
        // APM Server 7.0.0 dropped the "ok"-level in the info endpoint body.
        const verStr = serverInfo.ok
          ? serverInfo.ok.version
          : serverInfo.version;
        try {
          this._apmServerVersion = new semver.SemVer(verStr);
        } catch (verErr) {
          setVerUnknownAndNotify(
            `could not parse APM Server version "${verStr}": ${verErr.message}`,
          );
          return;
        }
        // A now-known server version can change what metadata is sent, so
        // re-encode it.
        this._resetEncodedMetadata();
        this._log.debug(
          { apmServerVersion: verStr },
          'fetched APM Server version',
        );
      } else {
        setVerUnknownAndNotify(
          `could not determine APM Server version from information endpoint body: ${JSON.stringify(
            serverInfo,
          )}`,
        );
      }
    });
  });

  req.on('socket', (socket) => {
    // Unref our socket to ensure this request does not keep the process alive.
    socket.unref();
  });
  req.on('timeout', () => {
    this._log.trace('_fetchApmServerVersion timeout');
    req.destroy(
      new Error(`timeout (${reqOpts.timeout}ms) fetching APM Server version`),
    );
  });
  req.on('error', (err) => {
    setVerUnknownAndNotify(`error fetching APM Server version: ${err.message}`);
  });
};
1582
+
1583
/**
 * Fetches cloud metadata, if any, and encodes metadata (to `_encodedMetadata`).
 *
 * @param {function} cb - Called, with no arguments, when complete.
 */
Client.prototype._fetchAndEncodeMetadata = function (cb) {
  assert(
    this._conf.cloudMetadataFetcher,
    '_fetchAndEncodeMetadata should not be called without a configured cloudMetadataFetcher',
  );
  this._conf.cloudMetadataFetcher.getCloudMetadata((err, cloudMetadata) => {
    if (err) {
      // The error is logged and otherwise ignored. A common case, when not
      // running on one of the big 3 clouds, is "all callbacks failed", which
      // is *fine*. Because it is a common "error" we don't log the stack
      // trace.
      this._log.trace('getCloudMetadata err: %s', err);
    } else if (cloudMetadata) {
      this._cloudMetadata = cloudMetadata;
    }
    this._resetEncodedMetadata();
    cb();
  });
};
1607
+
1608
// Build http request options for an intake API (`/intake/v2/events`) request.
function getIntakeRequestOptions(opts, agent) {
  const headers = Object.assign(getHeaders(opts), {
    'Content-Type': 'application/x-ndjson',
    'Content-Encoding': 'gzip',
  });
  return getBasicRequestOptions(
    'POST',
    '/intake/v2/events',
    headers,
    opts,
    agent,
  );
}
1621
+
1622
// Build http request options for the `/intake/v2/events?flushed=true`
// lambda-end signal request. The POST carries no body, hence the explicit
// zero Content-Length.
function getSignalLambdaEndRequestOptions(opts, agent) {
  const headers = Object.assign(getHeaders(opts), { 'Content-Length': 0 });
  return getBasicRequestOptions(
    'POST',
    '/intake/v2/events?flushed=true',
    headers,
    opts,
    agent,
  );
}
1634
+
1635
// Build http request options for a `/register/transaction` request.
function getRegisterTransactionRequestOptions(opts, agent) {
  const headers = Object.assign(getHeaders(opts), {
    'Content-Type': 'application/vnd.elastic.apm.transaction+ndjson',
  });
  return getBasicRequestOptions(
    'POST',
    '/register/transaction',
    headers,
    opts,
    agent,
  );
}
1646
+
1647
// Build http request options for a central-config (`/config/v1/agents`)
// request, identifying this service by name and environment.
function getConfigRequestOptions(opts, agent) {
  const qs = querystring.stringify({
    'service.name': opts.serviceName,
    'service.environment': opts.environment,
  });
  return getBasicRequestOptions(
    'GET',
    `/config/v1/agents?${qs}`,
    getHeaders(opts),
    opts,
    agent,
  );
}
1659
+
1660
// Common `http.request()` options shared by all APM server requests. When
// `opts.serverUrl` has a non-root path, `defaultPath` is appended to it.
function getBasicRequestOptions(method, defaultPath, headers, opts, agent) {
  const { serverUrl } = opts;
  const pathPrefix = serverUrl.pathname === '/' ? '' : serverUrl.pathname;
  return {
    agent,
    rejectUnauthorized: opts.rejectUnauthorized !== false,
    ca: opts.serverCaCert,
    hostname: serverUrl.hostname,
    port: serverUrl.port,
    method,
    path: pathPrefix + defaultPath,
    headers,
  };
}
1675
+
1676
// Base HTTP headers for talking to the APM server. `opts.apiKey` takes
// precedence over `opts.secretToken` for the Authorization header, and any
// entries in `opts.headers` override everything.
function getHeaders(opts) {
  const headers = {
    Accept: 'application/json',
    'User-Agent': opts.userAgent,
  };
  if (opts.secretToken) headers.Authorization = 'Bearer ' + opts.secretToken;
  if (opts.apiKey) headers.Authorization = 'ApiKey ' + opts.apiKey;
  return Object.assign(headers, opts.headers);
}
1684
+
1685
// Build the base "metadata" object for this client from the given config
// (`opts`) and the current process environment. `client` is consulted for
// which hostname/activation-method fields the APM server supports.
function metadataFromConf(opts, client) {
  const payload = {
    service: {
      name: opts.serviceName,
      environment: opts.environment,
      runtime: {
        name: process.release.name,
        version: process.versions.node,
      },
      language: {
        name: 'javascript',
      },
      agent: {
        name: opts.agentName,
        version: opts.agentVersion,
      },
      framework: undefined,
      version: undefined,
      node: undefined,
    },
    process: {
      pid: process.pid,
      ppid: process.ppid,
      title: process.title,
      argv: process.argv,
    },
    system: {
      architecture: process.arch,
      platform: process.platform,
      container: undefined,
      kubernetes: undefined,
    },
    labels: opts.globalLabels,
  };

  // On `system.*hostname` fields:
  // - `hostname` was deprecated in APM server v7.4, replaced by the next two.
  // - Around Elastic v8.9, ECS changed `host.name` to prefer the FQDN,
  //   hence APM agents now prefer FQDN for `detected_hostname`.
  if (!client.supportsConfiguredAndDetectedHostname()) {
    payload.system.hostname = opts.configuredHostname || opts.detectedHostname;
  } else {
    payload.system.detected_hostname = opts.detectedHostname;
    if (opts.configuredHostname) {
      payload.system.configured_hostname = opts.configuredHostname;
    }
  }

  if (opts.agentActivationMethod && client.supportsActivationMethodField()) {
    payload.service.agent.activation_method = opts.agentActivationMethod;
  }

  if (opts.serviceNodeName) {
    payload.service.node = { configured_name: opts.serviceNodeName };
  }

  if (opts.serviceVersion) {
    payload.service.version = opts.serviceVersion;
  }

  if (opts.frameworkName || opts.frameworkVersion) {
    payload.service.framework = {
      name: opts.frameworkName,
      version: opts.frameworkVersion,
    };
  }

  if (opts.containerId) {
    payload.system.container = { id: opts.containerId };
  }

  const haveKubernetes =
    opts.kubernetesNodeName ||
    opts.kubernetesNamespace ||
    opts.kubernetesPodName ||
    opts.kubernetesPodUID;
  if (haveKubernetes) {
    const kubernetes = { namespace: opts.kubernetesNamespace };
    kubernetes.node = opts.kubernetesNodeName
      ? { name: opts.kubernetesNodeName }
      : undefined;
    kubernetes.pod =
      opts.kubernetesPodName || opts.kubernetesPodUID
        ? { name: opts.kubernetesPodName, uid: opts.kubernetesPodUID }
        : undefined;
    payload.system.kubernetes = kubernetes;
  }

  return payload;
}
1778
+
1779
// Destroy a stream, working around zlib-stream close/leak quirks in older
// Node.js versions.
function destroyStream(stream) {
  const zlibClasses = [
    zlib.Gzip,
    zlib.Gunzip,
    zlib.Deflate,
    zlib.DeflateRaw,
    zlib.Inflate,
    zlib.InflateRaw,
    zlib.Unzip,
  ];
  const isZlibStream = zlibClasses.some((cls) => stream instanceof cls);

  if (!isZlibStream) {
    // For other streams we assume calling destroy is enough.
    if (typeof stream.destroy === 'function') {
      stream.destroy();
    } else if (typeof stream.emit === 'function') {
      // If there's no destroy (which Node.js 6 will not have on regular
      // streams), emit `close` as that should trigger almost the same effect.
      stream.emit('close');
    }
    return;
  }

  // Zlib streams doesn't have a destroy function in Node.js 6. On top of
  // that simply calling destroy on a zlib stream in Node.js 8+ will result
  // in a memory leak as the handle isn't closed (an operation normally done
  // by calling close). So until that is fixed, we need to manually close the
  // handle after destroying the stream.
  //
  // PR: https://github.com/nodejs/node/pull/23734
  if (typeof stream.destroy === 'function') {
    // Manually close the handle instead of calling `close()` as that would
    // have emitted 'close' again when calling `destroy()`.
    if (stream._handle && typeof stream._handle.close === 'function') {
      stream._handle.close();
      stream._handle = null;
    }
    stream.destroy();
  } else if (typeof stream.close === 'function') {
    stream.close();
  }
}
1816
+
1817
// Return true iff `value` is strictly equal (`===`) to an element of `list`.
function oneOf(value, list) {
  for (const candidate of list) {
    if (candidate === value) {
      return true;
    }
  }
  return false;
}
1820
+
1821
// Normalize the given labels object so every value is wire-safe: strings,
// numbers, and booleans are kept as-is; anything else is stringified via its
// `toString()`.
//
// Fix: `null` and `undefined` values previously crashed with a TypeError
// (`typeof null === 'object'` fails the allow-list, then `null.toString()`
// throws). They are now passed through unchanged.
//
// @param {Object|null|undefined} labels
// @returns {Object|undefined} A new normalized object, or undefined if
//   `labels` is falsy.
function normalizeGlobalLabels(labels) {
  if (!labels) return;
  const result = {};

  for (const key of Object.keys(labels)) {
    const value = labels[key];
    if (value == null || ['string', 'number', 'boolean'].includes(typeof value)) {
      result[key] = value;
    } else {
      result[key] = value.toString();
    }
  }

  return result;
}
1834
+
1835
// Parse the `max-age` directive (in seconds) out of a response's
// Cache-Control header. Returns undefined when absent or unparseable.
// https://httpwg.org/specs/rfc9111.html#cache-response-directive.max-age
function getMaxAge(res) {
  const cacheControl = res.headers['cache-control'];
  const match = cacheControl ? cacheControl.match(/max-age=(\d+)/i) : null;
  return match ? parseInt(match[1], 10) : undefined;
}
1847
+
1848
// Wrap the given Error object, including the given message.
//
// Dev Note: Various techniques exist to wrap `Error`s in node.js and JavaScript
// to provide a cause chain, e.g. see
// https://www.joyent.com/node-js/production/design/errors
// However, I'm not aware of a de facto "winner". Eventually there may be
// https://github.com/tc39/proposal-error-cause
// For now we will simply prefix the existing error object's `message` property.
// This is simple and preserves the root error `stack`.
function wrapError(err, msg) {
  err.message = `${msg}: ${err.message}`;
  return err;
}
1861
+
1862
// Build an Error describing an unexpected intake API response. The status
// code goes on `err.code`; a JSON body contributes `err.accepted` and
// `err.errors` per the events API error format, otherwise the raw body is
// set as `err.response`.
// https://www.elastic.co/guide/en/apm/server/current/events-api.html#events-api-errors
function processIntakeErrorResponse(res, buf) {
  const err = new Error('Unexpected APM Server response');
  err.code = res.statusCode;

  if (buf.length === 0) {
    return err;
  }

  const body = buf.toString('utf8');
  const contentType = res.headers['content-type'];
  if (!(contentType && contentType.startsWith('application/json'))) {
    err.response = body;
    return err;
  }

  try {
    const data = JSON.parse(body);
    err.accepted = data.accepted;
    err.errors = data.errors;
    if (!err.errors) err.response = body;
  } catch (parseErr) {
    err.response = body;
  }
  return err;
}
1887
+
1888
// Construct or decorate an Error instance from a failing response from the
// APM server central config endpoint.
//
// This library doesn't have a pattern for wrapping errors yet, so if we
// already have an Error instance, we just decorate it (which preserves the
// stack of the root cause error).
//
// @param {IncomingMessage} res
// @param {Buffer|undefined} buf - Optional. A Buffer holding the response body.
// @param {Error|undefined} err - Optional. A cause Error instance.
function processConfigErrorResponse(res, buf, err) {
  const errMsg = 'Unexpected APM Server response when polling config';
  if (err) {
    err.message = errMsg + ': ' + err.message;
  } else {
    err = new Error(errMsg);
  }
  err.code = res.statusCode;

  if (!(buf && buf.length > 0)) {
    return err;
  }

  const body = buf.toString('utf8');
  const contentType = res.headers['content-type'];
  if (!(contentType && contentType.startsWith('application/json'))) {
    err.response = body;
    return err;
  }

  try {
    const parsed = JSON.parse(body);
    if (typeof parsed === 'string') {
      err.response = parsed;
    } else if (
      parsed !== null &&
      typeof parsed === 'object' &&
      typeof parsed.error === 'string'
    ) {
      err.response = parsed.error;
    } else {
      err.response = body;
    }
  } catch (parseErr) {
    err.response = body;
  }
  return err;
}
1934
+
1935
// Return the time difference (in milliseconds) between the given time `t`
// (a 2-tuple as returned by `process.hrtime()`) and now.
function deltaMs(t) {
  const [secs, nanos] = process.hrtime(t);
  return secs * 1e3 + nanos / 1e6;
}
1941
+
1942
/**
 * Performs a deep merge of `source` into `target`. Mutates `target` only but
 * not its objects. Objects are merged, Arrays are not.
 *
 * @author inspired by [eden](https://gist.github.com/ahtcx/0cd94e62691f539160b32ecda18af3d6#gistcomment-2930530)
 */
function metadataMergeDeep(target, source) {
  // A "mergeable" value is a non-null, non-array object.
  const isMergeable = (val) =>
    Boolean(val) && typeof val === 'object' && !Array.isArray(val);

  if (!isMergeable(target) || !isMergeable(source)) {
    return source;
  }

  for (const key of Object.keys(source)) {
    const fromTarget = target[key];
    const fromSource = source[key];
    target[key] =
      isMergeable(fromTarget) && isMergeable(fromSource)
        ? // Recurse on a copy so `target`'s nested objects are not mutated.
          metadataMergeDeep({ ...fromTarget }, fromSource)
        : fromSource;
  }

  return target;
}
1972
+
1973
// Deep-clone a JSON-serializable value via a JSON round trip. (Per
// JSON.stringify semantics this drops `undefined` values and functions.)
function deepClone(obj) {
  const serialized = JSON.stringify(obj);
  return JSON.parse(serialized);
}