@prairielearn/opentelemetry 1.5.2 → 1.6.0
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/README.md +43 -1
- package/dist/index.d.ts +4 -25
- package/dist/index.js +16 -252
- package/dist/index.js.map +1 -1
- package/dist/init.d.ts +25 -0
- package/dist/init.js +258 -0
- package/dist/init.js.map +1 -0
- package/dist/metrics.d.ts +8 -0
- package/dist/metrics.js +67 -0
- package/dist/metrics.js.map +1 -0
- package/dist/{index.test.d.ts → metrics.test.d.ts} +0 -0
- package/dist/metrics.test.js +89 -0
- package/dist/metrics.test.js.map +1 -0
- package/dist/tracing.d.ts +2 -0
- package/dist/tracing.js +28 -0
- package/dist/tracing.js.map +1 -0
- package/dist/tracing.test.d.ts +1 -0
- package/dist/{index.test.js → tracing.test.js} +1 -1
- package/dist/tracing.test.js.map +1 -0
- package/package.json +17 -10
- package/src/index.ts +5 -294
- package/src/init.ts +317 -0
- package/src/metrics.test.ts +86 -0
- package/src/metrics.ts +93 -0
- package/src/{index.test.ts → tracing.test.ts} +0 -0
- package/src/tracing.ts +25 -0
- package/dist/index.test.js.map +0 -1
package/README.md
CHANGED

````diff
@@ -14,6 +14,8 @@ import { init } from '@prairielearn/opentelemetry';
 await init({
   openTelemetryEnabled: true,
   openTelemetryExporter: 'honeycomb',
+  openTelemetryMetricExporter: 'honeycomb',
+  openTelemetryMetricExportIntervalMillis: 30_000,
   openTelemetrySamplerType: 'always-on',
   openTelemetrySampleRate: 0.1,
   honeycombApiKey: 'KEY',
@@ -23,6 +25,10 @@ await init({
 
 This will automatically instrument a variety of commonly-used Node packages.
 
+When using code from the OpenTelemetry libraries, make sure you import it via `@prairielearn/opentelemetry` instead of installing it separately to ensure that there is only one version of each OpenTelemetry package in use at once. If the desired functionality is not yet exported, please add it!
+
+## Traces
+
 To easily instrument individual pieces of functionality, you can use the `instrumented()` helper function:
 
 ```ts
@@ -59,4 +65,40 @@ await tracer.startActiveSpan('span.name', async (span) => {
 });
 ```
 
-
+## Metrics
+
+You can manually create counters and other metrics with the following functions
+
+- `getHistogram`
+- `getCounter`
+- `getUpDownCounter`
+- `getObservableCounter`
+- `getObservableUpDownCounter`
+- `getObservableGauge`
+
+```ts
+import { metrics, getCounter, ValueType } from '@prairielearn/opentelemetry';
+
+function handleRequest(req, res) {
+  const meter = metrics.getMeter('meter-name');
+  const requestCounter = getCounter(meter, 'request.count', {
+    valueType: ValueType.INT,
+  });
+  requestCounter.add(1);
+}
+```
+
+You can also use the `instrumentedWithMetrics` helper to automatically capture a duration histogram and error count:
+
+```ts
+import { metrics, instrumentedWithMetrics } from '@prairielearn/opentelemetry';
+
+const meter = metrics.getMeter('meter-name');
+await instrumentedWithMetrics(meter, 'operation.name', async () => {
+  const random = Math.random() * 1000;
+  await new Promise((resolve) => setTimeout(resolve, random));
+  if (random > 900) {
+    throw new Error('Failed!');
+  }
+});
+```
````
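The new README paragraph about importing everything through `@prairielearn/opentelemetry` exists because duplicate copies of `@opentelemetry/api` in a dependency tree can each carry their own global tracer/meter registration, so telemetry recorded against a second copy may never reach the SDK configured here. A minimal consumer sketch of the recommended style (service and instrument names are illustrative, not from the package):

```ts
// All OpenTelemetry APIs come from the wrapper package, so the app shares
// one copy of '@opentelemetry/api' with the SDK that init() configures.
import { trace, metrics, SpanStatusCode } from '@prairielearn/opentelemetry';

const tracer = trace.getTracer('example-service');
const meter = metrics.getMeter('example-service');
const counter = meter.createCounter('example.work.count');

tracer.startActiveSpan('example.work', (span) => {
  try {
    counter.add(1);
    span.setStatus({ code: SpanStatusCode.OK });
  } finally {
    span.end();
  }
});
```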
package/dist/index.d.ts
CHANGED

```diff
@@ -1,26 +1,5 @@
-
-import { Span } from '@opentelemetry/api';
-export interface OpenTelemetryConfig {
-    openTelemetryEnabled: boolean;
-    openTelemetryExporter: 'console' | 'honeycomb' | 'jaeger' | SpanExporter;
-    openTelemetrySamplerType: 'always-on' | 'always-off' | 'trace-id-ratio';
-    openTelemetrySampleRate?: number;
-    openTelemetrySpanProcessor?: 'batch' | 'simple';
-    honeycombApiKey?: string;
-    honeycombDataset?: string;
-    serviceName?: string;
-}
-/**
- * Should be called once we've loaded our config; this will allow us to set up
- * the correct metadata for the Honeycomb exporter. We don't actually have that
- * information available until we've loaded our config.
- */
-export declare function init(config: OpenTelemetryConfig): Promise<void>;
-/**
- * Gracefully shuts down the OpenTelemetry instrumentation. Should be called
- * when a `SIGTERM` signal is handled.
- */
-export declare function shutdown(): Promise<void>;
-export declare function instrumented<T>(name: string, fn: (span: Span) => Promise<T> | T): Promise<T>;
-export { trace, context, SpanStatusCode } from '@opentelemetry/api';
+export { trace, metrics, context, SpanStatusCode, ValueType } from '@opentelemetry/api';
 export { suppressTracing } from '@opentelemetry/core';
+export { init, shutdown } from './init';
+export { instrumented } from './tracing';
+export { instrumentedWithMetrics } from './metrics';
```
package/dist/index.js
CHANGED

```diff
@@ -1,255 +1,19 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-const instrumentation_connect_1 = require("@opentelemetry/instrumentation-connect");
-const instrumentation_dns_1 = require("@opentelemetry/instrumentation-dns");
-const instrumentation_express_2 = require("@opentelemetry/instrumentation-express");
-const instrumentation_http_1 = require("@opentelemetry/instrumentation-http");
-const instrumentation_pg_1 = require("@opentelemetry/instrumentation-pg");
-const instrumentation_redis_1 = require("@opentelemetry/instrumentation-redis");
-// Resource detectors go here.
-const resource_detector_aws_1 = require("@opentelemetry/resource-detector-aws");
-const resources_2 = require("@opentelemetry/resources");
-/**
- * Extends `BatchSpanProcessor` to give it the ability to filter out spans
- * before they're queued up to send. This enhances our sampling process so
- * that we can filter spans _after_ they've been emitted.
- */
-class FilterBatchSpanProcessor extends sdk_trace_base_2.BatchSpanProcessor {
-    constructor(exporter, filter) {
-        super(exporter);
-        this.filter = filter;
-    }
-    /**
-     * This is invoked after a span is "finalized". `super.onEnd` will queue up
-     * the span to be exported, but if we don't call that, we can just drop the
-     * span and the parent will be none the wiser!
-     */
-    onEnd(span) {
-        if (!this.filter(span))
-            return;
-        super.onEnd(span);
-    }
-}
-/**
- * This will be used with our {@link FilterBatchSpanProcessor} to filter out
- * events that we're not interested in. This helps reduce our event volume
- * but still gives us fine-grained control over which events we keep.
- */
-function filter(span) {
-    if (span.name === 'pg-pool.connect') {
-        // Looking at historical data, this generally happens in under a millisecond,
-        // precisely because we maintain a pool of long-lived connections. The only
-        // time obtaining a client should take longer than that is if we're
-        // establishing a connection for the first time, which should happen only at
-        // bootup, or if a connection errors out. Those are the cases we're
-        // interested in, so we'll filter accordingly.
-        return (0, core_1.hrTimeToMilliseconds)(span.duration) > 1;
-    }
-    // Always return true so that we default to including a span.
-    return true;
-}
-const instrumentations = [
-    new instrumentation_aws_sdk_1.AwsInstrumentation(),
-    new instrumentation_connect_1.ConnectInstrumentation(),
-    new instrumentation_dns_1.DnsInstrumentation(),
-    new instrumentation_express_2.ExpressInstrumentation({
-        // We use a lot of middleware; it makes the traces way too noisy. If we
-        // want telemetry on a particular middleware, we should instrument it
-        // manually.
-        ignoreLayersType: [instrumentation_express_1.ExpressLayerType.MIDDLEWARE],
-        ignoreLayers: [
-            // These don't provide useful information to us.
-            'router - /',
-            'request handler - /*',
-        ],
-    }),
-    new instrumentation_http_1.HttpInstrumentation({
-        ignoreIncomingPaths: [
-            // socket.io requests are generally just long-polling; they don't add
-            // useful information for us.
-            /\/socket.io\//,
-            // We get several of these per second; they just chew through our event quota.
-            // They don't really do anything interesting anyways.
-            /\/pl\/webhooks\/ping/,
-        ],
-    }),
-    new instrumentation_pg_1.PgInstrumentation(),
-    new instrumentation_redis_1.RedisInstrumentation(),
-];
-// Enable all instrumentations now, even though we haven't configured our
-// span processors or trace exporters yet. We'll set those up later.
-instrumentations.forEach((i) => {
-    i.enable();
-});
-let tracerProvider;
-/**
- * Should be called once we've loaded our config; this will allow us to set up
- * the correct metadata for the Honeycomb exporter. We don't actually have that
- * information available until we've loaded our config.
- */
-async function init(config) {
-    if (!config.openTelemetryEnabled) {
-        // If not enabled, do nothing. We used to disable the instrumentations, but
-        // per maintainers, that can actually be problematic. See the comments on
-        // https://github.com/open-telemetry/opentelemetry-js-contrib/issues/970
-        // The Express instrumentation also logs a benign error, which can be
-        // confusing to users. There's a fix in progress if we want to switch back
-        // to disabling instrumentations in the future:
-        // https://github.com/open-telemetry/opentelemetry-js-contrib/pull/972
-        return;
-    }
-    let exporter;
-    if (typeof config.openTelemetryExporter === 'object') {
-        exporter = config.openTelemetryExporter;
-    }
-    else {
-        switch (config.openTelemetryExporter) {
-            case 'console': {
-                // Export spans to the console for testing purposes.
-                exporter = new sdk_node_1.tracing.ConsoleSpanExporter();
-                break;
-            }
-            case 'honeycomb': {
-                if (!config.honeycombApiKey)
-                    throw new Error('Missing Honeycomb API key');
-                if (!config.honeycombDataset)
-                    throw new Error('Missing Honeycomb dataset');
-                // Create a Honeycomb exporter with the appropriate metadata from the
-                // config we've been provided with.
-                const metadata = new grpc_js_1.Metadata();
-                metadata.set('x-honeycomb-team', config.honeycombApiKey);
-                metadata.set('x-honeycomb-dataset', config.honeycombDataset);
-                exporter = new exporter_trace_otlp_grpc_1.OTLPTraceExporter({
-                    url: 'grpc://api.honeycomb.io:443/',
-                    credentials: grpc_js_1.credentials.createSsl(),
-                    metadata,
-                });
-                break;
-            }
-            case 'jaeger': {
-                exporter = new exporter_jaeger_1.JaegerExporter({
-                    // By default, the UDP sender will be used, but that causes issues
-                    // with packet sizes when Jaeger is running in Docker. We'll instead
-                    // configure it to use the HTTP sender, which shouldn't face those
-                    // same issues. We'll still allow the endpoint to be overridden via
-                    // environment variable if needed.
-                    endpoint: process.env.OTEL_EXPORTER_JAEGER_ENDPOINT ?? 'http://localhost:14268/api/traces',
-                });
-                break;
-            }
-            default:
-                throw new Error(`Unknown OpenTelemetry exporter: ${config.openTelemetryExporter}`);
-        }
-    }
-    let sampler;
-    switch (config.openTelemetrySamplerType ?? 'always-on') {
-        case 'always-on': {
-            sampler = new sdk_trace_base_1.AlwaysOnSampler();
-            break;
-        }
-        case 'always-off': {
-            sampler = new sdk_trace_base_1.AlwaysOffSampler();
-            break;
-        }
-        case 'trace-id-ratio': {
-            sampler = new sdk_trace_base_1.ParentBasedSampler({
-                root: new sdk_trace_base_1.TraceIdRatioBasedSampler(config.openTelemetrySampleRate),
-            });
-            break;
-        }
-        default:
-            throw new Error(`Unknown OpenTelemetry sampler type: ${config.openTelemetrySamplerType}`);
-    }
-    let spanProcessor;
-    switch (config.openTelemetrySpanProcessor ?? 'batch') {
-        case 'batch': {
-            spanProcessor = new FilterBatchSpanProcessor(exporter, filter);
-            break;
-        }
-        case 'simple': {
-            spanProcessor = new sdk_trace_base_1.SimpleSpanProcessor(exporter);
-            break;
-        }
-        default: {
-            throw new Error(`Unknown OpenTelemetry span processor: ${config.openTelemetrySpanProcessor}`);
-        }
-    }
-    // Much of this functionality is copied from `@opentelemetry/sdk-node`, but
-    // we can't use the SDK directly because of the fact that we load our config
-    // asynchronously. We need to initialize our instrumentations first; only
-    // then can we actually start requiring all of our code that loads our config
-    // and ultimately tells us how to configure OpenTelemetry.
-    let resource = await (0, resources_1.detectResources)({
-        detectors: [resource_detector_aws_1.awsEc2Detector, resources_2.processDetector, resources_2.envDetector],
-    });
-    if (config.serviceName) {
-        resource = resource.merge(new resources_1.Resource({ [semantic_conventions_1.SemanticResourceAttributes.SERVICE_NAME]: config.serviceName }));
-    }
-    const nodeTracerProvider = new sdk_trace_node_1.NodeTracerProvider({
-        sampler,
-        resource,
-    });
-    nodeTracerProvider.addSpanProcessor(spanProcessor);
-    nodeTracerProvider.register();
-    instrumentations.forEach((i) => i.setTracerProvider(nodeTracerProvider));
-    // Save the provider so we can shut it down later.
-    tracerProvider = tracerProvider;
-}
-exports.init = init;
-/**
- * Gracefully shuts down the OpenTelemetry instrumentation. Should be called
- * when a `SIGTERM` signal is handled.
- */
-async function shutdown() {
-    if (tracerProvider) {
-        await tracerProvider.shutdown();
-        tracerProvider = null;
-    }
-}
-exports.shutdown = shutdown;
-async function instrumented(name, fn) {
-    return api_1.trace
-        .getTracer('default')
-        .startActiveSpan(name, async (span) => {
-        try {
-            const result = await fn(span);
-            span.setStatus({ code: api_1.SpanStatusCode.OK });
-            return result;
-        }
-        catch (e) {
-            span.setStatus({
-                code: api_1.SpanStatusCode.ERROR,
-                message: e.message,
-            });
-            span.recordException(e);
-            throw e;
-        }
-        finally {
-            span.end();
-        }
-    });
-}
-exports.instrumented = instrumented;
-var api_2 = require("@opentelemetry/api");
-Object.defineProperty(exports, "trace", { enumerable: true, get: function () { return api_2.trace; } });
-Object.defineProperty(exports, "context", { enumerable: true, get: function () { return api_2.context; } });
-Object.defineProperty(exports, "SpanStatusCode", { enumerable: true, get: function () { return api_2.SpanStatusCode; } });
-var core_2 = require("@opentelemetry/core");
-Object.defineProperty(exports, "suppressTracing", { enumerable: true, get: function () { return core_2.suppressTracing; } });
+exports.instrumentedWithMetrics = exports.instrumented = exports.shutdown = exports.init = exports.suppressTracing = exports.ValueType = exports.SpanStatusCode = exports.context = exports.metrics = exports.trace = void 0;
+var api_1 = require("@opentelemetry/api");
+Object.defineProperty(exports, "trace", { enumerable: true, get: function () { return api_1.trace; } });
+Object.defineProperty(exports, "metrics", { enumerable: true, get: function () { return api_1.metrics; } });
+Object.defineProperty(exports, "context", { enumerable: true, get: function () { return api_1.context; } });
+Object.defineProperty(exports, "SpanStatusCode", { enumerable: true, get: function () { return api_1.SpanStatusCode; } });
+Object.defineProperty(exports, "ValueType", { enumerable: true, get: function () { return api_1.ValueType; } });
+var core_1 = require("@opentelemetry/core");
+Object.defineProperty(exports, "suppressTracing", { enumerable: true, get: function () { return core_1.suppressTracing; } });
+var init_1 = require("./init");
+Object.defineProperty(exports, "init", { enumerable: true, get: function () { return init_1.init; } });
+Object.defineProperty(exports, "shutdown", { enumerable: true, get: function () { return init_1.shutdown; } });
+var tracing_1 = require("./tracing");
+Object.defineProperty(exports, "instrumented", { enumerable: true, get: function () { return tracing_1.instrumented; } });
+var metrics_1 = require("./metrics");
+Object.defineProperty(exports, "instrumentedWithMetrics", { enumerable: true, get: function () { return metrics_1.instrumentedWithMetrics; } });
 //# sourceMappingURL=index.js.map
```
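Two things stand out in this rewrite of `dist/index.js`. First, the removed lines show a bug in 1.5.2 as shipped: `init()` ended with the self-assignment `tracerProvider = tracerProvider;`, so the provider was never saved and `shutdown()` could never actually shut it down; the logic now lives in `init.js` below, which assigns `tracerProvider = nodeTracerProvider;`. Second, the module is now a pure re-export barrel. Judging from this emitted JS and the `.d.ts` above, the new `src/index.ts` (+5 −294 in the summary) presumably reads:

```ts
// Reconstructed from the dist output above; the actual src/index.ts is not
// shown verbatim in this section.
export { trace, metrics, context, SpanStatusCode, ValueType } from '@opentelemetry/api';
export { suppressTracing } from '@opentelemetry/core';
export { init, shutdown } from './init';
export { instrumented } from './tracing';
export { instrumentedWithMetrics } from './metrics';
```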
package/dist/index.js.map
CHANGED

```diff
@@ -1 +1 @@
-{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;AAAA,
+{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;AAAA,0CAAwF;AAA/E,4FAAA,KAAK,OAAA;AAAE,8FAAA,OAAO,OAAA;AAAE,8FAAA,OAAO,OAAA;AAAE,qGAAA,cAAc,OAAA;AAAE,gGAAA,SAAS,OAAA;AAC3D,4CAAsD;AAA7C,uGAAA,eAAe,OAAA;AAExB,+BAAwC;AAA/B,4FAAA,IAAI,OAAA;AAAE,gGAAA,QAAQ,OAAA;AACvB,qCAAyC;AAAhC,uGAAA,YAAY,OAAA;AACrB,qCAAoD;AAA3C,kHAAA,uBAAuB,OAAA"}
```
package/dist/init.d.ts
ADDED

```diff
@@ -0,0 +1,25 @@
+import { PushMetricExporter } from '@opentelemetry/sdk-metrics';
+import { SpanExporter } from '@opentelemetry/sdk-trace-base';
+export interface OpenTelemetryConfig {
+    openTelemetryEnabled: boolean;
+    openTelemetryExporter: 'console' | 'honeycomb' | 'jaeger' | SpanExporter;
+    openTelemetryMetricExporter?: 'console' | 'honeycomb' | PushMetricExporter;
+    openTelemetryMetricExportIntervalMillis?: number;
+    openTelemetrySamplerType: 'always-on' | 'always-off' | 'trace-id-ratio';
+    openTelemetrySampleRate?: number;
+    openTelemetrySpanProcessor?: 'batch' | 'simple';
+    honeycombApiKey?: string;
+    honeycombDataset?: string;
+    serviceName?: string;
+}
+/**
+ * Should be called once we've loaded our config; this will allow us to set up
+ * the correct metadata for the Honeycomb exporter. We don't actually have that
+ * information available until we've loaded our config.
+ */
+export declare function init(config: OpenTelemetryConfig): Promise<void>;
+/**
+ * Gracefully shuts down the OpenTelemetry instrumentation. Should be called
+ * when a `SIGTERM` signal is handled.
+ */
+export declare function shutdown(): Promise<void>;
```
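The only additions to `OpenTelemetryConfig` relative to the interface removed from `index.d.ts` above are the two optional metric fields. A hedged usage sketch (the values and the env-var lookup are illustrative assumptions, not package behavior):

```ts
import { init } from '@prairielearn/opentelemetry';

await init({
  openTelemetryEnabled: true,
  openTelemetryExporter: 'honeycomb',
  // Optional: omit openTelemetryMetricExporter to leave metrics disabled.
  openTelemetryMetricExporter: 'honeycomb',
  // Optional: init.js below defaults the export interval to 30000 ms.
  openTelemetryMetricExportIntervalMillis: 30_000,
  openTelemetrySamplerType: 'always-on',
  honeycombApiKey: process.env.HONEYCOMB_API_KEY, // illustrative
  honeycombDataset: 'example-dataset',
  serviceName: 'example-service',
});
```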
package/dist/init.js
ADDED

```diff
@@ -0,0 +1,258 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.shutdown = exports.init = void 0;
+const grpc_js_1 = require("@grpc/grpc-js");
+const sdk_trace_node_1 = require("@opentelemetry/sdk-trace-node");
+const sdk_metrics_1 = require("@opentelemetry/sdk-metrics");
+const sdk_trace_base_1 = require("@opentelemetry/sdk-trace-base");
+const resources_1 = require("@opentelemetry/resources");
+const semantic_conventions_1 = require("@opentelemetry/semantic-conventions");
+const instrumentation_express_1 = require("@opentelemetry/instrumentation-express");
+const api_1 = require("@opentelemetry/api");
+const core_1 = require("@opentelemetry/core");
+// Exporters go here.
+const exporter_trace_otlp_grpc_1 = require("@opentelemetry/exporter-trace-otlp-grpc");
+const exporter_jaeger_1 = require("@opentelemetry/exporter-jaeger");
+const exporter_metrics_otlp_grpc_1 = require("@opentelemetry/exporter-metrics-otlp-grpc");
+// Instrumentations go here.
+const instrumentation_aws_sdk_1 = require("@opentelemetry/instrumentation-aws-sdk");
+const instrumentation_connect_1 = require("@opentelemetry/instrumentation-connect");
+const instrumentation_dns_1 = require("@opentelemetry/instrumentation-dns");
+const instrumentation_express_2 = require("@opentelemetry/instrumentation-express");
+const instrumentation_http_1 = require("@opentelemetry/instrumentation-http");
+const instrumentation_pg_1 = require("@opentelemetry/instrumentation-pg");
+const instrumentation_redis_1 = require("@opentelemetry/instrumentation-redis");
+// Resource detectors go here.
+const resource_detector_aws_1 = require("@opentelemetry/resource-detector-aws");
+const resources_2 = require("@opentelemetry/resources");
+/**
+ * Extends `BatchSpanProcessor` to give it the ability to filter out spans
+ * before they're queued up to send. This enhances our sampling process so
+ * that we can filter spans _after_ they've been emitted.
+ */
+class FilterBatchSpanProcessor extends sdk_trace_base_1.BatchSpanProcessor {
+    constructor(exporter, filter) {
+        super(exporter);
+        this.filter = filter;
+    }
+    /**
+     * This is invoked after a span is "finalized". `super.onEnd` will queue up
+     * the span to be exported, but if we don't call that, we can just drop the
+     * span and the parent will be none the wiser!
+     */
+    onEnd(span) {
+        if (!this.filter(span))
+            return;
+        super.onEnd(span);
+    }
+}
+/**
+ * This will be used with our {@link FilterBatchSpanProcessor} to filter out
+ * events that we're not interested in. This helps reduce our event volume
+ * but still gives us fine-grained control over which events we keep.
+ */
+function filter(span) {
+    if (span.name === 'pg-pool.connect') {
+        // Looking at historical data, this generally happens in under a millisecond,
+        // precisely because we maintain a pool of long-lived connections. The only
+        // time obtaining a client should take longer than that is if we're
+        // establishing a connection for the first time, which should happen only at
+        // bootup, or if a connection errors out. Those are the cases we're
+        // interested in, so we'll filter accordingly.
+        return (0, core_1.hrTimeToMilliseconds)(span.duration) > 1;
+    }
+    // Always return true so that we default to including a span.
+    return true;
+}
+const instrumentations = [
+    new instrumentation_aws_sdk_1.AwsInstrumentation(),
+    new instrumentation_connect_1.ConnectInstrumentation(),
+    new instrumentation_dns_1.DnsInstrumentation(),
+    new instrumentation_express_2.ExpressInstrumentation({
+        // We use a lot of middleware; it makes the traces way too noisy. If we
+        // want telemetry on a particular middleware, we should instrument it
+        // manually.
+        ignoreLayersType: [instrumentation_express_1.ExpressLayerType.MIDDLEWARE],
+        ignoreLayers: [
+            // These don't provide useful information to us.
+            'router - /',
+            'request handler - /*',
+        ],
+    }),
+    new instrumentation_http_1.HttpInstrumentation({
+        ignoreIncomingPaths: [
+            // socket.io requests are generally just long-polling; they don't add
+            // useful information for us.
+            /\/socket.io\//,
+            // We get several of these per second; they just chew through our event quota.
+            // They don't really do anything interesting anyways.
+            /\/pl\/webhooks\/ping/,
+        ],
+    }),
+    new instrumentation_pg_1.PgInstrumentation(),
+    new instrumentation_redis_1.RedisInstrumentation(),
+];
+// Enable all instrumentations now, even though we haven't configured our
+// span processors or trace exporters yet. We'll set those up later.
+instrumentations.forEach((i) => {
+    i.enable();
+});
+let tracerProvider;
+function getHoneycombMetadata(config, datasetSuffix = '') {
+    if (!config.honeycombApiKey)
+        throw new Error('Missing Honeycomb API key');
+    if (!config.honeycombDataset)
+        throw new Error('Missing Honeycomb dataset');
+    const metadata = new grpc_js_1.Metadata();
+    metadata.set('x-honeycomb-team', config.honeycombApiKey);
+    metadata.set('x-honeycomb-dataset', config.honeycombDataset + datasetSuffix);
+    return metadata;
+}
+function getTraceExporter(config) {
+    if (typeof config.openTelemetryExporter === 'object') {
+        return config.openTelemetryExporter;
+    }
+    switch (config.openTelemetryExporter) {
+        case 'console':
+            return new sdk_trace_base_1.ConsoleSpanExporter();
+        case 'honeycomb':
+            return new exporter_trace_otlp_grpc_1.OTLPTraceExporter({
+                url: 'grpc://api.honeycomb.io:443/',
+                credentials: grpc_js_1.credentials.createSsl(),
+                metadata: getHoneycombMetadata(config),
+            });
+            break;
+        case 'jaeger':
+            return new exporter_jaeger_1.JaegerExporter({
+                // By default, the UDP sender will be used, but that causes issues
+                // with packet sizes when Jaeger is running in Docker. We'll instead
+                // configure it to use the HTTP sender, which shouldn't face those
+                // same issues. We'll still allow the endpoint to be overridden via
+                // environment variable if needed.
+                endpoint: process.env.OTEL_EXPORTER_JAEGER_ENDPOINT ?? 'http://localhost:14268/api/traces',
+            });
+        default:
+            throw new Error(`Unknown OpenTelemetry exporter: ${config.openTelemetryExporter}`);
+    }
+}
+function getMetricExporter(config) {
+    if (!config.openTelemetryMetricExporter)
+        return null;
+    if (typeof config.openTelemetryMetricExporter === 'object') {
+        return config.openTelemetryMetricExporter;
+    }
+    switch (config.openTelemetryMetricExporter) {
+        case 'console':
+            return new sdk_metrics_1.ConsoleMetricExporter();
+        case 'honeycomb':
+            return new exporter_metrics_otlp_grpc_1.OTLPMetricExporter({
+                url: 'grpc://api.honeycomb.io:443/',
+                credentials: grpc_js_1.credentials.createSsl(),
+                // Honeycomb recommends using a separate dataset for metrics, so we'll
+                // adopt the convention of appending '-metrics' to the dataset name.
+                metadata: getHoneycombMetadata(config, '-metrics'),
+                // Delta temporality means that sums, histograms, etc. will reset each
+                // time data is collected. This more closely matches how we want to
+                // observe our metrics than the default cumulative temporality.
+                temporalityPreference: sdk_metrics_1.AggregationTemporality.DELTA,
+            });
+        default:
+            throw new Error(`Unknown OpenTelemetry metric exporter: ${config.openTelemetryMetricExporter}`);
+    }
+}
+/**
+ * Should be called once we've loaded our config; this will allow us to set up
+ * the correct metadata for the Honeycomb exporter. We don't actually have that
+ * information available until we've loaded our config.
+ */
+async function init(config) {
+    if (!config.openTelemetryEnabled) {
+        // If not enabled, do nothing. We used to disable the instrumentations, but
+        // per maintainers, that can actually be problematic. See the comments on
+        // https://github.com/open-telemetry/opentelemetry-js-contrib/issues/970
+        // The Express instrumentation also logs a benign error, which can be
+        // confusing to users. There's a fix in progress if we want to switch back
+        // to disabling instrumentations in the future:
+        // https://github.com/open-telemetry/opentelemetry-js-contrib/pull/972
+        return;
+    }
+    const traceExporter = getTraceExporter(config);
+    const metricExporter = getMetricExporter(config);
+    let sampler;
+    switch (config.openTelemetrySamplerType ?? 'always-on') {
+        case 'always-on': {
+            sampler = new sdk_trace_base_1.AlwaysOnSampler();
+            break;
+        }
+        case 'always-off': {
+            sampler = new sdk_trace_base_1.AlwaysOffSampler();
+            break;
+        }
+        case 'trace-id-ratio': {
+            sampler = new sdk_trace_base_1.ParentBasedSampler({
+                root: new sdk_trace_base_1.TraceIdRatioBasedSampler(config.openTelemetrySampleRate),
+            });
+            break;
+        }
+        default:
+            throw new Error(`Unknown OpenTelemetry sampler type: ${config.openTelemetrySamplerType}`);
+    }
+    let spanProcessor;
+    switch (config.openTelemetrySpanProcessor ?? 'batch') {
+        case 'batch': {
+            spanProcessor = new FilterBatchSpanProcessor(traceExporter, filter);
+            break;
+        }
+        case 'simple': {
+            spanProcessor = new sdk_trace_base_1.SimpleSpanProcessor(traceExporter);
+            break;
+        }
+        default: {
+            throw new Error(`Unknown OpenTelemetry span processor: ${config.openTelemetrySpanProcessor}`);
+        }
+    }
+    // Much of this functionality is copied from `@opentelemetry/sdk-node`, but
+    // we can't use the SDK directly because of the fact that we load our config
+    // asynchronously. We need to initialize our instrumentations first; only
+    // then can we actually start requiring all of our code that loads our config
+    // and ultimately tells us how to configure OpenTelemetry.
+    let resource = await (0, resources_1.detectResources)({
+        detectors: [resource_detector_aws_1.awsEc2Detector, resources_2.processDetector, resources_2.envDetector],
+    });
+    if (config.serviceName) {
+        resource = resource.merge(new resources_1.Resource({ [semantic_conventions_1.SemanticResourceAttributes.SERVICE_NAME]: config.serviceName }));
+    }
+    // Set up tracing instrumentation.
+    const nodeTracerProvider = new sdk_trace_node_1.NodeTracerProvider({
+        sampler,
+        resource,
+    });
+    nodeTracerProvider.addSpanProcessor(spanProcessor);
+    nodeTracerProvider.register();
+    instrumentations.forEach((i) => i.setTracerProvider(nodeTracerProvider));
+    // Save the provider so we can shut it down later.
+    tracerProvider = nodeTracerProvider;
+    // Set up metrics instrumentation if it's enabled.
+    if (metricExporter) {
+        const meterProvider = new sdk_metrics_1.MeterProvider({ resource });
+        api_1.metrics.setGlobalMeterProvider(meterProvider);
+        const metricReader = new sdk_metrics_1.PeriodicExportingMetricReader({
+            exporter: metricExporter,
+            exportIntervalMillis: config.openTelemetryMetricExportIntervalMillis ?? 30000,
+        });
+        meterProvider.addMetricReader(metricReader);
+    }
+}
+exports.init = init;
+/**
+ * Gracefully shuts down the OpenTelemetry instrumentation. Should be called
+ * when a `SIGTERM` signal is handled.
+ */
+async function shutdown() {
+    if (tracerProvider) {
+        await tracerProvider.shutdown();
+        tracerProvider = null;
+    }
+}
+exports.shutdown = shutdown;
+//# sourceMappingURL=init.js.map
```
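The `shutdown()` docblock says it should be called when a `SIGTERM` signal is handled, and nothing in `init.js` installs a signal handler itself. A minimal sketch of that wiring; the error handling and exit behavior here are assumptions of this example, not package behavior:

```ts
import { shutdown } from '@prairielearn/opentelemetry';

process.on('SIGTERM', () => {
  // Flush any queued spans before the process exits.
  shutdown()
    .catch((err) => console.error('Error shutting down OpenTelemetry', err))
    .finally(() => process.exit(0));
});
```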