iii-sdk 0.0.2-alpha

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
@@ -0,0 +1,908 @@
+ import { SpanKind, SpanStatusCode, context, metrics, propagation, trace } from "@opentelemetry/api";
+ import { WebSocket } from "ws";
+ import { SeverityNumber as SeverityNumber$1 } from "@opentelemetry/api-logs";
+ import { Resource } from "@opentelemetry/resources";
+ import { ATTR_SERVICE_NAME } from "@opentelemetry/semantic-conventions";
+ import { randomUUID } from "crypto";
+ import { SimpleSpanProcessor } from "@opentelemetry/sdk-trace-base";
+ import { MeterProvider, PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
+ import { CompositePropagator, ExportResultCode, W3CBaggagePropagator, W3CTraceContextPropagator } from "@opentelemetry/core";
+ import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
+ import { registerInstrumentations } from "@opentelemetry/instrumentation";
+ import { LoggerProvider, SimpleLogRecordProcessor } from "@opentelemetry/sdk-logs";
+ import { JsonLogsSerializer, JsonMetricsSerializer, JsonTraceSerializer } from "@opentelemetry/otlp-transformer";
+ import { monitorEventLoopDelay, performance } from "node:perf_hooks";
+
+ //#region src/iii-constants.ts
+ /**
+ * Constants for the III module.
+ */
+ /** Engine function paths for internal operations */
+ const EngineFunctions = {
+ LIST_FUNCTIONS: "engine.functions.list",
+ LIST_WORKERS: "engine.workers.list",
+ REGISTER_WORKER: "engine.workers.register"
+ };
+ /** Engine trigger types */
+ const EngineTriggers = {
+ FUNCTIONS_AVAILABLE: "engine::functions-available",
+ LOG: "log"
+ };
+ /** Log function paths */
+ const LogFunctions = {
+ INFO: "log.info",
+ WARN: "log.warn",
+ ERROR: "log.error",
+ DEBUG: "log.debug"
+ };
+ /** Default reconnection configuration */
+ const DEFAULT_BRIDGE_RECONNECTION_CONFIG = {
+ initialDelayMs: 1e3,
+ maxDelayMs: 3e4,
+ backoffMultiplier: 2,
+ jitterFactor: .3,
+ maxRetries: -1
+ };
+ /** Default invocation timeout in milliseconds */
+ const DEFAULT_INVOCATION_TIMEOUT_MS = 3e4;
+
+ //#endregion
+ //#region src/telemetry-system/types.ts
+ const ATTR_SERVICE_VERSION = "service.version";
+ const ATTR_SERVICE_NAMESPACE = "service.namespace";
+ const ATTR_SERVICE_INSTANCE_ID = "service.instance.id";
+ /** Magic prefixes for binary frames over WebSocket */
+ const PREFIX_TRACES = "OTLP";
+ const PREFIX_METRICS = "MTRC";
+ const PREFIX_LOGS = "LOGS";
+ /** Default reconnection configuration */
+ const DEFAULT_RECONNECTION_CONFIG = {
+ initialDelayMs: 1e3,
+ maxDelayMs: 3e4,
+ backoffMultiplier: 2,
+ jitterFactor: .3,
+ maxRetries: -1
+ };
+
+ //#endregion
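
The four-byte magic prefixes above let traces, metrics, and logs share one WebSocket: each binary frame starts with a signal tag followed by the OTLP JSON payload. A minimal sketch of how a receiver could dispatch on that tag (the handler functions are hypothetical, not part of this package):

```typescript
// Sketch: split a frame into its 4-byte signal prefix and OTLP JSON payload.
// The prefixes mirror PREFIX_TRACES / PREFIX_METRICS / PREFIX_LOGS above;
// handleTraces/handleMetrics/handleLogs are hypothetical placeholders.
declare function handleTraces(payload: Buffer): void;
declare function handleMetrics(payload: Buffer): void;
declare function handleLogs(payload: Buffer): void;

function routeFrame(frame: Buffer): void {
  const prefix = frame.subarray(0, 4).toString("utf-8");
  const payload = frame.subarray(4);
  switch (prefix) {
    case "OTLP": handleTraces(payload); break;
    case "MTRC": handleMetrics(payload); break;
    case "LOGS": handleLogs(payload); break;
    default: console.warn(`[OTel] Unknown signal prefix: ${prefix}`);
  }
}
```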
+ //#region src/telemetry-system/connection.ts
+ /**
+ * Shared WebSocket connection for OpenTelemetry exporters.
+ */
+ /**
+ * Shared WebSocket connection for all OTEL exporters (traces, metrics, logs).
+ * Uses a single connection with message prefixes to identify signal type.
+ */
+ var SharedEngineConnection = class SharedEngineConnection {
+ static {
+ this.MAX_PENDING_MESSAGES = 1e3;
+ }
+ constructor(wsUrl, config = {}) {
+ this.ws = null;
+ this.connecting = false;
+ this.shuttingDown = false;
+ this.pendingMessages = [];
+ this.reconnectAttempt = 0;
+ this.reconnectTimeout = null;
+ this.state = "disconnected";
+ this.onConnectedCallbacks = [];
+ this.wsUrl = wsUrl;
+ this.config = {
+ ...DEFAULT_RECONNECTION_CONFIG,
+ ...config
+ };
+ this.connect();
+ }
+ connect() {
+ if (this.connecting || this.ws && this.ws.readyState === WebSocket.OPEN) return;
+ this.connecting = true;
+ this.state = "connecting";
+ try {
+ this.ws = new WebSocket(this.wsUrl);
+ this.ws.on("open", () => {
+ this.connecting = false;
+ this.state = "connected";
+ console.log(`[OTel] Connected to engine at ${this.wsUrl}`);
+ if (this.reconnectAttempt > 0) console.log("[OTel] Successfully reconnected");
+ this.reconnectAttempt = 0;
+ if (this.reconnectTimeout) {
+ clearTimeout(this.reconnectTimeout);
+ this.reconnectTimeout = null;
+ }
+ const pending = this.pendingMessages.splice(0, this.pendingMessages.length);
+ for (const { frame, callback } of pending) this.ws?.send(frame, (err) => callback?.(err));
+ for (const cb of this.onConnectedCallbacks) cb();
+ });
+ this.ws.on("close", () => {
+ this.connecting = false;
+ this.ws = null;
+ if (this.shuttingDown) {
+ this.state = "disconnected";
+ console.log("[OTel] Connection closed during shutdown");
+ return;
+ }
+ this.state = "disconnected";
+ console.log("[OTel] Disconnected from engine, will reconnect...");
+ this.scheduleReconnect();
+ });
+ this.ws.on("error", (err) => {
+ this.connecting = false;
+ if (this.shuttingDown) return;
+ console.error("[OTel] WebSocket error:", err.message);
+ });
+ } catch (err) {
+ this.connecting = false;
+ console.error("[OTel] Connection failed:", err);
+ this.scheduleReconnect();
+ }
+ }
+ scheduleReconnect() {
+ if (this.config.maxRetries !== -1 && this.reconnectAttempt >= this.config.maxRetries) {
+ this.state = "failed";
+ console.error(`[OTel] Max retries (${this.config.maxRetries}) reached, giving up`);
+ const pending = this.pendingMessages.splice(0, this.pendingMessages.length);
+ const failedError = /* @__PURE__ */ new Error("Connection failed after max retries");
+ for (const { callback } of pending) callback?.(failedError);
+ return;
+ }
+ if (this.reconnectTimeout) return;
+ const exponentialDelay = this.config.initialDelayMs * this.config.backoffMultiplier ** this.reconnectAttempt;
+ const cappedDelay = Math.min(exponentialDelay, this.config.maxDelayMs);
+ const jitter = cappedDelay * this.config.jitterFactor * (2 * Math.random() - 1);
+ const delay = Math.max(0, Math.floor(cappedDelay + jitter));
+ this.state = "reconnecting";
+ console.log(`[OTel] Reconnecting in ${delay}ms (attempt ${this.reconnectAttempt + 1})...`);
+ this.reconnectTimeout = setTimeout(() => {
+ this.reconnectTimeout = null;
+ this.reconnectAttempt++;
+ this.connect();
+ }, delay);
+ }
+ /**
+ * Send a message with a signal prefix.
+ */
+ send(prefix, data, callback) {
+ const prefixBytes = Buffer.from(prefix, "utf-8");
+ const frame = Buffer.concat([prefixBytes, Buffer.from(data)]);
+ if (this.ws && this.ws.readyState === WebSocket.OPEN) this.ws.send(frame, callback);
+ else {
+ if (this.pendingMessages.length >= SharedEngineConnection.MAX_PENDING_MESSAGES) {
+ console.warn("[OTel] Pending message queue full, dropping oldest message");
+ this.pendingMessages.shift()?.callback?.(/* @__PURE__ */ new Error("Message dropped due to queue overflow"));
+ }
+ this.pendingMessages.push({
+ frame,
+ callback
+ });
+ this.connect();
+ }
+ }
+ /**
+ * Register a callback to be called when connected.
+ */
+ onConnected(callback) {
+ this.onConnectedCallbacks.push(callback);
+ if (this.state === "connected") callback();
+ }
+ /**
+ * Get the current connection state.
+ */
+ getState() {
+ return this.state;
+ }
+ /**
+ * Shutdown the connection.
+ */
+ async shutdown() {
+ this.shuttingDown = true;
+ if (this.reconnectTimeout) {
+ clearTimeout(this.reconnectTimeout);
+ this.reconnectTimeout = null;
+ }
+ if (this.ws) {
+ this.ws.close();
+ this.ws = null;
+ }
+ const pending = this.pendingMessages.splice(0, this.pendingMessages.length);
+ const shutdownError = /* @__PURE__ */ new Error("Connection shutdown before message could be sent");
+ for (const { callback } of pending) callback?.(shutdownError);
+ this.onConnectedCallbacks = [];
+ this.state = "disconnected";
+ }
+ };
+
+ //#endregion
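
The reconnect delay in `scheduleReconnect` above is exponential backoff with symmetric jitter: the base delay doubles per attempt, is capped at `maxDelayMs`, and then gets up to ±30% random jitter. The same arithmetic as a standalone sketch, using the default config values:

```typescript
// Same delay formula as scheduleReconnect, extracted for illustration.
const cfg = { initialDelayMs: 1000, maxDelayMs: 30000, backoffMultiplier: 2, jitterFactor: 0.3 };

function reconnectDelay(attempt: number): number {
  const exponential = cfg.initialDelayMs * cfg.backoffMultiplier ** attempt; // 1s, 2s, 4s, ...
  const capped = Math.min(exponential, cfg.maxDelayMs);                      // base never above 30s
  const jitter = capped * cfg.jitterFactor * (2 * Math.random() - 1);        // uniform in +/-30%
  return Math.max(0, Math.floor(capped + jitter));
}

// attempt 0 -> roughly 700-1300ms; attempt 5 and beyond -> 30s base with +/-9s jitter
```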
+ //#region src/telemetry-system/span-exporter.ts
+ /**
+ * Span exporter for the III Engine.
+ */
+ /**
+ * Span exporter using the shared WebSocket connection.
+ */
+ var EngineSpanExporter = class EngineSpanExporter {
+ static {
+ this.MAX_PENDING_EXPORTS = 100;
+ }
+ constructor(connection) {
+ this.pendingExports = [];
+ this.connection = connection;
+ this.connection.onConnected(() => this.flushPending());
+ }
+ flushPending() {
+ const pending = this.pendingExports.splice(0, this.pendingExports.length);
+ for (const { spans, resultCallback } of pending) this.sendExport(spans, resultCallback);
+ }
+ sendExport(spans, resultCallback) {
+ try {
+ const serialized = JsonTraceSerializer.serializeRequest(spans);
+ if (!serialized) {
+ resultCallback?.({ code: ExportResultCode.SUCCESS });
+ return;
+ }
+ this.connection.send(PREFIX_TRACES, serialized, (err) => {
+ if (err) {
+ console.error("[OTel] Failed to send spans:", err.message);
+ resultCallback?.({
+ code: ExportResultCode.FAILED,
+ error: err
+ });
+ } else resultCallback?.({ code: ExportResultCode.SUCCESS });
+ });
+ } catch (err) {
+ console.error("[OTel] Error exporting spans:", err);
+ resultCallback?.({
+ code: ExportResultCode.FAILED,
+ error: err
+ });
+ }
+ }
+ doExport(spans, resultCallback) {
+ if (this.connection.getState() !== "connected") {
+ if (this.pendingExports.length >= EngineSpanExporter.MAX_PENDING_EXPORTS) {
+ this.pendingExports.shift()?.resultCallback?.({
+ code: ExportResultCode.FAILED,
+ error: /* @__PURE__ */ new Error("Queue overflow")
+ });
+ console.warn("[OTel] Spans export queue full, dropped oldest entry");
+ }
+ this.pendingExports.push({
+ spans,
+ resultCallback
+ });
+ return;
+ }
+ this.sendExport(spans, resultCallback);
+ }
+ export(spans, resultCallback) {
+ this.doExport(spans, resultCallback);
+ }
+ async shutdown() {
+ const pending = this.pendingExports.splice(0, this.pendingExports.length);
+ const shutdownError = /* @__PURE__ */ new Error("Exporter shutdown before export completed");
+ for (const { resultCallback } of pending) resultCallback?.({
+ code: ExportResultCode.FAILED,
+ error: shutdownError
+ });
+ }
+ async forceFlush() {}
+ };
+
+ //#endregion
+ //#region src/telemetry-system/metrics-exporter.ts
+ /**
+ * Metrics exporter for the III Engine.
+ */
+ /**
+ * Metrics exporter using the shared WebSocket connection.
+ */
+ var EngineMetricsExporter = class EngineMetricsExporter {
+ static {
+ this.MAX_PENDING_EXPORTS = 100;
+ }
+ constructor(connection) {
+ this.pendingExports = [];
+ this.connection = connection;
+ this.connection.onConnected(() => this.flushPending());
+ }
+ flushPending() {
+ const pending = this.pendingExports.splice(0, this.pendingExports.length);
+ for (const { metrics: metrics$1, resultCallback } of pending) this.sendExport(metrics$1, resultCallback);
+ }
+ sendExport(metricsData, resultCallback) {
+ try {
+ const serialized = JsonMetricsSerializer.serializeRequest(metricsData);
+ if (!serialized) {
+ resultCallback?.({ code: ExportResultCode.SUCCESS });
+ return;
+ }
+ this.connection.send(PREFIX_METRICS, serialized, (err) => {
+ if (err) {
+ console.error("[OTel] Failed to send metrics:", err.message);
+ resultCallback?.({
+ code: ExportResultCode.FAILED,
+ error: err
+ });
+ } else resultCallback?.({ code: ExportResultCode.SUCCESS });
+ });
+ } catch (err) {
+ console.error("[OTel] Error exporting metrics:", err);
+ resultCallback?.({
+ code: ExportResultCode.FAILED,
+ error: err
+ });
+ }
+ }
+ doExport(metricsData, resultCallback) {
+ if (this.connection.getState() !== "connected") {
+ if (this.pendingExports.length >= EngineMetricsExporter.MAX_PENDING_EXPORTS) {
+ this.pendingExports.shift()?.resultCallback?.({
+ code: ExportResultCode.FAILED,
+ error: /* @__PURE__ */ new Error("Queue overflow")
+ });
+ console.warn("[OTel] Metrics export queue full, dropped oldest entry");
+ }
+ this.pendingExports.push({
+ metrics: metricsData,
+ resultCallback
+ });
+ return;
+ }
+ this.sendExport(metricsData, resultCallback);
+ }
+ export(metrics$1, resultCallback) {
+ this.doExport(metrics$1, resultCallback);
+ }
+ async shutdown() {
+ const pending = this.pendingExports.splice(0, this.pendingExports.length);
+ const shutdownError = /* @__PURE__ */ new Error("Exporter shutdown before export completed");
+ for (const { resultCallback } of pending) resultCallback?.({
+ code: ExportResultCode.FAILED,
+ error: shutdownError
+ });
+ }
+ async forceFlush() {}
+ };
+
+ //#endregion
+ //#region src/telemetry-system/log-exporter.ts
+ /**
+ * Log exporter for the III Engine.
+ */
+ /**
+ * Log exporter using the shared WebSocket connection.
+ */
+ var EngineLogExporter = class {
+ constructor(connection) {
+ this.pendingExports = [];
+ this.connection = connection;
+ this.connection.onConnected(() => this.flushPending());
+ }
+ flushPending() {
+ const pending = this.pendingExports.splice(0, this.pendingExports.length);
+ for (const { logs, callback } of pending) this.doExport(logs, callback);
+ }
+ doExport(logs, resultCallback) {
+ if (this.connection.getState() !== "connected") {
+ this.pendingExports.push({
+ logs,
+ callback: resultCallback
+ });
+ return;
+ }
+ try {
+ const serialized = JsonLogsSerializer.serializeRequest(logs);
+ if (!serialized) {
+ resultCallback({ code: ExportResultCode.SUCCESS });
+ return;
+ }
+ this.connection.send(PREFIX_LOGS, serialized, (err) => {
+ if (err) {
+ console.error("[OTel] Failed to send logs:", err.message);
+ resultCallback({
+ code: ExportResultCode.FAILED,
+ error: err
+ });
+ } else resultCallback({ code: ExportResultCode.SUCCESS });
+ });
+ } catch (err) {
+ console.error("[OTel] Error exporting logs:", err);
+ resultCallback({
+ code: ExportResultCode.FAILED,
+ error: err
+ });
+ }
+ }
+ export(logs, resultCallback) {
+ this.doExport(logs, resultCallback);
+ }
+ async shutdown() {
+ for (const { callback } of this.pendingExports) callback({
+ code: ExportResultCode.FAILED,
+ error: /* @__PURE__ */ new Error("Exporter shutdown")
+ });
+ this.pendingExports = [];
+ }
+ };
+
+ //#endregion
+ //#region src/telemetry-system/context.ts
+ /**
+ * Trace context and baggage propagation utilities.
+ */
+ /**
+ * Extract the current trace ID from the active span context.
+ */
+ function currentTraceId() {
+ const span = trace.getActiveSpan();
+ if (span) {
+ const spanContext = span.spanContext();
+ if (spanContext.traceId && spanContext.traceId !== "00000000000000000000000000000000") return spanContext.traceId;
+ }
+ }
+ /**
+ * Extract the current span ID from the active span context.
+ */
+ function currentSpanId() {
+ const span = trace.getActiveSpan();
+ if (span) {
+ const spanContext = span.spanContext();
+ if (spanContext.spanId && spanContext.spanId !== "0000000000000000") return spanContext.spanId;
+ }
+ }
+ /**
+ * Inject the current trace context into a W3C traceparent header string.
+ */
+ function injectTraceparent() {
+ const carrier = {};
+ propagation.inject(context.active(), carrier);
+ return carrier["traceparent"];
+ }
+ /**
+ * Extract a trace context from a W3C traceparent header string.
+ */
+ function extractTraceparent(traceparent) {
+ const carrier = { traceparent };
+ return propagation.extract(context.active(), carrier);
+ }
+ /**
+ * Inject the current baggage into a W3C baggage header string.
+ */
+ function injectBaggage() {
+ const carrier = {};
+ propagation.inject(context.active(), carrier);
+ return carrier["baggage"];
+ }
+ /**
+ * Extract baggage from a W3C baggage header string.
+ */
+ function extractBaggage(baggage) {
+ const carrier = { baggage };
+ return propagation.extract(context.active(), carrier);
+ }
+ /**
+ * Extract both trace context and baggage from their respective headers.
+ */
+ function extractContext(traceparent, baggage) {
+ const carrier = {};
+ if (traceparent) carrier["traceparent"] = traceparent;
+ if (baggage) carrier["baggage"] = baggage;
+ return propagation.extract(context.active(), carrier);
+ }
+ /**
+ * Get a baggage entry from the current context.
+ */
+ function getBaggageEntry(key) {
+ return propagation.getBaggage(context.active())?.getEntry(key)?.value;
+ }
+ /**
+ * Set a baggage entry in the current context.
+ */
+ function setBaggageEntry(key, value) {
+ let bag = propagation.getBaggage(context.active()) ?? propagation.createBaggage();
+ bag = bag.setEntry(key, { value });
+ return propagation.setBaggage(context.active(), bag);
+ }
+ /**
+ * Remove a baggage entry from the current context.
+ */
+ function removeBaggageEntry(key) {
+ const bag = propagation.getBaggage(context.active());
+ if (!bag) return context.active();
+ const newBag = bag.removeEntry(key);
+ return propagation.setBaggage(context.active(), newBag);
+ }
+ /**
+ * Get all baggage entries from the current context.
+ */
+ function getAllBaggage() {
+ const bag = propagation.getBaggage(context.active());
+ if (!bag) return {};
+ const entries = {};
+ for (const [key, entry] of bag.getAllEntries()) entries[key] = entry.value;
+ return entries;
+ }
+
+ //#endregion
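
Together these helpers serialize the active trace context and baggage into W3C header strings on one side of an RPC boundary and rebuild them on the other. A minimal sketch of that round trip (assuming the helpers above are imported from this package):

```typescript
import { context, trace } from "@opentelemetry/api";

// Caller side: capture the active context as W3C header strings.
const traceparent = injectTraceparent(); // e.g. "00-<traceId>-<spanId>-01"
const baggage = injectBaggage();

// Callee side: restore the context and run work inside it.
const restored = extractContext(traceparent, baggage);
context.with(restored, () => {
  // Spans started here become children of the remote caller's span.
  const span = trace.getTracer("example").startSpan("handle-request");
  span.end();
});
```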
+ //#region src/telemetry-system/index.ts
+ /**
+ * OpenTelemetry initialization for the III Node SDK.
+ *
+ * This module provides trace, metrics, and log export to the III Engine
+ * via a shared WebSocket connection using OTLP JSON format.
+ */
+ let sharedConnection = null;
+ let tracerProvider = null;
+ let meterProvider = null;
+ let loggerProvider = null;
+ let tracer = null;
+ let meter = null;
+ let logger = null;
+ let serviceName = "iii-node-iii";
+ /**
+ * Initialize OpenTelemetry with the given configuration.
+ * This should be called once at application startup.
+ */
+ function initOtel(config = {}) {
+ if (!(config.enabled ?? (process.env.OTEL_ENABLED === "true" || process.env.OTEL_ENABLED === "1"))) {
+ console.log("[OTel] OpenTelemetry is disabled");
+ return;
+ }
+ if (config.instrumentations?.length) registerInstrumentations({ instrumentations: config.instrumentations });
+ serviceName = config.serviceName ?? process.env.OTEL_SERVICE_NAME ?? "iii-node-iii";
+ const serviceVersion = config.serviceVersion ?? process.env.SERVICE_VERSION ?? "unknown";
+ const serviceNamespace = config.serviceNamespace ?? process.env.SERVICE_NAMESPACE;
+ const serviceInstanceId = config.serviceInstanceId ?? process.env.SERVICE_INSTANCE_ID ?? randomUUID();
+ const engineWsUrl = config.engineWsUrl ?? process.env.III_BRIDGE_URL ?? "ws://localhost:49134";
+ const resourceAttributes = {
+ [ATTR_SERVICE_NAME]: serviceName,
+ [ATTR_SERVICE_VERSION]: serviceVersion,
+ [ATTR_SERVICE_INSTANCE_ID]: serviceInstanceId
+ };
+ if (serviceNamespace) resourceAttributes[ATTR_SERVICE_NAMESPACE] = serviceNamespace;
+ const resource = new Resource(resourceAttributes);
+ sharedConnection = new SharedEngineConnection(engineWsUrl, config.reconnectionConfig);
+ tracerProvider = new NodeTracerProvider({
+ resource,
+ spanProcessors: [new SimpleSpanProcessor(new EngineSpanExporter(sharedConnection))]
+ });
+ propagation.setGlobalPropagator(new CompositePropagator({ propagators: [new W3CTraceContextPropagator(), new W3CBaggagePropagator()] }));
+ tracerProvider.register();
+ tracer = trace.getTracer(serviceName);
+ console.log(`[OTel] Traces initialized: engine=${engineWsUrl}, service=${serviceName}`);
+ if (config.metricsEnabled ?? (process.env.OTEL_METRICS_ENABLED === "true" || process.env.OTEL_METRICS_ENABLED === "1")) {
+ const metricsExporter = new EngineMetricsExporter(sharedConnection);
+ const exportIntervalMs = config.metricsExportIntervalMs ?? 6e4;
+ meterProvider = new MeterProvider({
+ resource,
+ readers: [new PeriodicExportingMetricReader({
+ exporter: metricsExporter,
+ exportIntervalMillis: exportIntervalMs
+ })]
+ });
+ metrics.setGlobalMeterProvider(meterProvider);
+ meter = meterProvider.getMeter(serviceName);
+ console.log(`[OTel] Metrics initialized: interval=${exportIntervalMs}ms`);
+ }
+ const logExporter = new EngineLogExporter(sharedConnection);
+ loggerProvider = new LoggerProvider({ resource });
+ loggerProvider.addLogRecordProcessor(new SimpleLogRecordProcessor(logExporter));
+ logger = loggerProvider.getLogger(serviceName);
+ console.log("[OTel] Logs initialized");
+ }
+ /**
+ * Shutdown OpenTelemetry, flushing any pending data.
+ */
+ async function shutdownOtel() {
+ if (tracerProvider) {
+ await tracerProvider.shutdown();
+ tracerProvider = null;
+ }
+ if (meterProvider) {
+ await meterProvider.shutdown();
+ meterProvider = null;
+ }
+ if (loggerProvider) {
+ await loggerProvider.shutdown();
+ loggerProvider = null;
+ }
+ if (sharedConnection) {
+ await sharedConnection.shutdown();
+ sharedConnection = null;
+ }
+ tracer = null;
+ meter = null;
+ logger = null;
+ }
+ /**
+ * Get the OpenTelemetry tracer instance.
+ */
+ function getTracer() {
+ return tracer;
+ }
+ /**
+ * Get the OpenTelemetry meter instance.
+ */
+ function getMeter() {
+ return meter;
+ }
+ /**
+ * Get the OpenTelemetry logger instance.
+ */
+ function getLogger() {
+ return logger;
+ }
+ /**
+ * Start a new span with the given name and run the callback within it.
+ */
+ async function withSpan(name, options, fn) {
+ if (!tracer) {
+ const noopSpan = {
+ spanContext: () => ({
+ traceId: "",
+ spanId: "",
+ traceFlags: 0
+ }),
+ setAttribute: () => noopSpan,
+ setAttributes: () => noopSpan,
+ addEvent: () => noopSpan,
+ addLink: () => noopSpan,
+ setStatus: () => noopSpan,
+ updateName: () => noopSpan,
+ end: () => {},
+ isRecording: () => false,
+ recordException: () => {},
+ addLinks: () => noopSpan
+ };
+ return fn(noopSpan);
+ }
+ const parentContext = options.traceparent ? extractTraceparent(options.traceparent) : context.active();
+ return tracer.startActiveSpan(name, { kind: options.kind ?? SpanKind.INTERNAL }, parentContext, async (span) => {
+ try {
+ const result = await fn(span);
+ span.setStatus({ code: SpanStatusCode.OK });
+ return result;
+ } catch (error) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: error.message
+ });
+ span.recordException(error);
+ throw error;
+ } finally {
+ span.end();
+ }
+ });
+ }
+
+ //#endregion
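
The intended lifecycle for this region is: initialize once at startup, wrap units of work in `withSpan`, and flush everything on shutdown. A sketch of that call pattern (option names taken from `initOtel` above; the URL and names are illustrative):

```typescript
// Initialize once at startup; unset options fall back to environment variables.
initOtel({
  enabled: true,                        // or OTEL_ENABLED=true
  serviceName: "my-worker",             // or OTEL_SERVICE_NAME
  engineWsUrl: "ws://localhost:49134",  // or III_BRIDGE_URL
  metricsEnabled: true,                 // or OTEL_METRICS_ENABLED=true
  metricsExportIntervalMs: 60000,
});

// Wrap work in a span; errors are recorded on the span and re-thrown.
await withSpan("process-job", { kind: SpanKind.INTERNAL }, async (span) => {
  span.setAttribute("job.id", "example");
});

// Flush exporters and close the shared WebSocket.
await shutdownOtel();
```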
+ //#region src/utils.ts
+ /**
+ * Safely stringify a value, handling circular references, BigInt, and other edge cases.
+ * Returns "[unserializable]" if serialization fails for any reason.
+ */
+ function safeStringify(value) {
+ const seen = /* @__PURE__ */ new WeakSet();
+ try {
+ return JSON.stringify(value, (_key, val) => {
+ if (typeof val === "bigint") return val.toString();
+ if (val !== null && typeof val === "object") {
+ if (seen.has(val)) return "[Circular]";
+ seen.add(val);
+ }
+ return val;
+ });
+ } catch {
+ return "[unserializable]";
+ }
+ }
+
+ //#endregion
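
For example, `safeStringify` degrades gracefully on inputs where plain `JSON.stringify` would throw:

```typescript
const obj: Record<string, unknown> = { id: 1n }; // BigInt makes JSON.stringify throw
obj.self = obj;                                  // so does a circular reference

console.log(safeStringify(obj)); // '{"id":"1","self":"[Circular]"}'
```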
+ //#region src/worker-metrics.ts
+ /**
+ * Worker metrics collection for the III Node SDK.
+ *
+ * Collects CPU, memory, and event loop metrics for worker health monitoring.
+ * Uses the Node.js built-in `monitorEventLoopDelay` API for accurate
+ * event loop lag measurements.
+ */
+ /**
+ * Collects worker resource metrics including CPU, memory, and event loop lag.
+ *
+ * Uses the Node.js `monitorEventLoopDelay` API for high-precision event loop
+ * delay measurements instead of manual `setImmediate` timing.
+ *
+ * @example
+ * ```typescript
+ * const collector = new WorkerMetricsCollector()
+ *
+ * // Collect metrics periodically
+ * setInterval(() => {
+ * const metrics = collector.collect()
+ * console.log('CPU:', metrics.cpu_percent, '%')
+ * console.log('Event Loop Lag:', metrics.event_loop_lag_ms, 'ms')
+ * }, 5000)
+ *
+ * // Clean up when done
+ * collector.stopMonitoring()
+ * ```
+ */
+ var WorkerMetricsCollector = class {
+ /**
+ * Creates a new WorkerMetricsCollector instance.
+ *
+ * @param options - Configuration options
+ */
+ constructor(options = {}) {
+ this.eventLoopHistogram = null;
+ this.startTime = Date.now();
+ this.lastCpuUsage = process.cpuUsage();
+ this.lastCpuTime = performance.now();
+ this.startEventLoopMonitoring(options.eventLoopResolutionMs ?? 20);
+ }
+ /**
+ * Starts the event loop delay histogram monitoring.
+ *
+ * @param resolutionMs - Histogram resolution in milliseconds
+ */
+ startEventLoopMonitoring(resolutionMs) {
+ this.eventLoopHistogram = monitorEventLoopDelay({ resolution: Number.isFinite(resolutionMs) && resolutionMs > 0 ? Math.max(1, Math.floor(resolutionMs)) : 20 });
+ this.eventLoopHistogram.enable();
+ }
+ /**
+ * Stops the event loop monitoring and releases resources.
+ * Should be called when the collector is no longer needed.
+ */
+ stopMonitoring() {
+ if (this.eventLoopHistogram) {
+ this.eventLoopHistogram.disable();
+ this.eventLoopHistogram = null;
+ }
+ }
+ /**
+ * Collects current worker metrics.
+ *
+ * This method calculates CPU usage since the last collection,
+ * reads memory usage, and gets event loop delay statistics.
+ * The event loop histogram is reset after each collection for
+ * accurate per-interval measurements.
+ *
+ * @returns Current worker metrics snapshot
+ */
+ collect() {
+ const memoryUsage = process.memoryUsage();
+ const cpuUsage = process.cpuUsage();
+ const now = performance.now();
+ const cpuDelta = {
+ user: cpuUsage.user - this.lastCpuUsage.user,
+ system: cpuUsage.system - this.lastCpuUsage.system
+ };
+ const timeDelta = (now - this.lastCpuTime) * 1e3;
+ const cpuPercent = timeDelta > 0 ? (cpuDelta.user + cpuDelta.system) / timeDelta * 100 : 0;
+ this.lastCpuUsage = cpuUsage;
+ this.lastCpuTime = now;
+ let eventLoopLagMs = 0;
+ if (this.eventLoopHistogram) {
+ eventLoopLagMs = this.eventLoopHistogram.mean / 1e6;
+ this.eventLoopHistogram.reset();
+ }
+ return {
+ memory_heap_used: memoryUsage.heapUsed,
+ memory_heap_total: memoryUsage.heapTotal,
+ memory_rss: memoryUsage.rss,
+ memory_external: memoryUsage.external,
+ cpu_user_micros: cpuUsage.user,
+ cpu_system_micros: cpuUsage.system,
+ cpu_percent: Math.min(cpuPercent, 100),
+ event_loop_lag_ms: eventLoopLagMs,
+ uptime_seconds: Math.floor((Date.now() - this.startTime) / 1e3),
+ timestamp_ms: Date.now(),
+ runtime: "node"
+ };
+ }
+ };
+
+ //#endregion
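
The `cpu_percent` value in `collect` is process CPU time divided by wall-clock time over the collection interval, both in microseconds. A worked example of the arithmetic:

```typescript
// process.cpuUsage() deltas are in microseconds; performance.now() is in ms,
// so collect() multiplies the wall-clock delta by 1e3 before dividing.
const cpuDeltaMicros = 150_000 + 50_000;       // user + system since last collect
const wallDeltaMicros = 1_000 /* ms */ * 1e3;  // a 1-second interval
const cpuPercent = (cpuDeltaMicros / wallDeltaMicros) * 100; // 20 (%)
```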
+ //#region src/otel-worker-gauges.ts
+ let registeredGauges = false;
+ let metricsCollector = null;
+ let registeredMeter = null;
+ let registeredBatchCallback = null;
+ let registeredObservables = [];
+ function registerWorkerGauges(meter$1, options) {
+ if (registeredGauges) return;
+ const { workerId, workerName } = options;
+ const baseAttributes = {
+ "worker.id": workerId,
+ ...workerName && { "worker.name": workerName }
+ };
+ metricsCollector = new WorkerMetricsCollector();
+ const memoryHeapUsed = meter$1.createObservableGauge("iii.worker.memory.heap_used", {
+ description: "Worker heap memory used in bytes",
+ unit: "bytes"
+ });
+ const memoryHeapTotal = meter$1.createObservableGauge("iii.worker.memory.heap_total", {
+ description: "Worker total heap memory in bytes",
+ unit: "bytes"
+ });
+ const memoryRss = meter$1.createObservableGauge("iii.worker.memory.rss", {
+ description: "Worker resident set size in bytes",
+ unit: "bytes"
+ });
+ const memoryExternal = meter$1.createObservableGauge("iii.worker.memory.external", {
+ description: "Worker external memory in bytes",
+ unit: "bytes"
+ });
+ const cpuPercent = meter$1.createObservableGauge("iii.worker.cpu.percent", {
+ description: "Worker CPU usage percentage",
+ unit: "%"
+ });
+ const cpuUserMicros = meter$1.createObservableGauge("iii.worker.cpu.user_micros", {
+ description: "Worker CPU user time in microseconds",
+ unit: "us"
+ });
+ const cpuSystemMicros = meter$1.createObservableGauge("iii.worker.cpu.system_micros", {
+ description: "Worker CPU system time in microseconds",
+ unit: "us"
+ });
+ const eventLoopLag = meter$1.createObservableGauge("iii.worker.event_loop.lag_ms", {
+ description: "Worker event loop lag in milliseconds",
+ unit: "ms"
+ });
+ const uptimeSeconds = meter$1.createObservableGauge("iii.worker.uptime_seconds", {
+ description: "Worker uptime in seconds",
+ unit: "s"
+ });
+ const batchCallback = (observableResult) => {
+ if (!metricsCollector) return;
+ const metrics$1 = metricsCollector.collect();
+ if (metrics$1.memory_heap_used !== void 0) observableResult.observe(memoryHeapUsed, metrics$1.memory_heap_used, baseAttributes);
+ if (metrics$1.memory_heap_total !== void 0) observableResult.observe(memoryHeapTotal, metrics$1.memory_heap_total, baseAttributes);
+ if (metrics$1.memory_rss !== void 0) observableResult.observe(memoryRss, metrics$1.memory_rss, baseAttributes);
+ if (metrics$1.memory_external !== void 0) observableResult.observe(memoryExternal, metrics$1.memory_external, baseAttributes);
+ if (metrics$1.cpu_percent !== void 0) observableResult.observe(cpuPercent, metrics$1.cpu_percent, baseAttributes);
+ if (metrics$1.cpu_user_micros !== void 0) observableResult.observe(cpuUserMicros, metrics$1.cpu_user_micros, baseAttributes);
+ if (metrics$1.cpu_system_micros !== void 0) observableResult.observe(cpuSystemMicros, metrics$1.cpu_system_micros, baseAttributes);
+ if (metrics$1.event_loop_lag_ms !== void 0) observableResult.observe(eventLoopLag, metrics$1.event_loop_lag_ms, baseAttributes);
+ if (metrics$1.uptime_seconds !== void 0) observableResult.observe(uptimeSeconds, metrics$1.uptime_seconds, baseAttributes);
+ };
+ meter$1.addBatchObservableCallback(batchCallback, [
+ memoryHeapUsed,
+ memoryHeapTotal,
+ memoryRss,
+ memoryExternal,
+ cpuPercent,
+ cpuUserMicros,
+ cpuSystemMicros,
+ eventLoopLag,
+ uptimeSeconds
+ ]);
+ registeredMeter = meter$1;
+ registeredBatchCallback = batchCallback;
+ registeredObservables = [
+ memoryHeapUsed,
+ memoryHeapTotal,
+ memoryRss,
+ memoryExternal,
+ cpuPercent,
+ cpuUserMicros,
+ cpuSystemMicros,
+ eventLoopLag,
+ uptimeSeconds
+ ];
+ registeredGauges = true;
+ }
+ function stopWorkerGauges() {
+ if (registeredMeter && registeredBatchCallback) registeredMeter.removeBatchObservableCallback(registeredBatchCallback, registeredObservables);
+ if (metricsCollector) {
+ metricsCollector.stopMonitoring();
+ metricsCollector = null;
+ }
+ registeredMeter = null;
+ registeredBatchCallback = null;
+ registeredObservables = [];
+ registeredGauges = false;
+ }
+
+ //#endregion
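
These gauges are meant to be registered against a meter once metrics are initialized, and torn down on shutdown. A sketch of the expected wiring (the worker id and name are illustrative):

```typescript
// After initOtel() with metrics enabled, attach the worker gauges.
const meter = getMeter();
if (meter) {
  registerWorkerGauges(meter, { workerId: "worker-1", workerName: "example" });
}

// On shutdown: remove the batch callback and stop the event-loop histogram.
stopWorkerGauges();
```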
+ export { removeBaggageEntry as C, EngineFunctions as D, DEFAULT_INVOCATION_TIMEOUT_MS as E, EngineTriggers as O, injectTraceparent as S, DEFAULT_BRIDGE_RECONNECTION_CONFIG as T, extractContext as _, SeverityNumber$1 as a, getBaggageEntry as b, getLogger as c, initOtel as d, shutdownOtel as f, extractBaggage as g, currentTraceId as h, safeStringify as i, LogFunctions as k, getMeter as l, currentSpanId as m, stopWorkerGauges as n, SpanKind as o, withSpan as p, WorkerMetricsCollector as r, SpanStatusCode as s, registerWorkerGauges as t, getTracer as u, extractTraceparent as v, setBaggageEntry as w, injectBaggage as x, getAllBaggage as y };
+ //# sourceMappingURL=otel-worker-gauges-ELciXZRg.mjs.map