@lssm/lib.observability 0.0.0-canary-20251217063201 → 0.0.0-canary-20251217072406
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +2 -2
- package/dist/anomaly/alert-manager.mjs +22 -1
- package/dist/anomaly/anomaly-detector.mjs +57 -1
- package/dist/anomaly/baseline-calculator.mjs +36 -1
- package/dist/anomaly/root-cause-analyzer.mjs +26 -1
- package/dist/index.mjs +14 -1
- package/dist/intent/aggregator.mjs +95 -1
- package/dist/intent/detector.mjs +121 -1
- package/dist/lifecycle/dist/index.mjs +4 -1
- package/dist/lifecycle/dist/types/milestones.mjs +1 -1
- package/dist/lifecycle/dist/types/signals.mjs +1 -1
- package/dist/lifecycle/dist/types/stages.mjs +151 -1
- package/dist/lifecycle/dist/utils/formatters.mjs +7 -1
- package/dist/logging/index.mjs +39 -1
- package/dist/metrics/index.mjs +25 -1
- package/dist/pipeline/evolution-pipeline.mjs +65 -1
- package/dist/pipeline/lifecycle-pipeline.mjs +73 -1
- package/dist/tracing/index.mjs +46 -1
- package/dist/tracing/middleware.mjs +79 -1
- package/package.json +4 -2
- package/dist/lifecycle/dist/types/axes.mjs +0 -1
package/CHANGELOG.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# @lssm/lib.observability
|
|
2
2
|
|
|
3
|
-
## 0.0.0-canary-
|
|
3
|
+
## 0.0.0-canary-20251217072406
|
|
4
4
|
|
|
5
5
|
### Minor Changes
|
|
6
6
|
|
|
@@ -9,7 +9,7 @@
|
|
|
9
9
|
### Patch Changes
|
|
10
10
|
|
|
11
11
|
- Updated dependencies [66a5dfd]
|
|
12
|
-
- @lssm/lib.lifecycle@0.0.0-canary-
|
|
12
|
+
- @lssm/lib.lifecycle@0.0.0-canary-20251217072406
|
|
13
13
|
|
|
14
14
|
## 0.5.0
|
|
15
15
|
|
|
@@ -1 +1,22 @@
|
|
|
1
|
-
|
|
1
|
+
//#region src/anomaly/alert-manager.ts
|
|
2
|
+
var AlertManager = class {
|
|
3
|
+
cooldownMs;
|
|
4
|
+
lastAlert = /* @__PURE__ */ new Map();
|
|
5
|
+
constructor(options) {
|
|
6
|
+
this.options = options;
|
|
7
|
+
this.cooldownMs = options.cooldownMs ?? 6e4;
|
|
8
|
+
}
|
|
9
|
+
async notify(signal, analysis) {
|
|
10
|
+
const key = `${signal.type}:${analysis.culprit?.id ?? "none"}`;
|
|
11
|
+
const now = Date.now();
|
|
12
|
+
if (now - (this.lastAlert.get(key) ?? 0) < this.cooldownMs) return;
|
|
13
|
+
await this.options.transport({
|
|
14
|
+
signal,
|
|
15
|
+
analysis
|
|
16
|
+
});
|
|
17
|
+
this.lastAlert.set(key, now);
|
|
18
|
+
}
|
|
19
|
+
};
|
|
20
|
+
|
|
21
|
+
//#endregion
|
|
22
|
+
export { AlertManager };
|
|
@@ -1 +1,57 @@
|
|
|
1
|
-
import{BaselineCalculator
|
|
1
|
+
import { BaselineCalculator } from "./baseline-calculator.mjs";
|
|
2
|
+
|
|
3
|
+
//#region src/anomaly/anomaly-detector.ts
|
|
4
|
+
var AnomalyDetector = class {
|
|
5
|
+
baseline;
|
|
6
|
+
thresholds = {
|
|
7
|
+
errorRateDelta: .5,
|
|
8
|
+
latencyDelta: .35,
|
|
9
|
+
throughputDrop: .4,
|
|
10
|
+
minSamples: 10
|
|
11
|
+
};
|
|
12
|
+
constructor(options = {}) {
|
|
13
|
+
this.baseline = new BaselineCalculator();
|
|
14
|
+
this.thresholds = {
|
|
15
|
+
...this.thresholds,
|
|
16
|
+
...options
|
|
17
|
+
};
|
|
18
|
+
}
|
|
19
|
+
evaluate(point) {
|
|
20
|
+
const baselineSnapshot = this.baseline.update(point);
|
|
21
|
+
if (baselineSnapshot.sampleCount < this.thresholds.minSamples) return [];
|
|
22
|
+
const signals = [];
|
|
23
|
+
const errorDelta = this.relativeDelta(point.errorRate, baselineSnapshot.errorRate);
|
|
24
|
+
if (errorDelta > this.thresholds.errorRateDelta) signals.push({
|
|
25
|
+
type: "error_rate_spike",
|
|
26
|
+
delta: errorDelta,
|
|
27
|
+
point,
|
|
28
|
+
baseline: baselineSnapshot
|
|
29
|
+
});
|
|
30
|
+
const latencyDelta = this.relativeDelta(point.latencyP99, baselineSnapshot.latencyP99);
|
|
31
|
+
if (latencyDelta > this.thresholds.latencyDelta) signals.push({
|
|
32
|
+
type: "latency_regression",
|
|
33
|
+
delta: latencyDelta,
|
|
34
|
+
point,
|
|
35
|
+
baseline: baselineSnapshot
|
|
36
|
+
});
|
|
37
|
+
const throughputDelta = this.relativeDrop(point.throughput, baselineSnapshot.throughput);
|
|
38
|
+
if (throughputDelta > this.thresholds.throughputDrop) signals.push({
|
|
39
|
+
type: "throughput_drop",
|
|
40
|
+
delta: throughputDelta,
|
|
41
|
+
point,
|
|
42
|
+
baseline: baselineSnapshot
|
|
43
|
+
});
|
|
44
|
+
return signals;
|
|
45
|
+
}
|
|
46
|
+
relativeDelta(value, baseline) {
|
|
47
|
+
if (baseline === 0) return 0;
|
|
48
|
+
return (value - baseline) / baseline;
|
|
49
|
+
}
|
|
50
|
+
relativeDrop(value, baseline) {
|
|
51
|
+
if (baseline === 0) return 0;
|
|
52
|
+
return (baseline - value) / baseline;
|
|
53
|
+
}
|
|
54
|
+
};
|
|
55
|
+
|
|
56
|
+
//#endregion
|
|
57
|
+
export { AnomalyDetector };
|
|
@@ -1 +1,36 @@
|
|
|
1
|
-
|
|
1
|
+
//#region src/anomaly/baseline-calculator.ts
|
|
2
|
+
var BaselineCalculator = class {
|
|
3
|
+
snapshot = {
|
|
4
|
+
latencyP99: 0,
|
|
5
|
+
latencyP95: 0,
|
|
6
|
+
errorRate: 0,
|
|
7
|
+
throughput: 0,
|
|
8
|
+
sampleCount: 0
|
|
9
|
+
};
|
|
10
|
+
constructor(alpha = .2) {
|
|
11
|
+
this.alpha = alpha;
|
|
12
|
+
}
|
|
13
|
+
update(point) {
|
|
14
|
+
const { sampleCount } = this.snapshot;
|
|
15
|
+
const nextCount = sampleCount + 1;
|
|
16
|
+
const weight = sampleCount === 0 ? 1 : this.alpha;
|
|
17
|
+
this.snapshot = {
|
|
18
|
+
latencyP99: this.mix(this.snapshot.latencyP99, point.latencyP99, weight),
|
|
19
|
+
latencyP95: this.mix(this.snapshot.latencyP95, point.latencyP95, weight),
|
|
20
|
+
errorRate: this.mix(this.snapshot.errorRate, point.errorRate, weight),
|
|
21
|
+
throughput: this.mix(this.snapshot.throughput, point.throughput, weight),
|
|
22
|
+
sampleCount: nextCount
|
|
23
|
+
};
|
|
24
|
+
return this.snapshot;
|
|
25
|
+
}
|
|
26
|
+
getSnapshot() {
|
|
27
|
+
return this.snapshot;
|
|
28
|
+
}
|
|
29
|
+
mix(current, next, weight) {
|
|
30
|
+
if (this.snapshot.sampleCount === 0) return next;
|
|
31
|
+
return current * (1 - weight) + next * weight;
|
|
32
|
+
}
|
|
33
|
+
};
|
|
34
|
+
|
|
35
|
+
//#endregion
|
|
36
|
+
export { BaselineCalculator };
|
|
@@ -1 +1,26 @@
|
|
|
1
|
-
|
|
1
|
+
//#region src/anomaly/root-cause-analyzer.ts
|
|
2
|
+
var RootCauseAnalyzer = class {
|
|
3
|
+
constructor(lookbackMs = 900 * 1e3) {
|
|
4
|
+
this.lookbackMs = lookbackMs;
|
|
5
|
+
}
|
|
6
|
+
analyze(signal, deployments) {
|
|
7
|
+
const windowStart = new Date(signal.point.timestamp.getTime() - this.lookbackMs);
|
|
8
|
+
const candidates = deployments.filter((deployment) => deployment.deployedAt >= windowStart).sort((a, b) => b.deployedAt.getTime() - a.deployedAt.getTime());
|
|
9
|
+
const notes = [];
|
|
10
|
+
let culprit;
|
|
11
|
+
if (candidates.length > 0) {
|
|
12
|
+
culprit = candidates[0];
|
|
13
|
+
notes.push(`Closest deployment ${culprit.id} (${culprit.operation}) at ${culprit.deployedAt.toISOString()}`);
|
|
14
|
+
} else notes.push("No deployments found within lookback window.");
|
|
15
|
+
if (signal.type === "latency_regression") notes.push("Verify recent schema changes and external dependency latency.");
|
|
16
|
+
if (signal.type === "error_rate_spike") notes.push("Check SLO monitor for correlated incidents.");
|
|
17
|
+
return {
|
|
18
|
+
signal,
|
|
19
|
+
culprit,
|
|
20
|
+
notes
|
|
21
|
+
};
|
|
22
|
+
}
|
|
23
|
+
};
|
|
24
|
+
|
|
25
|
+
//#endregion
|
|
26
|
+
export { RootCauseAnalyzer };
|
package/dist/index.mjs
CHANGED
|
@@ -1 +1,14 @@
|
|
|
1
|
-
import
|
|
1
|
+
import { getTracer, traceAsync, traceSync } from "./tracing/index.mjs";
|
|
2
|
+
import { createCounter, createHistogram, createUpDownCounter, getMeter, standardMetrics } from "./metrics/index.mjs";
|
|
3
|
+
import { Logger, logger } from "./logging/index.mjs";
|
|
4
|
+
import { createTracingMiddleware } from "./tracing/middleware.mjs";
|
|
5
|
+
import { IntentAggregator } from "./intent/aggregator.mjs";
|
|
6
|
+
import { IntentDetector } from "./intent/detector.mjs";
|
|
7
|
+
import { EvolutionPipeline } from "./pipeline/evolution-pipeline.mjs";
|
|
8
|
+
import { LifecycleKpiPipeline } from "./pipeline/lifecycle-pipeline.mjs";
|
|
9
|
+
import { BaselineCalculator } from "./anomaly/baseline-calculator.mjs";
|
|
10
|
+
import { AnomalyDetector } from "./anomaly/anomaly-detector.mjs";
|
|
11
|
+
import { RootCauseAnalyzer } from "./anomaly/root-cause-analyzer.mjs";
|
|
12
|
+
import { AlertManager } from "./anomaly/alert-manager.mjs";
|
|
13
|
+
|
|
14
|
+
export { AlertManager, AnomalyDetector, BaselineCalculator, EvolutionPipeline, IntentAggregator, IntentDetector, LifecycleKpiPipeline, Logger, RootCauseAnalyzer, createCounter, createHistogram, createTracingMiddleware, createUpDownCounter, getMeter, getTracer, logger, standardMetrics, traceAsync, traceSync };
|
|
@@ -1 +1,95 @@
|
|
|
1
|
-
|
|
1
|
+
//#region src/intent/aggregator.ts
|
|
2
|
+
const DEFAULT_WINDOW_MS = 900 * 1e3;
|
|
3
|
+
var IntentAggregator = class {
|
|
4
|
+
windowMs;
|
|
5
|
+
sequenceSampleSize;
|
|
6
|
+
samples = [];
|
|
7
|
+
constructor(options = {}) {
|
|
8
|
+
this.windowMs = options.windowMs ?? DEFAULT_WINDOW_MS;
|
|
9
|
+
this.sequenceSampleSize = options.sequenceSampleSize ?? 1e3;
|
|
10
|
+
}
|
|
11
|
+
add(sample) {
|
|
12
|
+
this.samples.push(sample);
|
|
13
|
+
}
|
|
14
|
+
flush(now = /* @__PURE__ */ new Date()) {
|
|
15
|
+
const minTimestamp = now.getTime() - this.windowMs;
|
|
16
|
+
const windowSamples = this.samples.filter((sample) => sample.timestamp.getTime() >= minTimestamp);
|
|
17
|
+
this.samples.length = 0;
|
|
18
|
+
const metrics = this.aggregateMetrics(windowSamples);
|
|
19
|
+
const sequences = this.buildSequences(windowSamples);
|
|
20
|
+
const timestamps = windowSamples.map((sample) => sample.timestamp.getTime());
|
|
21
|
+
return {
|
|
22
|
+
metrics,
|
|
23
|
+
sequences,
|
|
24
|
+
sampleCount: windowSamples.length,
|
|
25
|
+
windowStart: timestamps.length ? new Date(Math.min(...timestamps)) : void 0,
|
|
26
|
+
windowEnd: timestamps.length ? new Date(Math.max(...timestamps)) : void 0
|
|
27
|
+
};
|
|
28
|
+
}
|
|
29
|
+
aggregateMetrics(samples) {
|
|
30
|
+
if (!samples.length) return [];
|
|
31
|
+
const groups = /* @__PURE__ */ new Map();
|
|
32
|
+
for (const sample of samples) {
|
|
33
|
+
const key = `${sample.operation.name}.v${sample.operation.version}`;
|
|
34
|
+
const arr = groups.get(key) ?? [];
|
|
35
|
+
arr.push(sample);
|
|
36
|
+
groups.set(key, arr);
|
|
37
|
+
}
|
|
38
|
+
return [...groups.values()].map((group) => {
|
|
39
|
+
const durations = group.map((s) => s.durationMs).sort((a, b) => a - b);
|
|
40
|
+
const errors = group.filter((s) => !s.success);
|
|
41
|
+
const totalCalls = group.length;
|
|
42
|
+
const topErrors = errors.reduce((acc, sample) => {
|
|
43
|
+
if (!sample.errorCode) return acc;
|
|
44
|
+
acc[sample.errorCode] = (acc[sample.errorCode] ?? 0) + 1;
|
|
45
|
+
return acc;
|
|
46
|
+
}, {});
|
|
47
|
+
const timestamps = group.map((s) => s.timestamp.getTime());
|
|
48
|
+
return {
|
|
49
|
+
operation: group[0].operation,
|
|
50
|
+
totalCalls,
|
|
51
|
+
successRate: (totalCalls - errors.length) / totalCalls,
|
|
52
|
+
errorRate: errors.length / totalCalls,
|
|
53
|
+
averageLatencyMs: durations.reduce((sum, value) => sum + value, 0) / totalCalls,
|
|
54
|
+
p95LatencyMs: percentile(durations, .95),
|
|
55
|
+
p99LatencyMs: percentile(durations, .99),
|
|
56
|
+
maxLatencyMs: Math.max(...durations),
|
|
57
|
+
windowStart: new Date(Math.min(...timestamps)),
|
|
58
|
+
windowEnd: new Date(Math.max(...timestamps)),
|
|
59
|
+
topErrors
|
|
60
|
+
};
|
|
61
|
+
});
|
|
62
|
+
}
|
|
63
|
+
buildSequences(samples) {
|
|
64
|
+
const byTrace = /* @__PURE__ */ new Map();
|
|
65
|
+
for (const sample of samples.slice(-this.sequenceSampleSize)) {
|
|
66
|
+
if (!sample.traceId) continue;
|
|
67
|
+
const arr = byTrace.get(sample.traceId) ?? [];
|
|
68
|
+
arr.push(sample);
|
|
69
|
+
byTrace.set(sample.traceId, arr);
|
|
70
|
+
}
|
|
71
|
+
const sequences = {};
|
|
72
|
+
for (const [traceId, events] of byTrace.entries()) {
|
|
73
|
+
const ordered = events.sort((a, b) => a.timestamp.getTime() - b.timestamp.getTime());
|
|
74
|
+
const steps = ordered.map((event) => event.operation.name);
|
|
75
|
+
if (steps.length < 2) continue;
|
|
76
|
+
const key = `${steps.join(">")}@${ordered[0]?.tenantId ?? "global"}`;
|
|
77
|
+
const existing = sequences[key];
|
|
78
|
+
if (existing) existing.count += 1;
|
|
79
|
+
else sequences[key] = {
|
|
80
|
+
steps,
|
|
81
|
+
tenantId: ordered[0]?.tenantId,
|
|
82
|
+
count: 1
|
|
83
|
+
};
|
|
84
|
+
}
|
|
85
|
+
return Object.values(sequences).sort((a, b) => b.count - a.count);
|
|
86
|
+
}
|
|
87
|
+
};
|
|
88
|
+
function percentile(values, ratio) {
|
|
89
|
+
if (!values.length) return 0;
|
|
90
|
+
if (values.length === 1) return values[0];
|
|
91
|
+
return values[Math.min(values.length - 1, Math.floor(ratio * values.length))];
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
//#endregion
|
|
95
|
+
export { IntentAggregator };
|
package/dist/intent/detector.mjs
CHANGED
|
@@ -1 +1,121 @@
|
|
|
1
|
-
import{randomUUID
|
|
1
|
+
import { randomUUID } from "node:crypto";
|
|
2
|
+
|
|
3
|
+
//#region src/intent/detector.ts
|
|
4
|
+
const DEFAULTS = {
|
|
5
|
+
errorRateThreshold: .05,
|
|
6
|
+
latencyP99ThresholdMs: 750,
|
|
7
|
+
throughputDropThreshold: .3,
|
|
8
|
+
minSequenceLength: 3
|
|
9
|
+
};
|
|
10
|
+
var IntentDetector = class {
|
|
11
|
+
options;
|
|
12
|
+
constructor(options = {}) {
|
|
13
|
+
this.options = {
|
|
14
|
+
errorRateThreshold: options.errorRateThreshold ?? DEFAULTS.errorRateThreshold,
|
|
15
|
+
latencyP99ThresholdMs: options.latencyP99ThresholdMs ?? DEFAULTS.latencyP99ThresholdMs,
|
|
16
|
+
throughputDropThreshold: options.throughputDropThreshold ?? DEFAULTS.throughputDropThreshold,
|
|
17
|
+
minSequenceLength: options.minSequenceLength ?? DEFAULTS.minSequenceLength
|
|
18
|
+
};
|
|
19
|
+
}
|
|
20
|
+
detectFromMetrics(current, previous) {
|
|
21
|
+
const signals = [];
|
|
22
|
+
const baseline = new Map((previous ?? []).map((metric) => [`${metric.operation.name}.v${metric.operation.version}`, metric]));
|
|
23
|
+
for (const metric of current) {
|
|
24
|
+
if (metric.errorRate >= this.options.errorRateThreshold) {
|
|
25
|
+
signals.push({
|
|
26
|
+
id: randomUUID(),
|
|
27
|
+
type: "error-spike",
|
|
28
|
+
operation: metric.operation,
|
|
29
|
+
confidence: Math.min(1, metric.errorRate / this.options.errorRateThreshold),
|
|
30
|
+
description: `Error rate ${metric.errorRate.toFixed(2)} exceeded threshold`,
|
|
31
|
+
metadata: {
|
|
32
|
+
errorRate: metric.errorRate,
|
|
33
|
+
topErrors: metric.topErrors
|
|
34
|
+
},
|
|
35
|
+
evidence: [{
|
|
36
|
+
type: "metric",
|
|
37
|
+
description: "error-rate",
|
|
38
|
+
data: {
|
|
39
|
+
errorRate: metric.errorRate,
|
|
40
|
+
threshold: this.options.errorRateThreshold
|
|
41
|
+
}
|
|
42
|
+
}]
|
|
43
|
+
});
|
|
44
|
+
continue;
|
|
45
|
+
}
|
|
46
|
+
if (metric.p99LatencyMs >= this.options.latencyP99ThresholdMs) {
|
|
47
|
+
signals.push({
|
|
48
|
+
id: randomUUID(),
|
|
49
|
+
type: "latency-regression",
|
|
50
|
+
operation: metric.operation,
|
|
51
|
+
confidence: Math.min(1, metric.p99LatencyMs / this.options.latencyP99ThresholdMs),
|
|
52
|
+
description: `P99 latency ${metric.p99LatencyMs}ms exceeded threshold`,
|
|
53
|
+
metadata: { p99LatencyMs: metric.p99LatencyMs },
|
|
54
|
+
evidence: [{
|
|
55
|
+
type: "metric",
|
|
56
|
+
description: "p99-latency",
|
|
57
|
+
data: {
|
|
58
|
+
p99LatencyMs: metric.p99LatencyMs,
|
|
59
|
+
threshold: this.options.latencyP99ThresholdMs
|
|
60
|
+
}
|
|
61
|
+
}]
|
|
62
|
+
});
|
|
63
|
+
continue;
|
|
64
|
+
}
|
|
65
|
+
const base = baseline.get(`${metric.operation.name}.v${metric.operation.version}`);
|
|
66
|
+
if (base) {
|
|
67
|
+
const drop = (base.totalCalls - metric.totalCalls) / Math.max(base.totalCalls, 1);
|
|
68
|
+
if (drop >= this.options.throughputDropThreshold) signals.push({
|
|
69
|
+
id: randomUUID(),
|
|
70
|
+
type: "throughput-drop",
|
|
71
|
+
operation: metric.operation,
|
|
72
|
+
confidence: Math.min(1, drop / this.options.throughputDropThreshold),
|
|
73
|
+
description: `Throughput dropped ${(drop * 100).toFixed(1)}% vs baseline`,
|
|
74
|
+
metadata: {
|
|
75
|
+
baselineCalls: base.totalCalls,
|
|
76
|
+
currentCalls: metric.totalCalls
|
|
77
|
+
},
|
|
78
|
+
evidence: [{
|
|
79
|
+
type: "metric",
|
|
80
|
+
description: "throughput-drop",
|
|
81
|
+
data: {
|
|
82
|
+
baselineCalls: base.totalCalls,
|
|
83
|
+
currentCalls: metric.totalCalls
|
|
84
|
+
}
|
|
85
|
+
}]
|
|
86
|
+
});
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
return signals;
|
|
90
|
+
}
|
|
91
|
+
detectSequentialIntents(sequences) {
|
|
92
|
+
const signals = [];
|
|
93
|
+
for (const sequence of sequences) {
|
|
94
|
+
if (sequence.steps.length < this.options.minSequenceLength) continue;
|
|
95
|
+
const description = sequence.steps.join(" → ");
|
|
96
|
+
signals.push({
|
|
97
|
+
id: randomUUID(),
|
|
98
|
+
type: "missing-workflow-step",
|
|
99
|
+
confidence: .6,
|
|
100
|
+
description: `Repeated workflow detected: ${description}`,
|
|
101
|
+
metadata: {
|
|
102
|
+
steps: sequence.steps,
|
|
103
|
+
tenantId: sequence.tenantId,
|
|
104
|
+
occurrences: sequence.count
|
|
105
|
+
},
|
|
106
|
+
evidence: [{
|
|
107
|
+
type: "sequence",
|
|
108
|
+
description: "sequential-calls",
|
|
109
|
+
data: {
|
|
110
|
+
steps: sequence.steps,
|
|
111
|
+
count: sequence.count
|
|
112
|
+
}
|
|
113
|
+
}]
|
|
114
|
+
});
|
|
115
|
+
}
|
|
116
|
+
return signals;
|
|
117
|
+
}
|
|
118
|
+
};
|
|
119
|
+
|
|
120
|
+
//#endregion
|
|
121
|
+
export { IntentDetector };
|
|
@@ -1 +1,4 @@
|
|
|
1
|
-
import{
|
|
1
|
+
import { LIFECYCLE_STAGE_META, LifecycleStage } from "./types/stages.mjs";
|
|
2
|
+
import "./types/signals.mjs";
|
|
3
|
+
import "./types/milestones.mjs";
|
|
4
|
+
import { getStageLabel } from "./utils/formatters.mjs";
|
|
@@ -1 +1 @@
|
|
|
1
|
-
import"./stages.mjs";
|
|
1
|
+
import "./stages.mjs";
|
|
@@ -1 +1 @@
|
|
|
1
|
-
import"./stages.mjs";
|
|
1
|
+
import "./stages.mjs";
|
|
@@ -1 +1,151 @@
|
|
|
1
|
-
|
|
1
|
+
//#region ../lifecycle/dist/types/stages.js
|
|
2
|
+
let LifecycleStage = /* @__PURE__ */ function(LifecycleStage$1) {
|
|
3
|
+
LifecycleStage$1[LifecycleStage$1["Exploration"] = 0] = "Exploration";
|
|
4
|
+
LifecycleStage$1[LifecycleStage$1["ProblemSolutionFit"] = 1] = "ProblemSolutionFit";
|
|
5
|
+
LifecycleStage$1[LifecycleStage$1["MvpEarlyTraction"] = 2] = "MvpEarlyTraction";
|
|
6
|
+
LifecycleStage$1[LifecycleStage$1["ProductMarketFit"] = 3] = "ProductMarketFit";
|
|
7
|
+
LifecycleStage$1[LifecycleStage$1["GrowthScaleUp"] = 4] = "GrowthScaleUp";
|
|
8
|
+
LifecycleStage$1[LifecycleStage$1["ExpansionPlatform"] = 5] = "ExpansionPlatform";
|
|
9
|
+
LifecycleStage$1[LifecycleStage$1["MaturityRenewal"] = 6] = "MaturityRenewal";
|
|
10
|
+
return LifecycleStage$1;
|
|
11
|
+
}({});
|
|
12
|
+
const LIFECYCLE_STAGE_ORDER = [
|
|
13
|
+
LifecycleStage.Exploration,
|
|
14
|
+
LifecycleStage.ProblemSolutionFit,
|
|
15
|
+
LifecycleStage.MvpEarlyTraction,
|
|
16
|
+
LifecycleStage.ProductMarketFit,
|
|
17
|
+
LifecycleStage.GrowthScaleUp,
|
|
18
|
+
LifecycleStage.ExpansionPlatform,
|
|
19
|
+
LifecycleStage.MaturityRenewal
|
|
20
|
+
];
|
|
21
|
+
const LIFECYCLE_STAGE_META = {
|
|
22
|
+
[LifecycleStage.Exploration]: {
|
|
23
|
+
id: LifecycleStage.Exploration,
|
|
24
|
+
order: 0,
|
|
25
|
+
slug: "exploration",
|
|
26
|
+
name: "Exploration / Ideation",
|
|
27
|
+
question: "Is there a problem worth my time?",
|
|
28
|
+
signals: [
|
|
29
|
+
"20+ discovery interviews",
|
|
30
|
+
"Clear problem statement",
|
|
31
|
+
"Named ICP"
|
|
32
|
+
],
|
|
33
|
+
traps: ["Branding before discovery", "Premature tooling decisions"],
|
|
34
|
+
focusAreas: [
|
|
35
|
+
"Customer discovery",
|
|
36
|
+
"Problem definition",
|
|
37
|
+
"Segment clarity"
|
|
38
|
+
]
|
|
39
|
+
},
|
|
40
|
+
[LifecycleStage.ProblemSolutionFit]: {
|
|
41
|
+
id: LifecycleStage.ProblemSolutionFit,
|
|
42
|
+
order: 1,
|
|
43
|
+
slug: "problem-solution-fit",
|
|
44
|
+
name: "Problem–Solution Fit",
|
|
45
|
+
question: "Do people care enough about this solution?",
|
|
46
|
+
signals: [
|
|
47
|
+
"Prototype reuse",
|
|
48
|
+
"Referral energy",
|
|
49
|
+
"Pre-pay interest"
|
|
50
|
+
],
|
|
51
|
+
traps: ["“Market is huge” without users", "Skipping qualitative loops"],
|
|
52
|
+
focusAreas: [
|
|
53
|
+
"Solution hypothesis",
|
|
54
|
+
"Value messaging",
|
|
55
|
+
"Feedback capture"
|
|
56
|
+
]
|
|
57
|
+
},
|
|
58
|
+
[LifecycleStage.MvpEarlyTraction]: {
|
|
59
|
+
id: LifecycleStage.MvpEarlyTraction,
|
|
60
|
+
order: 2,
|
|
61
|
+
slug: "mvp-early-traction",
|
|
62
|
+
name: "MVP & Early Traction",
|
|
63
|
+
question: "Can we get real usage and learn fast?",
|
|
64
|
+
signals: [
|
|
65
|
+
"20–50 named active users",
|
|
66
|
+
"Weekly releases",
|
|
67
|
+
"Noisy feedback"
|
|
68
|
+
],
|
|
69
|
+
traps: ["Overbuilt infra for 10 users", "Undefined retention metric"],
|
|
70
|
+
focusAreas: [
|
|
71
|
+
"Activation",
|
|
72
|
+
"Cohort tracking",
|
|
73
|
+
"Feedback rituals"
|
|
74
|
+
]
|
|
75
|
+
},
|
|
76
|
+
[LifecycleStage.ProductMarketFit]: {
|
|
77
|
+
id: LifecycleStage.ProductMarketFit,
|
|
78
|
+
order: 3,
|
|
79
|
+
slug: "product-market-fit",
|
|
80
|
+
name: "Product–Market Fit",
|
|
81
|
+
question: "Is this pulling us forward?",
|
|
82
|
+
signals: [
|
|
83
|
+
"Retention without heroics",
|
|
84
|
+
"Organic word-of-mouth",
|
|
85
|
+
"Value stories"
|
|
86
|
+
],
|
|
87
|
+
traps: ["Hero growth that does not scale", "Ignoring churn signals"],
|
|
88
|
+
focusAreas: [
|
|
89
|
+
"Retention",
|
|
90
|
+
"Reliability",
|
|
91
|
+
"ICP clarity"
|
|
92
|
+
]
|
|
93
|
+
},
|
|
94
|
+
[LifecycleStage.GrowthScaleUp]: {
|
|
95
|
+
id: LifecycleStage.GrowthScaleUp,
|
|
96
|
+
order: 4,
|
|
97
|
+
slug: "growth-scale-up",
|
|
98
|
+
name: "Growth / Scale-up",
|
|
99
|
+
question: "Can we grow this repeatably?",
|
|
100
|
+
signals: [
|
|
101
|
+
"Predictable channels",
|
|
102
|
+
"Specialized hires",
|
|
103
|
+
"Unit economics on track"
|
|
104
|
+
],
|
|
105
|
+
traps: ["Paid spend masking retention gaps", "Infra debt blocking launches"],
|
|
106
|
+
focusAreas: [
|
|
107
|
+
"Ops systems",
|
|
108
|
+
"Growth loops",
|
|
109
|
+
"Reliability engineering"
|
|
110
|
+
]
|
|
111
|
+
},
|
|
112
|
+
[LifecycleStage.ExpansionPlatform]: {
|
|
113
|
+
id: LifecycleStage.ExpansionPlatform,
|
|
114
|
+
order: 5,
|
|
115
|
+
slug: "expansion-platform",
|
|
116
|
+
name: "Expansion / Platform",
|
|
117
|
+
question: "What is the next growth curve?",
|
|
118
|
+
signals: [
|
|
119
|
+
"Stable core metrics",
|
|
120
|
+
"Partner/API demand",
|
|
121
|
+
"Ecosystem pull"
|
|
122
|
+
],
|
|
123
|
+
traps: ["Platform theater before wedge is solid"],
|
|
124
|
+
focusAreas: [
|
|
125
|
+
"Partnerships",
|
|
126
|
+
"APIs",
|
|
127
|
+
"New market validation"
|
|
128
|
+
]
|
|
129
|
+
},
|
|
130
|
+
[LifecycleStage.MaturityRenewal]: {
|
|
131
|
+
id: LifecycleStage.MaturityRenewal,
|
|
132
|
+
order: 6,
|
|
133
|
+
slug: "maturity-renewal",
|
|
134
|
+
name: "Maturity / Renewal",
|
|
135
|
+
question: "Optimize, reinvent, or sunset?",
|
|
136
|
+
signals: [
|
|
137
|
+
"Margin focus",
|
|
138
|
+
"Portfolio bets",
|
|
139
|
+
"Narrative refresh"
|
|
140
|
+
],
|
|
141
|
+
traps: ["Assuming past success is enough"],
|
|
142
|
+
focusAreas: [
|
|
143
|
+
"Cost optimization",
|
|
144
|
+
"Reinvention bets",
|
|
145
|
+
"Sunset planning"
|
|
146
|
+
]
|
|
147
|
+
}
|
|
148
|
+
};
|
|
149
|
+
|
|
150
|
+
//#endregion
|
|
151
|
+
export { LIFECYCLE_STAGE_META, LifecycleStage };
|
|
@@ -1 +1,7 @@
|
|
|
1
|
-
import{
|
|
1
|
+
import { LIFECYCLE_STAGE_META } from "../types/stages.mjs";
|
|
2
|
+
|
|
3
|
+
//#region ../lifecycle/dist/utils/formatters.js
|
|
4
|
+
const getStageLabel = (stage) => LIFECYCLE_STAGE_META[stage].name;
|
|
5
|
+
|
|
6
|
+
//#endregion
|
|
7
|
+
export { getStageLabel };
|
package/dist/logging/index.mjs
CHANGED
|
@@ -1 +1,39 @@
|
|
|
1
|
-
import{context
|
|
1
|
+
import { context, trace } from "@opentelemetry/api";
|
|
2
|
+
|
|
3
|
+
//#region src/logging/index.ts
|
|
4
|
+
var Logger = class {
|
|
5
|
+
constructor(serviceName) {
|
|
6
|
+
this.serviceName = serviceName;
|
|
7
|
+
}
|
|
8
|
+
log(level, message, meta = {}) {
|
|
9
|
+
const span = trace.getSpan(context.active());
|
|
10
|
+
const traceId = span?.spanContext().traceId;
|
|
11
|
+
const spanId = span?.spanContext().spanId;
|
|
12
|
+
const entry = {
|
|
13
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString(),
|
|
14
|
+
service: this.serviceName,
|
|
15
|
+
level,
|
|
16
|
+
message,
|
|
17
|
+
traceId,
|
|
18
|
+
spanId,
|
|
19
|
+
...meta
|
|
20
|
+
};
|
|
21
|
+
console.log(JSON.stringify(entry));
|
|
22
|
+
}
|
|
23
|
+
debug(message, meta) {
|
|
24
|
+
this.log("debug", message, meta);
|
|
25
|
+
}
|
|
26
|
+
info(message, meta) {
|
|
27
|
+
this.log("info", message, meta);
|
|
28
|
+
}
|
|
29
|
+
warn(message, meta) {
|
|
30
|
+
this.log("warn", message, meta);
|
|
31
|
+
}
|
|
32
|
+
error(message, meta) {
|
|
33
|
+
this.log("error", message, meta);
|
|
34
|
+
}
|
|
35
|
+
};
|
|
36
|
+
const logger = new Logger(process.env.OTEL_SERVICE_NAME || "unknown-service");
|
|
37
|
+
|
|
38
|
+
//#endregion
|
|
39
|
+
export { Logger, logger };
|
package/dist/metrics/index.mjs
CHANGED
|
@@ -1 +1,25 @@
|
|
|
1
|
-
import{metrics
|
|
1
|
+
import { metrics } from "@opentelemetry/api";
|
|
2
|
+
|
|
3
|
+
//#region src/metrics/index.ts
|
|
4
|
+
const DEFAULT_METER_NAME = "@lssm/lib.observability";
|
|
5
|
+
function getMeter(name = DEFAULT_METER_NAME) {
|
|
6
|
+
return metrics.getMeter(name);
|
|
7
|
+
}
|
|
8
|
+
function createCounter(name, description, meterName) {
|
|
9
|
+
return getMeter(meterName).createCounter(name, { description });
|
|
10
|
+
}
|
|
11
|
+
function createUpDownCounter(name, description, meterName) {
|
|
12
|
+
return getMeter(meterName).createUpDownCounter(name, { description });
|
|
13
|
+
}
|
|
14
|
+
function createHistogram(name, description, meterName) {
|
|
15
|
+
return getMeter(meterName).createHistogram(name, { description });
|
|
16
|
+
}
|
|
17
|
+
const standardMetrics = {
|
|
18
|
+
httpRequests: createCounter("http_requests_total", "Total HTTP requests"),
|
|
19
|
+
httpDuration: createHistogram("http_request_duration_seconds", "HTTP request duration"),
|
|
20
|
+
operationErrors: createCounter("operation_errors_total", "Total operation errors"),
|
|
21
|
+
workflowDuration: createHistogram("workflow_duration_seconds", "Workflow execution duration")
|
|
22
|
+
};
|
|
23
|
+
|
|
24
|
+
//#endregion
|
|
25
|
+
export { createCounter, createHistogram, createUpDownCounter, getMeter, standardMetrics };
|
|
@@ -1 +1,65 @@
|
|
|
1
|
-
import{IntentAggregator
|
|
1
|
+
import { IntentAggregator } from "../intent/aggregator.mjs";
|
|
2
|
+
import { IntentDetector } from "../intent/detector.mjs";
|
|
3
|
+
import { EventEmitter } from "node:events";
|
|
4
|
+
|
|
5
|
+
//#region src/pipeline/evolution-pipeline.ts
|
|
6
|
+
var EvolutionPipeline = class {
|
|
7
|
+
detector;
|
|
8
|
+
aggregator;
|
|
9
|
+
emitter;
|
|
10
|
+
onIntent;
|
|
11
|
+
onSnapshot;
|
|
12
|
+
timer;
|
|
13
|
+
previousMetrics;
|
|
14
|
+
constructor(options = {}) {
|
|
15
|
+
this.detector = options.detector ?? new IntentDetector();
|
|
16
|
+
this.aggregator = options.aggregator ?? new IntentAggregator();
|
|
17
|
+
this.emitter = options.emitter ?? new EventEmitter();
|
|
18
|
+
this.onIntent = options.onIntent;
|
|
19
|
+
this.onSnapshot = options.onSnapshot;
|
|
20
|
+
}
|
|
21
|
+
ingest(sample) {
|
|
22
|
+
this.aggregator.add(sample);
|
|
23
|
+
}
|
|
24
|
+
on(listener) {
|
|
25
|
+
this.emitter.on("event", listener);
|
|
26
|
+
}
|
|
27
|
+
start(intervalMs = 300 * 1e3) {
|
|
28
|
+
this.stop();
|
|
29
|
+
this.timer = setInterval(() => {
|
|
30
|
+
this.run();
|
|
31
|
+
}, intervalMs);
|
|
32
|
+
}
|
|
33
|
+
stop() {
|
|
34
|
+
if (this.timer) {
|
|
35
|
+
clearInterval(this.timer);
|
|
36
|
+
this.timer = void 0;
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
async run() {
|
|
40
|
+
const snapshot = this.aggregator.flush();
|
|
41
|
+
this.emit({
|
|
42
|
+
type: "telemetry.window",
|
|
43
|
+
payload: { sampleCount: snapshot.sampleCount }
|
|
44
|
+
});
|
|
45
|
+
if (this.onSnapshot) await this.onSnapshot(snapshot);
|
|
46
|
+
if (!snapshot.sampleCount) return;
|
|
47
|
+
const metricSignals = this.detector.detectFromMetrics(snapshot.metrics, this.previousMetrics);
|
|
48
|
+
const sequenceSignals = this.detector.detectSequentialIntents(snapshot.sequences);
|
|
49
|
+
this.previousMetrics = snapshot.metrics;
|
|
50
|
+
const signals = [...metricSignals, ...sequenceSignals];
|
|
51
|
+
for (const signal of signals) {
|
|
52
|
+
if (this.onIntent) await this.onIntent(signal);
|
|
53
|
+
this.emit({
|
|
54
|
+
type: "intent.detected",
|
|
55
|
+
payload: signal
|
|
56
|
+
});
|
|
57
|
+
}
|
|
58
|
+
}
|
|
59
|
+
emit(event) {
|
|
60
|
+
this.emitter.emit("event", event);
|
|
61
|
+
}
|
|
62
|
+
};
|
|
63
|
+
|
|
64
|
+
//#endregion
|
|
65
|
+
export { EvolutionPipeline };
|
|
@@ -1 +1,73 @@
|
|
|
1
|
-
import{createCounter
|
|
1
|
+
import { createCounter, createHistogram, createUpDownCounter } from "../metrics/index.mjs";
|
|
2
|
+
import { getStageLabel } from "../lifecycle/dist/utils/formatters.mjs";
|
|
3
|
+
import "../lifecycle/dist/index.mjs";
|
|
4
|
+
import { EventEmitter } from "node:events";
|
|
5
|
+
|
|
6
|
+
//#region src/pipeline/lifecycle-pipeline.ts
|
|
7
|
+
var LifecycleKpiPipeline = class {
  assessmentCounter;
  confidenceHistogram;
  stageUpDownCounter;
  emitter;
  lowConfidenceThreshold;
  // Last stage observed per tenant; lets us decrement the previous stage's
  // up/down counter before incrementing the new one.
  currentStageByTenant = /* @__PURE__ */ new Map();
  /**
   * Wires up the lifecycle KPI instruments (counter, histogram, up/down
   * counter) and the event emitter used to broadcast pipeline events.
   *
   * @param {object} [options]
   * @param {string} [options.meterName] - Meter to register instruments under.
   * @param {EventEmitter} [options.emitter] - External emitter to reuse.
   * @param {number} [options.lowConfidenceThreshold] - Assessments below this
   *   confidence trigger a "confidence.low" event (default 0.4).
   */
  constructor(options = {}) {
    const meterName = options.meterName ?? "@lssm/lib.lifecycle-kpi";
    this.assessmentCounter = createCounter("lifecycle_assessments_total", "Total lifecycle assessments", meterName);
    this.confidenceHistogram = createHistogram("lifecycle_assessment_confidence", "Lifecycle assessment confidence distribution", meterName);
    this.stageUpDownCounter = createUpDownCounter("lifecycle_stage_tenants", "Current tenants per lifecycle stage", meterName);
    this.emitter = options.emitter ?? new EventEmitter();
    this.lowConfidenceThreshold = options.lowConfidenceThreshold ?? 0.4;
  }
  /**
   * Records one lifecycle assessment: bumps the counter, records confidence,
   * keeps per-stage tenant gauges in sync, and emits pipeline events.
   */
  recordAssessment(assessment, tenantId) {
    const labels = {
      stage: getStageLabel(assessment.stage),
      tenantId
    };
    this.assessmentCounter.add(1, labels);
    this.confidenceHistogram.record(assessment.confidence, labels);
    this.ensureStageCounters(assessment.stage, tenantId);
    this.emitter.emit("event", {
      type: "assessment.recorded",
      payload: {
        tenantId,
        stage: assessment.stage
      }
    });
    const belowThreshold = assessment.confidence < this.lowConfidenceThreshold;
    if (belowThreshold) {
      this.emitter.emit("event", {
        type: "confidence.low",
        payload: {
          tenantId,
          confidence: assessment.confidence
        }
      });
    }
  }
  /** Subscribes a listener to every pipeline event. */
  on(listener) {
    this.emitter.on("event", listener);
  }
  /**
   * Moves a tenant's up/down counters to the given stage when it changed,
   * then emits a "stage.changed" event. No-op without a tenantId or when the
   * stage is unchanged.
   */
  ensureStageCounters(stage, tenantId) {
    if (!tenantId) return;
    const priorStage = this.currentStageByTenant.get(tenantId);
    if (priorStage === stage) return;
    // Release the slot held under the old stage before claiming the new one.
    if (priorStage !== void 0) {
      this.stageUpDownCounter.add(-1, {
        stage: getStageLabel(priorStage),
        tenantId
      });
    }
    this.stageUpDownCounter.add(1, {
      stage: getStageLabel(stage),
      tenantId
    });
    this.currentStageByTenant.set(tenantId, stage);
    this.emitter.emit("event", {
      type: "stage.changed",
      payload: {
        tenantId,
        previousStage: priorStage,
        nextStage: stage
      }
    });
  }
};
|
|
71
|
+
|
|
72
|
+
//#endregion
|
|
73
|
+
export { LifecycleKpiPipeline };
|
package/dist/tracing/index.mjs
CHANGED
|
@@ -1 +1,46 @@
|
|
|
1
|
-
import{SpanStatusCode
|
|
1
|
+
import { SpanStatusCode, trace } from "@opentelemetry/api";
|
|
2
|
+
|
|
3
|
+
//#region src/tracing/index.ts
|
|
4
|
+
// Tracer name used when callers do not supply their own.
const DEFAULT_TRACER_NAME = "@lssm/lib.observability";

/**
 * Resolves an OpenTelemetry tracer, falling back to this library's
 * default tracer name.
 *
 * @param {string} [name] - Tracer name to look up.
 * @returns The tracer registered under `name`.
 */
function getTracer(name = DEFAULT_TRACER_NAME) {
  return trace.getTracer(name);
}
|
|
8
|
+
/**
 * Runs an async function inside an active span, marking the span OK on
 * success or ERROR (with the exception recorded) on failure. The span is
 * always ended; the original error is rethrown untouched.
 *
 * @param {string} name - Span name.
 * @param {(span) => Promise<any>} fn - Work to perform under the span.
 * @param {string} [tracerName] - Optional tracer to use instead of the default.
 * @returns The resolved value of `fn`.
 */
async function traceAsync(name, fn, tracerName) {
  const tracer = getTracer(tracerName);
  return tracer.startActiveSpan(name, async (span) => {
    try {
      const value = await fn(span);
      span.setStatus({ code: SpanStatusCode.OK });
      return value;
    } catch (err) {
      span.recordException(err);
      const message = err instanceof Error ? err.message : String(err);
      span.setStatus({
        code: SpanStatusCode.ERROR,
        message
      });
      throw err;
    } finally {
      span.end();
    }
  });
}
|
|
26
|
+
/**
 * Synchronous counterpart of `traceAsync`: runs `fn` inside an active span,
 * setting OK/ERROR status and always ending the span. Errors are recorded on
 * the span and rethrown unchanged.
 *
 * @param {string} name - Span name.
 * @param {(span) => any} fn - Work to perform under the span.
 * @param {string} [tracerName] - Optional tracer to use instead of the default.
 * @returns The value returned by `fn`.
 */
function traceSync(name, fn, tracerName) {
  const tracer = getTracer(tracerName);
  return tracer.startActiveSpan(name, (span) => {
    try {
      const value = fn(span);
      span.setStatus({ code: SpanStatusCode.OK });
      return value;
    } catch (err) {
      span.recordException(err);
      const message = err instanceof Error ? err.message : String(err);
      span.setStatus({
        code: SpanStatusCode.ERROR,
        message
      });
      throw err;
    } finally {
      span.end();
    }
  });
}
|
|
44
|
+
|
|
45
|
+
//#endregion
|
|
46
|
+
export { getTracer, traceAsync, traceSync };
|
|
@@ -1 +1,79 @@
|
|
|
1
|
-
import{traceAsync
|
|
1
|
+
import { traceAsync } from "./index.mjs";
|
|
2
|
+
import { standardMetrics } from "../metrics/index.mjs";
|
|
3
|
+
|
|
4
|
+
//#region src/tracing/middleware.ts
|
|
5
|
+
/**
 * Builds an HTTP middleware `(req, next) => Response` that counts requests,
 * wraps the handler in a span (method/url/status attributes), records request
 * duration on success, counts errors on failure, and forwards a telemetry
 * sample to `options.onSample` when configured.
 *
 * @param {object} [options] - Passed through to `emitTelemetrySample`
 *   (onSample, resolveOperation, tenantResolver, actorResolver).
 */
function createTracingMiddleware(options = {}) {
  return async (req, next) => {
    const { method } = req;
    const path = new URL(req.url).pathname;
    standardMetrics.httpRequests.add(1, { method, path });
    const begunAt = performance.now();
    return traceAsync(`HTTP ${method} ${path}`, async (span) => {
      span.setAttribute("http.method", method);
      span.setAttribute("http.url", req.url);
      try {
        const response = await next();
        span.setAttribute("http.status_code", response.status);
        // Histogram is recorded in seconds; the sample below wants ms.
        const elapsedSeconds = (performance.now() - begunAt) / 1e3;
        standardMetrics.httpDuration.record(elapsedSeconds, {
          method,
          path,
          status: response.status.toString()
        });
        emitTelemetrySample({
          req,
          res: response,
          span,
          success: true,
          durationMs: elapsedSeconds * 1e3,
          options
        });
        return response;
      } catch (error) {
        standardMetrics.operationErrors.add(1, { method, path });
        emitTelemetrySample({
          req,
          span,
          success: false,
          durationMs: performance.now() - begunAt,
          error,
          options
        });
        throw error;
      }
    });
  };
}
|
|
53
|
+
/**
 * Builds one telemetry sample for a finished request and hands it to
 * `options.onSample`. Silently does nothing unless both `onSample` and
 * `resolveOperation` are provided and the latter yields an operation name.
 *
 * errorCode: the error's class name for failed requests with an Error,
 * "unknown" for failures without one, and undefined on success.
 */
function emitTelemetrySample({ req, res, span, success, durationMs, error, options }) {
  const { onSample, resolveOperation } = options;
  if (!onSample || !resolveOperation) return;
  const operation = resolveOperation({ req, res });
  if (!operation) return;
  let errorCode;
  if (!success) {
    errorCode = error instanceof Error ? error.name : "unknown";
  }
  onSample({
    operation,
    durationMs,
    success,
    timestamp: /* @__PURE__ */ new Date(),
    errorCode,
    tenantId: options.tenantResolver?.(req),
    actorId: options.actorResolver?.(req),
    traceId: span.spanContext().traceId,
    metadata: {
      method: req.method,
      path: new URL(req.url).pathname,
      status: res?.status
    }
  });
}
|
|
77
|
+
|
|
78
|
+
//#endregion
|
|
79
|
+
export { createTracingMiddleware };
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@lssm/lib.observability",
|
|
3
|
-
"version": "0.0.0-canary-
|
|
3
|
+
"version": "0.0.0-canary-20251217072406",
|
|
4
4
|
"main": "./dist/index.mjs",
|
|
5
5
|
"types": "./dist/index.d.mts",
|
|
6
6
|
"scripts": {
|
|
@@ -17,12 +17,14 @@
|
|
|
17
17
|
"test": "bun run"
|
|
18
18
|
},
|
|
19
19
|
"dependencies": {
|
|
20
|
-
"@lssm/lib.lifecycle": "0.0.0-canary-
|
|
20
|
+
"@lssm/lib.lifecycle": "0.0.0-canary-20251217072406"
|
|
21
21
|
},
|
|
22
22
|
"peerDependencies": {
|
|
23
23
|
"@opentelemetry/api": "*"
|
|
24
24
|
},
|
|
25
25
|
"devDependencies": {
|
|
26
|
+
"@lssm/tool.tsdown": "0.0.0-canary-20251217072406",
|
|
27
|
+
"@lssm/tool.typescript": "0.0.0-canary-20251217072406",
|
|
26
28
|
"typescript": "^5.0.0"
|
|
27
29
|
},
|
|
28
30
|
"exports": {
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
(function(e){return e.Sketch=`Sketch`,e.Prototype=`Prototype`,e.Mvp=`MVP`,e.V1=`V1`,e.Ecosystem=`Ecosystem`,e})({}),function(e){return e.Solo=`Solo`,e.TinyTeam=`TinyTeam`,e.FunctionalOrg=`FunctionalOrg`,e.MultiTeam=`MultiTeam`,e.Bureaucratic=`Bureaucratic`,e}({}),function(e){return e.Bootstrapped=`Bootstrapped`,e.PreSeed=`PreSeed`,e.Seed=`Seed`,e.SeriesAorB=`SeriesAorB`,e.LateStage=`LateStage`,e}({});
|