@lssm/lib.observability 0.0.0-canary-20251217062943 → 0.0.0-canary-20251217072406

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # @lssm/lib.observability
2
2
 
3
- ## 0.0.0-canary-20251217062943
3
+ ## 0.0.0-canary-20251217072406
4
4
 
5
5
  ### Minor Changes
6
6
 
@@ -9,7 +9,7 @@
9
9
  ### Patch Changes
10
10
 
11
11
  - Updated dependencies [66a5dfd]
12
- - @lssm/lib.lifecycle@0.0.0-canary-20251217062943
12
+ - @lssm/lib.lifecycle@0.0.0-canary-20251217072406
13
13
 
14
14
  ## 0.5.0
15
15
 
@@ -1 +1,22 @@
1
- var e=class{cooldownMs;lastAlert=new Map;constructor(e){this.options=e,this.cooldownMs=e.cooldownMs??6e4}async notify(e,t){let n=`${e.type}:${t.culprit?.id??`none`}`,r=Date.now();r-(this.lastAlert.get(n)??0)<this.cooldownMs||(await this.options.transport({signal:e,analysis:t}),this.lastAlert.set(n,r))}};export{e as AlertManager};
1
+ //#region src/anomaly/alert-manager.ts
2
+ var AlertManager = class {
3
+ cooldownMs;
4
+ lastAlert = /* @__PURE__ */ new Map();
5
+ constructor(options) {
6
+ this.options = options;
7
+ this.cooldownMs = options.cooldownMs ?? 6e4;
8
+ }
9
+ async notify(signal, analysis) {
10
+ const key = `${signal.type}:${analysis.culprit?.id ?? "none"}`;
11
+ const now = Date.now();
12
+ if (now - (this.lastAlert.get(key) ?? 0) < this.cooldownMs) return;
13
+ await this.options.transport({
14
+ signal,
15
+ analysis
16
+ });
17
+ this.lastAlert.set(key, now);
18
+ }
19
+ };
20
+
21
+ //#endregion
22
+ export { AlertManager };
@@ -1 +1,57 @@
1
- import{BaselineCalculator as e}from"./baseline-calculator.mjs";var t=class{baseline;thresholds={errorRateDelta:.5,latencyDelta:.35,throughputDrop:.4,minSamples:10};constructor(t={}){this.baseline=new e,this.thresholds={...this.thresholds,...t}}evaluate(e){let t=this.baseline.update(e);if(t.sampleCount<this.thresholds.minSamples)return[];let n=[],r=this.relativeDelta(e.errorRate,t.errorRate);r>this.thresholds.errorRateDelta&&n.push({type:`error_rate_spike`,delta:r,point:e,baseline:t});let i=this.relativeDelta(e.latencyP99,t.latencyP99);i>this.thresholds.latencyDelta&&n.push({type:`latency_regression`,delta:i,point:e,baseline:t});let a=this.relativeDrop(e.throughput,t.throughput);return a>this.thresholds.throughputDrop&&n.push({type:`throughput_drop`,delta:a,point:e,baseline:t}),n}relativeDelta(e,t){return t===0?0:(e-t)/t}relativeDrop(e,t){return t===0?0:(t-e)/t}};export{t as AnomalyDetector};
1
+ import { BaselineCalculator } from "./baseline-calculator.mjs";
2
+
3
+ //#region src/anomaly/anomaly-detector.ts
4
+ var AnomalyDetector = class {
5
+ baseline;
6
+ thresholds = {
7
+ errorRateDelta: .5,
8
+ latencyDelta: .35,
9
+ throughputDrop: .4,
10
+ minSamples: 10
11
+ };
12
+ constructor(options = {}) {
13
+ this.baseline = new BaselineCalculator();
14
+ this.thresholds = {
15
+ ...this.thresholds,
16
+ ...options
17
+ };
18
+ }
19
+ evaluate(point) {
20
+ const baselineSnapshot = this.baseline.update(point);
21
+ if (baselineSnapshot.sampleCount < this.thresholds.minSamples) return [];
22
+ const signals = [];
23
+ const errorDelta = this.relativeDelta(point.errorRate, baselineSnapshot.errorRate);
24
+ if (errorDelta > this.thresholds.errorRateDelta) signals.push({
25
+ type: "error_rate_spike",
26
+ delta: errorDelta,
27
+ point,
28
+ baseline: baselineSnapshot
29
+ });
30
+ const latencyDelta = this.relativeDelta(point.latencyP99, baselineSnapshot.latencyP99);
31
+ if (latencyDelta > this.thresholds.latencyDelta) signals.push({
32
+ type: "latency_regression",
33
+ delta: latencyDelta,
34
+ point,
35
+ baseline: baselineSnapshot
36
+ });
37
+ const throughputDelta = this.relativeDrop(point.throughput, baselineSnapshot.throughput);
38
+ if (throughputDelta > this.thresholds.throughputDrop) signals.push({
39
+ type: "throughput_drop",
40
+ delta: throughputDelta,
41
+ point,
42
+ baseline: baselineSnapshot
43
+ });
44
+ return signals;
45
+ }
46
+ relativeDelta(value, baseline) {
47
+ if (baseline === 0) return 0;
48
+ return (value - baseline) / baseline;
49
+ }
50
+ relativeDrop(value, baseline) {
51
+ if (baseline === 0) return 0;
52
+ return (baseline - value) / baseline;
53
+ }
54
+ };
55
+
56
+ //#endregion
57
+ export { AnomalyDetector };
@@ -1 +1,36 @@
1
- var e=class{snapshot={latencyP99:0,latencyP95:0,errorRate:0,throughput:0,sampleCount:0};constructor(e=.2){this.alpha=e}update(e){let{sampleCount:t}=this.snapshot,n=t+1,r=t===0?1:this.alpha;return this.snapshot={latencyP99:this.mix(this.snapshot.latencyP99,e.latencyP99,r),latencyP95:this.mix(this.snapshot.latencyP95,e.latencyP95,r),errorRate:this.mix(this.snapshot.errorRate,e.errorRate,r),throughput:this.mix(this.snapshot.throughput,e.throughput,r),sampleCount:n},this.snapshot}getSnapshot(){return this.snapshot}mix(e,t,n){return this.snapshot.sampleCount===0?t:e*(1-n)+t*n}};export{e as BaselineCalculator};
1
+ //#region src/anomaly/baseline-calculator.ts
2
+ var BaselineCalculator = class {
3
+ snapshot = {
4
+ latencyP99: 0,
5
+ latencyP95: 0,
6
+ errorRate: 0,
7
+ throughput: 0,
8
+ sampleCount: 0
9
+ };
10
+ constructor(alpha = .2) {
11
+ this.alpha = alpha;
12
+ }
13
+ update(point) {
14
+ const { sampleCount } = this.snapshot;
15
+ const nextCount = sampleCount + 1;
16
+ const weight = sampleCount === 0 ? 1 : this.alpha;
17
+ this.snapshot = {
18
+ latencyP99: this.mix(this.snapshot.latencyP99, point.latencyP99, weight),
19
+ latencyP95: this.mix(this.snapshot.latencyP95, point.latencyP95, weight),
20
+ errorRate: this.mix(this.snapshot.errorRate, point.errorRate, weight),
21
+ throughput: this.mix(this.snapshot.throughput, point.throughput, weight),
22
+ sampleCount: nextCount
23
+ };
24
+ return this.snapshot;
25
+ }
26
+ getSnapshot() {
27
+ return this.snapshot;
28
+ }
29
+ mix(current, next, weight) {
30
+ if (this.snapshot.sampleCount === 0) return next;
31
+ return current * (1 - weight) + next * weight;
32
+ }
33
+ };
34
+
35
+ //#endregion
36
+ export { BaselineCalculator };
@@ -1 +1,26 @@
1
- var e=class{constructor(e=900*1e3){this.lookbackMs=e}analyze(e,t){let n=new Date(e.point.timestamp.getTime()-this.lookbackMs),r=t.filter(e=>e.deployedAt>=n).sort((e,t)=>t.deployedAt.getTime()-e.deployedAt.getTime()),i=[],a;return r.length>0?(a=r[0],i.push(`Closest deployment ${a.id} (${a.operation}) at ${a.deployedAt.toISOString()}`)):i.push(`No deployments found within lookback window.`),e.type===`latency_regression`&&i.push(`Verify recent schema changes and external dependency latency.`),e.type===`error_rate_spike`&&i.push(`Check SLO monitor for correlated incidents.`),{signal:e,culprit:a,notes:i}}};export{e as RootCauseAnalyzer};
1
+ //#region src/anomaly/root-cause-analyzer.ts
2
+ var RootCauseAnalyzer = class {
3
+ constructor(lookbackMs = 900 * 1e3) {
4
+ this.lookbackMs = lookbackMs;
5
+ }
6
+ analyze(signal, deployments) {
7
+ const windowStart = new Date(signal.point.timestamp.getTime() - this.lookbackMs);
8
+ const candidates = deployments.filter((deployment) => deployment.deployedAt >= windowStart).sort((a, b) => b.deployedAt.getTime() - a.deployedAt.getTime());
9
+ const notes = [];
10
+ let culprit;
11
+ if (candidates.length > 0) {
12
+ culprit = candidates[0];
13
+ notes.push(`Closest deployment ${culprit.id} (${culprit.operation}) at ${culprit.deployedAt.toISOString()}`);
14
+ } else notes.push("No deployments found within lookback window.");
15
+ if (signal.type === "latency_regression") notes.push("Verify recent schema changes and external dependency latency.");
16
+ if (signal.type === "error_rate_spike") notes.push("Check SLO monitor for correlated incidents.");
17
+ return {
18
+ signal,
19
+ culprit,
20
+ notes
21
+ };
22
+ }
23
+ };
24
+
25
+ //#endregion
26
+ export { RootCauseAnalyzer };
package/dist/index.mjs CHANGED
@@ -1 +1,14 @@
1
- import{getTracer as e,traceAsync as t,traceSync as n}from"./tracing/index.mjs";import{createCounter as r,createHistogram as i,createUpDownCounter as a,getMeter as o,standardMetrics as s}from"./metrics/index.mjs";import{Logger as c,logger as l}from"./logging/index.mjs";import{createTracingMiddleware as u}from"./tracing/middleware.mjs";import{IntentAggregator as d}from"./intent/aggregator.mjs";import{IntentDetector as f}from"./intent/detector.mjs";import{EvolutionPipeline as p}from"./pipeline/evolution-pipeline.mjs";import{LifecycleKpiPipeline as m}from"./pipeline/lifecycle-pipeline.mjs";import{BaselineCalculator as h}from"./anomaly/baseline-calculator.mjs";import{AnomalyDetector as g}from"./anomaly/anomaly-detector.mjs";import{RootCauseAnalyzer as _}from"./anomaly/root-cause-analyzer.mjs";import{AlertManager as v}from"./anomaly/alert-manager.mjs";export{v as AlertManager,g as AnomalyDetector,h as BaselineCalculator,p as EvolutionPipeline,d as IntentAggregator,f as IntentDetector,m as LifecycleKpiPipeline,c as Logger,_ as RootCauseAnalyzer,r as createCounter,i as createHistogram,u as createTracingMiddleware,a as createUpDownCounter,o as getMeter,e as getTracer,l as logger,s as standardMetrics,t as traceAsync,n as traceSync};
1
+ import { getTracer, traceAsync, traceSync } from "./tracing/index.mjs";
2
+ import { createCounter, createHistogram, createUpDownCounter, getMeter, standardMetrics } from "./metrics/index.mjs";
3
+ import { Logger, logger } from "./logging/index.mjs";
4
+ import { createTracingMiddleware } from "./tracing/middleware.mjs";
5
+ import { IntentAggregator } from "./intent/aggregator.mjs";
6
+ import { IntentDetector } from "./intent/detector.mjs";
7
+ import { EvolutionPipeline } from "./pipeline/evolution-pipeline.mjs";
8
+ import { LifecycleKpiPipeline } from "./pipeline/lifecycle-pipeline.mjs";
9
+ import { BaselineCalculator } from "./anomaly/baseline-calculator.mjs";
10
+ import { AnomalyDetector } from "./anomaly/anomaly-detector.mjs";
11
+ import { RootCauseAnalyzer } from "./anomaly/root-cause-analyzer.mjs";
12
+ import { AlertManager } from "./anomaly/alert-manager.mjs";
13
+
14
+ export { AlertManager, AnomalyDetector, BaselineCalculator, EvolutionPipeline, IntentAggregator, IntentDetector, LifecycleKpiPipeline, Logger, RootCauseAnalyzer, createCounter, createHistogram, createTracingMiddleware, createUpDownCounter, getMeter, getTracer, logger, standardMetrics, traceAsync, traceSync };
@@ -1 +1,95 @@
1
- var e=class{windowMs;sequenceSampleSize;samples=[];constructor(e={}){this.windowMs=e.windowMs??9e5,this.sequenceSampleSize=e.sequenceSampleSize??1e3}add(e){this.samples.push(e)}flush(e=new Date){let t=e.getTime()-this.windowMs,n=this.samples.filter(e=>e.timestamp.getTime()>=t);this.samples.length=0;let r=this.aggregateMetrics(n),i=this.buildSequences(n),a=n.map(e=>e.timestamp.getTime());return{metrics:r,sequences:i,sampleCount:n.length,windowStart:a.length?new Date(Math.min(...a)):void 0,windowEnd:a.length?new Date(Math.max(...a)):void 0}}aggregateMetrics(e){if(!e.length)return[];let n=new Map;for(let t of e){let e=`${t.operation.name}.v${t.operation.version}`,r=n.get(e)??[];r.push(t),n.set(e,r)}return[...n.values()].map(e=>{let n=e.map(e=>e.durationMs).sort((e,t)=>e-t),r=e.filter(e=>!e.success),i=e.length,a=r.reduce((e,t)=>(t.errorCode&&(e[t.errorCode]=(e[t.errorCode]??0)+1),e),{}),o=e.map(e=>e.timestamp.getTime());return{operation:e[0].operation,totalCalls:i,successRate:(i-r.length)/i,errorRate:r.length/i,averageLatencyMs:n.reduce((e,t)=>e+t,0)/i,p95LatencyMs:t(n,.95),p99LatencyMs:t(n,.99),maxLatencyMs:Math.max(...n),windowStart:new Date(Math.min(...o)),windowEnd:new Date(Math.max(...o)),topErrors:a}})}buildSequences(e){let t=new Map;for(let n of e.slice(-this.sequenceSampleSize)){if(!n.traceId)continue;let e=t.get(n.traceId)??[];e.push(n),t.set(n.traceId,e)}let n={};for(let[e,r]of t.entries()){let e=r.sort((e,t)=>e.timestamp.getTime()-t.timestamp.getTime()),t=e.map(e=>e.operation.name);if(t.length<2)continue;let i=`${t.join(`>`)}@${e[0]?.tenantId??`global`}`,a=n[i];a?a.count+=1:n[i]={steps:t,tenantId:e[0]?.tenantId,count:1}}return Object.values(n).sort((e,t)=>t.count-e.count)}};function t(e,t){return e.length?e.length===1?e[0]:e[Math.min(e.length-1,Math.floor(t*e.length))]:0}export{e as IntentAggregator};
1
+ //#region src/intent/aggregator.ts
2
+ const DEFAULT_WINDOW_MS = 900 * 1e3;
3
+ var IntentAggregator = class {
4
+ windowMs;
5
+ sequenceSampleSize;
6
+ samples = [];
7
+ constructor(options = {}) {
8
+ this.windowMs = options.windowMs ?? DEFAULT_WINDOW_MS;
9
+ this.sequenceSampleSize = options.sequenceSampleSize ?? 1e3;
10
+ }
11
+ add(sample) {
12
+ this.samples.push(sample);
13
+ }
14
+ flush(now = /* @__PURE__ */ new Date()) {
15
+ const minTimestamp = now.getTime() - this.windowMs;
16
+ const windowSamples = this.samples.filter((sample) => sample.timestamp.getTime() >= minTimestamp);
17
+ this.samples.length = 0;
18
+ const metrics = this.aggregateMetrics(windowSamples);
19
+ const sequences = this.buildSequences(windowSamples);
20
+ const timestamps = windowSamples.map((sample) => sample.timestamp.getTime());
21
+ return {
22
+ metrics,
23
+ sequences,
24
+ sampleCount: windowSamples.length,
25
+ windowStart: timestamps.length ? new Date(Math.min(...timestamps)) : void 0,
26
+ windowEnd: timestamps.length ? new Date(Math.max(...timestamps)) : void 0
27
+ };
28
+ }
29
+ aggregateMetrics(samples) {
30
+ if (!samples.length) return [];
31
+ const groups = /* @__PURE__ */ new Map();
32
+ for (const sample of samples) {
33
+ const key = `${sample.operation.name}.v${sample.operation.version}`;
34
+ const arr = groups.get(key) ?? [];
35
+ arr.push(sample);
36
+ groups.set(key, arr);
37
+ }
38
+ return [...groups.values()].map((group) => {
39
+ const durations = group.map((s) => s.durationMs).sort((a, b) => a - b);
40
+ const errors = group.filter((s) => !s.success);
41
+ const totalCalls = group.length;
42
+ const topErrors = errors.reduce((acc, sample) => {
43
+ if (!sample.errorCode) return acc;
44
+ acc[sample.errorCode] = (acc[sample.errorCode] ?? 0) + 1;
45
+ return acc;
46
+ }, {});
47
+ const timestamps = group.map((s) => s.timestamp.getTime());
48
+ return {
49
+ operation: group[0].operation,
50
+ totalCalls,
51
+ successRate: (totalCalls - errors.length) / totalCalls,
52
+ errorRate: errors.length / totalCalls,
53
+ averageLatencyMs: durations.reduce((sum, value) => sum + value, 0) / totalCalls,
54
+ p95LatencyMs: percentile(durations, .95),
55
+ p99LatencyMs: percentile(durations, .99),
56
+ maxLatencyMs: Math.max(...durations),
57
+ windowStart: new Date(Math.min(...timestamps)),
58
+ windowEnd: new Date(Math.max(...timestamps)),
59
+ topErrors
60
+ };
61
+ });
62
+ }
63
+ buildSequences(samples) {
64
+ const byTrace = /* @__PURE__ */ new Map();
65
+ for (const sample of samples.slice(-this.sequenceSampleSize)) {
66
+ if (!sample.traceId) continue;
67
+ const arr = byTrace.get(sample.traceId) ?? [];
68
+ arr.push(sample);
69
+ byTrace.set(sample.traceId, arr);
70
+ }
71
+ const sequences = {};
72
+ for (const [traceId, events] of byTrace.entries()) {
73
+ const ordered = events.sort((a, b) => a.timestamp.getTime() - b.timestamp.getTime());
74
+ const steps = ordered.map((event) => event.operation.name);
75
+ if (steps.length < 2) continue;
76
+ const key = `${steps.join(">")}@${ordered[0]?.tenantId ?? "global"}`;
77
+ const existing = sequences[key];
78
+ if (existing) existing.count += 1;
79
+ else sequences[key] = {
80
+ steps,
81
+ tenantId: ordered[0]?.tenantId,
82
+ count: 1
83
+ };
84
+ }
85
+ return Object.values(sequences).sort((a, b) => b.count - a.count);
86
+ }
87
+ };
88
+ function percentile(values, ratio) {
89
+ if (!values.length) return 0;
90
+ if (values.length === 1) return values[0];
91
+ return values[Math.min(values.length - 1, Math.floor(ratio * values.length))];
92
+ }
93
+
94
+ //#endregion
95
+ export { IntentAggregator };
@@ -1 +1,121 @@
1
- import{randomUUID as e}from"node:crypto";const t={errorRateThreshold:.05,latencyP99ThresholdMs:750,throughputDropThreshold:.3,minSequenceLength:3};var n=class{options;constructor(e={}){this.options={errorRateThreshold:e.errorRateThreshold??t.errorRateThreshold,latencyP99ThresholdMs:e.latencyP99ThresholdMs??t.latencyP99ThresholdMs,throughputDropThreshold:e.throughputDropThreshold??t.throughputDropThreshold,minSequenceLength:e.minSequenceLength??t.minSequenceLength}}detectFromMetrics(t,n){let r=[],i=new Map((n??[]).map(e=>[`${e.operation.name}.v${e.operation.version}`,e]));for(let n of t){if(n.errorRate>=this.options.errorRateThreshold){r.push({id:e(),type:`error-spike`,operation:n.operation,confidence:Math.min(1,n.errorRate/this.options.errorRateThreshold),description:`Error rate ${n.errorRate.toFixed(2)} exceeded threshold`,metadata:{errorRate:n.errorRate,topErrors:n.topErrors},evidence:[{type:`metric`,description:`error-rate`,data:{errorRate:n.errorRate,threshold:this.options.errorRateThreshold}}]});continue}if(n.p99LatencyMs>=this.options.latencyP99ThresholdMs){r.push({id:e(),type:`latency-regression`,operation:n.operation,confidence:Math.min(1,n.p99LatencyMs/this.options.latencyP99ThresholdMs),description:`P99 latency ${n.p99LatencyMs}ms exceeded threshold`,metadata:{p99LatencyMs:n.p99LatencyMs},evidence:[{type:`metric`,description:`p99-latency`,data:{p99LatencyMs:n.p99LatencyMs,threshold:this.options.latencyP99ThresholdMs}}]});continue}let t=i.get(`${n.operation.name}.v${n.operation.version}`);if(t){let i=(t.totalCalls-n.totalCalls)/Math.max(t.totalCalls,1);i>=this.options.throughputDropThreshold&&r.push({id:e(),type:`throughput-drop`,operation:n.operation,confidence:Math.min(1,i/this.options.throughputDropThreshold),description:`Throughput dropped ${(i*100).toFixed(1)}% vs baseline`,metadata:{baselineCalls:t.totalCalls,currentCalls:n.totalCalls},evidence:[{type:`metric`,description:`throughput-drop`,data:{baselineCalls:t.totalCalls,currentCalls:n.totalCalls}}]})}}return r}detectSequentialIntents(t){let n=[];for(let r of t){if(r.steps.length<this.options.minSequenceLength)continue;let t=r.steps.join(` → `);n.push({id:e(),type:`missing-workflow-step`,confidence:.6,description:`Repeated workflow detected: ${t}`,metadata:{steps:r.steps,tenantId:r.tenantId,occurrences:r.count},evidence:[{type:`sequence`,description:`sequential-calls`,data:{steps:r.steps,count:r.count}}]})}return n}};export{n as IntentDetector};
1
+ import { randomUUID } from "node:crypto";
2
+
3
+ //#region src/intent/detector.ts
4
+ const DEFAULTS = {
5
+ errorRateThreshold: .05,
6
+ latencyP99ThresholdMs: 750,
7
+ throughputDropThreshold: .3,
8
+ minSequenceLength: 3
9
+ };
10
+ var IntentDetector = class {
11
+ options;
12
+ constructor(options = {}) {
13
+ this.options = {
14
+ errorRateThreshold: options.errorRateThreshold ?? DEFAULTS.errorRateThreshold,
15
+ latencyP99ThresholdMs: options.latencyP99ThresholdMs ?? DEFAULTS.latencyP99ThresholdMs,
16
+ throughputDropThreshold: options.throughputDropThreshold ?? DEFAULTS.throughputDropThreshold,
17
+ minSequenceLength: options.minSequenceLength ?? DEFAULTS.minSequenceLength
18
+ };
19
+ }
20
+ detectFromMetrics(current, previous) {
21
+ const signals = [];
22
+ const baseline = new Map((previous ?? []).map((metric) => [`${metric.operation.name}.v${metric.operation.version}`, metric]));
23
+ for (const metric of current) {
24
+ if (metric.errorRate >= this.options.errorRateThreshold) {
25
+ signals.push({
26
+ id: randomUUID(),
27
+ type: "error-spike",
28
+ operation: metric.operation,
29
+ confidence: Math.min(1, metric.errorRate / this.options.errorRateThreshold),
30
+ description: `Error rate ${metric.errorRate.toFixed(2)} exceeded threshold`,
31
+ metadata: {
32
+ errorRate: metric.errorRate,
33
+ topErrors: metric.topErrors
34
+ },
35
+ evidence: [{
36
+ type: "metric",
37
+ description: "error-rate",
38
+ data: {
39
+ errorRate: metric.errorRate,
40
+ threshold: this.options.errorRateThreshold
41
+ }
42
+ }]
43
+ });
44
+ continue;
45
+ }
46
+ if (metric.p99LatencyMs >= this.options.latencyP99ThresholdMs) {
47
+ signals.push({
48
+ id: randomUUID(),
49
+ type: "latency-regression",
50
+ operation: metric.operation,
51
+ confidence: Math.min(1, metric.p99LatencyMs / this.options.latencyP99ThresholdMs),
52
+ description: `P99 latency ${metric.p99LatencyMs}ms exceeded threshold`,
53
+ metadata: { p99LatencyMs: metric.p99LatencyMs },
54
+ evidence: [{
55
+ type: "metric",
56
+ description: "p99-latency",
57
+ data: {
58
+ p99LatencyMs: metric.p99LatencyMs,
59
+ threshold: this.options.latencyP99ThresholdMs
60
+ }
61
+ }]
62
+ });
63
+ continue;
64
+ }
65
+ const base = baseline.get(`${metric.operation.name}.v${metric.operation.version}`);
66
+ if (base) {
67
+ const drop = (base.totalCalls - metric.totalCalls) / Math.max(base.totalCalls, 1);
68
+ if (drop >= this.options.throughputDropThreshold) signals.push({
69
+ id: randomUUID(),
70
+ type: "throughput-drop",
71
+ operation: metric.operation,
72
+ confidence: Math.min(1, drop / this.options.throughputDropThreshold),
73
+ description: `Throughput dropped ${(drop * 100).toFixed(1)}% vs baseline`,
74
+ metadata: {
75
+ baselineCalls: base.totalCalls,
76
+ currentCalls: metric.totalCalls
77
+ },
78
+ evidence: [{
79
+ type: "metric",
80
+ description: "throughput-drop",
81
+ data: {
82
+ baselineCalls: base.totalCalls,
83
+ currentCalls: metric.totalCalls
84
+ }
85
+ }]
86
+ });
87
+ }
88
+ }
89
+ return signals;
90
+ }
91
+ detectSequentialIntents(sequences) {
92
+ const signals = [];
93
+ for (const sequence of sequences) {
94
+ if (sequence.steps.length < this.options.minSequenceLength) continue;
95
+ const description = sequence.steps.join(" → ");
96
+ signals.push({
97
+ id: randomUUID(),
98
+ type: "missing-workflow-step",
99
+ confidence: .6,
100
+ description: `Repeated workflow detected: ${description}`,
101
+ metadata: {
102
+ steps: sequence.steps,
103
+ tenantId: sequence.tenantId,
104
+ occurrences: sequence.count
105
+ },
106
+ evidence: [{
107
+ type: "sequence",
108
+ description: "sequential-calls",
109
+ data: {
110
+ steps: sequence.steps,
111
+ count: sequence.count
112
+ }
113
+ }]
114
+ });
115
+ }
116
+ return signals;
117
+ }
118
+ };
119
+
120
+ //#endregion
121
+ export { IntentDetector };
@@ -1 +1,4 @@
1
- import{e,n as t,t as n}from"./types/stages.mjs";import"./types/axes.mjs";import"./types/signals.mjs";import"./types/milestones.mjs";import{o as r,r as i}from"./utils/formatters.mjs";
1
+ import { LIFECYCLE_STAGE_META, LifecycleStage } from "./types/stages.mjs";
2
+ import "./types/signals.mjs";
3
+ import "./types/milestones.mjs";
4
+ import { getStageLabel } from "./utils/formatters.mjs";
@@ -1 +1 @@
1
- import"./stages.mjs";
1
+ import "./stages.mjs";
@@ -1 +1 @@
1
- import"./stages.mjs";
1
+ import "./stages.mjs";
@@ -1 +1,151 @@
1
- let e=function(e){return e[e.Exploration=0]=`Exploration`,e[e.ProblemSolutionFit=1]=`ProblemSolutionFit`,e[e.MvpEarlyTraction=2]=`MvpEarlyTraction`,e[e.ProductMarketFit=3]=`ProductMarketFit`,e[e.GrowthScaleUp=4]=`GrowthScaleUp`,e[e.ExpansionPlatform=5]=`ExpansionPlatform`,e[e.MaturityRenewal=6]=`MaturityRenewal`,e}({});const t=[e.Exploration,e.ProblemSolutionFit,e.MvpEarlyTraction,e.ProductMarketFit,e.GrowthScaleUp,e.ExpansionPlatform,e.MaturityRenewal],n={[e.Exploration]:{id:e.Exploration,order:0,slug:`exploration`,name:`Exploration / Ideation`,question:`Is there a problem worth my time?`,signals:[`20+ discovery interviews`,`Clear problem statement`,`Named ICP`],traps:[`Branding before discovery`,`Premature tooling decisions`],focusAreas:[`Customer discovery`,`Problem definition`,`Segment clarity`]},[e.ProblemSolutionFit]:{id:e.ProblemSolutionFit,order:1,slug:`problem-solution-fit`,name:`Problem–Solution Fit`,question:`Do people care enough about this solution?`,signals:[`Prototype reuse`,`Referral energy`,`Pre-pay interest`],traps:[`“Market is huge” without users`,`Skipping qualitative loops`],focusAreas:[`Solution hypothesis`,`Value messaging`,`Feedback capture`]},[e.MvpEarlyTraction]:{id:e.MvpEarlyTraction,order:2,slug:`mvp-early-traction`,name:`MVP & Early Traction`,question:`Can we get real usage and learn fast?`,signals:[`20–50 named active users`,`Weekly releases`,`Noisy feedback`],traps:[`Overbuilt infra for 10 users`,`Undefined retention metric`],focusAreas:[`Activation`,`Cohort tracking`,`Feedback rituals`]},[e.ProductMarketFit]:{id:e.ProductMarketFit,order:3,slug:`product-market-fit`,name:`Product–Market Fit`,question:`Is this pulling us forward?`,signals:[`Retention without heroics`,`Organic word-of-mouth`,`Value stories`],traps:[`Hero growth that does not scale`,`Ignoring churn signals`],focusAreas:[`Retention`,`Reliability`,`ICP clarity`]},[e.GrowthScaleUp]:{id:e.GrowthScaleUp,order:4,slug:`growth-scale-up`,name:`Growth / Scale-up`,question:`Can we grow this repeatably?`,signals:[`Predictable channels`,`Specialized hires`,`Unit economics on track`],traps:[`Paid spend masking retention gaps`,`Infra debt blocking launches`],focusAreas:[`Ops systems`,`Growth loops`,`Reliability engineering`]},[e.ExpansionPlatform]:{id:e.ExpansionPlatform,order:5,slug:`expansion-platform`,name:`Expansion / Platform`,question:`What is the next growth curve?`,signals:[`Stable core metrics`,`Partner/API demand`,`Ecosystem pull`],traps:[`Platform theater before wedge is solid`],focusAreas:[`Partnerships`,`APIs`,`New market validation`]},[e.MaturityRenewal]:{id:e.MaturityRenewal,order:6,slug:`maturity-renewal`,name:`Maturity / Renewal`,question:`Optimize, reinvent, or sunset?`,signals:[`Margin focus`,`Portfolio bets`,`Narrative refresh`],traps:[`Assuming past success is enough`],focusAreas:[`Cost optimization`,`Reinvention bets`,`Sunset planning`]}};export{e,n,t};
1
+ //#region ../lifecycle/dist/types/stages.js
2
+ let LifecycleStage = /* @__PURE__ */ function(LifecycleStage$1) {
3
+ LifecycleStage$1[LifecycleStage$1["Exploration"] = 0] = "Exploration";
4
+ LifecycleStage$1[LifecycleStage$1["ProblemSolutionFit"] = 1] = "ProblemSolutionFit";
5
+ LifecycleStage$1[LifecycleStage$1["MvpEarlyTraction"] = 2] = "MvpEarlyTraction";
6
+ LifecycleStage$1[LifecycleStage$1["ProductMarketFit"] = 3] = "ProductMarketFit";
7
+ LifecycleStage$1[LifecycleStage$1["GrowthScaleUp"] = 4] = "GrowthScaleUp";
8
+ LifecycleStage$1[LifecycleStage$1["ExpansionPlatform"] = 5] = "ExpansionPlatform";
9
+ LifecycleStage$1[LifecycleStage$1["MaturityRenewal"] = 6] = "MaturityRenewal";
10
+ return LifecycleStage$1;
11
+ }({});
12
+ const LIFECYCLE_STAGE_ORDER = [
13
+ LifecycleStage.Exploration,
14
+ LifecycleStage.ProblemSolutionFit,
15
+ LifecycleStage.MvpEarlyTraction,
16
+ LifecycleStage.ProductMarketFit,
17
+ LifecycleStage.GrowthScaleUp,
18
+ LifecycleStage.ExpansionPlatform,
19
+ LifecycleStage.MaturityRenewal
20
+ ];
21
+ const LIFECYCLE_STAGE_META = {
22
+ [LifecycleStage.Exploration]: {
23
+ id: LifecycleStage.Exploration,
24
+ order: 0,
25
+ slug: "exploration",
26
+ name: "Exploration / Ideation",
27
+ question: "Is there a problem worth my time?",
28
+ signals: [
29
+ "20+ discovery interviews",
30
+ "Clear problem statement",
31
+ "Named ICP"
32
+ ],
33
+ traps: ["Branding before discovery", "Premature tooling decisions"],
34
+ focusAreas: [
35
+ "Customer discovery",
36
+ "Problem definition",
37
+ "Segment clarity"
38
+ ]
39
+ },
40
+ [LifecycleStage.ProblemSolutionFit]: {
41
+ id: LifecycleStage.ProblemSolutionFit,
42
+ order: 1,
43
+ slug: "problem-solution-fit",
44
+ name: "Problem–Solution Fit",
45
+ question: "Do people care enough about this solution?",
46
+ signals: [
47
+ "Prototype reuse",
48
+ "Referral energy",
49
+ "Pre-pay interest"
50
+ ],
51
+ traps: ["“Market is huge” without users", "Skipping qualitative loops"],
52
+ focusAreas: [
53
+ "Solution hypothesis",
54
+ "Value messaging",
55
+ "Feedback capture"
56
+ ]
57
+ },
58
+ [LifecycleStage.MvpEarlyTraction]: {
59
+ id: LifecycleStage.MvpEarlyTraction,
60
+ order: 2,
61
+ slug: "mvp-early-traction",
62
+ name: "MVP & Early Traction",
63
+ question: "Can we get real usage and learn fast?",
64
+ signals: [
65
+ "20–50 named active users",
66
+ "Weekly releases",
67
+ "Noisy feedback"
68
+ ],
69
+ traps: ["Overbuilt infra for 10 users", "Undefined retention metric"],
70
+ focusAreas: [
71
+ "Activation",
72
+ "Cohort tracking",
73
+ "Feedback rituals"
74
+ ]
75
+ },
76
+ [LifecycleStage.ProductMarketFit]: {
77
+ id: LifecycleStage.ProductMarketFit,
78
+ order: 3,
79
+ slug: "product-market-fit",
80
+ name: "Product–Market Fit",
81
+ question: "Is this pulling us forward?",
82
+ signals: [
83
+ "Retention without heroics",
84
+ "Organic word-of-mouth",
85
+ "Value stories"
86
+ ],
87
+ traps: ["Hero growth that does not scale", "Ignoring churn signals"],
88
+ focusAreas: [
89
+ "Retention",
90
+ "Reliability",
91
+ "ICP clarity"
92
+ ]
93
+ },
94
+ [LifecycleStage.GrowthScaleUp]: {
95
+ id: LifecycleStage.GrowthScaleUp,
96
+ order: 4,
97
+ slug: "growth-scale-up",
98
+ name: "Growth / Scale-up",
99
+ question: "Can we grow this repeatably?",
100
+ signals: [
101
+ "Predictable channels",
102
+ "Specialized hires",
103
+ "Unit economics on track"
104
+ ],
105
+ traps: ["Paid spend masking retention gaps", "Infra debt blocking launches"],
106
+ focusAreas: [
107
+ "Ops systems",
108
+ "Growth loops",
109
+ "Reliability engineering"
110
+ ]
111
+ },
112
+ [LifecycleStage.ExpansionPlatform]: {
113
+ id: LifecycleStage.ExpansionPlatform,
114
+ order: 5,
115
+ slug: "expansion-platform",
116
+ name: "Expansion / Platform",
117
+ question: "What is the next growth curve?",
118
+ signals: [
119
+ "Stable core metrics",
120
+ "Partner/API demand",
121
+ "Ecosystem pull"
122
+ ],
123
+ traps: ["Platform theater before wedge is solid"],
124
+ focusAreas: [
125
+ "Partnerships",
126
+ "APIs",
127
+ "New market validation"
128
+ ]
129
+ },
130
+ [LifecycleStage.MaturityRenewal]: {
131
+ id: LifecycleStage.MaturityRenewal,
132
+ order: 6,
133
+ slug: "maturity-renewal",
134
+ name: "Maturity / Renewal",
135
+ question: "Optimize, reinvent, or sunset?",
136
+ signals: [
137
+ "Margin focus",
138
+ "Portfolio bets",
139
+ "Narrative refresh"
140
+ ],
141
+ traps: ["Assuming past success is enough"],
142
+ focusAreas: [
143
+ "Cost optimization",
144
+ "Reinvention bets",
145
+ "Sunset planning"
146
+ ]
147
+ }
148
+ };
149
+
150
+ //#endregion
151
+ export { LIFECYCLE_STAGE_META, LifecycleStage };
@@ -1 +1,7 @@
1
- import{n as e,t}from"../types/stages.mjs";const n=e=>[`Product: ${e.product}`,`Company: ${e.company}`,`Capital: ${e.capital}`],r=t=>e[t].name;export{r as o,n as r};
1
+ import { LIFECYCLE_STAGE_META } from "../types/stages.mjs";
2
+
3
+ //#region ../lifecycle/dist/utils/formatters.js
4
+ const getStageLabel = (stage) => LIFECYCLE_STAGE_META[stage].name;
5
+
6
+ //#endregion
7
+ export { getStageLabel };
@@ -1 +1,39 @@
1
- import{context as e,trace as t}from"@opentelemetry/api";var n=class{constructor(e){this.serviceName=e}log(n,r,i={}){let a=t.getSpan(e.active()),o=a?.spanContext().traceId,s=a?.spanContext().spanId,c={timestamp:new Date().toISOString(),service:this.serviceName,level:n,message:r,traceId:o,spanId:s,...i};console.log(JSON.stringify(c))}debug(e,t){this.log(`debug`,e,t)}info(e,t){this.log(`info`,e,t)}warn(e,t){this.log(`warn`,e,t)}error(e,t){this.log(`error`,e,t)}};const r=new n(process.env.OTEL_SERVICE_NAME||`unknown-service`);export{n as Logger,r as logger};
1
+ import { context, trace } from "@opentelemetry/api";
2
+
3
+ //#region src/logging/index.ts
4
+ var Logger = class {
5
+ constructor(serviceName) {
6
+ this.serviceName = serviceName;
7
+ }
8
+ log(level, message, meta = {}) {
9
+ const span = trace.getSpan(context.active());
10
+ const traceId = span?.spanContext().traceId;
11
+ const spanId = span?.spanContext().spanId;
12
+ const entry = {
13
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
14
+ service: this.serviceName,
15
+ level,
16
+ message,
17
+ traceId,
18
+ spanId,
19
+ ...meta
20
+ };
21
+ console.log(JSON.stringify(entry));
22
+ }
23
+ debug(message, meta) {
24
+ this.log("debug", message, meta);
25
+ }
26
+ info(message, meta) {
27
+ this.log("info", message, meta);
28
+ }
29
+ warn(message, meta) {
30
+ this.log("warn", message, meta);
31
+ }
32
+ error(message, meta) {
33
+ this.log("error", message, meta);
34
+ }
35
+ };
36
+ const logger = new Logger(process.env.OTEL_SERVICE_NAME || "unknown-service");
37
+
38
+ //#endregion
39
+ export { Logger, logger };
@@ -1 +1,25 @@
1
- import{metrics as e}from"@opentelemetry/api";function t(t=`@lssm/lib.observability`){return e.getMeter(t)}function n(e,n,r){return t(r).createCounter(e,{description:n})}function r(e,n,r){return t(r).createUpDownCounter(e,{description:n})}function i(e,n,r){return t(r).createHistogram(e,{description:n})}const a={httpRequests:n(`http_requests_total`,`Total HTTP requests`),httpDuration:i(`http_request_duration_seconds`,`HTTP request duration`),operationErrors:n(`operation_errors_total`,`Total operation errors`),workflowDuration:i(`workflow_duration_seconds`,`Workflow execution duration`)};export{n as createCounter,i as createHistogram,r as createUpDownCounter,t as getMeter,a as standardMetrics};
1
+ import { metrics } from "@opentelemetry/api";
2
+
3
+ //#region src/metrics/index.ts
4
+ const DEFAULT_METER_NAME = "@lssm/lib.observability";
5
+ function getMeter(name = DEFAULT_METER_NAME) {
6
+ return metrics.getMeter(name);
7
+ }
8
+ function createCounter(name, description, meterName) {
9
+ return getMeter(meterName).createCounter(name, { description });
10
+ }
11
+ function createUpDownCounter(name, description, meterName) {
12
+ return getMeter(meterName).createUpDownCounter(name, { description });
13
+ }
14
+ function createHistogram(name, description, meterName) {
15
+ return getMeter(meterName).createHistogram(name, { description });
16
+ }
17
+ const standardMetrics = {
18
+ httpRequests: createCounter("http_requests_total", "Total HTTP requests"),
19
+ httpDuration: createHistogram("http_request_duration_seconds", "HTTP request duration"),
20
+ operationErrors: createCounter("operation_errors_total", "Total operation errors"),
21
+ workflowDuration: createHistogram("workflow_duration_seconds", "Workflow execution duration")
22
+ };
23
+
24
+ //#endregion
25
+ export { createCounter, createHistogram, createUpDownCounter, getMeter, standardMetrics };
@@ -1 +1,65 @@
1
- import{IntentAggregator as e}from"../intent/aggregator.mjs";import{IntentDetector as t}from"../intent/detector.mjs";import{EventEmitter as n}from"node:events";var r=class{detector;aggregator;emitter;onIntent;onSnapshot;timer;previousMetrics;constructor(r={}){this.detector=r.detector??new t,this.aggregator=r.aggregator??new e,this.emitter=r.emitter??new n,this.onIntent=r.onIntent,this.onSnapshot=r.onSnapshot}ingest(e){this.aggregator.add(e)}on(e){this.emitter.on(`event`,e)}start(e=300*1e3){this.stop(),this.timer=setInterval(()=>{this.run()},e)}stop(){this.timer&&=(clearInterval(this.timer),void 0)}async run(){let e=this.aggregator.flush();if(this.emit({type:`telemetry.window`,payload:{sampleCount:e.sampleCount}}),this.onSnapshot&&await this.onSnapshot(e),!e.sampleCount)return;let t=this.detector.detectFromMetrics(e.metrics,this.previousMetrics),n=this.detector.detectSequentialIntents(e.sequences);this.previousMetrics=e.metrics;let r=[...t,...n];for(let e of r)this.onIntent&&await this.onIntent(e),this.emit({type:`intent.detected`,payload:e})}emit(e){this.emitter.emit(`event`,e)}};export{r as EvolutionPipeline};
1
+ import { IntentAggregator } from "../intent/aggregator.mjs";
2
+ import { IntentDetector } from "../intent/detector.mjs";
3
+ import { EventEmitter } from "node:events";
4
+
5
+ //#region src/pipeline/evolution-pipeline.ts
6
+ var EvolutionPipeline = class {
7
+ detector;
8
+ aggregator;
9
+ emitter;
10
+ onIntent;
11
+ onSnapshot;
12
+ timer;
13
+ previousMetrics;
14
+ constructor(options = {}) {
15
+ this.detector = options.detector ?? new IntentDetector();
16
+ this.aggregator = options.aggregator ?? new IntentAggregator();
17
+ this.emitter = options.emitter ?? new EventEmitter();
18
+ this.onIntent = options.onIntent;
19
+ this.onSnapshot = options.onSnapshot;
20
+ }
21
+ ingest(sample) {
22
+ this.aggregator.add(sample);
23
+ }
24
+ on(listener) {
25
+ this.emitter.on("event", listener);
26
+ }
27
+ start(intervalMs = 300 * 1e3) {
28
+ this.stop();
29
+ this.timer = setInterval(() => {
30
+ this.run();
31
+ }, intervalMs);
32
+ }
33
+ stop() {
34
+ if (this.timer) {
35
+ clearInterval(this.timer);
36
+ this.timer = void 0;
37
+ }
38
+ }
39
+ async run() {
40
+ const snapshot = this.aggregator.flush();
41
+ this.emit({
42
+ type: "telemetry.window",
43
+ payload: { sampleCount: snapshot.sampleCount }
44
+ });
45
+ if (this.onSnapshot) await this.onSnapshot(snapshot);
46
+ if (!snapshot.sampleCount) return;
47
+ const metricSignals = this.detector.detectFromMetrics(snapshot.metrics, this.previousMetrics);
48
+ const sequenceSignals = this.detector.detectSequentialIntents(snapshot.sequences);
49
+ this.previousMetrics = snapshot.metrics;
50
+ const signals = [...metricSignals, ...sequenceSignals];
51
+ for (const signal of signals) {
52
+ if (this.onIntent) await this.onIntent(signal);
53
+ this.emit({
54
+ type: "intent.detected",
55
+ payload: signal
56
+ });
57
+ }
58
+ }
59
+ emit(event) {
60
+ this.emitter.emit("event", event);
61
+ }
62
+ };
63
+
64
+ //#endregion
65
+ export { EvolutionPipeline };
@@ -1 +1,73 @@
1
- import{createCounter as e,createHistogram as t,createUpDownCounter as n}from"../metrics/index.mjs";import{o as r}from"../lifecycle/dist/utils/formatters.mjs";import"../lifecycle/dist/index.mjs";import{EventEmitter as i}from"node:events";var a=class{assessmentCounter;confidenceHistogram;stageUpDownCounter;emitter;lowConfidenceThreshold;currentStageByTenant=new Map;constructor(r={}){let a=r.meterName??`@lssm/lib.lifecycle-kpi`;this.assessmentCounter=e(`lifecycle_assessments_total`,`Total lifecycle assessments`,a),this.confidenceHistogram=t(`lifecycle_assessment_confidence`,`Lifecycle assessment confidence distribution`,a),this.stageUpDownCounter=n(`lifecycle_stage_tenants`,`Current tenants per lifecycle stage`,a),this.emitter=r.emitter??new i,this.lowConfidenceThreshold=r.lowConfidenceThreshold??.4}recordAssessment(e,t){let n={stage:r(e.stage),tenantId:t};this.assessmentCounter.add(1,n),this.confidenceHistogram.record(e.confidence,n),this.ensureStageCounters(e.stage,t),this.emitter.emit(`event`,{type:`assessment.recorded`,payload:{tenantId:t,stage:e.stage}}),e.confidence<this.lowConfidenceThreshold&&this.emitter.emit(`event`,{type:`confidence.low`,payload:{tenantId:t,confidence:e.confidence}})}on(e){this.emitter.on(`event`,e)}ensureStageCounters(e,t){if(!t)return;let n=this.currentStageByTenant.get(t);n!==e&&(n!==void 0&&this.stageUpDownCounter.add(-1,{stage:r(n),tenantId:t}),this.stageUpDownCounter.add(1,{stage:r(e),tenantId:t}),this.currentStageByTenant.set(t,e),this.emitter.emit(`event`,{type:`stage.changed`,payload:{tenantId:t,previousStage:n,nextStage:e}}))}};export{a as LifecycleKpiPipeline};
1
+ import { createCounter, createHistogram, createUpDownCounter } from "../metrics/index.mjs";
2
+ import { getStageLabel } from "../lifecycle/dist/utils/formatters.mjs";
3
+ import "../lifecycle/dist/index.mjs";
4
+ import { EventEmitter } from "node:events";
5
+
6
+ //#region src/pipeline/lifecycle-pipeline.ts
7
+ var LifecycleKpiPipeline = class {
8
+ assessmentCounter;
9
+ confidenceHistogram;
10
+ stageUpDownCounter;
11
+ emitter;
12
+ lowConfidenceThreshold;
13
+ currentStageByTenant = /* @__PURE__ */ new Map();
14
+ constructor(options = {}) {
15
+ const meterName = options.meterName ?? "@lssm/lib.lifecycle-kpi";
16
+ this.assessmentCounter = createCounter("lifecycle_assessments_total", "Total lifecycle assessments", meterName);
17
+ this.confidenceHistogram = createHistogram("lifecycle_assessment_confidence", "Lifecycle assessment confidence distribution", meterName);
18
+ this.stageUpDownCounter = createUpDownCounter("lifecycle_stage_tenants", "Current tenants per lifecycle stage", meterName);
19
+ this.emitter = options.emitter ?? new EventEmitter();
20
+ this.lowConfidenceThreshold = options.lowConfidenceThreshold ?? .4;
21
+ }
22
+ recordAssessment(assessment, tenantId) {
23
+ const attributes = {
24
+ stage: getStageLabel(assessment.stage),
25
+ tenantId
26
+ };
27
+ this.assessmentCounter.add(1, attributes);
28
+ this.confidenceHistogram.record(assessment.confidence, attributes);
29
+ this.ensureStageCounters(assessment.stage, tenantId);
30
+ this.emitter.emit("event", {
31
+ type: "assessment.recorded",
32
+ payload: {
33
+ tenantId,
34
+ stage: assessment.stage
35
+ }
36
+ });
37
+ if (assessment.confidence < this.lowConfidenceThreshold) this.emitter.emit("event", {
38
+ type: "confidence.low",
39
+ payload: {
40
+ tenantId,
41
+ confidence: assessment.confidence
42
+ }
43
+ });
44
+ }
45
+ on(listener) {
46
+ this.emitter.on("event", listener);
47
+ }
48
+ ensureStageCounters(stage, tenantId) {
49
+ if (!tenantId) return;
50
+ const previous = this.currentStageByTenant.get(tenantId);
51
+ if (previous === stage) return;
52
+ if (previous !== void 0) this.stageUpDownCounter.add(-1, {
53
+ stage: getStageLabel(previous),
54
+ tenantId
55
+ });
56
+ this.stageUpDownCounter.add(1, {
57
+ stage: getStageLabel(stage),
58
+ tenantId
59
+ });
60
+ this.currentStageByTenant.set(tenantId, stage);
61
+ this.emitter.emit("event", {
62
+ type: "stage.changed",
63
+ payload: {
64
+ tenantId,
65
+ previousStage: previous,
66
+ nextStage: stage
67
+ }
68
+ });
69
+ }
70
+ };
71
+
72
+ //#endregion
73
+ export { LifecycleKpiPipeline };
@@ -1 +1,46 @@
1
- import{SpanStatusCode as e,trace as t}from"@opentelemetry/api";function n(e=`@lssm/lib.observability`){return t.getTracer(e)}async function r(t,r,i){return n(i).startActiveSpan(t,async t=>{try{let n=await r(t);return t.setStatus({code:e.OK}),n}catch(n){throw t.recordException(n),t.setStatus({code:e.ERROR,message:n instanceof Error?n.message:String(n)}),n}finally{t.end()}})}function i(t,r,i){return n(i).startActiveSpan(t,t=>{try{let n=r(t);return t.setStatus({code:e.OK}),n}catch(n){throw t.recordException(n),t.setStatus({code:e.ERROR,message:n instanceof Error?n.message:String(n)}),n}finally{t.end()}})}export{n as getTracer,r as traceAsync,i as traceSync};
1
+ import { SpanStatusCode, trace } from "@opentelemetry/api";
2
+
3
+ //#region src/tracing/index.ts
4
+ const DEFAULT_TRACER_NAME = "@lssm/lib.observability";
5
+ function getTracer(name = DEFAULT_TRACER_NAME) {
6
+ return trace.getTracer(name);
7
+ }
8
+ async function traceAsync(name, fn, tracerName) {
9
+ return getTracer(tracerName).startActiveSpan(name, async (span) => {
10
+ try {
11
+ const result = await fn(span);
12
+ span.setStatus({ code: SpanStatusCode.OK });
13
+ return result;
14
+ } catch (error) {
15
+ span.recordException(error);
16
+ span.setStatus({
17
+ code: SpanStatusCode.ERROR,
18
+ message: error instanceof Error ? error.message : String(error)
19
+ });
20
+ throw error;
21
+ } finally {
22
+ span.end();
23
+ }
24
+ });
25
+ }
26
+ function traceSync(name, fn, tracerName) {
27
+ return getTracer(tracerName).startActiveSpan(name, (span) => {
28
+ try {
29
+ const result = fn(span);
30
+ span.setStatus({ code: SpanStatusCode.OK });
31
+ return result;
32
+ } catch (error) {
33
+ span.recordException(error);
34
+ span.setStatus({
35
+ code: SpanStatusCode.ERROR,
36
+ message: error instanceof Error ? error.message : String(error)
37
+ });
38
+ throw error;
39
+ } finally {
40
+ span.end();
41
+ }
42
+ });
43
+ }
44
+
45
+ //#endregion
46
+ export { getTracer, traceAsync, traceSync };
@@ -1 +1,79 @@
1
- import{traceAsync as e}from"./index.mjs";import{standardMetrics as t}from"../metrics/index.mjs";function n(n={}){return async(i,a)=>{let o=i.method,s=new URL(i.url).pathname;t.httpRequests.add(1,{method:o,path:s});let c=performance.now();return e(`HTTP ${o} ${s}`,async e=>{e.setAttribute(`http.method`,o),e.setAttribute(`http.url`,i.url);try{let l=await a();e.setAttribute(`http.status_code`,l.status);let u=(performance.now()-c)/1e3;return t.httpDuration.record(u,{method:o,path:s,status:l.status.toString()}),r({req:i,res:l,span:e,success:!0,durationMs:u*1e3,options:n}),l}catch(a){throw t.operationErrors.add(1,{method:o,path:s}),r({req:i,span:e,success:!1,durationMs:performance.now()-c,error:a,options:n}),a}})}}function r({req:e,res:t,span:n,success:r,durationMs:i,error:a,options:o}){if(!o.onSample||!o.resolveOperation)return;let s=o.resolveOperation({req:e,res:t});if(!s)return;let c={operation:s,durationMs:i,success:r,timestamp:new Date,errorCode:!r&&a instanceof Error?a.name:r?void 0:`unknown`,tenantId:o.tenantResolver?.(e),actorId:o.actorResolver?.(e),traceId:n.spanContext().traceId,metadata:{method:e.method,path:new URL(e.url).pathname,status:t?.status}};o.onSample(c)}export{n as createTracingMiddleware};
1
+ import { traceAsync } from "./index.mjs";
2
+ import { standardMetrics } from "../metrics/index.mjs";
3
+
4
+ //#region src/tracing/middleware.ts
5
+ function createTracingMiddleware(options = {}) {
6
+ return async (req, next) => {
7
+ const method = req.method;
8
+ const path = new URL(req.url).pathname;
9
+ standardMetrics.httpRequests.add(1, {
10
+ method,
11
+ path
12
+ });
13
+ const startTime = performance.now();
14
+ return traceAsync(`HTTP ${method} ${path}`, async (span) => {
15
+ span.setAttribute("http.method", method);
16
+ span.setAttribute("http.url", req.url);
17
+ try {
18
+ const response = await next();
19
+ span.setAttribute("http.status_code", response.status);
20
+ const duration = (performance.now() - startTime) / 1e3;
21
+ standardMetrics.httpDuration.record(duration, {
22
+ method,
23
+ path,
24
+ status: response.status.toString()
25
+ });
26
+ emitTelemetrySample({
27
+ req,
28
+ res: response,
29
+ span,
30
+ success: true,
31
+ durationMs: duration * 1e3,
32
+ options
33
+ });
34
+ return response;
35
+ } catch (error) {
36
+ standardMetrics.operationErrors.add(1, {
37
+ method,
38
+ path
39
+ });
40
+ emitTelemetrySample({
41
+ req,
42
+ span,
43
+ success: false,
44
+ durationMs: performance.now() - startTime,
45
+ error,
46
+ options
47
+ });
48
+ throw error;
49
+ }
50
+ });
51
+ };
52
+ }
53
+ function emitTelemetrySample({ req, res, span, success, durationMs, error, options }) {
54
+ if (!options.onSample || !options.resolveOperation) return;
55
+ const operation = options.resolveOperation({
56
+ req,
57
+ res
58
+ });
59
+ if (!operation) return;
60
+ const sample = {
61
+ operation,
62
+ durationMs,
63
+ success,
64
+ timestamp: /* @__PURE__ */ new Date(),
65
+ errorCode: !success && error instanceof Error ? error.name : success ? void 0 : "unknown",
66
+ tenantId: options.tenantResolver?.(req),
67
+ actorId: options.actorResolver?.(req),
68
+ traceId: span.spanContext().traceId,
69
+ metadata: {
70
+ method: req.method,
71
+ path: new URL(req.url).pathname,
72
+ status: res?.status
73
+ }
74
+ };
75
+ options.onSample(sample);
76
+ }
77
+
78
+ //#endregion
79
+ export { createTracingMiddleware };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lssm/lib.observability",
3
- "version": "0.0.0-canary-20251217062943",
3
+ "version": "0.0.0-canary-20251217072406",
4
4
  "main": "./dist/index.mjs",
5
5
  "types": "./dist/index.d.mts",
6
6
  "scripts": {
@@ -17,12 +17,14 @@
17
17
  "test": "bun run"
18
18
  },
19
19
  "dependencies": {
20
- "@lssm/lib.lifecycle": "0.0.0-canary-20251217062943"
20
+ "@lssm/lib.lifecycle": "0.0.0-canary-20251217072406"
21
21
  },
22
22
  "peerDependencies": {
23
23
  "@opentelemetry/api": "*"
24
24
  },
25
25
  "devDependencies": {
26
+ "@lssm/tool.tsdown": "0.0.0-canary-20251217072406",
27
+ "@lssm/tool.typescript": "0.0.0-canary-20251217072406",
26
28
  "typescript": "^5.0.0"
27
29
  },
28
30
  "exports": {
@@ -1 +0,0 @@
1
- (function(e){return e.Sketch=`Sketch`,e.Prototype=`Prototype`,e.Mvp=`MVP`,e.V1=`V1`,e.Ecosystem=`Ecosystem`,e})({}),function(e){return e.Solo=`Solo`,e.TinyTeam=`TinyTeam`,e.FunctionalOrg=`FunctionalOrg`,e.MultiTeam=`MultiTeam`,e.Bureaucratic=`Bureaucratic`,e}({}),function(e){return e.Bootstrapped=`Bootstrapped`,e.PreSeed=`PreSeed`,e.Seed=`Seed`,e.SeriesAorB=`SeriesAorB`,e.LateStage=`LateStage`,e}({});